diff --git a/.coveragerc b/.coveragerc
index 32a8da5..5577e49 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -3,3 +3,7 @@ omit =
   src/blib2to3/*
   tests/data/*
   */site-packages/*
+  .tox/*
+
+[run]
+relative_files = True
diff --git a/.flake8 b/.flake8
index 656c0df..ae11a13 100644
--- a/.flake8
+++ b/.flake8
@@ -1,11 +1,7 @@
 [flake8]
-extend-ignore = E203, E266, E501
+ignore = E203, E266, E501, W503
 # line length is intentionally set to 80 here because black uses Bugbear
-# See https://github.com/psf/black/blob/master/docs/the_black_code_style.md#line-length for more details
+# See https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#line-length for more details
 max-line-length = 80
 max-complexity = 18
 select = B,C,E,F,W,T4,B9
-# We need to configure the mypy.ini because the flake8-mypy's default
-# options don't properly override it, so if we don't specify it we get
-# half of the config from mypy.ini and half from flake8-mypy.
-mypy_config = mypy.ini
diff --git a/.gitignore b/.gitignore
index 224e7f0..249499b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,26 @@
-.pc/
+.venv
+.coverage
+.coverage.*
+_build
+.DS_Store
+.vscode
+docs/_static/pypi.svg
+.tox
+__pycache__
+
+# Packaging artifacts
+black.egg-info
+black.dist-info
+build/
+dist/
+pip-wheel-metadata/
+.eggs
+
+src/_black_version.py
+.idea
+
+.dmypy.json
+*.swp
+.hypothesis/
+venv/
+.ipynb_checkpoints/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e461aa3..0be8dc4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,31 +1,65 @@
 # Note: don't use this config for your own repositories. Instead, see
-# "Version control integration" in README.md.
+# "Version control integration" in docs/integrations/source_version_control.md
 exclude: ^(src/blib2to3/|profiling/|tests/data/)
 repos:
   - repo: local
     hooks:
-      - id: black
-        name: black
-        language: system
-        entry: black
-        minimum_pre_commit_version: 2.9.2
-        require_serial: true
-        types_or: [python, pyi]
+      - id: check-pre-commit-rev-in-example
+        name: Check pre-commit rev in example
+        language: python
+        entry: python -m scripts.check_pre_commit_rev_in_example
+        files: '(CHANGES\.md|source_version_control\.md)$'
+        additional_dependencies:
+          &version_check_dependencies [
+            commonmark==0.9.1,
+            pyyaml==5.4.1,
+            beautifulsoup4==4.9.3,
+          ]
 
-  - repo: https://gitlab.com/pycqa/flake8
-    rev: 3.9.0
+      - id: check-version-in-the-basics-example
+        name: Check black version in the basics example
+        language: python
+        entry: python -m scripts.check_version_in_basics_example
+        files: '(CHANGES\.md|the_basics\.md)$'
+        additional_dependencies: *version_check_dependencies
+
+  - repo: https://github.com/pycqa/isort
+    rev: 5.10.1
+    hooks:
+      - id: isort
+
+  - repo: https://github.com/pycqa/flake8
+    rev: 4.0.1
     hooks:
       - id: flake8
-        additional_dependencies: [flake8-bugbear]
+        additional_dependencies:
+          - flake8-bugbear
+          - flake8-comprehensions
+          - flake8-simplify
 
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.812
+    rev: v0.971
     hooks:
       - id: mypy
        exclude: ^docs/conf.py
+        additional_dependencies:
+          - types-dataclasses >= 0.1.3
+          - types-PyYAML
+          - tomli >= 0.2.6, < 2.0.0
+          - types-typed-ast >= 1.4.1
+          - click >= 8.1.0
+          - platformdirs >= 2.1.0
+          - pytest
+          - hypothesis
 
   - repo: https://github.com/pre-commit/mirrors-prettier
-    rev: v2.2.1
+    rev: v2.7.1
    hooks:
       - id: prettier
-        args: [--prose-wrap=always, --print-width=88]
+        exclude: \.github/workflows/diff_shades\.yml
+
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.3.0
+    hooks:
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
diff --git a/.pre-commit-hooks.yaml b/.pre-commit-hooks.yaml
index de2eb67..1379570 100644
--- a/.pre-commit-hooks.yaml
+++ b/.pre-commit-hooks.yaml
@@ -3,7 +3,16 @@
   description: "Black: The uncompromising Python code formatter"
   entry: black
   language: python
-  language_version: python3
   minimum_pre_commit_version: 2.9.2
   require_serial: true
   types_or: [python, pyi]
+- id: black-jupyter
+  name: black-jupyter
+  description:
+    "Black: The uncompromising Python code formatter (with Jupyter Notebook support)"
+  entry: black
+  language: python
+  minimum_pre_commit_version: 2.9.2
+  require_serial: true
+  types_or: [python, pyi, jupyter]
+  additional_dependencies: [".[jupyter]"]
diff --git a/.prettierrc.yaml b/.prettierrc.yaml
new file mode 100644
index 0000000..beda5ba
--- /dev/null
+++ b/.prettierrc.yaml
@@ -0,0 +1,3 @@
+proseWrap: always
+printWidth: 88
+endOfLine: auto
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 0000000..fff2d6e
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,18 @@
+version: 2
+
+formats:
+  - htmlzip
+
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.8"
+
+python:
+  install:
+    - requirements: docs/requirements.txt
+
+    - method: pip
+      path: .
+      extra_requirements:
+        - d
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index e782272..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-language: python
-cache:
-  pip: true
-  directories:
-    - $HOME/.cache/pre-commit
-env:
-  - TEST_CMD="tox -e py"
-install:
-  - pip install coverage coveralls pre-commit tox
-  - pip install -e '.[d]'
-script:
-  - $TEST_CMD
-after_success:
-  - coveralls
-notifications:
-  on_success: change
-  on_failure: always
-matrix:
-  include:
-    - name: "lint"
-      python: 3.7
-      env:
-        - TEST_CMD="pre-commit run --all-files --show-diff-on-failure"
-    - name: "3.6"
-      python: 3.6
-    - name: "3.7"
-      python: 3.7
-    - name: "3.8"
-      python: 3.8
-    - name: "3.9"
-      python: 3.9
diff --git a/AUTHORS.md b/AUTHORS.md
new file mode 100644
index 0000000..f2d599d
--- /dev/null
+++ b/AUTHORS.md
@@ -0,0 +1,194 @@
+# Authors
+
+Glued together by [Łukasz Langa](mailto:lukasz@langa.pl).
+
+Maintained with:
+
+- [Carol Willing](mailto:carolcode@willingconsulting.com)
+- [Carl Meyer](mailto:carl@oddbird.net)
+- [Jelle Zijlstra](mailto:jelle.zijlstra@gmail.com)
+- [Mika Naylor](mailto:mail@autophagy.io)
+- [Zsolt Dollenstein](mailto:zsol.zsol@gmail.com)
+- [Cooper Lees](mailto:me@cooperlees.com)
+- Richard Si
+- [Felix Hildén](mailto:felix.hilden@gmail.com)
+- [Batuhan Taskaya](mailto:batuhan@python.org)
+
+Multiple contributions by:
+
+- [Abdur-Rahmaan Janhangeer](mailto:arj.python@gmail.com)
+- [Adam Johnson](mailto:me@adamj.eu)
+- [Adam Williamson](mailto:adamw@happyassassin.net)
+- [Alexander Huynh](mailto:ahrex-gh-psf-black@e.sc)
+- [Alexandr Artemyev](mailto:mogost@gmail.com)
+- [Alex Vandiver](mailto:github@chmrr.net)
+- [Allan Simon](mailto:allan.simon@supinfo.com)
+- Anders-Petter Ljungquist
+- [Andrew Thorp](mailto:andrew.thorp.dev@gmail.com)
+- [Andrew Zhou](mailto:andrewfzhou@gmail.com)
+- [Andrey](mailto:dyuuus@yandex.ru)
+- [Andy Freeland](mailto:andy@andyfreeland.net)
+- [Anthony Sottile](mailto:asottile@umich.edu)
+- [Antonio Ossa Guerra](mailto:aaossa+black@uc.cl)
+- [Arjaan Buijk](mailto:arjaan.buijk@gmail.com)
+- [Arnav Borbornah](mailto:arnavborborah11@gmail.com)
+- [Artem Malyshev](mailto:proofit404@gmail.com)
+- [Asger Hautop Drewsen](mailto:asgerdrewsen@gmail.com)
+- [Augie Fackler](mailto:raf@durin42.com)
+- [Aviskar KC](mailto:aviskarkc10@gmail.com)
+- Batuhan Taşkaya
+- [Benjamin Wohlwend](mailto:bw@piquadrat.ch)
+- [Benjamin Woodruff](mailto:github@benjam.info)
+- [Bharat Raghunathan](mailto:bharatraghunthan9767@gmail.com)
+- [Brandt Bucher](mailto:brandtbucher@gmail.com)
+- [Brett Cannon](mailto:brett@python.org)
+- [Bryan Bugyi](mailto:bryan.bugyi@rutgers.edu)
+- [Bryan Forbes](mailto:bryan@reigndropsfall.net)
+- [Calum Lind](mailto:calumlind@gmail.com)
+- [Charles](mailto:peacech@gmail.com)
+- Charles Reid
+- [Christian Clauss](mailto:cclauss@bluewin.ch)
+- [Christian Heimes](mailto:christian@python.org)
+- [Chuck Wooters](mailto:chuck.wooters@microsoft.com)
+- [Chris Rose](mailto:offline@offby1.net)
+- Codey Oxley
+- [Cong](mailto:congusbongus@gmail.com)
+- [Cooper Ry Lees](mailto:me@cooperlees.com)
+- [Dan Davison](mailto:dandavison7@gmail.com)
+- [Daniel Hahler](mailto:github@thequod.de)
+- [Daniel M. Capella](mailto:polycitizen@gmail.com)
+- Daniele Esposti
+- [David Hotham](mailto:david.hotham@metaswitch.com)
+- [David Lukes](mailto:dafydd.lukes@gmail.com)
+- [David Szotten](mailto:davidszotten@gmail.com)
+- [Denis Laxalde](mailto:denis@laxalde.org)
+- [Douglas Thor](mailto:dthor@transphormusa.com)
+- dylanjblack
+- [Eli Treuherz](mailto:eli@treuherz.com)
+- [Emil Hessman](mailto:emil@hessman.se)
+- [Felix Kohlgrüber](mailto:felix.kohlgrueber@gmail.com)
+- [Florent Thiery](mailto:fthiery@gmail.com)
+- Francisco
+- [Giacomo Tagliabue](mailto:giacomo.tag@gmail.com)
+- [Greg Gandenberger](mailto:ggandenberger@shoprunner.com)
+- [Gregory P. Smith](mailto:greg@krypto.org)
+- Gustavo Camargo
+- hauntsaninja
+- [Hadi Alqattan](mailto:alqattanhadizaki@gmail.com)
+- [Hassan Abouelela](mailto:hassan@hassanamr.com)
+- [Heaford](mailto:dan@heaford.com)
+- [Hugo Barrera](mailto:hugo@barrera.io)
+- Hugo van Kemenade
+- [Hynek Schlawack](mailto:hs@ox.cx)
+- [Ionite](mailto:dev@ionite.io)
+- [Ivan Katanić](mailto:ivan.katanic@gmail.com)
+- [Jakub Kadlubiec](mailto:jakub.kadlubiec@skyscanner.net)
+- [Jakub Warczarek](mailto:jakub.warczarek@gmail.com)
+- [Jan Hnátek](mailto:jan.hnatek@gmail.com)
+- [Jason Fried](mailto:me@jasonfried.info)
+- [Jason Friedland](mailto:jason@friedland.id.au)
+- [jgirardet](mailto:ijkl@netc.fr)
+- Jim Brännlund
+- [Jimmy Jia](mailto:tesrin@gmail.com)
+- [Joe Antonakakis](mailto:jma353@cornell.edu)
+- [Jon Dufresne](mailto:jon.dufresne@gmail.com)
+- [Jonas Obrist](mailto:ojiidotch@gmail.com)
+- [Jonty Wareing](mailto:jonty@jonty.co.uk)
+- [Jose Nazario](mailto:jose.monkey.org@gmail.com)
+- [Joseph Larson](mailto:larson.joseph@gmail.com)
+- [Josh Bode](mailto:joshbode@fastmail.com)
+- [Josh Holland](mailto:anowlcalledjosh@gmail.com)
+- [Joshua Cannon](mailto:joshdcannon@gmail.com)
+- [José Padilla](mailto:jpadilla@webapplicate.com)
+- [Juan Luis Cano Rodríguez](mailto:hello@juanlu.space)
+- [kaiix](mailto:kvn.hou@gmail.com)
+- [Katie McLaughlin](mailto:katie@glasnt.com)
+- Katrin Leinweber
+- [Keith Smiley](mailto:keithbsmiley@gmail.com)
+- [Kenyon Ralph](mailto:kenyon@kenyonralph.com)
+- [Kevin Kirsche](mailto:Kev.Kirsche+GitHub@gmail.com)
+- [Kyle Hausmann](mailto:kyle.hausmann@gmail.com)
+- [Kyle Sunden](mailto:sunden@wisc.edu)
+- Lawrence Chan
+- [Linus Groh](mailto:mail@linusgroh.de)
+- [Loren Carvalho](mailto:comradeloren@gmail.com)
+- [Luka Sterbic](mailto:luka.sterbic@gmail.com)
+- [LukasDrude](mailto:mail@lukas-drude.de)
+- Mahmoud Hossam
+- Mariatta
+- [Matt VanEseltine](mailto:vaneseltine@gmail.com)
+- [Matthew Clapp](mailto:itsayellow+dev@gmail.com)
+- [Matthew Walster](mailto:matthew@walster.org)
+- Max Smolens
+- [Michael Aquilina](mailto:michaelaquilina@gmail.com)
+- [Michael Flaxman](mailto:michael.flaxman@gmail.com)
+- [Michael J. Sullivan](mailto:sully@msully.net)
+- [Michael McClimon](mailto:michael@mcclimon.org)
+- [Miguel Gaiowski](mailto:miggaiowski@gmail.com)
+- [Mike](mailto:roshi@fedoraproject.org)
+- [mikehoyio](mailto:mikehoy@gmail.com)
+- [Min ho Kim](mailto:minho42@gmail.com)
+- [Miroslav Shubernetskiy](mailto:miroslav@miki725.com)
+- MomIsBestFriend
+- [Nathan Goldbaum](mailto:ngoldbau@illinois.edu)
+- [Nathan Hunt](mailto:neighthan.hunt@gmail.com)
+- [Neraste](mailto:neraste.herr10@gmail.com)
+- [Nikolaus Waxweiler](mailto:madigens@gmail.com)
+- [Ofek Lev](mailto:ofekmeister@gmail.com)
+- [Osaetin Daniel](mailto:osaetindaniel@gmail.com)
+- [otstrel](mailto:otstrel@gmail.com)
+- [Pablo Galindo](mailto:Pablogsal@gmail.com)
+- [Paul Ganssle](mailto:p.ganssle@gmail.com)
+- [Paul Meinhardt](mailto:mnhrdt@gmail.com)
+- [Peter Bengtsson](mailto:mail@peterbe.com)
+- [Peter Grayson](mailto:pete@jpgrayson.net)
+- [Peter Stensmyr](mailto:peter.stensmyr@gmail.com)
+- pmacosta
+- [Quentin Pradet](mailto:quentin@pradet.me)
+- [Ralf Schmitt](mailto:ralf@systemexit.de)
+- [Ramón Valles](mailto:mroutis@protonmail.com)
+- [Richard Fearn](mailto:richardfearn@gmail.com)
+- [Rishikesh Jha](mailto:rishijha424@gmail.com)
+- [Rupert Bedford](mailto:rupert@rupertb.com)
+- Russell Davis
+- [Sagi Shadur](mailto:saroad2@gmail.com)
+- [Rémi Verschelde](mailto:rverschelde@gmail.com)
+- [Sami Salonen](mailto:sakki@iki.fi)
+- [Samuel Cormier-Iijima](mailto:samuel@cormier-iijima.com)
+- [Sanket Dasgupta](mailto:sanketdasgupta@gmail.com)
+- Sergi
+- [Scott Stevenson](mailto:scott@stevenson.io)
+- Shantanu
+- [shaoran](mailto:shaoran@sakuranohana.org)
+- [Shinya Fujino](mailto:shf0811@gmail.com)
+- springstan
+- [Stavros Korokithakis](mailto:hi@stavros.io)
+- [Stephen Rosen](mailto:sirosen@globus.org)
+- [Steven M. Vascellaro](mailto:S.Vascellaro@gmail.com)
+- [Sunil Kapil](mailto:snlkapil@gmail.com)
+- [Sébastien Eustace](mailto:sebastien.eustace@gmail.com)
+- [Tal Amuyal](mailto:TalAmuyal@gmail.com)
+- [Terrance](mailto:git@terrance.allofti.me)
+- [Thom Lu](mailto:thomas.c.lu@gmail.com)
+- [Thomas Grainger](mailto:tagrain@gmail.com)
+- [Tim Gates](mailto:tim.gates@iress.com)
+- [Tim Swast](mailto:swast@google.com)
+- [Timo](mailto:timo_tk@hotmail.com)
+- Toby Fleming
+- [Tom Christie](mailto:tom@tomchristie.com)
+- [Tony Narlock](mailto:tony@git-pull.com)
+- [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com)
+- [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu)
+- [Tzu-ping Chung](mailto:uranusjr@gmail.com)
+- [Utsav Shah](mailto:ukshah2@illinois.edu)
+- utsav-dbx
+- vezeli
+- [Ville Skyttä](mailto:ville.skytta@iki.fi)
+- [Vishwas B Sharma](mailto:sharma.vishwas88@gmail.com)
+- [Vlad Emelianov](mailto:volshebnyi@gmail.com)
+- [williamfzc](mailto:178894043@qq.com)
+- [wouter bolsterlee](mailto:wouter@bolsterl.ee)
+- Yazdan
+- [Yngve Høiseth](mailto:yngve@hoiseth.net)
+- [Yurii Karabas](mailto:1998uriyyo@gmail.com)
+- [Zac Hatfield-Dodds](mailto:zac@zhd.dev)
diff --git a/CHANGES.md b/CHANGES.md
index 6675c48..3a8cf4d 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,8 +1,570 @@
-## Change Log
+# Change Log
 
-### 21.4b2
+## Unreleased
 
-#### _Black_
+### Highlights
+
+
+
+### Stable style
+
+
+
+### Preview style
+
+
+
+### Configuration
+
+
+
+### Packaging
+
+
+
+### Parser
+
+
+
+### Performance
+
+
+
+### Output
+
+
+
+### _Blackd_
+
+
+
+### Integrations
+
+
+
+### Documentation
+
+
+
+## 22.10.0
+
+### Highlights
+
+- Runtime support for Python 3.6 has been removed. Formatting 3.6 code will still be
+  supported until further notice.
+
+### Stable style
+
+- Fix a crash when `# fmt: on` is used on a different block level than `# fmt: off`
+  (#3281)
+
+### Preview style
+
+- Fix a crash when formatting some dicts with parenthesis-wrapped long string keys
+  (#3262)
+
+### Configuration
+
+- `.ipynb_checkpoints` directories are now excluded by default (#3293)
+- Add `--skip-source-first-line` / `-x` option to ignore the first line of source code
+  while formatting (#3299)
+
+### Packaging
+
+- Executables made with PyInstaller will no longer crash when formatting several files
+  at once on macOS. Native x86-64 executables for macOS are available once again.
+  (#3275)
+- Hatchling is now used as the build backend. This will not have any effect for users
+  who install Black with its wheels from PyPI. (#3233)
+- Faster compiled wheels are now available for CPython 3.11 (#3276)
+
+### _Blackd_
+
+- Windows style (CRLF) newlines will be preserved (#3257).
+
+### Integrations
+
+- Vim plugin: add flag (`g:black_preview`) to enable/disable the preview style (#3246)
+- Update GitHub Action to support formatting of Jupyter Notebook files via a `jupyter`
+  option (#3282)
+- Update GitHub Action to support use of version specifiers (e.g. `<23`) for Black
+  version (#3265)
+
+## 22.8.0
+
+### Highlights
+
+- Python 3.11 is now supported, except for _blackd_ as aiohttp does not support 3.11 as
+  of publishing (#3234)
+- This is the last release that supports running _Black_ on Python 3.6 (formatting 3.6
+  code will continue to be supported until further notice)
+- Reword the stability policy to say that we may, in rare cases, make changes that
+  affect code that was not previously formatted by _Black_ (#3155)
+
+### Stable style
+
+- Fix an infinite loop when using `# fmt: on/off` in the middle of an expression or code
+  block (#3158)
+- Fix incorrect handling of `# fmt: skip` on colon (`:`) lines (#3148)
+- Comments are no longer deleted when a line had spaces removed around power operators
+  (#2874)
+
+### Preview style
+
+- Single-character closing docstring quotes are no longer moved to their own line as
+  this is invalid. This was a bug introduced in version 22.6.0. (#3166)
+- `--skip-string-normalization` / `-S` now prevents docstring prefixes from being
+  normalized as expected (#3168)
+- When using `--skip-magic-trailing-comma` or `-C`, trailing commas are stripped from
+  subscript expressions with more than 1 element (#3209)
+- Implicitly concatenated strings inside a list, set, or tuple are now wrapped inside
+  parentheses (#3162)
+- Fix a string merging/split issue when a comment is present in the middle of implicitly
+  concatenated strings on its own line (#3227)
+
+### _Blackd_
+
+- `blackd` now supports enabling the preview style via the `X-Preview` header (#3217)
+
+### Configuration
+
+- Black now uses the presence of debug f-strings to detect target version (#3215)
+- Fix misdetection of project root and verbose logging of sources in cases involving
+  `--stdin-filename` (#3216)
+- Immediate `.gitignore` files in source directories given on the command line are now
+  also respected, previously only `.gitignore` files in the project root and
+  automatically discovered directories were respected (#3237)
+
+### Documentation
+
+- Recommend using BlackConnect in IntelliJ IDEs (#3150)
+
+### Integrations
+
+- Vim plugin: prefix messages with `Black: ` so it's clear they come from Black (#3194)
+- Docker: changed to a /opt/venv installation + added to PATH to be available to
+  non-root users (#3202)
+
+### Output
+
+- Change from deprecated `asyncio.get_event_loop()` to create our event loop which
+  removes DeprecationWarning (#3164)
+- Remove logging from internal `blib2to3` library since it regularly emits error logs
+  about failed caching that can and should be ignored (#3193)
+
+### Parser
+
+- Type comments are now included in the AST equivalence check consistently so accidental
+  deletion raises an error. Though type comments can't be tracked when running on PyPy
+  3.7 due to standard library limitations. (#2874)
+
+### Performance
+
+- Reduce Black's startup time when formatting a single file by 15-30% (#3211)
+
+## 22.6.0
+
+### Style
+
+- Fix unstable formatting involving `#fmt: skip` and `# fmt:skip` comments (notice the
+  lack of spaces) (#2970)
+
+### Preview style
+
+- Docstring quotes are no longer moved if it would violate the line length limit (#3044)
+- Parentheses around return annotations are now managed (#2990)
+- Remove unnecessary parentheses around awaited objects (#2991)
+- Remove unnecessary parentheses in `with` statements (#2926)
+- Remove trailing newlines after code block open (#3035)
+
+### Integrations
+
+- Add `scripts/migrate-black.py` script to ease introduction of Black to a Git project
+  (#3038)
+
+### Output
+
+- Output Python version and implementation as part of `--version` flag (#2997)
+
+### Packaging
+
+- Use `tomli` instead of `tomllib` on Python 3.11 builds where `tomllib` is not
+  available (#2987)
+
+### Parser
+
+- [PEP 654](https://peps.python.org/pep-0654/#except) syntax (for example,
+  `except *ExceptionGroup:`) is now supported (#3016)
+- [PEP 646](https://peps.python.org/pep-0646) syntax (for example,
+  `Array[Batch, *Shape]` or `def fn(*args: *T) -> None`) is now supported (#3071)
+
+### Vim Plugin
+
+- Fix `strtobool` function. It didn't parse true/on/false/off. (#3025)
+
+## 22.3.0
+
+### Preview style
+
+- Code cell separators `#%%` are now standardised to `# %%` (#2919)
+- Remove unnecessary parentheses from `except` statements (#2939)
+- Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945)
+- Avoid magic-trailing-comma in single-element subscripts (#2942)
+
+### Configuration
+
+- Do not format `__pypackages__` directories by default (#2836)
+- Add support for specifying stable version with `--required-version` (#2832).
+- Avoid crashing when the user has no homedir (#2814)
+- Avoid crashing when md5 is not available (#2905)
+- Fix handling of directory junctions on Windows (#2904)
+
+### Documentation
+
+- Update pylint config documentation (#2931)
+
+### Integrations
+
+- Move test to disable plugin in Vim/Neovim, which speeds up loading (#2896)
+
+### Output
+
+- In verbose mode, log when _Black_ is using user-level config (#2861)
+
+### Packaging
+
+- Fix Black to work with Click 8.1.0 (#2966)
+- On Python 3.11 and newer, use the standard library's `tomllib` instead of `tomli`
+  (#2903)
+- `black-primer`, the deprecated internal devtool, has been removed and copied to a
+  [separate repository](https://github.com/cooperlees/black-primer) (#2924)
+
+### Parser
+
+- Black can now parse starred expressions in the target of `for` and `async for`
+  statements, e.g `for item in *items_1, *items_2: pass` (#2879).
+
+## 22.1.0
+
+At long last, _Black_ is no longer a beta product! This is the first non-beta release
+and the first release covered by our new
+[stability policy](https://black.readthedocs.io/en/stable/the_black_code_style/index.html#stability-policy).
+
+### Highlights
+
+- **Remove Python 2 support** (#2740)
+- Introduce the `--preview` flag (#2752)
+
+### Style
+
+- Deprecate `--experimental-string-processing` and move the functionality under
+  `--preview` (#2789)
+- For stubs, one blank line between class attributes and methods is now kept if there's
+  at least one pre-existing blank line (#2736)
+- Black now normalizes string prefix order (#2297)
+- Remove spaces around power operators if both operands are simple (#2726)
+- Work around bug that causes unstable formatting in some cases in the presence of the
+  magic trailing comma (#2807)
+- Use parentheses for attribute access on decimal float and int literals (#2799)
+- Don't add whitespace for attribute access on hexadecimal, binary, octal, and complex
+  literals (#2799)
+- Treat blank lines in stubs the same inside top-level `if` statements (#2820)
+- Fix unstable formatting with semicolons and arithmetic expressions (#2817)
+- Fix unstable formatting around magic trailing comma (#2572)
+
+### Parser
+
+- Fix mapping cases that contain as-expressions, like `case {"key": 1 | 2 as password}`
+  (#2686)
+- Fix cases that contain multiple top-level as-expressions, like `case 1 as a, 2 as b`
+  (#2716)
+- Fix call patterns that contain as-expressions with keyword arguments, like
+  `case Foo(bar=baz as quux)` (#2749)
+- Tuple unpacking on `return` and `yield` constructs now implies 3.8+ (#2700)
+- Unparenthesized tuples on annotated assignments (e.g
+  `values: Tuple[int, ...] = 1, 2, 3`) now implies 3.8+ (#2708)
+- Fix handling of standalone `match()` or `case()` when there is a trailing newline or a
+  comment inside of the parentheses. (#2760)
+- `from __future__ import annotations` statement now implies Python 3.7+ (#2690)
+
+### Performance
+
+- Speed-up the new backtracking parser about 4X in general (enabled when
+  `--target-version` is set to 3.10 and higher). (#2728)
+- _Black_ is now compiled with [mypyc](https://github.com/mypyc/mypyc) for an overall 2x
+  speed-up. 64-bit Windows, MacOS, and Linux (not including musl) are supported. (#1009,
+  #2431)
+
+### Configuration
+
+- Do not accept bare carriage return line endings in pyproject.toml (#2408)
+- Add configuration option (`python-cell-magics`) to format cells with custom magics in
+  Jupyter Notebooks (#2744)
+- Allow setting custom cache directory on all platforms with environment variable
+  `BLACK_CACHE_DIR` (#2739).
+- Enable Python 3.10+ by default, without any extra need to specify
+  `--target-version=py310`. (#2758)
+- Make passing `SRC` or `--code` mandatory and mutually exclusive (#2804)
+
+### Output
+
+- Improve error message for invalid regular expression (#2678)
+- Improve error message when parsing fails during AST safety check by embedding the
+  underlying SyntaxError (#2693)
+- No longer color diff headers white as it's unreadable in light themed terminals
+  (#2691)
+- Text coloring added in the final statistics (#2712)
+- Verbose mode also now describes how a project root was discovered and which paths will
+  be formatted. (#2526)
+
+### Packaging
+
+- All upper version bounds on dependencies have been removed (#2718)
+- `typing-extensions` is no longer a required dependency in Python 3.10+ (#2772)
+- Set `click` lower bound to `8.0.0` (#2791)
+
+### Integrations
+
+- Update GitHub action to support containerized runs (#2748)
+
+### Documentation
+
+- Change protocol in pip installation instructions to `https://` (#2761)
+- Change HTML theme to Furo primarily for its responsive design and mobile support
+  (#2793)
+- Deprecate the `black-primer` tool (#2809)
+- Document Python support policy (#2819)
+
+## 21.12b0
+
+### _Black_
+
+- Fix determination of f-string expression spans (#2654)
+- Fix bad formatting of error messages about EOF in multi-line statements (#2343)
+- Functions and classes in blocks now have more consistent surrounding spacing (#2472)
+
+#### Jupyter Notebook support
+
+- Cell magics are now only processed if they are known Python cell magics. Earlier, all
+  cell magics were tokenized, leading to possible indentation errors e.g. with
+  `%%writefile`. (#2630)
+- Fix assignment to environment variables in Jupyter Notebooks (#2642)
+
+#### Python 3.10 support
+
+- Point users to using `--target-version py310` if we detect 3.10-only syntax (#2668)
+- Fix `match` statements with open sequence subjects, like `match a, b:` or
+  `match a, *b:` (#2639) (#2659)
+- Fix `match`/`case` statements that contain `match`/`case` soft keywords multiple
+  times, like `match re.match()` (#2661)
+- Fix `case` statements with an inline body (#2665)
+- Fix styling of starred expressions inside `match` subject (#2667)
+- Fix parser error location on invalid syntax in a `match` statement (#2649)
+- Fix Python 3.10 support on platforms without ProcessPoolExecutor (#2631)
+- Improve parsing performance on code that uses `match` under `--target-version py310`
+  up to ~50% (#2670)
+
+### Packaging
+
+- Remove dependency on `regex` (#2644) (#2663)
+
+## 21.11b1
+
+### _Black_
+
+- Bumped regex version minimum to 2021.4.4 to fix Pattern class usage (#2621)
+
+## 21.11b0
+
+### _Black_
+
+- Warn about Python 2 deprecation in more cases by improving Python 2 only syntax
+  detection (#2592)
+- Add experimental PyPy support (#2559)
+- Add partial support for the match statement. As it's experimental, it's only enabled
+  when `--target-version py310` is explicitly specified (#2586)
+- Add support for parenthesized with (#2586)
+- Declare support for Python 3.10 for running Black (#2562)
+
+### Integrations
+
+- Fixed vim plugin with Python 3.10 by removing deprecated distutils import (#2610)
+- The vim plugin now parses `skip_magic_trailing_comma` from pyproject.toml (#2613)
+
+## 21.10b0
+
+### _Black_
+
+- Document stability policy, that will apply for non-beta releases (#2529)
+- Add new `--workers` parameter (#2514)
+- Fixed feature detection for positional-only arguments in lambdas (#2532)
+- Bumped typed-ast version minimum to 1.4.3 for 3.10 compatibility (#2519)
+- Fixed a Python 3.10 compatibility issue where the loop argument was still being passed
+  even though it has been removed (#2580)
+- Deprecate Python 2 formatting support (#2523)
+
+### _Blackd_
+
+- Remove dependency on aiohttp-cors (#2500)
+- Bump required aiohttp version to 3.7.4 (#2509)
+
+### _Black-Primer_
+
+- Add primer support for --projects (#2555)
+- Print primer summary after individual failures (#2570)
+
+### Integrations
+
+- Allow to pass `target_version` in the vim plugin (#1319)
+- Install build tools in docker file and use multi-stage build to keep the image size
+  down (#2582)
+
+## 21.9b0
+
+### Packaging
+
+- Fix missing modules in self-contained binaries (#2466)
+- Fix missing toml extra used during installation (#2475)
+
+## 21.8b0
+
+### _Black_
+
+- Add support for formatting Jupyter Notebook files (#2357)
+- Move from `appdirs` dependency to `platformdirs` (#2375)
+- Present a more user-friendly error if .gitignore is invalid (#2414)
+- The failsafe for accidentally added backslashes in f-string expressions has been
+  hardened to handle more edge cases during quote normalization (#2437)
+- Avoid changing a function return type annotation's type to a tuple by adding a
+  trailing comma (#2384)
+- Parsing support has been added for unparenthesized walruses in set literals, set
+  comprehensions, and indices (#2447).
+- Pin `setuptools-scm` build-time dependency version (#2457)
+- Exclude typing-extensions version 3.10.0.1 due to it being broken on Python 3.10
+  (#2460)
+
+### _Blackd_
+
+- Replace sys.exit(-1) with raise ImportError as it plays more nicely with tools that
+  scan installed packages (#2440)
+
+### Integrations
+
+- The provided pre-commit hooks no longer specify `language_version` to avoid overriding
+  `default_language_version` (#2430)
+
+## 21.7b0
+
+### _Black_
+
+- Configuration files using TOML features higher than spec v0.5.0 are now supported
+  (#2301)
+- Add primer support and test for code piped into black via STDIN (#2315)
+- Fix internal error when `FORCE_OPTIONAL_PARENTHESES` feature is enabled (#2332)
+- Accept empty stdin (#2346)
+- Provide a more useful error when parsing fails during AST safety checks (#2304)
+
+### Docker
+
+- Add new `latest_release` tag automation to follow latest black release on docker
+  images (#2374)
+
+### Integrations
+
+- The vim plugin now searches upwards from the directory containing the current buffer
+  instead of the current working directory for pyproject.toml. (#1871)
+- The vim plugin now reads the correct string normalization option in pyproject.toml
+  (#1869)
+- The vim plugin no longer crashes Black when there's boolean values in pyproject.toml
+  (#1869)
+
+## 21.6b0
+
+### _Black_
+
+- Fix failure caused by `fmt: skip` and indentation (#2281)
+- Account for += assignment when deciding whether to split string (#2312)
+- Correct max string length calculation when there are string operators (#2292)
+- Fixed option usage when using the `--code` flag (#2259)
+- Do not call `uvloop.install()` when _Black_ is used as a library (#2303)
+- Added `--required-version` option to require a specific version to be running (#2300)
+- Fix incorrect custom breakpoint indices when string group contains fake f-strings
+  (#2311)
+- Fix regression where `R` prefixes would be lowercased for docstrings (#2285)
+- Fix handling of named escapes (`\N{...}`) when `--experimental-string-processing` is
+  used (#2319)
+
+### Integrations
+
+- The official Black action now supports choosing what version to use, and supports the
+  major 3 OSes. (#1940)
+
+## 21.5b2
+
+### _Black_
+
+- A space is no longer inserted into empty docstrings (#2249)
+- Fix handling of .gitignore files containing non-ASCII characters on Windows (#2229)
+- Respect `.gitignore` files in all levels, not only `root/.gitignore` file (apply
+  `.gitignore` rules like `git` does) (#2225)
+- Restored compatibility with Click 8.0 on Python 3.6 when LANG=C used (#2227)
+- Add extra uvloop install + import support if in python env (#2258)
+- Fix --experimental-string-processing crash when matching parens are not found (#2283)
+- Make sure to split lines that start with a string operator (#2286)
+- Fix regular expression that black uses to identify f-expressions (#2287)
+
+### _Blackd_
+
+- Add a lower bound for the `aiohttp-cors` dependency. Only 0.4.0 or higher is
+  supported. (#2231)
+
+### Packaging
+
+- Release self-contained x86_64 MacOS binaries as part of the GitHub release pipeline
+  (#2198)
+- Always build binaries with the latest available Python (#2260)
+
+### Documentation
+
+- Add discussion of magic comments to FAQ page (#2272)
+- `--experimental-string-processing` will be enabled by default in the future (#2273)
+- Fix typos discovered by codespell (#2228)
+- Fix Vim plugin installation instructions. (#2235)
+- Add new Frequently Asked Questions page (#2247)
+- Fix encoding + symlink issues preventing proper build on Windows (#2262)
+
+## 21.5b1
+
+### _Black_
+
+- Refactor `src/black/__init__.py` into many files (#2206)
+
+### Documentation
+
+- Replaced all remaining references to the
+  [`master`](https://github.com/psf/black/tree/main) branch with the
+  [`main`](https://github.com/psf/black/tree/main) branch. Some additional changes in
+  the source code were also made. (#2210)
+- Significantly reorganized the documentation to make much more sense. Check them out by
+  heading over to [the stable docs on RTD](https://black.readthedocs.io/en/stable/).
+  (#2174)
+
+## 21.5b0
+
+### _Black_
+
+- Set `--pyi` mode if `--stdin-filename` ends in `.pyi` (#2169)
+- Stop detecting target version as Python 3.9+ with pre-PEP-614 decorators that are
+  being called but with no arguments (#2182)
+
+### _Black-Primer_
+
+- Add `--no-diff` to black-primer to suppress formatting changes (#2187)
+
+## 21.4b2
+
+### _Black_
 
 - Fix crash if the user configuration directory is inaccessible. (#2158)
 
@@ -10,15 +572,18 @@
   [circumstances](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#pragmatism)
   in which _Black_ may change the AST (#2159)
 
-#### _Packaging_
+- Allow `.gitignore` rules to be overridden by specifying `exclude` in `pyproject.toml`
+  or on the command line. (#2170)
+
+### _Packaging_
 
 - Install `primer.json` (used by `black-primer` by default) with black. (#2154)
 
-### 21.4b1
+## 21.4b1
 
-#### _Black_
+### _Black_
 
-- Fix crash on docstrings ending with "\ ". (#2142)
+- Fix crash on docstrings ending with "\\ ". (#2142)
 
 - Fix crash when atypical whitespace is cleaned out of dostrings (#2120)
 
@@ -29,13 +594,13 @@
 
 - Don't remove necessary parentheses from assignment expression containing assert /
   return statements. (#2143)
 
-#### _Packaging_
+### _Packaging_
 
 - Bump pathspec to >= 0.8.1 to solve invalid .gitignore exclusion handling
 
-### 21.4b0
+## 21.4b0
 
-#### _Black_
+### _Black_
 
 - Fixed a rare but annoying formatting instability created by the combination of
   optional trailing commas inserted by `Black` and optional parentheses looking at
@@ -91,21 +656,21 @@
 
 - Fixed "Black produced code that is not equivalent to the source" when formatting
   Python 2 docstrings (#2037)
 
-#### _Packaging_
+### _Packaging_
 
 - Self-contained native _Black_ binaries are now provided for releases via GitHub
   Releases (#1743)
 
-### 20.8b1
+## 20.8b1
 
-#### _Packaging_
+### _Packaging_
 
 - explicitly depend on Click 7.1.2 or newer as `Black` no longer works with versions
   older than 7.0
 
-### 20.8b0
+## 20.8b0
 
-#### _Black_
+### _Black_
 
 - re-implemented support for explicit trailing commas: now it works consistently within
   any bracket pair, including nested structures (#1288 and duplicates)
@@ -149,11 +714,11 @@
   this is an undocumented and unsupported feature, you lose Internet points for
   depending on it (#1609)
 
-#### Vim plugin
+### Vim plugin
 
 - prefer virtualenv packages over global packages (#1383)
 
-### 19.10b0
+## 19.10b0
 
 - added support for PEP 572 assignment expressions (#711)
 
@@ -212,7 +777,7 @@
 - `blackd` can now output the diff of formats on source code when the `X-Diff` header
   is provided (#969)
 
-### 19.3b0
+## 19.3b0
 
 - new option `--target-version` to control which Python versions _Black_-formatted code
   should target (#618)
 
@@ -237,7 +802,7 @@
 
 - `blackd` now supports CORS (#622)
 
-### 18.9b0
+## 18.9b0
 
 - numeric literals are now formatted by _Black_ (#452, #461, #464, #469):
 
@@ -252,7 +817,9 @@
 
 - hexadecimal digits are always uppercased (e.g. `0xBADC0DE`)
 
-- added `blackd`, see [its documentation](#blackd) for more info (#349)
+- added `blackd`, see
+  [its documentation](https://github.com/psf/black/blob/18.9b0/README.md#blackd) for
+  more info (#349)
 
 - adjacent string literals are now correctly split into multiple lines (#463)
 
@@ -279,11 +846,11 @@
 - note: the Vim plugin stopped registering `,=` as a default chord as it turned out to
   be a bad idea (#415)
 
-### 18.6b4
+## 18.6b4
 
 - hotfix: don't freeze when multiple comments directly precede `# fmt: off` (#371)
 
-### 18.6b3
+## 18.6b3
 
 - typing stub files (`.pyi`) now have blank lines added after constants (#340)
 
@@ -311,7 +878,7 @@
 
 - fixed a crash due to symbolic links pointing outside of the project directory (#338)
 
-### 18.6b2
+## 18.6b2
 
 - added `--config` (#65)
 
@@ -325,13 +892,13 @@
 
 - fixed unnecessary slowdown in comment placement calculation on lines without
   comments
 
-### 18.6b1
+## 18.6b1
 
 - hotfix: don't output human-facing information on stdout (#299)
 
 - hotfix: don't output cake emoji on non-zero return code (#300)
 
-### 18.6b0
+## 18.6b0
 
 - added `--include` and `--exclude` (#270)
 
@@ -349,7 +916,7 @@
 
 - _Black_ now preserves line endings when formatting a file in place (#258)
 
-### 18.5b1
+## 18.5b1
 
 - added `--pyi` (#249)
 
@@ -378,7 +945,7 @@
 - fixed extra empty line between a function signature and an inner function or inner
   class (#196)
 
-### 18.5b0
+## 18.5b0
 
 - call chains are now formatted according to the
   [fluent interfaces](https://en.wikipedia.org/wiki/Fluent_interface) style (#67)
 
@@ -433,11 +1000,11 @@
 
 - fixed crash when dead symlinks where encountered
 
-### 18.4a4
+## 18.4a4
 
 - don't populate the cache on `--check` (#175)
 
-### 18.4a3
+## 18.4a3
 
 - added a "cache"; files already reformatted that haven't changed on disk won't be
   reformatted again (#109)
 
@@ -465,7 +1032,7 @@
 
 - fixed missing splits of ternary expressions (#141)
 
-### 18.4a2
+## 18.4a2
 
 - fixed parsing of unaligned standalone comments (#99, #112)
 
@@ -476,7 +1043,7 @@
 
 - fixed unstable formatting when encountering unnecessarily escaped quotes in a string
   (#120)
 
-### 18.4a1
+## 18.4a1
 
 - added `--quiet` (#78)
 
@@ -488,7 +1055,7 @@
 
 - fixed removing backslash escapes from raw strings (#100, #105)
 
-### 18.4a0
+## 18.4a0
 
 - added `--diff` (#87)
 
@@ -513,7 +1080,7 @@
 - only allow up to two empty lines on module level and only single empty lines within
   functions (#74)
 
-### 18.3a4
+## 18.3a4
 
 - `# fmt: off` and `# fmt: on` are implemented (#5)
 
@@ -535,7 +1102,7 @@
   [Sphinx auto-attribute comments](http://www.sphinx-doc.org/en/stable/ext/autodoc.html#directive-autoattribute)
   (#68)
 
-### 18.3a3
+## 18.3a3
 
 - don't remove single empty lines outside of bracketed expressions (#19)
 
@@ -545,7 +1112,7 @@
 
 - even better handling of numpy-style array indexing (#33, again)
 
-### 18.3a2
+## 18.3a2
 
 - changed positioning of binary operators to occur at beginning of lines instead of at
   the end, following
 
@@ -569,7 +1136,7 @@
 
 - fixed spurious space after star-based unary expressions (#31)
 
-### 18.3a1
+## 18.3a1
 
 - added `--check`
 
@@ -588,10 +1155,10 @@
 - fixed spurious space after unary operators when the operand was a complex expression
   (#15)
 
-### 18.3a0
+## 18.3a0
 
 - first published version, Happy 🍰 Day 2018!
 
 - alpha quality
 
-- date-versioned (see: https://calver.org/)
+- date-versioned (see: <https://calver.org/>)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index f2a4ee2..10f6042 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,114 +3,8 @@
 Welcome! Happy to see you willing to make the project better. Have you read the entire
 [user documentation](https://black.readthedocs.io/en/latest/) yet?
 
-## Bird's eye view
+Our [contributing documentation](https://black.readthedocs.org/en/latest/contributing/)
+contains details on all you need to know about contributing to _Black_, from the basics
+to the internals of _Black_.
 
-In terms of inspiration, _Black_ is about as configurable as _gofmt_. This is
-deliberate.
-
-Bug reports and fixes are always welcome! Please follow the
-[issue template on GitHub](https://github.com/psf/black/issues/new) for best results.
-
-Before you suggest a new feature or configuration knob, ask yourself why you want it. If
-it enables better integration with some workflow, fixes an inconsistency, speeds things
-up, and so on - go for it! On the other hand, if your answer is "because I don't like a
-particular formatting" then you're not ready to embrace _Black_ yet. Such changes are
-unlikely to get accepted. You can still try but prepare to be disappointed.
-
-## Technicalities
-
-Development on the latest version of Python is preferred. As of this writing it's 3.9.
-You can use any operating system. I am using macOS myself and CentOS at work.
-
-Install all development dependencies using:
-
-```console
-$ pipenv install --dev
-$ pipenv shell
-$ pre-commit install
-```
-
-If you haven't used `pipenv` before but are comfortable with virtualenvs, just run
-`pip install pipenv` in the virtualenv you're already using and invoke the command above
-from the cloned _Black_ repo. It will do the correct thing.
-
-Non pipenv install works too:
-
-```console
-$ pip install -r test_requirements
-$ pip install -e .[d]
-```
-
-Before submitting pull requests, run lints and tests with the following commands from
-the root of the black repo:
-
-```console
-# Linting
-$ pre-commit run -a
-
-# Unit tests
-$ tox -e py
-
-# Optional Fuzz testing
-$ tox -e fuzz
-
-# Optional CI run to test your changes on many popular python projects
-$ black-primer [-k -w /tmp/black_test_repos]
-```
-
-### News / Changelog Requirement
-
-`Black` has CI that will check for an entry corresponding to your PR in `CHANGES.md`. If
-you feel this PR not require a changelog entry please state that in a comment and a
-maintainer can add a `skip news` label to make the CI pass. Otherwise, please ensure you
-have a line in the following format:
-
-```md
-- `Black` is now more awesome (#X)
-```
-
-To workout X, please use
-[Next PR Number](https://ichard26.github.io/next-pr-number/?owner=psf&name=black). This
-is not perfect but saves a lot of release overhead as now the releaser does not need to
-go back and workout what to add to the `CHANGES.md` for each release.
-
-### Style Changes
-
-If a change would affect the advertised code style, please modify the documentation (The
-_Black_ code style) to reflect that change. Patches that fix unintended bugs in
-formatting don't need to be mentioned separately though.
-
-### Docs Testing
-
-If you make changes to docs, you can test they still build locally too.
-
-```console
-$ pip install -r docs/requirements.txt
-$ pip install [-e] .[d]
-$ sphinx-build -a -b html -W docs/ docs/_build/
-```
-
-## black-primer
-
-`black-primer` is used by CI to pull down well-known _Black_ formatted projects and see
-if we get source code changes. It will error on formatting changes or errors. Please run
-before pushing your PR to see if you get the actions you would expect from _Black_ with
-your PR. You may need to change
-[primer.json](https://github.com/psf/black/blob/master/src/black_primer/primer.json)
-configuration for it to pass.
-
-For more `black-primer` information visit the
-[documentation](https://github.com/psf/black/blob/master/docs/black_primer.md).
-
-## Hygiene
-
-If you're fixing a bug, add a test. Run it first to confirm it fails, then fix the bug,
-run it again to confirm it's really fixed.
-
-If adding a new feature, add a test. In fact, always add a test. But wait, before adding
-any large feature, first open an issue for us to discuss the idea first.
-
-## Finally
-
-Thanks again for your interest in improving the project! You're taking action when most
-people decide to sit and watch.
+We look forward to your contributions!
diff --git a/Dockerfile b/Dockerfile
index 9542479..4e8f12f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,14 +1,19 @@
-FROM python:3-slim
+FROM python:3-slim AS builder
 
 RUN mkdir /src
 COPY . /src/
-RUN pip install --no-cache-dir --upgrade pip setuptools wheel \
-    && apt update && apt install -y git \
+ENV VIRTUAL_ENV=/opt/venv
+RUN python -m venv $VIRTUAL_ENV
+RUN . /opt/venv/bin/activate && pip install --no-cache-dir --upgrade pip setuptools wheel \
+    # Install build tools to compile dependencies that don't have prebuilt wheels
+    && apt update && apt install -y git build-essential \
     && cd /src \
-    && pip install --no-cache-dir .[colorama,d] \
-    && rm -rf /src \
-    && apt remove -y git \
-    && apt autoremove -y \
-    && rm -rf /var/lib/apt/lists/*
+    && pip install --no-cache-dir .[colorama,d]
+
+FROM python:3-slim
+
+# copy only Python packages to limit the image size
+COPY --from=builder /opt/venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
 
-CMD ["black"]
+CMD ["/opt/venv/bin/black"]
diff --git a/Pipfile b/Pipfile
deleted file mode 100644
index 40f3d60..0000000
--- a/Pipfile
+++ /dev/null
@@ -1,34 +0,0 @@
-[[source]]
-name = "pypi"
-url = "https://pypi.python.org/simple"
-verify_ssl = true
-
-[dev-packages]
-Sphinx = ">=3.1.2"
-coverage = "*"
-docutils = "==0.15" # not a direct dependency, see https://github.com/pypa/pipenv/issues/3865
-flake8 = "*"
-flake8-bugbear = "*"
-mypy = ">=0.812"
-pre-commit = "*"
-readme_renderer = "*"
-recommonmark = "*"
-setuptools = ">=39.2.0"
-setuptools-scm = "*"
-twine = ">=1.11.0"
-wheel = ">=0.31.1"
-black = {editable = true, extras = ["d"], path = "."}
-
-[packages]
-aiohttp = ">=3.3.2"
-aiohttp-cors = "*"
-appdirs = "*"
-click = ">=7.1.2"
-mypy_extensions = ">=0.4.3"
-pathspec = ">=0.8.1"
-regex = ">=2020.1.8"
-toml = ">=0.10.1"
-typed-ast = "==1.4.2"
-typing_extensions = {"python_version <" = "3.8","version >=" = "3.7.4"}
-black = {editable = true,extras = ["d"],path = "."}
-dataclasses = {"python_version <" = "3.7","version >" = "0.6"}
diff --git a/Pipfile.lock b/Pipfile.lock
deleted file mode 100644
index 8a16a75..0000000
--- a/Pipfile.lock
+++ /dev/null
@@ -1,1217 +0,0 @@
-{
-    "_meta": {
-        "hash": {
-            "sha256": "1b075e0e344dad54944ed4474351c3f311accb67cbea2e0c381da3700b71f415"
-        },
-        "pipfile-spec": 6,
-        "requires": {},
-        "sources": [
-            {
-                "name": "pypi",
-                "url": "https://pypi.python.org/simple",
-                "verify_ssl": true
-            }
-        ]
-    },
-    "default": {
-        "aiohttp": {
-            "hashes": [
-                "sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe",
-                "sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe",
-                "sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5",
-                "sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8",
-                "sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd",
-                "sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb",
-                "sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c",
-                "sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87",
-                "sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0",
-                "sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290",
-                "sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5",
-                "sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287",
-                "sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde",
-                "sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf",
-                "sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8",
-                "sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16",
-                "sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf",
-                "sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809",
-                "sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213",
-                "sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f",
-                "sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013",
-                "sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b",
-                "sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9",
-                "sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5",
-                "sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb",
-                "sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df",
-                "sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4",
-                "sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439",
-                "sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f",
-                "sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22",
-                "sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f",
-                "sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5",
-                "sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970",
-                "sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009",
-                "sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc",
-                "sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a",
-                "sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95"
-            ],
-            "index": "pypi",
-            "version": "==3.7.4.post0"
-        },
-        "aiohttp-cors": {
-            "hashes": [
-                "sha256:0451ba59fdf6909d0e2cd21e4c0a43752bc0703d33fc78ae94d9d9321710193e",
-                "sha256:4d39c6d7100fd9764ed1caf8cebf0eb01bf5e3f24e2e073fda6234bc48b19f5d"
-            ],
-            "index": "pypi",
-            "version": "==0.7.0"
-        },
-        "appdirs": {
-            "hashes": [
-                "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41",
-                "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"
-            ],
-            "index": "pypi",
-            "version": "==1.4.4"
-        },
-        "async-timeout": {
-            "hashes": [
-                "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f",
-                "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3"
-            ],
-            "markers": "python_full_version >= '3.5.3'",
-            "version": "==3.0.1"
-        },
-        "attrs": {
-            "hashes": [
-                "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6",
-                "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700"
-            ],
-            "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
-            "version": "==20.3.0"
-        },
-        "black": {
-            "editable": true,
-            "extras": [
-                "d"
-            ],
-            "path": "."
[... remainder of the deleted Pipfile.lock condensed: machine-generated sha256
hash pins for the rest of the "default" section (chardet, click, dataclasses,
idna, multidict, mypy-extensions, pathspec, regex, setuptools-scm, toml,
typed-ast, typing-extensions, yarl) and for the entire "develop" section
(aiohttp through zipp, including black itself as an editable install with the
"d" extra, mypy 0.812, flake8 3.9.1, pre-commit 2.12.1, coverage 5.5,
sphinx 3.5.4, and twine 3.4.1) ...]
-        }
-    }
-}
diff --git a/README.md b/README.md
index 41cea76..b259302 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,12 @@
-![Black Logo](https://raw.githubusercontent.com/psf/black/master/docs/_static/logo2-readme.png)
+[![Black Logo](https://raw.githubusercontent.com/psf/black/main/docs/_static/logo2-readme.png)](https://black.readthedocs.io/en/stable/)

The Uncompromising Code Formatter

-Build Status
Actions Status
-Actions Status
Documentation Status
-Coverage Status
-License: MIT
+Coverage Status
+License: MIT
PyPI
Downloads
conda-forge
@@ -27,21 +25,12 @@ becomes transparent after a while and you can focus on the content instead.

_Black_ makes code review faster by producing the smallest diffs possible.

-Try it out now using the [Black Playground](https://black.now.sh). Watch the
+Try it out now using the [Black Playground](https://black.vercel.app). Watch the
[PyCon 2019 talk](https://youtu.be/esZLCuWs_2Y) to learn more.

---

-_Contents:_ **[Installation and usage](#installation-and-usage)** |
-**[Code style](#the-black-code-style)** | **[Pragmatism](#pragmatism)** |
-**[pyproject.toml](#pyprojecttoml)** | **[Editor integration](#editor-integration)** |
-**[blackd](#blackd)** | **[black-primer](#black-primer)** |
-**[Version control integration](#version-control-integration)** |
-**[GitHub Actions](#github-actions)** |
-**[Ignoring unmodified files](#ignoring-unmodified-files)** | **[Used by](#used-by)** |
-**[Testimonials](#testimonials)** | **[Show your style](#show-your-style)** |
-**[Contributing](#contributing-to-black)** | **[Change log](#change-log)** |
-**[Authors](#authors)**
+**[Read the documentation on ReadTheDocs!](https://black.readthedocs.io/en/stable)**

---

@@ -49,15 +38,12 @@

### Installation

-_Black_ can be installed by running `pip install black`. It requires Python 3.6.2+ to
-run. If you want to format Python 2 code as well, install with
-`pip install black[python2]`.
-
-#### Install from GitHub
+_Black_ can be installed by running `pip install black`. It requires Python 3.7+ to run.
+If you want to format Jupyter Notebooks, install with `pip install 'black[jupyter]'`.

If you can't wait for the latest _hotness_ and want to install from GitHub, use:

-`pip install git+git://github.com/psf/black`
+`pip install git+https://github.com/psf/black`

### Usage

@@ -73,404 +59,98 @@ You can run _Black_ as a package if running it as a script doesn't work:
python -m black {source_file_or_directory}
```
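To give a feel for the result, here is a small before/after pair — a made-up
snippet for illustration only, not an excerpt from any real project:

```python
# Before: one-line body, inconsistent quotes and spacing.
def process(data,verbose  = False): return {'count':len(data),"ok":True}


# After running `black`: normalized quotes, spacing, and layout.
def process(data, verbose=False):
    return {"count": len(data), "ok": True}
```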
-### Command line options
-
-_Black_ doesn't provide many options. You can list them by running `black --help`:
-
-```text
-Usage: black [OPTIONS] [SRC]...
-
-  The uncompromising code formatter.
-
-Options:
-  -c, --code TEXT                 Format the code passed in as a string.
-  -l, --line-length INTEGER      How many characters per line to allow.
-                                  [default: 88]
-
-  -t, --target-version [py27|py33|py34|py35|py36|py37|py38|py39]
-                                  Python versions that should be supported by
-                                  Black's output. [default: per-file auto-
-                                  detection]
-
-  --pyi                           Format all input files like typing stubs
-                                  regardless of file extension (useful when
-                                  piping source on standard input).
-
-  -S, --skip-string-normalization
-                                  Don't normalize string quotes or prefixes.
-  -C, --skip-magic-trailing-comma
-                                  Don't use trailing commas as a reason to
-                                  split lines.
-
-  --check                         Don't write the files back, just return the
-                                  status. Return code 0 means nothing would
-                                  change. Return code 1 means some files
-                                  would be reformatted. Return code 123 means
-                                  there was an internal error.
-
-  --diff                          Don't write the files back, just output a
-                                  diff for each file on stdout.
-
-  --color / --no-color            Show colored diff. Only applies when
-                                  `--diff` is given.
-
-  --fast / --safe                 If --fast given, skip temporary sanity
-                                  checks. [default: --safe]
-
-  --include TEXT                  A regular expression that matches files and
-                                  directories that should be included on
-                                  recursive searches. An empty value means
-                                  all files are included regardless of the
-                                  name. Use forward slashes for directories
-                                  on all platforms (Windows, too). Exclusions
-                                  are calculated first, inclusions later.
-                                  [default: \.pyi?$]
-
-  --exclude TEXT                  A regular expression that matches files and
-                                  directories that should be excluded on
-                                  recursive searches. An empty value means no
-                                  paths are excluded. Use forward slashes for
-                                  directories on all platforms (Windows, too).
-                                  Exclusions are calculated first, inclusions
-                                  later. [default: /(\.direnv|\.eggs|\.git|\.
-                                  hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.sv
-                                  n|_build|buck-out|build|dist)/]
-
-  --extend-exclude TEXT           Like --exclude, but adds additional files
-                                  and directories on top of the excluded
-                                  ones (useful if you simply want to add to
-                                  the default).
-
-  --force-exclude TEXT            Like --exclude, but files and directories
-                                  matching this regex will be excluded even
-                                  when they are passed explicitly as
-                                  arguments.
-
-  --stdin-filename TEXT           The name of the file when passing it through
-                                  stdin. Useful to make sure Black will
-                                  respect --force-exclude option on some
-                                  editors that rely on using stdin.
-
-  -q, --quiet                     Don't emit non-error messages to stderr.
-                                  Errors are still emitted; silence those with
-                                  2>/dev/null.
-
-  -v, --verbose                   Also emit messages to stderr about files
-                                  that were not changed or were ignored due to
-                                  exclusion patterns.
-
-  --version                       Show the version and exit.
-  --config FILE                   Read configuration from FILE path.
-  -h, --help                      Show this message and exit.
-```
-
-_Black_ is a well-behaved Unix-style command-line tool:
-
-- it does nothing if no sources are passed to it;
-- it will read from standard input and write to standard output if `-` is used as the
-  filename;
-- it only outputs messages to users on standard error;
-- it exits with code 0 unless an internal error occurred (or `--check` was used).
-
-### Using _Black_ with other tools
-
-While _Black_ enforces formatting that conforms to PEP 8, other tools may raise warnings
-about _Black_'s changes or will overwrite _Black_'s changes. A good example of this is
-[isort](https://pypi.org/p/isort). Since _Black_ is barely configurable, these tools
-should be configured to neither warn about nor overwrite _Black_'s changes.
+Further information can be found in our docs:
-Actual details on _Black_-compatible configurations for various tools can be found in
-[compatible_configs](https://github.com/psf/black/blob/master/docs/compatible_configs.md#black-compatible-configurations).
-
-### Migrating your code style without ruining git blame
-
-A long-standing argument against moving to automated code formatters like _Black_ is
-that the migration will clutter up the output of `git blame`. This was a valid argument,
-but since Git version 2.23, Git natively supports
-[ignoring revisions in blame](https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revltrevgt)
-with the `--ignore-rev` option. You can also pass a file listing the revisions to ignore
-using the `--ignore-revs-file` option. The changes made by the revision will be ignored
-when assigning blame. Lines modified by an ignored revision will be blamed on the
-previous revision that modified those lines.
-
-So when migrating your project's code style to _Black_, reformat everything and commit
-the changes (preferably in one massive commit).
Then put the full 40 characters commit
-identifier(s) into a file.
-
-```
-# Migrate code style to Black
-5b4ab991dede475d393e9d69ec388fd6bd949699
-```
-
-Afterwards, you can pass that file to `git blame` and see clean and meaningful blame
-information.
-
-```console
-$ git blame important.py --ignore-revs-file .git-blame-ignore-revs
-7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 1) def very_important_function(text, file):
-abdfd8b0 (Alice Doe 2019-09-23 11:39:32 -0400 2)     text = text.lstrip()
-7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 3)     with open(file, "r+") as f:
-7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 4)         f.write(formatted)
-```
-
-You can even configure `git` to automatically ignore revisions listed in a file on every
-call to `git blame`.
-
-```console
-$ git config blame.ignoreRevsFile .git-blame-ignore-revs
-```
-
-**The one caveat is that GitHub and GitLab do not yet support ignoring revisions using
-their native UI of blame.** So blame information will be cluttered with a reformatting
-commit on those platforms. (If you'd like this feature, there's an open issue for
-[GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/31423) and please let GitHub
-know!)
-
-### NOTE: This is a beta product
+- [Usage and Configuration](https://black.readthedocs.io/en/stable/usage_and_configuration/index.html)

_Black_ is already [successfully used](https://github.com/psf/black#used-by) by many
-projects, small and big. It also sports a decent test suite. However, it is still very
-new. Things will probably be wonky for a while. This is made explicit by the "Beta"
-trove classifier, as well as by the "b" in the version number. What this means for you
-is that **until the formatter becomes stable, you should expect some formatting to
-change in the future**. That being said, no drastic stylistic changes are planned,
-mostly responses to bug reports.
+projects, small and big. _Black_ has a comprehensive test suite, with efficient parallel
+tests, and our own auto-formatting and parallel Continuous Integration runner. Now that
+we have become stable, you should not expect large formatting changes in the future.
+Stylistic changes will mostly be responses to bug reports and support for new Python
+syntax. For more information, please refer to
+[The Black Code Style](https://black.readthedocs.io/en/stable/the_black_code_style/index.html).

Also, as a safety measure which slows down processing, _Black_ will check that the
reformatted code still produces a valid AST that is effectively equivalent to the
original (see the
-[Pragmatism](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#pragmatism)
+[Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#ast-before-and-after-formatting)
section for details). If you're feeling confident, use `--fast`.
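For illustration, the AST safety check described above can be approximated in a few
lines of plain Python. This is a simplified sketch, not Black's actual implementation
(which also normalizes some known-equivalent constructs before comparing):

```python
import ast

def roughly_equivalent(before: str, after: str) -> bool:
    """Check that a reformat preserved the parse tree.

    A simplified take on Black's safety check: ast.dump() ignores
    formatting details such as whitespace and line breaks, so two
    sources that differ only in layout produce identical dumps.
    """
    return ast.dump(ast.parse(before)) == ast.dump(ast.parse(after))

# Whitespace-only changes leave the AST untouched:
assert roughly_equivalent("x=(1,2)", "x = (1, 2)")
# A real semantic change does not:
assert not roughly_equivalent("x = 1", "x = 2")
```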
## The _Black_ code style

_Black_ is a PEP 8 compliant opinionated formatter. _Black_ reformats entire files in
-place. It is not configurable. It doesn't take previous formatting into account. Your
-main option of configuring _Black_ is that it doesn't reformat blocks that start with
-`# fmt: off` and end with `# fmt: on`, or lines that ends with `# fmt: skip`. Pay
-attention that `# fmt: on/off` have to be on the same level of indentation. To learn
-more about _Black_'s opinions, to go
-[the_black_code_style](https://github.com/psf/black/blob/master/docs/the_black_code_style.md).
+place. Style configuration options are deliberately limited and rarely added. It doesn't
+take previous formatting into account (see
+[Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#pragmatism)
+for exceptions).
+
+Our documentation covers the current _Black_ code style, but planned changes to it are
+also documented. They're both worth taking a look at:
+
+- [The _Black_ Code Style: Current style](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html)
+- [The _Black_ Code Style: Future style](https://black.readthedocs.io/en/stable/the_black_code_style/future_style.html)
+
+Changes to the _Black_ code style are bound by the Stability Policy:
+
+- [The _Black_ Code Style: Stability Policy](https://black.readthedocs.io/en/stable/the_black_code_style/index.html#stability-policy)

Please refer to this document before submitting an issue. What seems like a bug might be
intended behaviour.

-## Pragmatism
+### Pragmatism

Early versions of _Black_ used to be absolutist in some respects. They took after its
initial author. This was fine at the time as it made the implementation simpler and
there were not many users anyway. Not many edge cases were reported. As a mature tool,
-_Black_ does make some exceptions to rules it otherwise holds. This
-[section](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#pragmatism)
-of `the_black_code_style` describes what those exceptions are and why this is the case.
+_Black_ does make some exceptions to rules it otherwise holds.
+
+- [The _Black_ code style: Pragmatism](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#pragmatism)

Please refer to this document before submitting an issue, just like with the document
above. What seems like a bug might be intended behaviour.

-## pyproject.toml
+## Configuration

_Black_ is able to read project-specific default values for its command line options
from a `pyproject.toml` file. This is especially useful for specifying custom
-`--include` and `--exclude`/`--extend-exclude` patterns for your project.
-
-**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is
-"No". _Black_ is all about sensible defaults.
-
-### What on Earth is a `pyproject.toml` file?
-
-[PEP 518](https://www.python.org/dev/peps/pep-0518/) defines `pyproject.toml` as a
-configuration file to store build system requirements for Python projects. With the help
-of tools like [Poetry](https://python-poetry.org/) or
-[Flit](https://flit.readthedocs.io/en/latest/) it can fully replace the need for
-`setup.py` and `setup.cfg` files.
-
-### Where _Black_ looks for the file
-
-By default _Black_ looks for `pyproject.toml` starting from the common base directory of
-all files and directories passed on the command line. If it's not there, it looks in
-parent directories. It stops looking when it finds the file, or a `.git` directory, or a
-`.hg` directory, or the root of the file system, whichever comes first.
+`--include` and `--exclude`/`--force-exclude`/`--extend-exclude` patterns for your
+project.

-If you're formatting standard input, _Black_ will look for configuration starting from
-the current working directory.
+You can find more details in our documentation:

-You can use a "global" configuration, stored in a specific location in your home
-directory. This will be used as a fallback configuration, that is, it will be used if
-and only if _Black_ doesn't find any configuration as mentioned above.
Depending on your
-operating system, this configuration file should be stored as:
+- [The basics: Configuration via a file](https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html#configuration-via-a-file)

-- Windows: `~\.black`
-- Unix-like (Linux, MacOS, etc.): `$XDG_CONFIG_HOME/black` (`~/.config/black` if the
-  `XDG_CONFIG_HOME` environment variable is not set)
-
-Note that these are paths to the TOML file itself (meaning that they shouldn't be named
-as `pyproject.toml`), not directories where you store the configuration. Here, `~`
-refers to the path to your home directory. On Windows, this will be something like
-`C:\\Users\UserName`.
-
-You can also explicitly specify the path to a particular file that you want with
-`--config`. In this situation _Black_ will not look for any other file.
-
-If you're running with `--verbose`, you will see a blue message if a file was found and
-used.
-
-Please note `blackd` will not use `pyproject.toml` configuration.
-
-### Configuration format
-
-As the file extension suggests, `pyproject.toml` is a
-[TOML](https://github.com/toml-lang/toml) file. It contains separate sections for
-different tools. _Black_ is using the `[tool.black]` section. The option keys are the
-same as long names of options on the command line.
-
-Note that you have to use single-quoted strings in TOML for regular expressions. It's
-the equivalent of r-strings in Python. Multiline strings are treated as verbose regular
-expressions by Black. Use `[ ]` to denote a significant space character.
-
-<details>
-<summary>Example pyproject.toml</summary>
-
-```toml
-[tool.black]
-line-length = 88
-target-version = ['py37']
-include = '\.pyi?$'
-extend-exclude = '''
-# A regex preceded with ^/ will apply only to files and directories
-# in the root of the project.
-^/foo.py    # exclude a file named foo.py in the root of the project (in addition to the defaults)
-'''
-```
-
-</details>
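For illustration, the same lookup is available from Black's Python API, and the
`autoload/black.vim` hunk later in this diff calls exactly these helpers. A minimal
sketch; the file path is hypothetical:

```python
import black

# Locate pyproject.toml starting from the paths being formatted, just as the
# Vim plugin does with black.find_pyproject_toml((filename,)).
path = black.find_pyproject_toml(("src/mypackage/app.py",))  # hypothetical path
if path:
    # Keys come back underscored to match the long CLI option names,
    # e.g. "line-length" becomes "line_length".
    config = black.parse_pyproject_toml(path)
    print(config.get("line_length"), config.get("extend_exclude"))
```

+And if you're looking for more general configuration documentation: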
+- [Usage and Configuration](https://black.readthedocs.io/en/stable/usage_and_configuration/index.html) -### Lookup hierarchy - -Command-line options have defaults that you can see in `--help`. A `pyproject.toml` can -override those defaults. Finally, options provided by the user on the command line -override both. - -_Black_ will only ever use one `pyproject.toml` file during an entire run. It doesn't -look for multiple files, and doesn't compose configuration from different levels of the -file hierarchy. - -## Editor integration - -_Black_ can be integrated into many editors with plugins. They let you run _Black_ on -your code with the ease of doing it in your editor. To get started using _Black_ in your -editor of choice, please see -[editor_integration](https://github.com/psf/black/blob/master/docs/editor_integration.md). - -Patches are welcome for editors without an editor integration or plugin! More -information can be found in -[editor_integration](https://github.com/psf/black/blob/master/docs/editor_integration.md#other-editors). - -## blackd - -`blackd` is a small HTTP server that exposes Black's functionality over a simple -protocol. The main benefit of using it is to avoid paying the cost of starting up a new -Black process every time you want to blacken a file. Please refer to -[blackd](https://github.com/psf/black/blob/master/docs/blackd.md) to get the ball -rolling. - -## black-primer - -`black-primer` is a tool built for CI (and humans) to have _Black_ `--check` a number of -(configured in `primer.json`) Git accessible projects in parallel. -[black_primer](https://github.com/psf/black/blob/master/docs/black_primer.md) has more -information regarding its usage and configuration. - -(A PR adding Mercurial support will be accepted.) - -## Version control integration - -Use [pre-commit](https://pre-commit.com/). Once you -[have it installed](https://pre-commit.com/#install), add this to the -`.pre-commit-config.yaml` in your repository: - -```yaml -repos: - - repo: https://github.com/psf/black - rev: 20.8b1 # Replace by any tag/version: https://github.com/psf/black/tags - hooks: - - id: black - language_version: python3 # Should be a command that runs python3.6+ -``` - -Then run `pre-commit install` and you're ready to go. - -Avoid using `args` in the hook. Instead, store necessary configuration in -`pyproject.toml` so that editors and command-line usage of Black all behave consistently -for your project. See _Black_'s own -[pyproject.toml](https://github.com/psf/black/blob/master/pyproject.toml) for an -example. - -If you're already using Python 3.7, switch the `language_version` accordingly. Finally, -`stable` is a branch that tracks the latest release on PyPI. If you'd rather run on -master, this is also an option. - -## GitHub Actions - -Create a file named `.github/workflows/black.yml` inside your repository with: - -```yaml -name: Lint - -on: [push, pull_request] - -jobs: - lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - - uses: psf/black@stable - with: - args: ". --check" -``` - -### Inputs - -#### `black_args` - -**optional**: Black input arguments. Defaults to `. --check --diff`. - -## Ignoring unmodified files - -_Black_ remembers files it has already formatted, unless the `--diff` flag is used or -code is passed via standard input. This information is stored per-user. 
The exact -location of the file depends on the _Black_ version and the system on which _Black_ is -run. The file is non-portable. The standard location on common operating systems is: - -- Windows: - `C:\\Users\\AppData\Local\black\black\Cache\\cache...pickle` -- macOS: - `/Users//Library/Caches/black//cache...pickle` -- Linux: - `/home//.cache/black//cache...pickle` - -`file-mode` is an int flag that determines whether the file was formatted as 3.6+ only, -as .pyi, and whether string normalization was omitted. - -To override the location of these files on macOS or Linux, set the environment variable -`XDG_CACHE_HOME` to your preferred location. For example, if you want to put the cache -in the directory you're running _Black_ from, set `XDG_CACHE_HOME=.cache`. _Black_ will -then write the above files to `.cache/black//`. +**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is +"No". _Black_ is all about sensible defaults. Applying those defaults will have your +code in compliance with many other _Black_ formatted projects. ## Used by The following notable open-source projects trust _Black_ with enforcing a consistent -code style: pytest, tox, Pyramid, Django Channels, Hypothesis, attrs, SQLAlchemy, -Poetry, PyPA applications (Warehouse, Bandersnatch, Pipenv, virtualenv), pandas, Pillow, -every Datadog Agent Integration, Home Assistant, Zulip. +code style: pytest, tox, Pyramid, Django, Django Channels, Hypothesis, attrs, +SQLAlchemy, Poetry, PyPA applications (Warehouse, Bandersnatch, Pipenv, virtualenv), +pandas, Pillow, Twisted, LocalStack, every Datadog Agent Integration, Home Assistant, +Zulip, Kedro, OpenOA, FLORIS, ORBIT, WOMBAT, and many more. -The following organizations use _Black_: Facebook, Dropbox, Mozilla, Quora. +The following organizations use _Black_: Facebook, Dropbox, KeepTruckin, Mozilla, Quora, +Duolingo, QuantumBlack, Tesla. Are we missing anyone? Let us know. ## Testimonials +**Mike Bayer**, [author of `SQLAlchemy`](https://www.sqlalchemy.org/): + +> I can't think of any single tool in my entire programming career that has given me a +> bigger productivity increase by its introduction. I can now do refactorings in about +> 1% of the keystrokes that it would have taken me previously when we had no way for +> code to format itself. + **Dusty Phillips**, [writer](https://smile.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=dusty+phillips): @@ -485,8 +165,8 @@ Twisted and CPython: > At least the name is good. -**Kenneth Reitz**, creator of [`requests`](http://python-requests.org/) and -[`pipenv`](https://readthedocs.org/projects/pipenv/): +**Kenneth Reitz**, creator of [`requests`](https://requests.readthedocs.io/en/latest/) +and [`pipenv`](https://readthedocs.org/projects/pipenv/): > This vastly improves the formatting of our code. Thanks a ton! @@ -512,209 +192,39 @@ Looks like this: MIT -## Contributing to _Black_ +## Contributing + +Welcome! Happy to see you willing to make the project better. You can get started by +reading this: -In terms of inspiration, _Black_ is about as configurable as _gofmt_. This is -deliberate. +- [Contributing: The basics](https://black.readthedocs.io/en/latest/contributing/the_basics.html) -Bug reports and fixes are always welcome! However, before you suggest a new feature or -configuration knob, ask yourself why you want it. If it enables better integration with -some workflow, fixes an inconsistency, speeds things up, and so on - go for it! 
On the -other hand, if your answer is "because I don't like a particular formatting" then you're -not ready to embrace _Black_ yet. Such changes are unlikely to get accepted. You can -still try but prepare to be disappointed. +You can also take a look at the rest of the contributing docs or talk with the +developers: -More details can be found in -[CONTRIBUTING](https://github.com/psf/black/blob/master/CONTRIBUTING.md). +- [Contributing documentation](https://black.readthedocs.io/en/latest/contributing/index.html) +- [Chat on Discord](https://discord.gg/RtVdv86PrH) ## Change log -The log's become rather long. It moved to its own file. +The log has become rather long. It moved to its own file. -See [CHANGES](https://github.com/psf/black/blob/master/CHANGES.md). +See [CHANGES](https://black.readthedocs.io/en/latest/change_log.html). ## Authors -Glued together by [Łukasz Langa](mailto:lukasz@langa.pl). - -Maintained with [Carol Willing](mailto:carolcode@willingconsulting.com), -[Carl Meyer](mailto:carl@oddbird.net), -[Jelle Zijlstra](mailto:jelle.zijlstra@gmail.com), -[Mika Naylor](mailto:mail@autophagy.io), -[Zsolt Dollenstein](mailto:zsol.zsol@gmail.com), -[Cooper Lees](mailto:me@cooperlees.com), and Richard Si. - -Multiple contributions by: - -- [Abdur-Rahmaan Janhangeer](mailto:arj.python@gmail.com) -- [Adam Johnson](mailto:me@adamj.eu) -- [Adam Williamson](mailto:adamw@happyassassin.net) -- [Alexander Huynh](mailto:github@grande.coffee) -- [Alex Vandiver](mailto:github@chmrr.net) -- [Allan Simon](mailto:allan.simon@supinfo.com) -- Anders-Petter Ljungquist -- [Andrew Thorp](mailto:andrew.thorp.dev@gmail.com) -- [Andrew Zhou](mailto:andrewfzhou@gmail.com) -- [Andrey](mailto:dyuuus@yandex.ru) -- [Andy Freeland](mailto:andy@andyfreeland.net) -- [Anthony Sottile](mailto:asottile@umich.edu) -- [Arjaan Buijk](mailto:arjaan.buijk@gmail.com) -- [Arnav Borbornah](mailto:arnavborborah11@gmail.com) -- [Artem Malyshev](mailto:proofit404@gmail.com) -- [Asger Hautop Drewsen](mailto:asgerdrewsen@gmail.com) -- [Augie Fackler](mailto:raf@durin42.com) -- [Aviskar KC](mailto:aviskarkc10@gmail.com) -- Batuhan Taşkaya -- [Benjamin Wohlwend](mailto:bw@piquadrat.ch) -- [Benjamin Woodruff](mailto:github@benjam.info) -- [Bharat Raghunathan](mailto:bharatraghunthan9767@gmail.com) -- [Brandt Bucher](mailto:brandtbucher@gmail.com) -- [Brett Cannon](mailto:brett@python.org) -- [Bryan Bugyi](mailto:bryan.bugyi@rutgers.edu) -- [Bryan Forbes](mailto:bryan@reigndropsfall.net) -- [Calum Lind](mailto:calumlind@gmail.com) -- [Charles](mailto:peacech@gmail.com) -- Charles Reid -- [Christian Clauss](mailto:cclauss@bluewin.ch) -- [Christian Heimes](mailto:christian@python.org) -- [Chuck Wooters](mailto:chuck.wooters@microsoft.com) -- [Chris Rose](mailto:offline@offby1.net) -- Codey Oxley -- [Cong](mailto:congusbongus@gmail.com) -- [Cooper Ry Lees](mailto:me@cooperlees.com) -- [Dan Davison](mailto:dandavison7@gmail.com) -- [Daniel Hahler](mailto:github@thequod.de) -- [Daniel M. 
Capella](mailto:polycitizen@gmail.com) -- Daniele Esposti -- [David Hotham](mailto:david.hotham@metaswitch.com) -- [David Lukes](mailto:dafydd.lukes@gmail.com) -- [David Szotten](mailto:davidszotten@gmail.com) -- [Denis Laxalde](mailto:denis@laxalde.org) -- [Douglas Thor](mailto:dthor@transphormusa.com) -- dylanjblack -- [Eli Treuherz](mailto:eli@treuherz.com) -- [Emil Hessman](mailto:emil@hessman.se) -- [Felix Kohlgrüber](mailto:felix.kohlgrueber@gmail.com) -- [Florent Thiery](mailto:fthiery@gmail.com) -- Francisco -- [Giacomo Tagliabue](mailto:giacomo.tag@gmail.com) -- [Greg Gandenberger](mailto:ggandenberger@shoprunner.com) -- [Gregory P. Smith](mailto:greg@krypto.org) -- Gustavo Camargo -- hauntsaninja -- [Hadi Alqattan](mailto:alqattanhadizaki@gmail.com) -- [Heaford](mailto:dan@heaford.com) -- [Hugo Barrera](mailto::hugo@barrera.io) -- Hugo van Kemenade -- [Hynek Schlawack](mailto:hs@ox.cx) -- [Ivan Katanić](mailto:ivan.katanic@gmail.com) -- [Jakub Kadlubiec](mailto:jakub.kadlubiec@skyscanner.net) -- [Jakub Warczarek](mailto:jakub.warczarek@gmail.com) -- [Jan Hnátek](mailto:jan.hnatek@gmail.com) -- [Jason Fried](mailto:me@jasonfried.info) -- [Jason Friedland](mailto:jason@friedland.id.au) -- [jgirardet](mailto:ijkl@netc.fr) -- Jim Brännlund -- [Jimmy Jia](mailto:tesrin@gmail.com) -- [Joe Antonakakis](mailto:jma353@cornell.edu) -- [Jon Dufresne](mailto:jon.dufresne@gmail.com) -- [Jonas Obrist](mailto:ojiidotch@gmail.com) -- [Jonty Wareing](mailto:jonty@jonty.co.uk) -- [Jose Nazario](mailto:jose.monkey.org@gmail.com) -- [Joseph Larson](mailto:larson.joseph@gmail.com) -- [Josh Bode](mailto:joshbode@fastmail.com) -- [Josh Holland](mailto:anowlcalledjosh@gmail.com) -- [Joshua Cannon](mailto:joshdcannon@gmail.com) -- [José Padilla](mailto:jpadilla@webapplicate.com) -- [Juan Luis Cano Rodríguez](mailto:hello@juanlu.space) -- [kaiix](mailto:kvn.hou@gmail.com) -- [Katie McLaughlin](mailto:katie@glasnt.com) -- Katrin Leinweber -- [Keith Smiley](mailto:keithbsmiley@gmail.com) -- [Kenyon Ralph](mailto:kenyon@kenyonralph.com) -- [Kevin Kirsche](mailto:Kev.Kirsche+GitHub@gmail.com) -- [Kyle Hausmann](mailto:kyle.hausmann@gmail.com) -- [Kyle Sunden](mailto:sunden@wisc.edu) -- Lawrence Chan -- [Linus Groh](mailto:mail@linusgroh.de) -- [Loren Carvalho](mailto:comradeloren@gmail.com) -- [Luka Sterbic](mailto:luka.sterbic@gmail.com) -- [LukasDrude](mailto:mail@lukas-drude.de) -- Mahmoud Hossam -- Mariatta -- [Matt VanEseltine](mailto:vaneseltine@gmail.com) -- [Matthew Clapp](mailto:itsayellow+dev@gmail.com) -- [Matthew Walster](mailto:matthew@walster.org) -- Max Smolens -- [Michael Aquilina](mailto:michaelaquilina@gmail.com) -- [Michael Flaxman](mailto:michael.flaxman@gmail.com) -- [Michael J. 
Sullivan](mailto:sully@msully.net) -- [Michael McClimon](mailto:michael@mcclimon.org) -- [Miguel Gaiowski](mailto:miggaiowski@gmail.com) -- [Mike](mailto:roshi@fedoraproject.org) -- [mikehoyio](mailto:mikehoy@gmail.com) -- [Min ho Kim](mailto:minho42@gmail.com) -- [Miroslav Shubernetskiy](mailto:miroslav@miki725.com) -- MomIsBestFriend -- [Nathan Goldbaum](mailto:ngoldbau@illinois.edu) -- [Nathan Hunt](mailto:neighthan.hunt@gmail.com) -- [Neraste](mailto:neraste.herr10@gmail.com) -- [Nikolaus Waxweiler](mailto:madigens@gmail.com) -- [Ofek Lev](mailto:ofekmeister@gmail.com) -- [Osaetin Daniel](mailto:osaetindaniel@gmail.com) -- [otstrel](mailto:otstrel@gmail.com) -- [Pablo Galindo](mailto:Pablogsal@gmail.com) -- [Paul Ganssle](mailto:p.ganssle@gmail.com) -- [Paul Meinhardt](mailto:mnhrdt@gmail.com) -- [Peter Bengtsson](mailto:mail@peterbe.com) -- [Peter Grayson](mailto:pete@jpgrayson.net) -- [Peter Stensmyr](mailto:peter.stensmyr@gmail.com) -- pmacosta -- [Quentin Pradet](mailto:quentin@pradet.me) -- [Ralf Schmitt](mailto:ralf@systemexit.de) -- [Ramón Valles](mailto:mroutis@protonmail.com) -- [Richard Fearn](mailto:richardfearn@gmail.com) -- Richard Si -- [Rishikesh Jha](mailto:rishijha424@gmail.com) -- [Rupert Bedford](mailto:rupert@rupertb.com) -- Russell Davis -- [Rémi Verschelde](mailto:rverschelde@gmail.com) -- [Sami Salonen](mailto:sakki@iki.fi) -- [Samuel Cormier-Iijima](mailto:samuel@cormier-iijima.com) -- [Sanket Dasgupta](mailto:sanketdasgupta@gmail.com) -- Sergi -- [Scott Stevenson](mailto:scott@stevenson.io) -- Shantanu -- [shaoran](mailto:shaoran@sakuranohana.org) -- [Shinya Fujino](mailto:shf0811@gmail.com) -- springstan -- [Stavros Korokithakis](mailto:hi@stavros.io) -- [Stephen Rosen](mailto:sirosen@globus.org) -- [Steven M. Vascellaro](mailto:S.Vascellaro@gmail.com) -- [Sunil Kapil](mailto:snlkapil@gmail.com) -- [Sébastien Eustace](mailto:sebastien.eustace@gmail.com) -- [Tal Amuyal](mailto:TalAmuyal@gmail.com) -- [Terrance](mailto:git@terrance.allofti.me) -- [Thom Lu](mailto:thomas.c.lu@gmail.com) -- [Thomas Grainger](mailto:tagrain@gmail.com) -- [Tim Gates](mailto:tim.gates@iress.com) -- [Tim Swast](mailto:swast@google.com) -- [Timo](mailto:timo_tk@hotmail.com) -- Toby Fleming -- [Tom Christie](mailto:tom@tomchristie.com) -- [Tony Narlock](mailto:tony@git-pull.com) -- [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com) -- [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu) -- [Tzu-ping Chung](mailto:uranusjr@gmail.com) -- [Utsav Shah](mailto:ukshah2@illinois.edu) -- utsav-dbx -- vezeli -- [Ville Skyttä](mailto:ville.skytta@iki.fi) -- [Vishwas B Sharma](mailto:sharma.vishwas88@gmail.com) -- [Vlad Emelianov](mailto:volshebnyi@gmail.com) -- [williamfzc](mailto:178894043@qq.com) -- [wouter bolsterlee](mailto:wouter@bolsterl.ee) -- Yazdan -- [Yngve Høiseth](mailto:yngve@hoiseth.net) -- [Yurii Karabas](mailto:1998uriyyo@gmail.com) -- [Zac Hatfield-Dodds](mailto:zac@zhd.dev) +The author list is quite long nowadays, so it lives in its own file. + +See [AUTHORS.md](./AUTHORS.md) + +## Code of Conduct + +Everyone participating in the _Black_ project, and in particular in the issue tracker, +pull requests, and social media activity, is expected to treat other people with respect +and more generally to follow the guidelines articulated in the +[Python Community Code of Conduct](https://www.python.org/psf/codeofconduct/). + +At the same time, humor is encouraged. In fact, basic familiarity with Monty Python's +Flying Circus is expected. We are not savages. 
+ +And if you _really_ need to slap somebody, do it with a fish while dancing. diff --git a/action.yml b/action.yml index 59b16a9..35705e9 100644 --- a/action.yml +++ b/action.yml @@ -2,13 +2,49 @@ name: "Black" description: "The uncompromising Python code formatter." author: "Łukasz Langa and contributors to Black" inputs: + options: + description: + "Options passed to Black. Use `black --help` to see available options. Default: + '--check --diff'" + required: false + default: "--check --diff" + src: + description: "Source to run Black. Default: '.'" + required: false + default: "." + jupyter: + description: + "Set this option to true to include Jupyter Notebook files. Default: false" + required: false + default: false black_args: - description: "Black input arguments." + description: "[DEPRECATED] Black input arguments." + required: false + default: "" + deprecationMessage: + "Input `with.black_args` is deprecated. Use `with.options` and `with.src` instead." + version: + description: 'Python Version specifier (PEP440) - e.g. "21.5b1"' required: false default: "" branding: color: "black" icon: "check-circle" runs: - using: "docker" - image: "action/Dockerfile" + using: composite + steps: + - run: | + if [ "$RUNNER_OS" == "Windows" ]; then + python $GITHUB_ACTION_PATH/action/main.py + else + python3 $GITHUB_ACTION_PATH/action/main.py + fi + env: + # TODO: Remove once https://github.com/actions/runner/issues/665 is fixed. + INPUT_OPTIONS: ${{ inputs.options }} + INPUT_SRC: ${{ inputs.src }} + INPUT_JUPYTER: ${{ inputs.jupyter }} + INPUT_BLACK_ARGS: ${{ inputs.black_args }} + INPUT_VERSION: ${{ inputs.version }} + pythonioencoding: utf-8 + shell: bash diff --git a/action/Dockerfile b/action/Dockerfile deleted file mode 100644 index eb22099..0000000 --- a/action/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM python:3 - -ENV PYTHONDONTWRITEBYTECODE 1 -ENV PYTHONUNBUFFERED 1 - -RUN pip install --upgrade --no-cache-dir black - -COPY entrypoint.sh /entrypoint.sh - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/action/entrypoint.sh b/action/entrypoint.sh deleted file mode 100755 index 50f9472..0000000 --- a/action/entrypoint.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e - -# If no arguments are given use current working directory -black_args=(".") -if [ "$#" -eq 0 ] && [ "${INPUT_BLACK_ARGS}" != "" ]; then - black_args+=(${INPUT_BLACK_ARGS}) -elif [ "$#" -ne 0 ] && [ "${INPUT_BLACK_ARGS}" != "" ]; then - black_args+=($* ${INPUT_BLACK_ARGS}) -elif [ "$#" -ne 0 ] && [ "${INPUT_BLACK_ARGS}" == "" ]; then - black_args+=($*) -else - # Default (if no args provided). - black_args+=("--check" "--diff") -fi - -sh -c "black . 
${black_args[*]}" diff --git a/action/main.py b/action/main.py new file mode 100644 index 0000000..ff9d411 --- /dev/null +++ b/action/main.py @@ -0,0 +1,45 @@ +import os +import shlex +import sys +from pathlib import Path +from subprocess import PIPE, STDOUT, run + +ACTION_PATH = Path(os.environ["GITHUB_ACTION_PATH"]) +ENV_PATH = ACTION_PATH / ".black-env" +ENV_BIN = ENV_PATH / ("Scripts" if sys.platform == "win32" else "bin") +OPTIONS = os.getenv("INPUT_OPTIONS", default="") +SRC = os.getenv("INPUT_SRC", default="") +JUPYTER = os.getenv("INPUT_JUPYTER") == "true" +BLACK_ARGS = os.getenv("INPUT_BLACK_ARGS", default="") +VERSION = os.getenv("INPUT_VERSION", default="") + +run([sys.executable, "-m", "venv", str(ENV_PATH)], check=True) + +version_specifier = VERSION +if VERSION and VERSION[0] in "0123456789": + version_specifier = f"=={VERSION}" +if JUPYTER: + extra_deps = "[colorama,jupyter]" +else: + extra_deps = "[colorama]" +req = f"black{extra_deps}{version_specifier}" +pip_proc = run( + [str(ENV_BIN / "python"), "-m", "pip", "install", req], + stdout=PIPE, + stderr=STDOUT, + encoding="utf-8", +) +if pip_proc.returncode: + print(pip_proc.stdout) + print("::error::Failed to install Black.", flush=True) + sys.exit(pip_proc.returncode) + + +base_cmd = [str(ENV_BIN / "black")] +if BLACK_ARGS: + # TODO: remove after a while since this is deprecated in favour of SRC + OPTIONS. + proc = run([*base_cmd, *shlex.split(BLACK_ARGS)]) +else: + proc = run([*base_cmd, *shlex.split(OPTIONS), *shlex.split(SRC)]) + +sys.exit(proc.returncode) diff --git a/autoload/black.vim b/autoload/black.vim index f0357b0..e87a1e4 100644 --- a/autoload/black.vim +++ b/autoload/black.vim @@ -3,8 +3,13 @@ import collections import os import sys import vim -from distutils.util import strtobool +def strtobool(text): + if text.lower() in ['y', 'yes', 't', 'true', 'on', '1']: + return True + if text.lower() in ['n', 'no', 'f', 'false', 'off', '0']: + return False + raise ValueError(f"{text} is not convertable to boolean") class Flag(collections.namedtuple("FlagBase", "name, cast")): @property @@ -22,8 +27,10 @@ class Flag(collections.namedtuple("FlagBase", "name, cast")): FLAGS = [ Flag(name="line_length", cast=int), Flag(name="fast", cast=strtobool), - Flag(name="string_normalization", cast=strtobool), + Flag(name="skip_string_normalization", cast=strtobool), Flag(name="quiet", cast=strtobool), + Flag(name="skip_magic_trailing_comma", cast=strtobool), + Flag(name="preview", cast=strtobool), ] @@ -98,13 +105,49 @@ if _initialize_black_env(): import black import time -def Black(): +def get_target_version(tv): + if isinstance(tv, black.TargetVersion): + return tv + ret = None + try: + ret = black.TargetVersion[tv.upper()] + except KeyError: + print(f"WARNING: Target version {tv!r} not recognized by Black, using default target") + return ret + +def Black(**kwargs): + """ + kwargs allows you to override ``target_versions`` argument of + ``black.FileMode``. + + ``target_version`` needs to be cleaned because ``black.FileMode`` + expects the ``target_versions`` argument to be a set of TargetVersion enums. + + Allow kwargs["target_version"] to be a string to allow + to type it more quickly. + + Using also target_version instead of target_versions to remain + consistent to Black's documentation of the structure of pyproject.toml. 
+ """ start = time.time() configs = get_configs() + + black_kwargs = {} + if "target_version" in kwargs: + target_version = kwargs["target_version"] + + if not isinstance(target_version, (list, set)): + target_version = [target_version] + target_version = set(filter(lambda x: x, map(lambda tv: get_target_version(tv), target_version))) + black_kwargs["target_versions"] = target_version + mode = black.FileMode( line_length=configs["line_length"], - string_normalization=configs["string_normalization"], + string_normalization=not configs["skip_string_normalization"], is_pyi=vim.current.buffer.name.endswith('.pyi'), + magic_trailing_comma=not configs["skip_magic_trailing_comma"], + preview=configs["preview"], + **black_kwargs, ) quiet = configs["quiet"] @@ -117,9 +160,9 @@ def Black(): ) except black.NothingChanged: if not quiet: - print(f'Already well formatted, good job. (took {time.time() - start:.4f}s)') + print(f'Black: already well formatted, good job. (took {time.time() - start:.4f}s)') except Exception as exc: - print(exc) + print(f'Black: {exc}') else: current_buffer = vim.current.window.buffer cursors = [] @@ -136,17 +179,18 @@ def Black(): except vim.error: window.cursor = (len(window.buffer), 0) if not quiet: - print(f'Reformatted in {time.time() - start:.4f}s.') + print(f'Black: reformatted in {time.time() - start:.4f}s.') def get_configs(): - path_pyproject_toml = black.find_pyproject_toml(vim.eval("fnamemodify(getcwd(), ':t')")) + filename = vim.eval("@%") + path_pyproject_toml = black.find_pyproject_toml((filename,)) if path_pyproject_toml: toml_config = black.parse_pyproject_toml(path_pyproject_toml) else: toml_config = {} return { - flag.var_name: flag.cast(toml_config.get(flag.name, vim.eval(flag.vim_rc_name))) + flag.var_name: toml_config.get(flag.name, flag.cast(vim.eval(flag.vim_rc_name))) for flag in FLAGS } @@ -159,8 +203,17 @@ def BlackVersion(): EndPython3 -function black#Black() - :py3 Black() +function black#Black(...) + let kwargs = {} + for arg in a:000 + let arg_list = split(arg, '=') + let kwargs[arg_list[0]] = arg_list[1] + endfor +python3 << EOF +import vim +kwargs = vim.eval("kwargs") +EOF + :py3 Black(**kwargs) endfunction function black#BlackUpgrade() diff --git a/debian/black.1 b/debian/black.1 deleted file mode 100644 index 6064c87..0000000 --- a/debian/black.1 +++ /dev/null @@ -1,120 +0,0 @@ -.\" Man page generated from reStructuredText. -. -.TH "1" "" "18.9b0" "" -.SH NAME -black \- uncompromising Python code formatter -. -.nr rst2man-indent-level 0 -. -.de1 rstReportMargin -\\$1 \\n[an-margin] -level \\n[rst2man-indent-level] -level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] -- -\\n[rst2man-indent0] -\\n[rst2man-indent1] -\\n[rst2man-indent2] -.. -.de1 INDENT -.\" .rstReportMargin pre: -. RS \\$1 -. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] -. nr rst2man-indent-level +1 -.\" .rstReportMargin post: -.. -.de UNINDENT -. RE -.\" indent \\n[an-margin] -.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] -.nr rst2man-indent-level -1 -.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] -.in \\n[rst2man-indent\\n[rst2man-indent-level]]u -.. -.SH DESCRIPTION -.sp -\fBblack\fP is the uncompromising Python code formatter. -.SS Summary -.sp -\fBblack\fP is the uncompromising Python code formatter. By using it, -you agree to cede control over minutiae of hand\-formatting. In return, -Black gives you speed, determinism, and freedom from pycodestyle -nagging about formatting. 
You will save time and mental energy for -more important matters. -.SS Usage -.sp -black [OPTIONS] [SRC]... -.SS Common Options -.sp -Options: -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C -\-l, \-\-line\-length INTEGER How many characters per line to allow. - [default: 88] - -\-\-py36 Allow using Python 3.6\-only syntax on all - input files. This will put trailing commas - in function signatures and calls also after - *args and **kwargs. [default: per\-file - auto\-detection] - -\-\-pyi Format all input files like typing stubs - regardless of file extension (useful when - piping source on standard input). - -\-S, \-\-skip\-string\-normalization - Don\(aqt normalize string quotes or prefixes. - -\-\-check Don\(aqt write the files back, just return the - status. Return code 0 means nothing would - change. Return code 1 means some files - would be reformatted. Return code 123 means - there was an internal error. - -\-\-diff Don\(aqt write the files back, just output a - diff for each file on stdout. - -\-\-fast / \-\-safe If \-\-fast given, skip temporary sanity - checks. [default: \-\-safe] - -\-\-include TEXT A regular expression that matches files and - directories that should be included on - recursive searches. An empty value means - all files are included regardless of the - name. Use forward slashes for directories - on all platforms (Windows, too). Exclusions - are calculated first, inclusions later. - [default: \e.pyi?$] - -\-\-exclude TEXT A regular expression that matches files and - directories that should be excluded on - recursive searches. An empty value means no - paths are excluded. Use forward slashes for - directories on all platforms (Windows, too). - Exclusions are calculated first, inclusions - later. [default: /(\e.git|\e.hg|\e.mypy_cache| - \e.tox|\e.venv|_build|buck\-out|build|dist)/] - -\-q, \-\-quiet Don\(aqt emit non\-error messages to stderr. - Errors are still emitted, silence those with - 2>/dev/null. - -\-v, \-\-verbose Also emit messages to stderr about files - that were not changed or were ignored due to - \-\-exclude=. - -\-\-version Show the version and exit. - -\-\-config FILE Read configuration from PATH. - -\-h, \-\-help Show this message and exit. -.ft P -.fi -.UNINDENT -.UNINDENT -.SH COPYRIGHT -2018 Åukasz Langa -.\" Generated by docutils manpage writer. -. 
diff --git a/debian/black.bash-completion b/debian/black.bash-completion new file mode 100644 index 0000000..cbe2615 --- /dev/null +++ b/debian/black.bash-completion @@ -0,0 +1,51 @@ +_have black && +_black() +{ + local cur prev + + COMPREPLY=() + cur=${COMP_WORDS[COMP_CWORD]} + prev=${COMP_WORDS[COMP_CWORD-1]} + + case $prev in + -l|--line-length|--include|--exclude|--python-cell-magics|--required-version|--extend-exclude|--force-exclude|-W|--workers) + return 0;; + -t|--target-version) + COMPREPLY=( $(compgen -W "py33 py34 py35 py36 py37 py38 py39 py310 py311 py312" -- $cur) ) + return;; + esac + if [[ "$cur" == -* ]]; then + opts='-c -l -t -S -C -W -q -v -h' + lopts=' + --code + --line-length + --pypi + --ipynb + --python-cell-magics + --skip-string-normalization + --skip-magic-trailing-comma + --preview + --check + --diff + --color + --no-color + --fast + --safe + --required-version + --include + --exclude + --extend-exclude + --force-exclude + --stdin-filename + --workers + --quiet + --verbose + --version + --config + ' + COMPREPLY=( $(compgen -W "${opts[*]} ${lopts[*]}" -- $cur) ) + else + _filedir + fi +} +complete -F _black $filenames black diff --git a/debian/black.manpages b/debian/black.manpages new file mode 100644 index 0000000..58fc802 --- /dev/null +++ b/debian/black.manpages @@ -0,0 +1,2 @@ +debian/black.1 +debian/black-primer.1 diff --git a/debian/changelog b/debian/changelog index 96ba6c4..4d072ce 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,90 @@ +black (22.10.0-2) unstable; urgency=medium + + * Team upload + * d/control: Remove unused dependency on python3-typed-ast + This dependency is only required for Python < 3.8. + + -- Sebastian Ramacher Thu, 13 Oct 2022 23:14:35 +0200 + +black (22.10.0-1) unstable; urgency=medium + + * Team upload. + + [ Debian Janitor ] + * Trim trailing whitespace. + + [ Timo Röhling ] + * New upstream version 22.10.0 + + -- Timo Röhling Fri, 07 Oct 2022 21:32:44 +0200 + +black (22.8.0-1) unstable; urgency=medium + + * Team upload. + * New upstream version 22.8.0 + + -- Timo Röhling Mon, 05 Sep 2022 18:12:46 +0200 + +black (22.6.0-2) unstable; urgency=medium + + * Team upload. + * Add bash completion. + Thanks to Hans-Christoph Steiner (Closes: #968043) + * Skip blackd tests + + -- Timo Röhling Sat, 06 Aug 2022 20:41:25 +0200 + +black (22.6.0-1) unstable; urgency=medium + + * Team upload. + * Modernize and simplify packaging + * New upstream version 22.6.0 + * Bump Standards-Version to 4.6.1 + + -- Timo Röhling Sat, 30 Jul 2022 23:05:32 +0200 + +black (22.3.0-1) unstable; urgency=medium + + * New upstream release + + -- Sylvestre Ledru Tue, 05 Apr 2022 09:28:05 +0200 + +black (22.1.0-1) unstable; urgency=medium + + * New upstream release + + -- Sylvestre Ledru Tue, 01 Mar 2022 10:20:37 +0100 + +black (21.12b0-1) unstable; urgency=medium + + * New upstream release. + * Add myself to Uploaders. + * Specify Rules-Requires-Root: no. + + -- Chris Lamb Fri, 28 Jan 2022 08:05:31 -0800 + +black (21.10b0-1) unstable; urgency=medium + + [ Neil Williams ] + * Prepare for upload + + [ Debian Janitor ] + * Remove constraints unnecessary since buster: + + Build-Depends: Drop versioned constraint on python3-regex. + * Set upstream metadata fields: Bug-Database, Bug-Submit, Repository, + Repository-Browse. + * Update standards version to 4.6.0, no changes needed. + + [ Neil Williams ] + * Update homepage links for new location to avoid the redirect. + * Bump debhelper from old 12 to 13. 
+ * Update B-D for upstream changes + * Fix autopkgtest support for new upstream + * Allow autopkgtest on all architectures (Closes: #995344) + * New upstream release (Closes: #966141) + + -- Neil Williams Mon, 15 Nov 2021 20:15:11 +0000 + black (21.4b2-3) unstable; urgency=medium * Remove Sphinxdoc built-using as only symlinks are required. diff --git a/debian/clean b/debian/clean new file mode 100644 index 0000000..263affc --- /dev/null +++ b/debian/clean @@ -0,0 +1,6 @@ +debian/black.1 +debian/black-primer.1 +docs/_build/ +docs/_static/pypi.svg +src/_black_version.py +src/black.egg-info/ diff --git a/debian/control b/debian/control index c444cf1..57391e4 100644 --- a/debian/control +++ b/debian/control @@ -1,27 +1,48 @@ Source: black -Section: python -Priority: optional Maintainer: Debian Python Team Uploaders: Neil Williams , - Sylvestre Ledru -Build-Depends: debhelper-compat (= 12), dh-python, - python3-all:any, python3-recommonmark, python3-setuptools, - python3-appdirs, python3-attr, python3-click, - python3-toml, python3-sphinx (>= 3.2), python3-recommonmark, - python3-docutils, - libjs-underscore, libjs-jquery, python3-setuptools-scm, - python3-pathspec, python3-regex (>= 0.1.202018), python3-typed-ast, - python3-aiohttp, python3-aiohttp-cors, - python3-mypy-extensions, python3-typing-extensions -Standards-Version: 4.4.1 -Homepage: https://github.com/ambv/black -X-Python3-Version: >= 3.6 -Vcs-Git: https://salsa.debian.org/python-team/packages/black.git + Sylvestre Ledru , + Chris Lamb , +Section: python +Priority: optional +Build-Depends: + debhelper-compat (= 13), + bash-completion, + dh-python, + dh-sequence-python3, + dh-sequence-sphinxdoc , + libjs-jquery , + libjs-underscore , + pybuild-plugin-pyproject, + python3-aiohttp , + python3-all, + python3-click, + python3-hatch-vcs, + python3-hatchling, + python3-mypy-extensions, + python3-pathspec, + python3-platformdirs, + python3-pytest , + python3-regex, + python3-setuptools, + python3-setuptools-scm, + python3-sphinx (>= 3.2) , + python3-sphinx-copybutton (>= 0.4.0-2) , + python3-sphinxcontrib.programoutput , + python3-tomli, + python3-typing-extensions, +Standards-Version: 4.6.1 Vcs-Browser: https://salsa.debian.org/python-team/packages/black +Vcs-Git: https://salsa.debian.org/python-team/packages/black.git +Homepage: https://github.com/psf/black +X-Python3-Version: >= 3.6 +Rules-Requires-Root: no Package: black Architecture: all -Depends: python3-pkg-resources, ${python3:Depends}, ${misc:Depends} +Depends: python3-pkg-resources, + ${python3:Depends}, + ${misc:Depends} Suggests: python-black-doc Description: uncompromising Python code formatter (Python 3) Black is the uncompromising Python code formatter. By using it, you @@ -39,9 +60,10 @@ Description: uncompromising Python code formatter (Python 3) Package: python-black-doc Architecture: all -Section: doc Multi-Arch: foreign -Depends: ${sphinxdoc:Depends}, ${misc:Depends} +Section: doc +Depends: ${sphinxdoc:Depends}, + ${misc:Depends} Description: uncompromising Python code formatter (common documentation) Black is the uncompromising Python code formatter. This package contains the documentation for applying black to your code. 
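For illustration, the package built above ships Black as an importable module, which is
how the `autoload/black.vim` hunk earlier in this diff drives it. A minimal sketch of
that API surface, assuming a recent Black release; `format_file_contents` raises
`NothingChanged` when the input is already well formatted:

```python
import black

mode = black.FileMode(
    line_length=88,
    string_normalization=True,   # the plugin passes `not skip_string_normalization`
    magic_trailing_comma=True,   # likewise inverted from its skip_* flag
    is_pyi=False,
)
source = "x=(1,2)\n"
try:
    formatted = black.format_file_contents(source, fast=False, mode=mode)
except black.NothingChanged:
    formatted = source
print(formatted)  # -> "x = (1, 2)\n"
```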
diff --git a/debian/copyright b/debian/copyright index 9340134..48950a7 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,9 +1,9 @@ Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: black -Source: https://github.com/ambv/black +Source: https://github.com/psf/black Files: * -Copyright: 2018 Åukasz Langa +Copyright: 2018-2022, Łukasz Langa Anthony Sottile Carol Willing Carl Meyer @@ -27,13 +27,13 @@ Copyright: 2018 Åukasz Langa Stavros Korokithakis Sunil Kapil Vishwas B Sharma -License: MIT +License: Expat Files: debian/* -Copyright: 2018 Neil Williams -License: MIT +Copyright: 2018-2021, Neil Williams +License: Expat -License: MIT +License: Expat Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation diff --git a/debian/manpage.rst b/debian/manpage.rst index 1225049..a4d6f59 100644 --- a/debian/manpage.rst +++ b/debian/manpage.rst @@ -1,10 +1,10 @@ :Version: 21.4b2 :Date: 2021-10-10 -:Copyright: 2018 Åukasz Langa +:Copyright: 2018 Łukasz Langa :Title: black :Subtitle: uncompromising Python code formatter :Manual group: uncompromising Python code formatter -:Manual section: "black" "1" +:Manual section: "1" Summary ####### diff --git a/debian/manpage2.rst b/debian/manpage2.rst index 0aab455..a6a0511 100644 --- a/debian/manpage2.rst +++ b/debian/manpage2.rst @@ -1,10 +1,10 @@ :Version: 21.4b2-2 :Date: 2021-10-10 -:Copyright: 2018 Åukasz Langa +:Copyright: 2018 Łukasz Langa :Title: black-primer :Subtitle: CI tool to check a number of projects in parallel using black :Manual group: uncompromising Python code formatter -:Manual section: "black-primer" "1" +:Manual section: "1" Description ########### diff --git a/debian/patches/dh_python_version_number_workaround.patch b/debian/patches/dh_python_version_number_workaround.patch new file mode 100644 index 0000000..e68e9df --- /dev/null +++ b/debian/patches/dh_python_version_number_workaround.patch @@ -0,0 +1,21 @@ +From: =?utf-8?q?Timo_R=C3=B6hling?= +Date: Fri, 7 Oct 2022 21:54:59 +0200 +Subject: Workaround for Bug #1021410 + +--- + pyproject.toml | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pyproject.toml b/pyproject.toml +index d0d2e4f..25be3c7 100644 +--- a/pyproject.toml ++++ b/pyproject.toml +@@ -67,7 +67,7 @@ dependencies = [ + "mypy_extensions>=0.4.3", + "pathspec>=0.9.0", + "platformdirs>=2", +- "tomli>=1.1.0; python_full_version < '3.11.0a7'", ++ "tomli>=1.1.0; python_version < '3.11'", + "typed-ast>=1.4.2; python_version < '3.8' and implementation_name == 'cpython'", + "typing_extensions>=3.10.0.0; python_version < '3.10'", + ] diff --git a/debian/patches/disable-furo-theme.diff b/debian/patches/disable-furo-theme.diff new file mode 100644 index 0000000..dc0d425 --- /dev/null +++ b/debian/patches/disable-furo-theme.diff @@ -0,0 +1,22 @@ +From: Debian Python Team +Date: Sat, 30 Jul 2022 22:19:43 +0200 +Subject: disable-furo-theme + +=================================================================== +--- + docs/conf.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/docs/conf.py b/docs/conf.py +index 61ea71a..d775cd8 100644 +--- a/docs/conf.py ++++ b/docs/conf.py +@@ -121,7 +121,7 @@ pygments_style = "sphinx" + # The theme to use for HTML and HTML Help pages. See the documentation for + # a list of builtin themes. 
+ # +-html_theme = "furo" ++#html_theme = "furo" + html_logo = "_static/logo2-readme.png" + + # Add any paths that contain custom static files (such as style sheets) here, diff --git a/debian/patches/disable_myst_parser.patch b/debian/patches/disable_myst_parser.patch new file mode 100644 index 0000000..11b1678 --- /dev/null +++ b/debian/patches/disable_myst_parser.patch @@ -0,0 +1,78 @@ +From: Debian Python Team +Date: Sat, 30 Jul 2022 22:19:43 +0200 +Subject: disable_myst_parser + +Remove privacy-breach links and self-host docs + +Remove tracking URLs from the README and sidebar of pages +in the documentation. +--- + docs/conf.py | 24 +++++++++++++----------- + 1 file changed, 13 insertions(+), 11 deletions(-) + +diff --git a/docs/conf.py b/docs/conf.py +index 7fc4f8f..61ea71a 100644 +--- a/docs/conf.py ++++ b/docs/conf.py +@@ -19,6 +19,8 @@ from pathlib import Path + + from pkg_resources import get_distribution + ++from _black_version import version as __version__ ++ + CURRENT_DIR = Path(__file__).parent + + +@@ -43,7 +45,7 @@ author = "Łukasz Langa and contributors to Black" + + # Autopopulate version + # The version, including alpha/beta/rc tags, but not commit hash and datestamps +-release = get_distribution("black").version.split("+")[0] ++release = __version__ + # The short X.Y version. + version = release + for sp in "abcfr": +@@ -64,13 +66,13 @@ extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinx.ext.napoleon", +- "myst_parser", ++# "myst_parser", + "sphinxcontrib.programoutput", + "sphinx_copybutton", + ] + + # If you need extensions of a certain version or higher, list them here. +-needs_extensions = {"myst_parser": "0.13.7"} ++# needs_extensions = {"myst_parser": "0.13.7"} + + # Add any paths that contain templates here, relative to this directory. + templates_path = ["_templates"] +@@ -100,19 +102,19 @@ pygments_style = "sphinx" + + # We need headers to be linkable to so ask MyST-Parser to autogenerate anchor IDs for + # headers up to and including level 3. +-myst_heading_anchors = 3 ++# myst_heading_anchors = 3 + + # Prettier support formatting some MyST syntax but not all, so let's disable the + # unsupported yet still enabled by default ones. +-myst_disable_syntax = [ +- "colon_fence", +- "myst_block_break", +- "myst_line_comment", +- "math_block", +-] ++#myst_disable_syntax = [ ++# "colon_fence", ++# "myst_block_break", ++# "myst_line_comment", ++# "math_block", ++#] + + # Optional MyST Syntaxes +-myst_enable_extensions = [] ++#myst_enable_extensions = [] + + # -- Options for HTML output ------------------------------------------------- + diff --git a/debian/patches/no_fancy_pypi_readme.patch b/debian/patches/no_fancy_pypi_readme.patch new file mode 100644 index 0000000..4326602 --- /dev/null +++ b/debian/patches/no_fancy_pypi_readme.patch @@ -0,0 +1,35 @@ +From: =?utf-8?q?Timo_R=C3=B6hling?= +Date: Fri, 7 Oct 2022 21:41:01 +0200 +Subject: Remove unsupported hatch-fancy-pypi-readme plugin + +--- + pyproject.toml | 9 +-------- + 1 file changed, 1 insertion(+), 8 deletions(-) + +diff --git a/pyproject.toml b/pyproject.toml +index 554d7d0..d0d2e4f 100644 +--- a/pyproject.toml ++++ b/pyproject.toml +@@ -26,7 +26,7 @@ preview = true + # NOTE: You don't need this in your own Black configuration. 
+ + [build-system] +-requires = ["hatchling>=1.8.0", "hatch-vcs", "hatch-fancy-pypi-readme"] ++requires = ["hatchling>=1.8.0", "hatch-vcs"] + build-backend = "hatchling.build" + + [project] +@@ -92,13 +92,6 @@ blackd = "blackd:patched_main [d]" + Changelog = "https://github.com/psf/black/blob/main/CHANGES.md" + Homepage = "https://github.com/psf/black" + +-[tool.hatch.metadata.hooks.fancy-pypi-readme] +-content-type = "text/markdown" +-fragments = [ +- { path = "README.md" }, +- { path = "CHANGES.md" }, +-] +- + [tool.hatch.version] + source = "vcs" + diff --git a/debian/patches/privacy-docs.diff b/debian/patches/privacy-docs.diff deleted file mode 100644 index 82e091a..0000000 --- a/debian/patches/privacy-docs.diff +++ /dev/null @@ -1,57 +0,0 @@ -Description: Remove privacy-breach links and self-host docs - Remove tracking URLs from the README and sidebar of pages - in the documentation. - Also add the local source code to the doc generation page - to avoid needing the package installed to rebuild. -Author: Neil Williams ---- - -Index: black/README.md -=================================================================== ---- black.orig/README.md -+++ black/README.md -@@ -3,7 +3,6 @@ -

- <h2 align="center">The Uncompromising Code Formatter</h2>
-
- <p align="center">

--Build Status - Actions Status - Actions Status - Documentation Status -@@ -470,13 +469,7 @@ Use the badge in your project's README.m - - Using the badge in README.rst: - --``` --.. image:: https://img.shields.io/badge/code%20style-black-000000.svg -- :target: https://github.com/psf/black --``` -- --Looks like this: --[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) -+https://img.shields.io/badge/code%20style-black-000000.svg - - ## License - -Index: black/docs/conf.py -=================================================================== ---- black.orig/docs/conf.py -+++ black/docs/conf.py -@@ -307,14 +307,14 @@ html_sidebars = { - html_theme_options = { - "show_related": False, - "description": "“Any color you like.”", -- "github_button": True, -+ "github_button": False, - "github_user": "psf", - "github_repo": "black", - "github_type": "star", -- "show_powered_by": True, -+ "show_powered_by": False, - "fixed_sidebar": True, - "logo": "logo2.png", -- "travis_button": True, -+ "travis_button": False, - } - - diff --git a/debian/patches/series b/debian/patches/series index 7820d36..0a55254 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -1,2 +1,4 @@ -privacy-docs.diff -version-declaration.diff +disable_myst_parser.patch +disable-furo-theme.diff +no_fancy_pypi_readme.patch +dh_python_version_number_workaround.patch diff --git a/debian/patches/version-declaration.diff b/debian/patches/version-declaration.diff deleted file mode 100644 index 51ef039..0000000 --- a/debian/patches/version-declaration.diff +++ /dev/null @@ -1,45 +0,0 @@ -Index: black/setup.py -=================================================================== ---- black.orig/setup.py -+++ black/setup.py -@@ -1,5 +1,6 @@ - # Copyright (C) 2020 Łukasz Langa - from setuptools import setup -+from _black_version import version as __version__ - import sys - import os - -@@ -47,10 +48,7 @@ else: - - setup( - name="black", -- use_scm_version={ -- "write_to": "src/_black_version.py", -- "write_to_template": 'version = "{version}"\n', -- }, -+ version=__version__, - description="The uncompromising code formatter.", - long_description=get_long_description(), - long_description_content_type="text/markdown", -Index: black/docs/conf.py -=================================================================== ---- black.orig/docs/conf.py -+++ black/docs/conf.py -@@ -13,6 +13,7 @@ - # documentation root, use os.path.abspath to make it absolute, like shown here. - # - from pathlib import Path -+from _black_version import version as __version__ - import re - import string - from typing import Callable, Dict, List, Optional, Pattern, Tuple, Set -@@ -207,7 +208,8 @@ author = "Łukasz Langa and contributors - - # Autopopulate version - # The version, including alpha/beta/rc tags, but not commit hash and datestamps --release = get_distribution("black").version.split("+")[0] -+# release = get_distribution("black").version.split("+")[0] -+release = __version__ - # The short X.Y version. - version = release - for sp in "abcfr": diff --git a/debian/rules b/debian/rules index ab37b56..0e5de6c 100755 --- a/debian/rules +++ b/debian/rules @@ -1,50 +1,19 @@ #!/usr/bin/make -f -# See debhelper(7) (uncomment to enable) -# output every command that modifies files on the build system. 
-#export DH_VERBOSE = 1 - -include /usr/share/dpkg/pkg-info.mk export BUILD_DATE=$(shell LC_ALL=C date -u "+%B %d, %Y" -d @"$(SOURCE_DATE_EPOCH)") - -# Prevent setuptools/distribute from accessing the internet. -export http_proxy = http://127.0.9.1:9 - -export PYBUILD_NAME=black - -export VERSION=$(shell dpkg-parsechangelog -S Version|cut -d- -f1) +export PYBUILD_DESTDIR=debian/black/ %: - echo "version = '$(VERSION)'" > _black_version.py - mkdir -p src - cp _black_version.py src/ - dh $@ --with sphinxdoc,python3 --buildsystem=pybuild + dh $@ --buildsystem=pybuild --with bash-completion + +execute_after_dh_auto_build: + make -C docs/ html PYTHONPATH="..:../src/" SPHINXBUILD="/usr/share/sphinx/scripts/python3/sphinx-build" SPHINXOPTS="-j 4 -D today=\"$(BUILD_DATE)\"" -override_dh_auto_build: - python3 setup.py build - PYTHONPATH=".." make -C docs/ html SPHINXBUILD="/usr/share/sphinx/scripts/python3/sphinx-build" SPHINXOPTS="-j 4 -D today=\"$(BUILD_DATE)\"" - ${RM} ./build/lib/blackd.py +execute_after_dh_auto_install: + rm debian/black/usr/bin/blackd -override_dh_auto_install: - python3 setup.py install --root=$(CURDIR)/debian/black/ --install-layout=deb +execute_before_dh_installman: rst2man debian/manpage.rst > debian/black.1 - dh_installman debian/black.1 rst2man debian/manpage2.rst > debian/black-primer.1 - dh_installman debian/black-primer.1 - ${RM} ./docs/_build/html/objects.inv - ${RM} ./debian/black/usr/bin/blackd override_dh_auto_test: - # Ensure we have entry_points created - python3 setup.py develop --prefix=$(CURDIR)/testtmp -ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS))) - PATH=testtmp/bin:${PATH} dh_auto_test -endif - ${RM} -rf testtmp - -override_dh_auto_clean: - python3 setup.py clean -a - ${RM} -r debian/black docs/_build/ docs/_static/pypi.svg - ${RM} -r __pycache__ ./blib2to3/pgen2/__pycache__ ./blib2to3/__pycache__ - ${RM} -r tests/__pycache__ _black_version.py src/_black_version.py - find docs/ -type l -delete diff --git a/debian/source/options b/debian/source/options deleted file mode 100644 index cb61fa5..0000000 --- a/debian/source/options +++ /dev/null @@ -1 +0,0 @@ -extend-diff-ignore = "^[^/]*[.]egg-info/" diff --git a/debian/tests/control b/debian/tests/control index 0cd7008..b7e27ea 100644 --- a/debian/tests/control +++ b/debian/tests/control @@ -1,7 +1,17 @@ Tests: testsuite -Depends: @, python3, python3-recommonmark, - python3-sphinx, python3-docutils, - python3-appdirs, python3-attr, python3-click, python3-toml, - python3-aiohttp, dpkg-dev, python3-pytest, - python3-parameterized, python3-aiohttp-cors, python3-typed-ast, +Depends: @, python3, + dpkg-dev, + python3-setuptools, + python3-platformdirs, + python3-click, + python3-sphinx, + python3-docutils, + python3-pytest, + python3-pathspec, + python3-regex, + python3-typed-ast, + python3-aiohttp, + python3-tomli, + python3-mypy-extensions, + python3-typing-extensions, Restrictions: allow-stderr diff --git a/debian/tests/testsuite b/debian/tests/testsuite index 97d0bb0..c56aca4 100644 --- a/debian/tests/testsuite +++ b/debian/tests/testsuite @@ -3,25 +3,16 @@ set -e -v DEB_HOST_ARCH=$(dpkg-architecture -qDEB_HOST_ARCH) -VERSION=$(dpkg-parsechangelog|grep ^Version|awk '{print $2}'|cut -d- -f1) function runtest() { - cp -v setup.py "$AUTOPKGTEST_TMP" - cp -vr tests "$AUTOPKGTEST_TMP" - mkdir -p "${AUTOPKGTEST_TMP}/src/black" - cp -vr src/black/__init__.py "${AUTOPKGTEST_TMP}/src/black" - cp -vr src/blib2to3 "${AUTOPKGTEST_TMP}/src" - - for py in $(py3versions -r 2>/dev/null); - do - cd "$AUTOPKGTEST_TMP" - echo 
"Testing with $py:" - $py -m pytest -v -k 'not test_process_queue' - done + # use the installed black, just add the tests from source + PYTHONPATH=tests pytest-3 tests --run-optional no_jupyter,no_blackd } -if [ "$DEB_HOST_ARCH" == "amd64" ]; then - runtest -else - echo "Skip the test execution on $DEB_HOST_ARCH" -fi +python3 -c "import black" + +black --version + +black --help + +runtest diff --git a/debian/upstream/metadata b/debian/upstream/metadata new file mode 100644 index 0000000..3f9ce8f --- /dev/null +++ b/debian/upstream/metadata @@ -0,0 +1,5 @@ +--- +Bug-Database: https://github.com/psf/black/issues +Bug-Submit: https://github.com/psf/black/issues/new +Repository: https://github.com/psf/black.git +Repository-Browse: https://github.com/psf/black diff --git a/debian/watch b/debian/watch index 098a5c8..7d6f250 100644 --- a/debian/watch +++ b/debian/watch @@ -4,5 +4,5 @@ version=4 # GitHub hosted projects opts=filenamemangle=s/.+\/v?(\d\S+)\.tar\.gz/black-$1\.tar\.gz/ \ - https://github.com/ambv/black/tags .*/v?(\d\S+)\.tar\.gz + https://github.com/psf/black/tags .*/v?(\d\S+)\.tar\.gz diff --git a/docs/Makefile b/docs/Makefile index 2e0e5ee..cb0463c 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -17,4 +17,4 @@ help: # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_static/custom.css b/docs/_static/custom.css deleted file mode 100644 index c06c40a..0000000 --- a/docs/_static/custom.css +++ /dev/null @@ -1,38 +0,0 @@ -/* Make the sidebar scrollable. Fixes https://github.com/psf/black/issues/990 */ -div.sphinxsidebar { - max-height: calc(100% - 18px); - overflow-y: auto; -} - -/* Hide scrollbar for Chrome, Safari and Opera */ -div.sphinxsidebar::-webkit-scrollbar { - display: none; -} - -/* Hide scrollbar for IE 6, 7 and 8 */ -@media \0screen\, screen\9 { - div.sphinxsidebar { - -ms-overflow-style: none; - } -} - -/* Hide scrollbar for IE 9 and 10 */ -/* backslash-9 removes ie11+ & old Safari 4 */ -@media screen and (min-width: 0\0) { - div.sphinxsidebar { - -ms-overflow-style: none\9; - } -} - -/* Hide scrollbar for IE 11 and up */ -_:-ms-fullscreen, -:root div.sphinxsidebar { - -ms-overflow-style: none; -} - -/* Hide scrollbar for Edge */ -@supports (-ms-ime-align: auto) { - div.sphinxsidebar { - -ms-overflow-style: none; - } -} diff --git a/docs/_static/license.svg b/docs/_static/license.svg index 36bea29..25aa18b 100644 --- a/docs/_static/license.svg +++ b/docs/_static/license.svg @@ -1 +1 @@ -licenselicenseMITMIT \ No newline at end of file +licenselicenseMITMIT diff --git a/docs/authors.md b/docs/authors.md index 9f2ea05..21b0e1a 100644 --- a/docs/authors.md +++ b/docs/authors.md @@ -1,187 +1,3 @@ -[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md" +```{include} ../AUTHORS.md -# Authors - -Glued together by [Łukasz Langa](mailto:lukasz@langa.pl). - -Maintained with [Carol Willing](mailto:carolcode@willingconsulting.com), -[Carl Meyer](mailto:carl@oddbird.net), -[Jelle Zijlstra](mailto:jelle.zijlstra@gmail.com), -[Mika Naylor](mailto:mail@autophagy.io), -[Zsolt Dollenstein](mailto:zsol.zsol@gmail.com), and -[Cooper Lees](mailto:me@cooperlees.com). 
- -Multiple contributions by: - -- [Abdur-Rahmaan Janhangeer](mailto:arj.python@gmail.com) -- [Adam Johnson](mailto:me@adamj.eu) -- [Adam Williamson](mailto:adamw@happyassassin.net) -- [Alexander Huynh](mailto:github@grande.coffee) -- [Alex Vandiver](mailto:github@chmrr.net) -- [Allan Simon](mailto:allan.simon@supinfo.com) -- Anders-Petter Ljungquist -- [Andrew Thorp](mailto:andrew.thorp.dev@gmail.com) -- [Andrew Zhou](mailto:andrewfzhou@gmail.com) -- [Andrey](mailto:dyuuus@yandex.ru) -- [Andy Freeland](mailto:andy@andyfreeland.net) -- [Anthony Sottile](mailto:asottile@umich.edu) -- [Arjaan Buijk](mailto:arjaan.buijk@gmail.com) -- [Arnav Borbornah](mailto:arnavborborah11@gmail.com) -- [Artem Malyshev](mailto:proofit404@gmail.com) -- [Asger Hautop Drewsen](mailto:asgerdrewsen@gmail.com) -- [Augie Fackler](mailto:raf@durin42.com) -- [Aviskar KC](mailto:aviskarkc10@gmail.com) -- Batuhan Taşkaya -- [Benjamin Wohlwend](mailto:bw@piquadrat.ch) -- [Benjamin Woodruff](mailto:github@benjam.info) -- [Bharat Raghunathan](mailto:bharatraghunthan9767@gmail.com) -- [Brandt Bucher](mailto:brandtbucher@gmail.com) -- [Brett Cannon](mailto:brett@python.org) -- [Bryan Bugyi](mailto:bryan.bugyi@rutgers.edu) -- [Bryan Forbes](mailto:bryan@reigndropsfall.net) -- [Calum Lind](mailto:calumlind@gmail.com) -- [Charles](mailto:peacech@gmail.com) -- Charles Reid -- [Christian Clauss](mailto:cclauss@bluewin.ch) -- [Christian Heimes](mailto:christian@python.org) -- [Chuck Wooters](mailto:chuck.wooters@microsoft.com) -- [Chris Rose](mailto:offline@offby1.net) -- Codey Oxley -- [Cong](mailto:congusbongus@gmail.com) -- [Cooper Ry Lees](mailto:me@cooperlees.com) -- [Dan Davison](mailto:dandavison7@gmail.com) -- [Daniel Hahler](mailto:github@thequod.de) -- [Daniel M. Capella](mailto:polycitizen@gmail.com) -- Daniele Esposti -- [David Hotham](mailto:david.hotham@metaswitch.com) -- [David Lukes](mailto:dafydd.lukes@gmail.com) -- [David Szotten](mailto:davidszotten@gmail.com) -- [Denis Laxalde](mailto:denis@laxalde.org) -- [Douglas Thor](mailto:dthor@transphormusa.com) -- dylanjblack -- [Eli Treuherz](mailto:eli@treuherz.com) -- [Emil Hessman](mailto:emil@hessman.se) -- [Felix Kohlgrüber](mailto:felix.kohlgrueber@gmail.com) -- [Florent Thiery](mailto:fthiery@gmail.com) -- Francisco -- [Giacomo Tagliabue](mailto:giacomo.tag@gmail.com) -- [Greg Gandenberger](mailto:ggandenberger@shoprunner.com) -- [Gregory P. 
Smith](mailto:greg@krypto.org) -- Gustavo Camargo -- hauntsaninja -- [Hadi Alqattan](mailto:alqattanhadizaki@gmail.com) -- [Heaford](mailto:dan@heaford.com) -- [Hugo Barrera](mailto::hugo@barrera.io) -- Hugo van Kemenade -- [Hynek Schlawack](mailto:hs@ox.cx) -- [Ivan Katanić](mailto:ivan.katanic@gmail.com) -- [Jakub Kadlubiec](mailto:jakub.kadlubiec@skyscanner.net) -- [Jakub Warczarek](mailto:jakub.warczarek@gmail.com) -- [Jan Hnátek](mailto:jan.hnatek@gmail.com) -- [Jason Fried](mailto:me@jasonfried.info) -- [Jason Friedland](mailto:jason@friedland.id.au) -- [jgirardet](mailto:ijkl@netc.fr) -- Jim Brännlund -- [Jimmy Jia](mailto:tesrin@gmail.com) -- [Joe Antonakakis](mailto:jma353@cornell.edu) -- [Jon Dufresne](mailto:jon.dufresne@gmail.com) -- [Jonas Obrist](mailto:ojiidotch@gmail.com) -- [Jonty Wareing](mailto:jonty@jonty.co.uk) -- [Jose Nazario](mailto:jose.monkey.org@gmail.com) -- [Joseph Larson](mailto:larson.joseph@gmail.com) -- [Josh Bode](mailto:joshbode@fastmail.com) -- [Josh Holland](mailto:anowlcalledjosh@gmail.com) -- [José Padilla](mailto:jpadilla@webapplicate.com) -- [Juan Luis Cano Rodríguez](mailto:hello@juanlu.space) -- [kaiix](mailto:kvn.hou@gmail.com) -- [Katie McLaughlin](mailto:katie@glasnt.com) -- Katrin Leinweber -- [Keith Smiley](mailto:keithbsmiley@gmail.com) -- [Kenyon Ralph](mailto:kenyon@kenyonralph.com) -- [Kevin Kirsche](mailto:Kev.Kirsche+GitHub@gmail.com) -- [Kyle Hausmann](mailto:kyle.hausmann@gmail.com) -- [Kyle Sunden](mailto:sunden@wisc.edu) -- Lawrence Chan -- [Linus Groh](mailto:mail@linusgroh.de) -- [Loren Carvalho](mailto:comradeloren@gmail.com) -- [Luka Sterbic](mailto:luka.sterbic@gmail.com) -- [LukasDrude](mailto:mail@lukas-drude.de) -- Mahmoud Hossam -- Mariatta -- [Matt VanEseltine](mailto:vaneseltine@gmail.com) -- [Matthew Clapp](mailto:itsayellow+dev@gmail.com) -- [Matthew Walster](mailto:matthew@walster.org) -- Max Smolens -- [Michael Aquilina](mailto:michaelaquilina@gmail.com) -- [Michael Flaxman](mailto:michael.flaxman@gmail.com) -- [Michael J. 
Sullivan](mailto:sully@msully.net) -- [Michael McClimon](mailto:michael@mcclimon.org) -- [Miguel Gaiowski](mailto:miggaiowski@gmail.com) -- [Mike](mailto:roshi@fedoraproject.org) -- [mikehoyio](mailto:mikehoy@gmail.com) -- [Min ho Kim](mailto:minho42@gmail.com) -- [Miroslav Shubernetskiy](mailto:miroslav@miki725.com) -- MomIsBestFriend -- [Nathan Goldbaum](mailto:ngoldbau@illinois.edu) -- [Nathan Hunt](mailto:neighthan.hunt@gmail.com) -- [Neraste](mailto:neraste.herr10@gmail.com) -- [Nikolaus Waxweiler](mailto:madigens@gmail.com) -- [Ofek Lev](mailto:ofekmeister@gmail.com) -- [Osaetin Daniel](mailto:osaetindaniel@gmail.com) -- [otstrel](mailto:otstrel@gmail.com) -- [Pablo Galindo](mailto:Pablogsal@gmail.com) -- [Paul Ganssle](mailto:p.ganssle@gmail.com) -- [Paul Meinhardt](mailto:mnhrdt@gmail.com) -- [Paul "TBBle" Hampson](mailto:Paul.Hampson@Pobox.com) -- [Peter Bengtsson](mailto:mail@peterbe.com) -- [Peter Grayson](mailto:pete@jpgrayson.net) -- [Peter Stensmyr](mailto:peter.stensmyr@gmail.com) -- pmacosta -- [Quentin Pradet](mailto:quentin@pradet.me) -- [Ralf Schmitt](mailto:ralf@systemexit.de) -- [Ramón Valles](mailto:mroutis@protonmail.com) -- [Richard Fearn](mailto:richardfearn@gmail.com) -- Richard Si -- [Rishikesh Jha](mailto:rishijha424@gmail.com) -- [Rupert Bedford](mailto:rupert@rupertb.com) -- Russell Davis -- [Rémi Verschelde](mailto:rverschelde@gmail.com) -- [Sami Salonen](mailto:sakki@iki.fi) -- [Samuel Cormier-Iijima](mailto:samuel@cormier-iijima.com) -- [Sanket Dasgupta](mailto:sanketdasgupta@gmail.com) -- Sergi -- [Scott Stevenson](mailto:scott@stevenson.io) -- Shantanu -- [shaoran](mailto:shaoran@sakuranohana.org) -- [Shinya Fujino](mailto:shf0811@gmail.com) -- springstan -- [Stavros Korokithakis](mailto:hi@stavros.io) -- [Stephen Rosen](mailto:sirosen@globus.org) -- [Steven M. Vascellaro](mailto:S.Vascellaro@gmail.com) -- [Sunil Kapil](mailto:snlkapil@gmail.com) -- [Sébastien Eustace](mailto:sebastien.eustace@gmail.com) -- [Tal Amuyal](mailto:TalAmuyal@gmail.com) -- [Terrance](mailto:git@terrance.allofti.me) -- [Thom Lu](mailto:thomas.c.lu@gmail.com) -- [Thomas Grainger](mailto:tagrain@gmail.com) -- [Tim Gates](mailto:tim.gates@iress.com) -- [Tim Swast](mailto:swast@google.com) -- [Timo](mailto:timo_tk@hotmail.com) -- Toby Fleming -- [Tom Christie](mailto:tom@tomchristie.com) -- [Tony Narlock](mailto:tony@git-pull.com) -- [Tsuyoshi Hombashi](mailto:tsuyoshi.hombashi@gmail.com) -- [Tushar Chandra](mailto:tusharchandra2018@u.northwestern.edu) -- [Tzu-ping Chung](mailto:uranusjr@gmail.com) -- [Utsav Shah](mailto:ukshah2@illinois.edu) -- utsav-dbx -- vezeli -- [Ville Skyttä](mailto:ville.skytta@iki.fi) -- [Vishwas B Sharma](mailto:sharma.vishwas88@gmail.com) -- [Vlad Emelianov](mailto:volshebnyi@gmail.com) -- [williamfzc](mailto:178894043@qq.com) -- [wouter bolsterlee](mailto:wouter@bolsterl.ee) -- Yazdan -- [Yngve Høiseth](mailto:yngve@hoiseth.net) -- [Yurii Karabas](mailto:1998uriyyo@gmail.com) -- [Zac Hatfield-Dodds](mailto:zac@zhd.dev) +``` diff --git a/docs/black_primer.md b/docs/black_primer.md deleted file mode 100644 index a2dd964..0000000 --- a/docs/black_primer.md +++ /dev/null @@ -1,120 +0,0 @@ -# black-primer - -`black-primer` is a tool built for CI (and humans) to have _Black_ `--check` a number of -(configured in `primer.json`) Git accessible projects in parallel. 
_(A PR will be -accepted to add Mercurial support.)_ - -## Run flow - -- Ensure we have a `black` + `git` in PATH -- Load projects from `primer.json` -- Run projects in parallel with `--worker` workers (defaults to CPU count / 2) - - Checkout projects - - Run black and record result - - Clean up repository checkout _(can optionally be disabled via `--keep`)_ -- Display results summary to screen -- Default to cleaning up `--work-dir` (which defaults to tempfile schemantics) -- Return - - 0 for successful run - - < 0 for environment / internal error - - \> 0 for each project with an error - -## Speed up runs 🏎 - -If you're running locally yourself to test black on lots of code try: - -- Using `-k` / `--keep` + `-w` / `--work-dir` so you don't have to re-checkout the repo - each run - -## CLI arguments - -```text -Usage: black-primer [OPTIONS] - - primer - prime projects for blackening... 🏴 - -Options: - -c, --config PATH JSON config file path [default: /Users/cooper/repos/ - black/src/black_primer/primer.json] - - --debug Turn on debug logging [default: False] - -k, --keep Keep workdir + repos post run [default: False] - -L, --long-checkouts Pull big projects to test [default: False] - -R, --rebase Rebase project if already checked out [default: - False] - - -w, --workdir PATH Directory path for repo checkouts [default: /var/fol - ders/tc/hbwxh76j1hn6gqjd2n2sjn4j9k1glp/T/primer.20200 - 517125229] - - -W, --workers INTEGER Number of parallel worker coroutines [default: 2] - -h, --help Show this message and exit. -``` - -## Primer config file - -The config file is in JSON format. Its main element is the `"projects"` dictionary and -each parameter is explained below: - -```json -{ - "projects": { - "00_Example": { - "cli_arguments": "List of extra CLI arguments to pass Black for this project", - "expect_formatting_changes": "Boolean to indicate that the version of Black is expected to cause changes", - "git_clone_url": "URL you would pass `git clone` to check out this repo", - "long_checkout": "Boolean to have repo skipped by default unless `--long-checkouts` is specified", - "py_versions": "List of major Python versions to run this project with - all will do as you'd expect - run on ALL versions" - }, - "aioexabgp": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/cooperlees/aioexabgp.git", - "long_checkout": false, - "py_versions": ["all", "3.8"] - } - } -} -``` - -An example primer config file is used by Black -[here](https://github.com/psf/black/blob/master/src/black_primer/primer.json) - -## Example run - -```console -cooper-mbp:black cooper$ ~/venvs/b/bin/black-primer -[2020-05-17 13:06:40,830] INFO: 4 projects to run Black over (lib.py:270) -[2020-05-17 13:06:44,215] INFO: Analyzing results (lib.py:285) --- primer results 📊 -- - -3 / 4 succeeded (75.0%) ✅ -1 / 4 FAILED (25.0%) 💩 - - 0 projects disabled by config - - 0 projects skipped due to Python version - - 0 skipped due to long checkout - -Failed projects: - -## flake8-bugbear: - - Returned 1 - - stdout: ---- tests/b303_b304.py 2020-05-17 20:04:09.991227 +0000 -+++ tests/b303_b304.py 2020-05-17 20:06:42.753851 +0000 -@@ -26,11 +26,11 @@ - maxint = 5 # this is okay - # the following should not crash - (a, b, c) = list(range(3)) - # it is different than this - a, b, c = list(range(3)) -- a, b, c, = list(range(3)) -+ a, b, c = list(range(3)) - # and different than this - (a, b), c = list(range(3)) - a, *b, c = [1, 2, 3, 4, 5] - b[1:3] = [0, 0] - -would reformat 
tests/b303_b304.py -Oh no! 💥 💔 💥 -1 file would be reformatted, 22 files would be left unchanged. -``` diff --git a/docs/change_log.md b/docs/change_log.md deleted file mode 120000 index cf54708..0000000 --- a/docs/change_log.md +++ /dev/null @@ -1 +0,0 @@ -../CHANGES.md \ No newline at end of file diff --git a/docs/change_log.md b/docs/change_log.md new file mode 100644 index 0000000..e5f67e7 --- /dev/null +++ b/docs/change_log.md @@ -0,0 +1,3 @@ +```{include} ../CHANGES.md + +``` diff --git a/docs/compatible_configs/pylint/pylintrc b/docs/compatible_configs/pylint/pylintrc index 7abddd2..e863488 100644 --- a/docs/compatible_configs/pylint/pylintrc +++ b/docs/compatible_configs/pylint/pylintrc @@ -1,5 +1,2 @@ -[MESSAGES CONTROL] -disable = C0330, C0326 - [format] max-line-length = 88 diff --git a/docs/compatible_configs/pylint/pyproject.toml b/docs/compatible_configs/pylint/pyproject.toml index 49ad7a2..ef51f98 100644 --- a/docs/compatible_configs/pylint/pyproject.toml +++ b/docs/compatible_configs/pylint/pyproject.toml @@ -1,5 +1,2 @@ -[tool.pylint.messages_control] -disable = "C0330, C0326" - [tool.pylint.format] max-line-length = "88" diff --git a/docs/compatible_configs/pylint/setup.cfg b/docs/compatible_configs/pylint/setup.cfg index 3ada245..0b754cd 100644 --- a/docs/compatible_configs/pylint/setup.cfg +++ b/docs/compatible_configs/pylint/setup.cfg @@ -1,5 +1,2 @@ [pylint] max-line-length = 88 - -[pylint.messages_control] -disable = C0330, C0326 diff --git a/docs/conf.py b/docs/conf.py index ec6aad9..7fc4f8f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -12,61 +12,14 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -from pathlib import Path -import re + +import os import string -from typing import Callable, Dict, List, Optional, Pattern, Tuple, Set -from dataclasses import dataclass -import logging +from pathlib import Path from pkg_resources import get_distribution -logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO) - -LOG = logging.getLogger(__name__) - CURRENT_DIR = Path(__file__).parent -README = CURRENT_DIR / ".." / "README.md" -REFERENCE_DIR = CURRENT_DIR / "reference" -STATIC_DIR = CURRENT_DIR / "_static" - - -@dataclass -class SrcRange: - """Tracks which part of a file to get a section's content. - - Data: - start_line: The line where the section starts (i.e. its sub-header) (inclusive). - end_line: The line where the section ends (usually next sub-header) (exclusive). - """ - - start_line: int - end_line: int - - -@dataclass -class DocSection: - """Tracks information about a section of documentation. - - Data: - name: The section's name. This will used to detect duplicate sections. - src: The filepath to get its contents. - processors: The processors to run before writing the section to CURRENT_DIR. - out_filename: The filename to use when writing the section to CURRENT_DIR. - src_range: The line range of SRC to gets its contents. - """ - - name: str - src: Path - src_range: SrcRange = SrcRange(0, 1_000_000) - out_filename: str = "" - processors: Tuple[Callable, ...] 
= () - - def get_out_filename(self) -> str: - if not self.out_filename: - return self.name + ".md" - else: - return self.out_filename def make_pypi_svg(version: str) -> None: @@ -78,131 +31,14 @@ def make_pypi_svg(version: str) -> None: f.write(svg) -def make_filename(line: str) -> str: - non_letters: Pattern = re.compile(r"[^a-z]+") - filename: str = line[3:].rstrip().lower() - filename = non_letters.sub("_", filename) - if filename.startswith("_"): - filename = filename[1:] - if filename.endswith("_"): - filename = filename[:-1] - return filename + ".md" - - -def get_contents(section: DocSection) -> str: - """Gets the contents for the DocSection.""" - contents: List[str] = [] - src: Path = section.src - start_line: int = section.src_range.start_line - end_line: int = section.src_range.end_line - with open(src, "r", encoding="utf-8") as f: - for lineno, line in enumerate(f, start=1): - if lineno >= start_line and lineno < end_line: - contents.append(line) - result = "".join(contents) - # Let's make Prettier happy with the amount of trailing newlines in the sections. - if result.endswith("\n\n"): - result = result[:-1] - if not result.endswith("\n"): - result = result + "\n" - return result - - -def get_sections_from_readme() -> List[DocSection]: - """Gets the sections from README so they can be processed by process_sections. - - It opens README and goes down line by line looking for sub-header lines which - denotes a section. Once it finds a sub-header line, it will create a DocSection - object with all of the information currently available. Then on every line, it will - track the ending line index of the section. And it repeats this for every sub-header - line it finds. - """ - sections: List[DocSection] = [] - section: Optional[DocSection] = None - with open(README, "r", encoding="utf-8") as f: - for lineno, line in enumerate(f, start=1): - if line.startswith("## "): - filename = make_filename(line) - section_name = filename[:-3] - section = DocSection( - name=str(section_name), - src=README, - src_range=SrcRange(lineno, lineno), - out_filename=filename, - processors=(fix_headers,), - ) - sections.append(section) - if section is not None: - section.src_range.end_line += 1 - return sections - - -def fix_headers(contents: str) -> str: - """Fixes the headers of sections copied from README. - - Removes one octothorpe (#) from all headers since the contents are no longer nested - in a root document (i.e. the README). - """ - lines: List[str] = contents.splitlines() - fixed_contents: List[str] = [] - for line in lines: - if line.startswith("##"): - line = line[1:] - fixed_contents.append(line + "\n") # splitlines strips the leading newlines - return "".join(fixed_contents) - - -def process_sections( - custom_sections: List[DocSection], readme_sections: List[DocSection] -) -> None: - """Reads, processes, and writes sections to CURRENT_DIR. - - For each section, the contents will be fetched, processed by processors - required by the section, and written to CURRENT_DIR. If it encounters duplicate - sections (i.e. shares the same name attribute), it will skip processing the - duplicates. - - It processes custom sections before the README generated sections so sections in the - README can be overwritten with custom options. 
- """ - processed_sections: Dict[str, DocSection] = {} - modified_files: Set[Path] = set() - sections: List[DocSection] = custom_sections - sections.extend(readme_sections) - for section in sections: - if section.name in processed_sections: - LOG.warning( - f"Skipping '{section.name}' from '{section.src}' as it is a duplicate" - f" of a custom section from '{processed_sections[section.name].src}'" - ) - continue - - LOG.info(f"Processing '{section.name}' from '{section.src}'") - target_path: Path = CURRENT_DIR / section.get_out_filename() - if target_path in modified_files: - LOG.warning( - f"{target_path} has been already written to, its contents will be" - " OVERWRITTEN and notices will be duplicated" - ) - contents: str = get_contents(section) - - # processors goes here - if fix_headers in section.processors: - contents = fix_headers(contents) - - with open(target_path, "w", encoding="utf-8") as f: - if section.src.suffix == ".md" and section.src != target_path: - rel = section.src.resolve().relative_to(CURRENT_DIR.parent) - f.write(f'[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM {rel}"\n\n') - f.write(contents) - processed_sections[section.name] = section - modified_files.add(target_path) - +# Necessary so Click doesn't hit an encode error when called by +# sphinxcontrib-programoutput on Windows. +os.putenv("pythonioencoding", "utf-8") # -- Project information ----------------------------------------------------- project = "Black" -copyright = "2020, Łukasz Langa and contributors to Black" +copyright = "2018-Present, Łukasz Langa and contributors to Black" author = "Łukasz Langa and contributors to Black" # Autopopulate version @@ -213,39 +49,13 @@ def process_sections( for sp in "abcfr": version = version.split(sp)[0] -custom_sections = [ - DocSection("the_black_code_style", CURRENT_DIR / "the_black_code_style.md"), - DocSection("editor_integration", CURRENT_DIR / "editor_integration.md"), - DocSection("blackd", CURRENT_DIR / "blackd.md"), - DocSection("black_primer", CURRENT_DIR / "black_primer.md"), - DocSection("contributing_to_black", CURRENT_DIR / ".." / "CONTRIBUTING.md"), -] - -# Sphinx complains when there is a source file that isn't referenced in any of the docs. -# Since some sections autogenerated from the README are unused warnings will appear. -# -# Sections must be listed to what their name is when passed through make_filename(). -blocklisted_sections_from_readme = { - "license", - "pragmatism", - "testimonials", - "used_by", - "change_log", -} - make_pypi_svg(release) -readme_sections = get_sections_from_readme() -readme_sections = [ - x for x in readme_sections if x.name not in blocklisted_sections_from_readme -] - -process_sections(custom_sections, readme_sections) # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "3.0" +needs_sphinx = "4.4" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -254,11 +64,13 @@ def process_sections( "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.napoleon", - "recommonmark", + "myst_parser", + "sphinxcontrib.programoutput", + "sphinx_copybutton", ] # If you need extensions of a certain version or higher, list them here. -needs_extensions = {"recommonmark": "0.5"} +needs_extensions = {"myst_parser": "0.13.7"} # Add any paths that contain templates here, relative to this directory. 
templates_path = ["_templates"]
@@ -275,7 +87,7 @@ def process_sections(
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = "en"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
@@ -286,37 +98,29 @@ def process_sections(
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = "sphinx"
 
+# We need headers to be linkable to, so ask MyST-Parser to autogenerate anchor IDs for
+# headers up to and including level 3.
+myst_heading_anchors = 3
+
+# Prettier supports formatting some MyST syntax but not all of it, so let's disable the
+# unsupported syntaxes that are still enabled by default.
+myst_disable_syntax = [
+    "colon_fence",
+    "myst_block_break",
+    "myst_line_comment",
+    "math_block",
+]
+
+# Optional MyST Syntaxes
+myst_enable_extensions = []
 
 # -- Options for HTML output -------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
 #
-html_theme = "alabaster"
-
-html_sidebars = {
-    "**": [
-        "about.html",
-        "navigation.html",
-        "relations.html",
-        "sourcelink.html",
-        "searchbox.html",
-    ]
-}
-
-html_theme_options = {
-    "show_related": False,
-    "description": "“Any color you like.”",
-    "github_button": True,
-    "github_user": "psf",
-    "github_repo": "black",
-    "github_type": "star",
-    "show_powered_by": True,
-    "fixed_sidebar": True,
-    "logo": "logo2.png",
-    "travis_button": True,
-}
-
+html_theme = "furo"
+html_logo = "_static/logo2-readme.png"
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
@@ -342,21 +146,6 @@ def process_sections(
 
 # -- Options for LaTeX output ------------------------------------------------
 
-latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    #
-    # 'papersize': 'letterpaper',
-    # The font size ('10pt', '11pt' or '12pt').
-    #
-    # 'pointsize': '10pt',
-    # Additional stuff for the LaTeX preamble.
-    #
-    # 'preamble': '',
-    # Latex figure (float) alignment
-    #
-    # 'figure_align': 'htbp',
-}
-
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
diff --git a/docs/contributing/gauging_changes.md b/docs/contributing/gauging_changes.md
new file mode 100644
index 0000000..8562a83
--- /dev/null
+++ b/docs/contributing/gauging_changes.md
@@ -0,0 +1,58 @@
+# Gauging changes
+
+A lot of the time, your change will affect formatting and/or performance. Quantifying
+these changes is hard, so we have tooling to help make it easier.
+
+It's recommended that you evaluate the quantifiable changes your _Black_ formatting
+modification causes before submitting a PR. Think about whether the change seems
+disruptive enough to cause frustration to projects that are already "black formatted".
+
+## diff-shades
+
+diff-shades is a tool that runs _Black_ across a list of open-source projects, recording
+the results. Its highlight feature is the ability to compare two revisions of _Black_.
+This is incredibly useful as it allows us to see exactly what changes would occur from,
+say, merging a certain PR.
+
+For more information, please see the [diff-shades documentation][diff-shades].
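+
+To make the idea concrete, here is a minimal, hypothetical sketch using _Black_'s own
+API. This is **not** how diff-shades itself works (it checks out whole projects and
+runs two real revisions of _Black_ over them); it merely contrasts the stable and
+preview styles of one installed version, assuming a _Black_ recent enough to accept
+`Mode(preview=True)`, and prints a unified diff of the two results (empty when the
+styles agree on the input):
+
+```python
+# Minimal sketch: format the same source under two configurations and diff
+# the results. diff-shades does this across many projects and two revisions.
+import difflib
+
+import black
+
+SOURCE = "spam = { 'a':37, 'b':42, }\n"
+
+stable = black.format_str(SOURCE, mode=black.Mode())
+preview = black.format_str(SOURCE, mode=black.Mode(preview=True))
+
+diff = difflib.unified_diff(
+    stable.splitlines(keepends=True),
+    preview.splitlines(keepends=True),
+    fromfile="stable",
+    tofile="preview",
+)
+print("".join(diff))
+```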
+
+### CI integration
+
+diff-shades is also the tool behind the "diff-shades results comparing ..." /
+"diff-shades reports zero changes ..." comments on PRs. The project has a GitHub Actions
+workflow that analyzes and compares two revisions of _Black_ according to these rules:
+
+|                       | Baseline revision       | Target revision              |
+| --------------------- | ----------------------- | ---------------------------- |
+| On PRs                | latest commit on `main` | PR commit with `main` merged |
+| On pushes (main only) | latest PyPI version     | the pushed commit            |
+
+For pushes to main, there's only one analysis job named `preview-changes` where the
+preview style is used for all projects.
+
+PRs get one more analysis job: `assert-no-changes`. It's similar to `preview-changes`
+but runs with the stable code style. It will fail if changes were made. This makes sure
+code won't be reformatted again and again within the same year, in accordance with
+Black's stability policy.
+
+Additionally for PRs, a PR comment will be posted embedding a summary of the preview
+changes and links to further information. If there's a pre-existing diff-shades comment,
+it'll be updated instead the next time the workflow is triggered on the same PR.
+
+```{note}
+The `preview-changes` job will only fail intentionally if a file failed to format while
+being analyzed. Otherwise, a failure indicates a bug in the workflow.
+```
+
+The workflow uploads several artifacts upon completion:
+
+- The raw analyses (.json)
+- HTML diffs (.html)
+- `.pr-comment.json` (if triggered by a PR)
+
+The last one is downloaded by the `diff-shades-comment` workflow and shouldn't be
+downloaded locally. The HTML diffs come in handy for push-based runs, where there's no
+PR to post a comment on. And the analyses exist just in case you want to do further
+analysis using the collected data locally.
+
+[diff-shades]: https://github.com/ichard26/diff-shades#readme
diff --git a/docs/contributing/index.md b/docs/contributing/index.md
new file mode 100644
index 0000000..f56e57c
--- /dev/null
+++ b/docs/contributing/index.md
@@ -0,0 +1,49 @@
+# Contributing
+
+```{toctree}
+---
+hidden:
+---
+
+the_basics
+gauging_changes
+issue_triage
+release_process
+reference/reference_summary
+```
+
+Welcome! Happy to see you willing to make the project better. Have you read the entire
+[user documentation](https://black.readthedocs.io/en/latest/) yet?
+
+```{rubric} Bird's eye view
+
+```
+
+In terms of inspiration, _Black_ is about as configurable as _gofmt_ (which is to say,
+not very). This is deliberate. _Black_ aims to provide a consistent style and take away
+opportunities for arguing about style.
+
+Bug reports and fixes are always welcome! Please follow the
+[issue template on GitHub](https://github.com/psf/black/issues/new) for best results.
+
+Before you suggest a new feature or configuration knob, ask yourself why you want it. If
+it enables better integration with some workflow, fixes an inconsistency, speeds things
+up, and so on - go for it! On the other hand, if your answer is "because I don't like a
+particular formatting" then you're not ready to embrace _Black_ yet. Such changes are
+unlikely to get accepted. You can still try but prepare to be disappointed.
+
+```{rubric} Contents
+
+```
+
+This section covers the following topics:
+
+- {doc}`the_basics`
+- {doc}`gauging_changes`
+- {doc}`release_process`
+- {doc}`reference/reference_summary`
+
+For an overview of contributing to _Black_, please check out {doc}`the_basics`.
+
+If you need a reference for the functions, classes, etc. available to you while
+developing _Black_, there are the {doc}`reference/reference_summary` docs.
diff --git a/docs/contributing/issue_triage.md b/docs/contributing/issue_triage.md
new file mode 100644
index 0000000..9b987fb
--- /dev/null
+++ b/docs/contributing/issue_triage.md
@@ -0,0 +1,169 @@
+# Issue triage
+
+Currently, _Black_ uses the issue tracker for bugs, feature requests, proposed design
+modifications, and general user support. Each of these issues has to be triaged so it
+can eventually be resolved somehow. This document outlines the triaging process and
+also the current guidelines and recommendations.
+
+```{tip}
+If you're looking for a way to contribute without submitting patches, this might be
+the area for you. Since _Black_ is a popular project, its issue tracker is quite busy
+and always needs more attention than is available. While triage isn't the most
+glamorous or technically challenging form of contribution, it's still important.
+For example, we would love to know whether that old bug report is still reproducible!
+
+You can easily get started by reading over this document and then responding to issues.
+
+If you contribute enough and have stayed for a long enough time, you may even be
+given Triage permissions!
+```
+
+## The basics
+
+_Black_ gets a whole bunch of different issues; they range from bug reports to user
+support issues. To triage is to identify, organize, and kickstart the issue's journey
+through its lifecycle to resolution.
+
+More specifically, to triage an issue means to:
+
+- identify what type and categories the issue falls under
+- confirm bugs
+- ask questions / request further information if necessary
+- link related issues
+- provide initial feedback / support
+
+Note that triage is typically the first response to an issue, so don't fret if the issue
+doesn't make much progress after initial triage. The main goal of triaging is to prepare
+the issue for future, more specific development or discussion, so _eventually_ it will
+be resolved.
+
+The lifecycle of a bug report or user support issue typically goes something like this:
+
+1. _the issue is waiting for triage_
+2. **identified** - has been marked with a type label and other relevant labels, more
+   details or a functional reproduction may still be needed (and therefore should be
+   marked with `S: needs repro` or `S: awaiting response`)
+3. **confirmed** - the issue can be reproduced and necessary details have been provided
+4. **discussion** - initial triage has been done and now the general details on how the
+   issue should be best resolved are being hashed out
+5. **awaiting fix** - no further discussion on the issue is necessary and a resolving PR
+   is the next step
+6. **closed** - the issue has been resolved, reasons include:
+   - the issue couldn't be reproduced
+   - the issue has been fixed
+   - duplicate of another pre-existing issue or is invalid
+
+For enhancement, documentation, and design issues, the lifecycle looks very similar but
+the details are different:
+
+1. _the issue is waiting for triage_
+2. **identified** - has been marked with a type label and other relevant labels
+3. **discussion** - the merits of the suggested changes are currently being discussed, a
+   PR would be acceptable but would be at significant risk of being rejected
+4. **accepted & awaiting PR** - it's been determined the suggested changes are OK and a
+   PR would be welcomed (`S: accepted`)
+5. **closed** - the issue has been resolved, reasons include:
+   - the suggested changes were implemented
+   - it was rejected (due to technical concerns, ethos conflicts, etc.)
+   - duplicate of a pre-existing issue or is invalid
+
+**Note**: documentation issues don't use the `S: accepted` label currently since they're
+less likely to be rejected.
+
+## Labelling
+
+We use labels to organize, track progress, and help effectively divvy up work.
+
+Our labels are divided up into several groups identified by their prefix:
+
+- **T - Type**: the general flavor of issue / PR
+- **C - Category**: areas of concern, ranging from bug types to project maintenance
+- **F - Formatting Area**: like C but for formatting specifically
+- **S - Status**: what stage of resolution is this issue currently in?
+- **R - Resolution**: how / why was the issue / PR resolved?
+
+We also have a few standalone labels:
+
+- **`good first issue`**: issues that are beginner-friendly (and will show up in GitHub
+  banners for first-time visitors to the repository)
+- **`help wanted`**: complex issues that need a fair bit of work to progress (will also
+  show up in various GitHub pages)
+- **`skip news`**: for PRs that are trivial and don't need a CHANGELOG entry (and skips
+  the CHANGELOG entry check)
+
+```{note}
+We do use labels for PRs, in particular the `skip news` label, but we aren't that
+rigorous about it. Just follow your judgement on what labels make sense for the
+specific PR (if any even make sense).
+```
+
+## Projects
+
+For more general and broad goals, we use projects to track work. Some may be long-term
+projects with no true end (e.g. the "Amazing documentation" project) while others may be
+more focused and have a definite end (like the "Getting to beta" project).
+
+```{note}
+To modify GitHub Projects you need the [Write repository permission level or higher](https://docs.github.com/en/organizations/managing-access-to-your-organizations-repositories/repository-permission-levels-for-an-organization#repository-access-for-each-permission-level).
+```
+
+## Closing issues
+
+Closing an issue signifies the issue has reached the end of its life, so closing issues
+should be taken with care. The following is the general recommendation for each type of
+issue. Note that these are only guidelines and if your judgement says something else
+it's totally cool to go with it instead.
+
+For most issues, closing the issue manually or automatically after a resolving PR is
+ideal. For bug reports specifically, if the bug has already been fixed, try to check in
+with the issue opener that their specific case has been resolved before closing. Note
+that we close issues as soon as they're fixed in the `main` branch. This doesn't
+necessarily mean they've been released yet.
+
+Design and enhancement issues should also be closed when it's clear the proposed change
+won't be implemented, whether that has been determined after a lot of discussion or just
+simply goes against _Black_'s ethos. If such an issue turns heated, closing and locking
+is acceptable if it's severe enough (although checking in with the core team is probably
+a good idea).
+
+User support issues are best closed by the author or when it's clear the issue has been
+resolved in some manner.
+
+Duplicates and invalid issues should always be closed since they serve no purpose and
+add noise to an already busy issue tracker. Be careful, though, to make sure it's truly
+a duplicate and not just very similar before labelling and closing an issue as a
+duplicate.
+
+## Common reports
+
+Some issues are frequently opened, like issues about _Black_ formatted code causing E203
+messages. Even though these issues are probably heavily duplicated, they still require
+triage, sucking up valuable time from other things (although they usually skip most of
+their lifecycle since they're closed on triage).
+
+Here are some of the most common issues, along with pre-made responses you can use:
+
+### "The trailing comma isn't being removed by Black!"
+
+```text
+Black used to remove the trailing comma if the expression fit in a single line, but this was changed by #826 and #1288. Now a trailing comma tells Black to always explode the expression. This change was made mostly for the cases where you _know_ a collection or whatever will grow in the future. Having it always exploded as one element per line reduces diff noise when adding elements. Before the "magic trailing comma" feature, you couldn't anticipate a collection's growth reliably since collections that fit in one line were ruthlessly collapsed regardless of your intentions. One of Black's goals is reducing diff noise, so this was a good pragmatic change.

+So no, this is not a bug, but an intended feature. Anyway, [here's the documentation](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#the-magic-trailing-comma) on the "magic trailing comma", including the ability to skip this functionality with the `--skip-magic-trailing-comma` option. Hopefully that helps solve the possible confusion.
+```
+
+### "Black formatted code is violating Flake8's E203!"
+
+```text
+Hi,
+
+This is expected behaviour; please see the documentation regarding this case (emphasis
+mine):
+
+> PEP 8 recommends to treat : in slices as a binary operator with the lowest priority, and to leave an equal amount of space on either side, **except if a parameter is omitted (e.g. ham[1 + 1 :])**. It recommends no spaces around : operators for “simple expressions” (ham[lower:upper]), and **extra space for “complex expressions” (ham[lower : upper + offset])**. **Black treats anything more than variable names as “complex” (ham[lower : upper + 1]).** It also states that for extended slices, both : operators have to have the same amount of spacing, except if a parameter is omitted (ham[1 + 1 ::]). Black enforces these rules consistently.
+
+> This behaviour may raise E203 whitespace before ':' warnings in style guide enforcement tools like Flake8. **Since E203 is not PEP 8 compliant, you should tell Flake8 to ignore these warnings**.
+
+https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html#slices
+
+Have a good day!
+```
diff --git a/docs/reference/reference_classes.rst b/docs/contributing/reference/reference_classes.rst
similarity index 89%
rename from docs/reference/reference_classes.rst
rename to docs/contributing/reference/reference_classes.rst
index 8a2ded9..fa76596 100644
--- a/docs/reference/reference_classes.rst
+++ b/docs/contributing/reference/reference_classes.rst
@@ -8,7 +8,7 @@
 :class:`BracketTracker`
 -----------------------
 
-.. autoclass:: black.BracketTracker
+.. autoclass:: black.brackets.BracketTracker
    :members:
 
 :class:`EmptyLineTracker`
@@ -34,7 +34,7 @@
 :class:`ProtoComment`
 ---------------------
 
-.. autoclass:: black.ProtoComment
+.. autoclass:: black.comments.ProtoComment
    :members:
 
 :class:`Report`
@@ -47,7 +47,7 @@
 :class:`Visitor`
 ----------------
 
-.. autoclass:: black.Visitor
+.. autoclass:: black.nodes.Visitor
    :show-inheritance:
    :members:
diff --git a/docs/reference/reference_exceptions.rst b/docs/contributing/reference/reference_exceptions.rst
similarity index 80%
rename from docs/reference/reference_exceptions.rst
rename to docs/contributing/reference/reference_exceptions.rst
index 517249f..aafe61e 100644
--- a/docs/reference/reference_exceptions.rst
+++ b/docs/contributing/reference/reference_exceptions.rst
@@ -5,7 +5,7 @@
 
 .. currentmodule:: black
 
-.. autoexception:: black.CannotSplit
+.. autoexception:: black.linegen.CannotSplit
 
 .. autoexception:: black.NothingChanged
diff --git a/docs/contributing/reference/reference_functions.rst b/docs/contributing/reference/reference_functions.rst
new file mode 100644
index 0000000..3bda5de
--- /dev/null
+++ b/docs/contributing/reference/reference_functions.rst
@@ -0,0 +1,180 @@
+*Black* functions
+=================
+
+*Contents are subject to change.*
+
+.. currentmodule:: black
+
+Assertions and checks
+---------------------
+
+.. autofunction:: black.assert_equivalent
+
+.. autofunction:: black.assert_stable
+
+.. autofunction:: black.lines.can_be_split
+
+.. autofunction:: black.lines.can_omit_invisible_parens
+
+.. autofunction:: black.nodes.is_empty_tuple
+
+.. autofunction:: black.nodes.is_import
+
+.. autofunction:: black.lines.is_line_short_enough
+
+.. autofunction:: black.nodes.is_multiline_string
+
+.. autofunction:: black.nodes.is_one_tuple
+
+.. autofunction:: black.brackets.is_split_after_delimiter
+
+.. autofunction:: black.brackets.is_split_before_delimiter
+
+.. autofunction:: black.nodes.is_stub_body
+
+.. autofunction:: black.nodes.is_stub_suite
+
+.. autofunction:: black.nodes.is_vararg
+
+.. autofunction:: black.nodes.is_yield
+
+
+Formatting
+----------
+
+.. autofunction:: black.format_file_contents
+
+.. autofunction:: black.format_file_in_place
+
+.. autofunction:: black.format_stdin_to_stdout
+
+.. autofunction:: black.format_str
+
+.. autofunction:: black.reformat_one
+
+.. autofunction:: black.concurrency.schedule_formatting
+
+File operations
+---------------
+
+.. autofunction:: black.dump_to_file
+
+.. autofunction:: black.find_project_root
+
+.. autofunction:: black.gen_python_files
+
+.. autofunction:: black.read_pyproject_toml
+
+Parsing
+-------
+
+.. autofunction:: black.decode_bytes
+
+.. autofunction:: black.parsing.lib2to3_parse
+
+.. autofunction:: black.parsing.lib2to3_unparse
+
+Split functions
+---------------
+
+.. autofunction:: black.linegen.bracket_split_build_line
+
+.. autofunction:: black.linegen.bracket_split_succeeded_or_raise
+
+.. autofunction:: black.linegen.delimiter_split
+
+.. autofunction:: black.linegen.left_hand_split
+
+.. autofunction:: black.linegen.right_hand_split
+
+.. autofunction:: black.linegen.standalone_comment_split
+
+.. autofunction:: black.linegen.transform_line
+
+Caching
+-------
+
+.. autofunction:: black.cache.filter_cached
+
+.. autofunction:: black.cache.get_cache_dir
+
+.. autofunction:: black.cache.get_cache_file
+
+.. autofunction:: black.cache.get_cache_info
+
+.. autofunction:: black.cache.read_cache
+
+.. autofunction:: black.cache.write_cache
+
+Utilities
+---------
+
+.. py:function:: black.debug.DebugVisitor.show(code: str) -> None
+
+   Pretty-print the lib2to3 AST of a given string of `code`.
+
+.. autofunction:: black.concurrency.cancel
+
+.. autofunction:: black.nodes.child_towards
+
+.. autofunction:: black.nodes.container_of
+
+.. autofunction:: black.comments.convert_one_fmt_off_pair
+
+.. autofunction:: black.diff
+
+.. autofunction:: black.linegen.dont_increase_indentation
+
+.. autofunction:: black.numerics.format_float_or_int_string
+
+.. autofunction:: black.nodes.ensure_visible
+
+.. autofunction:: black.lines.enumerate_reversed
+
+.. autofunction:: black.comments.generate_comments
+
+.. autofunction:: black.comments.generate_ignored_nodes
+
+.. autofunction:: black.comments.is_fmt_on
+
+.. autofunction:: black.comments.children_contains_fmt_on
+
+.. autofunction:: black.nodes.first_leaf_of
+
+.. autofunction:: black.linegen.generate_trailers_to_omit
+
+.. autofunction:: black.get_future_imports
+
+.. autofunction:: black.comments.list_comments
+
+.. autofunction:: black.comments.make_comment
+
+.. autofunction:: black.linegen.maybe_make_parens_invisible_in_atom
+
+.. autofunction:: black.brackets.max_delimiter_priority_in_atom
+
+.. autofunction:: black.normalize_fmt_off
+
+.. autofunction:: black.numerics.normalize_numeric_literal
+
+.. autofunction:: black.linegen.normalize_prefix
+
+.. autofunction:: black.strings.normalize_string_prefix
+
+.. autofunction:: black.strings.normalize_string_quotes
+
+.. autofunction:: black.linegen.normalize_invisible_parens
+
+.. autofunction:: black.patch_click
+
+.. autofunction:: black.nodes.preceding_leaf
+
+.. autofunction:: black.re_compile_maybe_verbose
+
+.. autofunction:: black.linegen.should_split_line
+
+.. autofunction:: black.concurrency.shutdown
+
+.. autofunction:: black.strings.sub_twice
+
+.. autofunction:: black.nodes.whitespace
diff --git a/docs/reference/reference_summary.rst b/docs/contributing/reference/reference_summary.rst
similarity index 51%
rename from docs/reference/reference_summary.rst
rename to docs/contributing/reference/reference_summary.rst
index 780a4b4..f6ff468 100644
--- a/docs/reference/reference_summary.rst
+++ b/docs/contributing/reference/reference_summary.rst
@@ -1,6 +1,11 @@
 Developer reference
 ===================
 
+.. note::
+
+   The documentation here is quite outdated and has been neglected. Many objects worthy
+   of inclusion aren't documented. Contributions are appreciated!
+
 *Contents are subject to change.*
 
 .. toctree::
diff --git a/docs/contributing/release_process.md b/docs/contributing/release_process.md
new file mode 100644
index 0000000..be9b08a
--- /dev/null
+++ b/docs/contributing/release_process.md
@@ -0,0 +1,212 @@
+# Release process
+
+_Black_ has had a lot of work put into standardizing and automating its release
+process. This document sets out to explain how everything works and how to release
+_Black_ using said automation.
+
+## Release cadence
+
+**We aim to release whatever is on `main` every 1-2 months.** This ensures merged
+improvements and bugfixes are shipped to users reasonably quickly, while not massively
+fracturing the user base with too many versions. This also keeps the workload on
+maintainers consistent and predictable.
+
+If there's not much new on `main` to justify a release, it's acceptable to skip a
+month's release. Ideally, January releases should not be skipped because, as per our
+[stability policy](labels/stability-policy), the first release in a new calendar year
+may make changes to the _stable_ style. While the policy applies to the first release
+(instead of only January releases), confining changes to the stable style to January
+will keep things predictable (and nicer) for users.
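+
+The version numbers themselves follow the CalVer `YY.M.N` scheme spelled out under
+"Cutting a release" below. As a purely hypothetical illustration (this helper is not
+part of _Black_'s release tooling), the rule reduces to:
+
+```python
+# Hypothetical helper showing the CalVer YY.M.N rule described below: N is 0
+# for the first release in a month and counts up for any follow-up release.
+import datetime
+from typing import Optional
+
+
+def next_calver(today: datetime.date, last_release: Optional[str]) -> str:
+    year, month = today.year % 100, today.month
+    if last_release is not None:
+        last_year, last_month, last_n = (int(p) for p in last_release.split("."))
+        if (last_year, last_month) == (year, month):
+            # There was already a release this month (e.g. a hotfix), so bump N.
+            return f"{year}.{month}.{last_n + 1}"
+    return f"{year}.{month}.0"
+
+
+assert next_calver(datetime.date(2022, 1, 5), None) == "22.1.0"
+assert next_calver(datetime.date(2022, 1, 28), "22.1.0") == "22.1.1"
+```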
+
+Unless there is a serious regression or bug that requires immediate patching, **there
+should not be more than one release per month**. While version numbers are cheap,
+releases require a maintainer both to commit to doing the actual cutting of a release
+and to be able to deal with the potential fallout post-release. Releasing more
+frequently than monthly nets rapidly diminishing returns.
+
+## Cutting a release
+
+**You must have `write` permissions for the _Black_ repository to cut a release.**
+
+The 10,000 foot view of the release process is that you prepare a release PR and then
+publish a [GitHub Release]. This triggers [release automation](#release-workflows) that
+builds all release artifacts and publishes them to the various platforms we publish to.
+
+To cut a release:
+
+1. Determine the release's version number
+   - **_Black_ follows the [CalVer] versioning standard using the `YY.M.N` format**
+     - So unless there already has been a release during this month, `N` should be `0`
+   - Example: the first release in January, 2022 → `22.1.0`
+1. File a PR editing `CHANGES.md` and the docs to version the latest changes
+   1. Replace the `## Unreleased` header with the version number
+   1. Remove any empty sections for the current release
+   1. (_optional_) Read through and copy-edit the changelog (e.g. by moving entries,
+      fixing typos, or rephrasing entries)
+   1. Add a new empty template for the next release above
+      ([template below](#changelog-template))
+   1. Update references to the latest version in
+      {doc}`/integrations/source_version_control` and
+      {doc}`/usage_and_configuration/the_basics`
+   - Example PR: [GH-3139]
+1. Once the release PR is merged, wait until all CI passes
+   - If CI does not pass, **stop** and investigate the failure(s), as generally we'd
+     want to fix failing CI before cutting a release
+1. [Draft a new GitHub Release][new-release]
+   1. Click `Choose a tag` and type in the version number, then select the
+      `Create new tag: YY.M.N on publish` option that appears
+   1. Verify that the new tag targets the `main` branch
+   1. You can leave the release title blank; GitHub will default to the tag name
+   1. Copy and paste the _raw changelog Markdown_ for the current release into the
+      description box
+1. Publish the GitHub Release, triggering [release automation](#release-workflows) that
+   will handle the rest
+1. At this point, you're basically done. It's good practice to go and [watch and verify
+   that all the release workflows pass][black-actions], although you will receive a
+   GitHub notification should something fail.
+   - If something fails, don't panic. Please go read the respective workflow's logs and
+     configuration file to reverse-engineer your way to a fix/solution.
+
+Congratulations! You've successfully cut a new release of _Black_. Go and stand up and
+take a break, you deserve it.
+
+```{important}
+Once the release artifacts reach PyPI, you may see new issues being filed indicating
+regressions. While regressions are not great, they don't automatically mean a hotfix
+release is warranted. Unless the regressions are serious and impact many users, a hotfix
+release is probably unnecessary.
+
+In the end, use your best judgement and ask other maintainers for their thoughts.
+``` + +### Changelog template + +Use the following template for a clean changelog after the release: + +``` +## Unreleased + +### Highlights + + + +### Stable style + + + +### Preview style + + + +### Configuration + + + +### Packaging + + + +### Parser + + + +### Performance + + + +### Output + + + +### _Blackd_ + + + +### Integrations + + + +### Documentation + + +``` + +## Release workflows + +All of _Black_'s release automation uses [GitHub Actions]. All workflows are therefore +configured using YAML files in the `.github/workflows` directory of the _Black_ +repository. + +They are triggered by the publication of a [GitHub Release]. + +Below are descriptions of our release workflows. + +### Publish to PyPI + +This is our main workflow. It builds an [sdist] and [wheels] to upload to PyPI where the +vast majority of users will download Black from. It's divided into three job groups: + +#### sdist + pure wheel + +This single job builds the sdist and pure Python wheel (i.e., a wheel that only contains +Python code) using [build] and then uploads them to PyPI using [twine]. These artifacts +are general-purpose and can be used on basically any platform supported by Python. + +#### mypyc wheels (…) + +We use [mypyc] to compile _Black_ into a CPython C extension for significantly improved +performance. Wheels built with mypyc are platform and Python version specific. +[Supported platforms are documented in the FAQ](labels/mypyc-support). + +These matrix jobs use [cibuildwheel] which handles the complicated task of building C +extensions for many environments for us. Since building these wheels is slow, there are +multiple mypyc wheels jobs (hence the term "matrix") that build for a specific platform +(as noted in the job name in parentheses). + +Like the previous job group, the built wheels are uploaded to PyPI using [twine]. + +#### Update stable branch + +So this job doesn't _really_ belong here, but updating the `stable` branch after the +other PyPI jobs pass (they must pass for this job to start) makes the most sense. This +saves us from remembering to update the branch sometime after cutting the release. + +- _Currently this workflow uses an API token associated with @ambv's PyPI account_ + +### Publish executables + +This workflow builds native executables for multiple platforms using [PyInstaller]. This +allows people to download the executable for their platform and run _Black_ without a +[Python runtime](https://wiki.python.org/moin/PythonImplementations) installed. + +The created binaries are stored on the associated GitHub Release for download over _IPv4 +only_ (GitHub still does not have IPv6 access 😢). + +### docker + +This workflow uses the QEMU powered `buildx` feature of Docker to upload an `arm64` and +`amd64`/`x86_64` build of the official _Black_ Docker image™. + +- _Currently this workflow uses an API Token associated with @cooperlees account_ + +```{note} +This also runs on each push to `main`. 
+```
+
+[black-actions]: https://github.com/psf/black/actions
+[build]: https://pypa-build.readthedocs.io/
+[calver]: https://calver.org
+[cibuildwheel]: https://cibuildwheel.readthedocs.io/
+[gh-3139]: https://github.com/psf/black/pull/3139
+[github actions]: https://github.com/features/actions
+[github release]: https://github.com/psf/black/releases
+[new-release]: https://github.com/psf/black/releases/new
+[mypyc]: https://mypyc.readthedocs.io/
+[mypyc-platform-support]:
+  /faq.html#what-is-compiled-yes-no-all-about-in-the-version-output
+[pyinstaller]: https://www.pyinstaller.org/
+[sdist]:
+  https://packaging.python.org/en/latest/glossary/#term-Source-Distribution-or-sdist
+[twine]: https://twine.readthedocs.io/
+[wheels]: https://packaging.python.org/en/latest/glossary/#term-Wheel
diff --git a/docs/contributing/the_basics.md b/docs/contributing/the_basics.md
new file mode 100644
index 0000000..9325a9e
--- /dev/null
+++ b/docs/contributing/the_basics.md
@@ -0,0 +1,80 @@
+# The basics
+
+An overview of contributing to the _Black_ project.
+
+## Technicalities
+
+Development on the latest version of Python is preferred. As of this writing it's 3.9.
+You can use any operating system.
+
+Install development dependencies inside a virtual environment of your choice, for
+example:
+
+```console
+$ python3 -m venv .venv
+$ source .venv/bin/activate
+(.venv)$ pip install -r test_requirements.txt
+(.venv)$ pip install -e .[d]
+(.venv)$ pre-commit install
+```
+
+Before submitting pull requests, run lints and tests with the following commands from
+the root of the black repo:
+
+```console
+# Linting
+(.venv)$ pre-commit run -a
+
+# Unit tests
+(.venv)$ tox -e py
+
+# Optional Fuzz testing
+(.venv)$ tox -e fuzz
+```
+
+### News / Changelog Requirement
+
+`Black` has CI that will check for an entry corresponding to your PR in `CHANGES.md`. If
+you feel this PR does not require a changelog entry, please state that in a comment and
+a maintainer can add a `skip news` label to make the CI pass. Otherwise, please ensure
+you have a line in the following format:
+
+```md
+- `Black` is now more awesome (#X)
+```
+
+Note that X should be your PR number, not issue number! To work out X, please use
+[Next PR Number](https://ichard26.github.io/next-pr-number/?owner=psf&name=black). This
+is not perfect but saves a lot of release overhead as now the releaser does not need to
+go back and work out what to add to the `CHANGES.md` for each release. (A sketch of
+what this check amounts to is shown near the end of this page.)
+
+### Style Changes
+
+If a change would affect the advertised code style, please modify the documentation (The
+_Black_ code style) to reflect that change. Patches that fix unintended bugs in
+formatting don't need to be mentioned separately though. If the change is implemented
+with the `--preview` flag, please include the change in the future style document
+instead and write the changelog entry under a dedicated "Preview changes" heading.
+
+### Docs Testing
+
+If you make changes to docs, you can test that they still build locally, too.
+
+```console
+(.venv)$ pip install -r docs/requirements.txt
+(.venv)$ pip install [-e] .[d]
+(.venv)$ sphinx-build -a -b html -W docs/ docs/_build/
+```
+
+## Hygiene
+
+If you're fixing a bug, add a test. Run it first to confirm it fails, then fix the bug,
+run it again to confirm it's really fixed.
+
+If adding a new feature, add a test. In fact, always add a test. But wait, before
+adding any large feature, open an issue for us to discuss the idea first.
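+
+To make the changelog requirement described earlier concrete, the CI check essentially
+amounts to searching `CHANGES.md` for your PR number in the `(#X)` form. A rough,
+hypothetical sketch (not the actual CI script, whose details may differ):
+
+```python
+# Rough sketch of the CHANGES.md check: succeed only if the changelog
+# mentions the given PR number as "(#X)" somewhere.
+import re
+import sys
+
+
+def changelog_mentions_pr(changes_text: str, pr_number: int) -> bool:
+    return re.search(rf"\(#{pr_number}\)", changes_text) is not None
+
+
+if __name__ == "__main__":
+    pr = int(sys.argv[1])  # the PR number to look for
+    with open("CHANGES.md", encoding="utf-8") as f:
+        sys.exit(0 if changelog_mentions_pr(f.read(), pr) else 1)
+```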
+ +## Finally + +Thanks again for your interest in improving the project! You're taking action when most +people decide to sit and watch. diff --git a/docs/contributing_to_black.md b/docs/contributing_to_black.md deleted file mode 100644 index b911b46..0000000 --- a/docs/contributing_to_black.md +++ /dev/null @@ -1,112 +0,0 @@ -[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM CONTRIBUTING.md" - -# Contributing to _Black_ - -Welcome! Happy to see you willing to make the project better. Have you read the entire -[user documentation](https://black.readthedocs.io/en/latest/) yet? - -## Bird's eye view - -In terms of inspiration, _Black_ is about as configurable as _gofmt_. This is -deliberate. - -Bug reports and fixes are always welcome! Please follow the -[issue template on GitHub](https://github.com/psf/black/issues/new) for best results. - -Before you suggest a new feature or configuration knob, ask yourself why you want it. If -it enables better integration with some workflow, fixes an inconsistency, speeds things -up, and so on - go for it! On the other hand, if your answer is "because I don't like a -particular formatting" then you're not ready to embrace _Black_ yet. Such changes are -unlikely to get accepted. You can still try but prepare to be disappointed. - -## Technicalities - -Development on the latest version of Python is preferred. As of this writing it's 3.9. -You can use any operating system. I am using macOS myself and CentOS at work. - -Install all development dependencies using: - -```console -$ pipenv install --dev -$ pipenv shell -$ pre-commit install -``` - -If you haven't used `pipenv` before but are comfortable with virtualenvs, just run -`pip install pipenv` in the virtualenv you're already using and invoke the command above -from the cloned _Black_ repo. It will do the correct thing. - -Non pipenv install works too: - -```console -$ pip install -r test_requirements -$ pip install -e .[d] -``` - -Before submitting pull requests, run lints and tests with the following commands from -the root of the black repo: - -```console -# Linting -$ pre-commit run -a - -# Unit tests -$ tox -e py - -# Optional Fuzz testing -$ tox -e fuzz - -# Optional CI run to test your changes on many popular python projects -$ black-primer [-k -w /tmp/black_test_repos] -``` - -### News / Changelog Requirement - -`Black` has CI that will check for an entry corresponding to your PR in `CHANGES.md`. If -you feel this PR not require a changelog entry please state that in a comment and a -maintainer can add a `skip news` label to make the CI pass. Otherwise, please ensure you -have a line in the following format: - -```md -- `Black` is now more awesome (#X) -``` - -To workout X, please use -[Next PR Number](https://ichard26.github.io/next-pr-number/?owner=psf&name=black). This -is not perfect but saves a lot of release overhead as now the releaser does not need to -go back and workout what to add to the `CHANGES.md` for each release. - -### Docs Testing - -If you make changes to docs, you can test they still build locally too. - -```console -$ pip install -r docs/requirements.txt -$ pip install [-e] .[d] -$ sphinx-build -a -b html -W docs/ docs/_build/ -``` - -## black-primer - -`black-primer` is used by CI to pull down well-known _Black_ formatted projects and see -if we get source code changes. It will error on formatting changes or errors. Please run -before pushing your PR to see if you get the actions you would expect from _Black_ with -your PR. 
You may need to change -[primer.json](https://github.com/psf/black/blob/master/src/black_primer/primer.json) -configuration for it to pass. - -For more `black-primer` information visit the -[documentation](https://github.com/psf/black/blob/master/docs/black_primer.md). - -## Hygiene - -If you're fixing a bug, add a test. Run it first to confirm it fails, then fix the bug, -run it again to confirm it's really fixed. - -If adding a new feature, add a test. In fact, always add a test. But wait, before adding -any large feature, first open an issue for us to discuss the idea first. - -## Finally - -Thanks again for your interest in improving the project! You're taking action when most -people decide to sit and watch. diff --git a/docs/editor_integration.md b/docs/editor_integration.md deleted file mode 100644 index f2d21f2..0000000 --- a/docs/editor_integration.md +++ /dev/null @@ -1,335 +0,0 @@ -# Editor integration - -## Emacs - -Options include the following: - -- [purcell/reformatter.el](https://github.com/purcell/reformatter.el) -- [proofit404/blacken](https://github.com/pythonic-emacs/blacken) -- [Elpy](https://github.com/jorgenschaefer/elpy). - -## PyCharm/IntelliJ IDEA - -1. Install `black`. - - ```console - $ pip install black - ``` - -2. Locate your `black` installation folder. - - On macOS / Linux / BSD: - - ```console - $ which black - /usr/local/bin/black # possible location - ``` - - On Windows: - - ```console - $ where black - %LocalAppData%\Programs\Python\Python36-32\Scripts\black.exe # possible location - ``` - - Note that if you are using a virtual environment detected by PyCharm, this is an - unneeded step. In this case the path to `black` is `$PyInterpreterDirectory$/black`. - -3. Open External tools in PyCharm/IntelliJ IDEA - - On macOS: - - `PyCharm -> Preferences -> Tools -> External Tools` - - On Windows / Linux / BSD: - - `File -> Settings -> Tools -> External Tools` - -4. Click the + icon to add a new external tool with the following values: - - - Name: Black - - Description: Black is the uncompromising Python code formatter. - - Program: - - Arguments: `"$FilePath$"` - -5. Format the currently opened file by selecting `Tools -> External Tools -> black`. - - - Alternatively, you can set a keyboard shortcut by navigating to - `Preferences or Settings -> Keymap -> External Tools -> External Tools - Black`. - -6. Optionally, run _Black_ on every file save: - - 1. Make sure you have the - [File Watchers](https://plugins.jetbrains.com/plugin/7177-file-watchers) plugin - installed. - 2. Go to `Preferences or Settings -> Tools -> File Watchers` and click `+` to add a - new watcher: - - Name: Black - - File type: Python - - Scope: Project Files - - Program: - - Arguments: `$FilePath$` - - Output paths to refresh: `$FilePath$` - - Working directory: `$ProjectFileDir$` - - - In Advanced Options - - Uncheck "Auto-save edited files to trigger the watcher" - - Uncheck "Trigger the watcher on external changes" - -## Wing IDE - -Wing supports black via the OS Commands tool, as explained in the Wing documentation on -[pep8 formatting](https://wingware.com/doc/edit/pep8). The detailed procedure is: - -1. Install `black`. - - ```console - $ pip install black - ``` - -2. Make sure it runs from the command line, e.g. - - ```console - $ black --help - ``` - -3. 
In Wing IDE, activate the **OS Commands** panel and define the command **black** to - execute black on the currently selected file: - - - Use the Tools -> OS Commands menu selection - - click on **+** in **OS Commands** -> New: Command line.. - - Title: black - - Command Line: black %s - - I/O Encoding: Use Default - - Key Binding: F1 - - [x] Raise OS Commands when executed - - [x] Auto-save files before execution - - [x] Line mode - -4. Select a file in the editor and press **F1** , or whatever key binding you selected - in step 3, to reformat the file. - -## Vim - -### Official plugin - -Commands and shortcuts: - -- `:Black` to format the entire file (ranges not supported); -- `:BlackUpgrade` to upgrade _Black_ inside the virtualenv; -- `:BlackVersion` to get the current version of _Black_ inside the virtualenv. - -Configuration: - -- `g:black_fast` (defaults to `0`) -- `g:black_linelength` (defaults to `88`) -- `g:black_skip_string_normalization` (defaults to `0`) -- `g:black_virtualenv` (defaults to `~/.vim/black` or `~/.local/share/nvim/black`) -- `g:black_quiet` (defaults to `0`) - -To install with [vim-plug](https://github.com/junegunn/vim-plug): - -``` -Plug 'psf/black', { 'branch': 'stable' } -``` - -or with [Vundle](https://github.com/VundleVim/Vundle.vim): - -``` -Plugin 'psf/black' -``` - -and execute the following in a terminal: - -```console -$ cd ~/.vim/bundle/black -$ git checkout origin/stable -b stable -``` - -or you can copy the plugin from -[plugin/black.vim](https://github.com/psf/black/blob/stable/plugin/black.vim). - -``` -mkdir -p ~/.vim/pack/python/start/black/plugin -curl https://raw.githubusercontent.com/psf/black/stable/plugin/black.vim -o ~/.vim/pack/python/start/black/plugin/black.vim -``` - -Let me know if this requires any changes to work with Vim 8's builtin `packadd`, or -Pathogen, and so on. - -This plugin **requires Vim 7.0+ built with Python 3.6+ support**. It needs Python 3.6 to -be able to run _Black_ inside the Vim process which is much faster than calling an -external command. - -On first run, the plugin creates its own virtualenv using the right Python version and -automatically installs _Black_. You can upgrade it later by calling `:BlackUpgrade` and -restarting Vim. - -If you need to do anything special to make your virtualenv work and install _Black_ (for -example you want to run a version from master), create a virtualenv manually and point -`g:black_virtualenv` to it. The plugin will use it. - -To run _Black_ on save, add the following line to `.vimrc` or `init.vim`: - -``` -autocmd BufWritePre *.py execute ':Black' -``` - -To run _Black_ on a key press (e.g. F9 below), add this: - -``` -nnoremap :Black -``` - -**How to get Vim with Python 3.6?** On Ubuntu 17.10 Vim comes with Python 3.6 by -default. On macOS with Homebrew run: `brew install vim`. When building Vim from source, -use: `./configure --enable-python3interp=yes`. There's many guides online how to do -this. 
- -**I get an import error when using _Black_ from a virtual environment**: If you get an -error message like this: - -```text -Traceback (most recent call last): - File "", line 63, in - File "/home/gui/.vim/black/lib/python3.7/site-packages/black.py", line 45, in - from typed_ast import ast3, ast27 - File "/home/gui/.vim/black/lib/python3.7/site-packages/typed_ast/ast3.py", line 40, in - from typed_ast import _ast3 -ImportError: /home/gui/.vim/black/lib/python3.7/site-packages/typed_ast/_ast3.cpython-37m-x86_64-linux-gnu.so: undefined symbool: PyExc_KeyboardInterrupt -``` - -Then you need to install `typed_ast` and `regex` directly from the source code. The -error happens because `pip` will download [Python wheels](https://pythonwheels.com/) if -they are available. Python wheels are a new standard of distributing Python packages and -packages that have Cython and extensions written in C are already compiled, so the -installation is much more faster. The problem here is that somehow the Python -environment inside Vim does not match with those already compiled C extensions and these -kind of errors are the result. Luckily there is an easy fix: installing the packages -from the source code. - -The two packages that cause the problem are: - -- [regex](https://pypi.org/project/regex/) -- [typed-ast](https://pypi.org/project/typed-ast/) - -Now remove those two packages: - -```console -$ pip uninstall regex typed-ast -y -``` - -And now you can install them with: - -```console -$ pip install --no-binary :all: regex typed-ast -``` - -The C extensions will be compiled and now Vim's Python environment will match. Note that -you need to have the GCC compiler and the Python development files installed (on -Ubuntu/Debian do `sudo apt-get install build-essential python3-dev`). - -If you later want to update _Black_, you should do it like this: - -```console -$ pip install -U black --no-binary regex,typed-ast -``` - -### With ALE - -1. Install [`ale`](https://github.com/dense-analysis/ale) -2. Install `black` -3. Add this to your vimrc: - - ```vim - let g:ale_fixers = {} - let g:ale_fixers.python = ['black'] - ``` - -## Gedit - -gedit is the default text editor of the GNOME, Unix like Operating Systems. Open gedit -as - -```console -$ gedit -``` - -1. `Go to edit > preferences > plugins` -2. Search for `external tools` and activate it. -3. In `Tools menu -> Manage external tools` -4. Add a new tool using `+` button. -5. Copy the below content to the code window. - -```console -#!/bin/bash -Name=$GEDIT_CURRENT_DOCUMENT_NAME -black $Name -``` - -- Set a keyboard shortcut if you like, Ex. `ctrl-B` -- Save: `Nothing` -- Input: `Nothing` -- Output: `Display in bottom pane` if you like. -- Change the name of the tool if you like. - -Use your keyboard shortcut or `Tools -> External Tools` to use your new tool. When you -close and reopen your File, _Black_ will be done with its job. - -## Visual Studio Code - -Use the -[Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python) -([instructions](https://code.visualstudio.com/docs/python/editing#_formatting)). - -## SublimeText 3 - -Use [sublack plugin](https://github.com/jgirardet/sublack). - -## Jupyter Notebook Magic - -Use [blackcellmagic](https://github.com/csurfer/blackcellmagic). 
- -## Python Language Server - -If your editor supports the [Language Server Protocol](https://langserver.org/) (Atom, -Sublime Text, Visual Studio Code and many more), you can use the -[Python Language Server](https://github.com/palantir/python-language-server) with the -[pyls-black](https://github.com/rupert/pyls-black) plugin. - -## Atom/Nuclide - -Use [python-black](https://atom.io/packages/python-black) or -[formatters-python](https://atom.io/packages/formatters-python). - -## Gradle (the build tool) - -Use the [Spotless](https://github.com/diffplug/spotless/tree/main/plugin-gradle) plugin. - -## Kakoune - -Add the following hook to your kakrc, then run _Black_ with `:format`. - -``` -hook global WinSetOption filetype=python %{ - set-option window formatcmd 'black -q -' -} -``` - -## Thonny - -Use [Thonny-black-code-format](https://github.com/Franccisco/thonny-black-code-format). - -## Other integrations - -Other editors and tools will require external contributions. - -Patches welcome! ✨ 🍰 ✨ - -Any tool that can pipe code through _Black_ using its stdio mode (just -[use `-` as the file name](https://www.tldp.org/LDP/abs/html/special-chars.html#DASHREF2)). -The formatted code will be returned on stdout (unless `--check` was passed). _Black_ -will still emit messages on stderr but that shouldn't affect your use case. - -This can be used for example with PyCharm's or IntelliJ's -[File Watchers](https://www.jetbrains.com/help/pycharm/file-watchers.html). diff --git a/docs/faq.md b/docs/faq.md new file mode 100644 index 0000000..bc9decc --- /dev/null +++ b/docs/faq.md @@ -0,0 +1,138 @@ +# Frequently Asked Questions + +The most common questions and issues users face are aggregated to this FAQ. + +```{contents} +:local: +:backlinks: none +:class: this-will-duplicate-information-and-it-is-still-useful-here +``` + +## Why spaces? I prefer tabs + +PEP 8 recommends spaces over tabs, and they are used by most of the Python community. +_Black_ provides no options to configure the indentation style, and requests for such +options will not be considered. + +However, we recognise that using tabs is an accessibility issue as well. While the +option will never be added to _Black_, visually impaired developers may find conversion +tools such as `expand/unexpand` (for Linux) useful when contributing to Python projects. +A workflow might consist of e.g. setting up appropriate pre-commit and post-merge git +hooks, and scripting `unexpand` to run after applying _Black_. + +## Does Black have an API? + +Not yet. _Black_ is fundamentally a command line tool. Many +[integrations](integrations/index.rst) are provided, but a Python interface is not one +of them. A simple API is being [planned](https://github.com/psf/black/issues/779) +though. + +## Is Black safe to use? + +Yes. _Black_ is strictly about formatting, nothing else. Black strives to ensure that +after formatting the AST is +[checked](the_black_code_style/current_style.md#ast-before-and-after-formatting) with +limited special cases where the code is allowed to differ. If issues are found, an error +is raised and the file is left untouched. Magical comments that influence linters and +other tools, such as `# noqa`, may be moved by _Black_. See below for more details. + +## How stable is Black's style? + +Stable. _Black_ aims to enforce one style and one style only, with some room for +pragmatism. See [The Black Code Style](the_black_code_style/index.rst) for more details. 
+ +Starting in 2022, the formatting output will be stable for the releases made in the same +year (other than unintentional bugs). It is possible to opt-in to the latest formatting +styles, using the `--preview` flag. + +## Why is my file not formatted? + +Most likely because it is ignored in `.gitignore` or excluded with configuration. See +[file collection and discovery](usage_and_configuration/file_collection_and_discovery.md) +for details. + +## Why is my Jupyter Notebook cell not formatted? + +_Black_ is timid about formatting Jupyter Notebooks. Cells containing any of the +following will not be formatted: + +- automagics (e.g. `pip install black`) +- non-Python cell magics (e.g. `%%writeline`). These can be added with the flag + `--python-cell-magics`, e.g. `black --python-cell-magics writeline hello.ipynb`. +- multiline magics, e.g.: + + ```python + %timeit f(1, \ + 2, \ + 3) + ``` + +- code which `IPython`'s `TransformerManager` would transform magics into, e.g.: + + ```python + get_ipython().system('ls') + ``` + +- invalid syntax, as it can't be safely distinguished from automagics in the absence of + a running `IPython` kernel. + +## Why are Flake8's E203 and W503 violated? + +Because they go against PEP 8. E203 falsely triggers on list +[slices](the_black_code_style/current_style.md#slices), and adhering to W503 hinders +readability because operators are misaligned. Disable W503 and enable the +disabled-by-default counterpart W504. E203 should be disabled while changes are still +[discussed](https://github.com/PyCQA/pycodestyle/issues/373). + +## Which Python versions does Black support? + +Currently the runtime requires Python 3.7-3.11. Formatting is supported for files +containing syntax from Python 3.3 to 3.11. We promise to support at least all Python +versions that have not reached their end of life. This is the case for both running +_Black_ and formatting code. + +Support for formatting Python 2 code was removed in version 22.0. While we've made no +plans to stop supporting older Python 3 minor versions immediately, their support might +also be removed some time in the future without a deprecation period. + +Runtime support for 3.6 was removed in version 22.10.0. + +## Why does my linter or typechecker complain after I format my code? + +Some linters and other tools use magical comments (e.g., `# noqa`, `# type: ignore`) to +influence their behavior. While Black does its best to recognize such comments and leave +them in the right place, this detection is not and cannot be perfect. Therefore, you'll +sometimes have to manually move these comments to the right place after you format your +codebase with _Black_. + +## Can I run Black with PyPy? + +Yes, there is support for PyPy 3.7 and higher. + +## Why does Black not detect syntax errors in my code? + +_Black_ is an autoformatter, not a Python linter or interpreter. Detecting all syntax +errors is not a goal. It can format all code accepted by CPython (if you find an example +where that doesn't hold, please report a bug!), but it may also format some code that +CPython doesn't accept. + +(labels/mypyc-support)= + +## What is `compiled: yes/no` all about in the version output? + +While _Black_ is indeed a pure Python project, we use [mypyc] to compile _Black_ into a +C Python extension, usually doubling performance. These compiled wheels are available +for 64-bit versions of Windows, Linux (via the manylinux standard), and macOS across all +supported CPython versions. 
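+
+For instance, you can tell which build you are running from the version output. On a
+platform covered by the compiled wheels, it looks roughly like this (a sketch; the
+exact wording varies between releases):
+
+```console
+$ black --version
+black, 22.10.0 (compiled: yes)
+```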
+ +Platforms including musl-based and/or ARM Linux distributions, and ARM Windows are +currently **not** supported. These platforms will fall back to the slower pure Python +wheel available on PyPI. + +If you are experiencing exceptionally weird issues or even segfaults, you can try +passing `--no-binary black` to your pip install invocation. This flag excludes all +wheels (including the pure Python wheel), so this command will use the [sdist]. + +[mypyc]: https://mypyc.readthedocs.io/en/latest/ +[sdist]: + https://packaging.python.org/en/latest/glossary/#term-Source-Distribution-or-sdist diff --git a/docs/getting_started.md b/docs/getting_started.md new file mode 100644 index 0000000..1825f3b --- /dev/null +++ b/docs/getting_started.md @@ -0,0 +1,48 @@ +# Getting Started + +New to _Black_? Don't worry, you've found the perfect place to get started! + +## Do you like the _Black_ code style? + +Before using _Black_ on some of your code, it might be a good idea to first understand +how _Black_ will format your code. _Black_ isn't for everyone and you may find something +that is a dealbreaker for you personally, which is okay! The current _Black_ code style +[is described here](./the_black_code_style/current_style.md). + +## Try it out online + +Also, you can try out _Black_ online for minimal fuss on the +[Black Playground](https://black.vercel.app) generously created by José Padilla. + +## Installation + +_Black_ can be installed by running `pip install black`. It requires Python 3.7+ to run. +If you want to format Jupyter Notebooks, install with `pip install 'black[jupyter]'`. + +If you can't wait for the latest _hotness_ and want to install from GitHub, use: + +`pip install git+https://github.com/psf/black` + +## Basic usage + +To get started right away with sensible defaults: + +```sh +black {source_file_or_directory}... +``` + +You can run _Black_ as a package if running it as a script doesn't work: + +```sh +python -m black {source_file_or_directory}... +``` + +## Next steps + +Took a look at [the _Black_ code style](./the_black_code_style/current_style.md) and +tried out _Black_? Fantastic, you're ready for more. Why not explore some more on using +_Black_ by reading +[Usage and Configuration: The basics](./usage_and_configuration/the_basics.md). +Alternatively, you can check out the +[Introducing _Black_ to your project](./guides/introducing_black_to_your_project.md) +guide. diff --git a/docs/github_actions.md b/docs/github_actions.md deleted file mode 100644 index bd46809..0000000 --- a/docs/github_actions.md +++ /dev/null @@ -1,27 +0,0 @@ -[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md" - -# GitHub Actions - -Create a file named `.github/workflows/black.yml` inside your repository with: - -```yaml -name: Lint - -on: [push, pull_request] - -jobs: - lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - - uses: psf/black@stable - with: - black_args: ". --check" -``` - -## Inputs - -### `black_args` - -**optional**: Black input arguments. Defaults to `. --check --diff`. diff --git a/docs/guides/index.md b/docs/guides/index.md new file mode 100644 index 0000000..127279b --- /dev/null +++ b/docs/guides/index.md @@ -0,0 +1,16 @@ +# Guides + +```{toctree} +--- +hidden: +--- + +introducing_black_to_your_project +using_black_with_other_tools +``` + +Wondering how to do something specific? You've found the right place! 
Listed below are +topic specific guides available: + +- {doc}`introducing_black_to_your_project` +- {doc}`using_black_with_other_tools` diff --git a/docs/guides/introducing_black_to_your_project.md b/docs/guides/introducing_black_to_your_project.md new file mode 100644 index 0000000..9ae40a1 --- /dev/null +++ b/docs/guides/introducing_black_to_your_project.md @@ -0,0 +1,52 @@ +# Introducing _Black_ to your project + +```{note} +This guide is incomplete. Contributions are welcomed and would be deeply +appreciated! +``` + +## Avoiding ruining git blame + +A long-standing argument against moving to automated code formatters like _Black_ is +that the migration will clutter up the output of `git blame`. This was a valid argument, +but since Git version 2.23, Git natively supports +[ignoring revisions in blame](https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revltrevgt) +with the `--ignore-rev` option. You can also pass a file listing the revisions to ignore +using the `--ignore-revs-file` option. The changes made by the revision will be ignored +when assigning blame. Lines modified by an ignored revision will be blamed on the +previous revision that modified those lines. + +So when migrating your project's code style to _Black_, reformat everything and commit +the changes (preferably in one massive commit). Then put the full 40 characters commit +identifier(s) into a file. + +```text +# Migrate code style to Black +5b4ab991dede475d393e9d69ec388fd6bd949699 +``` + +Afterwards, you can pass that file to `git blame` and see clean and meaningful blame +information. + +```console +$ git blame important.py --ignore-revs-file .git-blame-ignore-revs +7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 1) def very_important_function(text, file): +abdfd8b0 (Alice Doe 2019-09-23 11:39:32 -0400 2) text = text.lstrip() +7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 3) with open(file, "r+") as f: +7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 4) f.write(formatted) +``` + +You can even configure `git` to automatically ignore revisions listed in a file on every +call to `git blame`. + +```console +$ git config blame.ignoreRevsFile .git-blame-ignore-revs +``` + +**The one caveat is that some online Git-repositories like GitLab do not yet support +ignoring revisions using their native blame UI.** So blame information will be cluttered +with a reformatting commit on those platforms. (If you'd like this feature, there's an +open issue for [GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/31423)). This is +however supported by +[GitHub](https://docs.github.com/en/repositories/working-with-files/using-files/viewing-a-file#ignore-commits-in-the-blame-view), +currently in beta. diff --git a/docs/compatible_configs.md b/docs/guides/using_black_with_other_tools.md similarity index 69% rename from docs/compatible_configs.md rename to docs/guides/using_black_with_other_tools.md index a4f83ee..1d380bd 100644 --- a/docs/compatible_configs.md +++ b/docs/guides/using_black_with_other_tools.md @@ -1,4 +1,6 @@ -# _Black_ compatible configurations +# Using _Black_ with other tools + +## Black compatible configurations All of Black's changes are harmless (or at least, they should be), but a few do conflict against other tools. It is not uncommon to be using other tools alongside _Black_ like @@ -11,15 +13,32 @@ tools out there. tools, using **their** supported file formats. Compatible configuration files can be -[found here](https://github.com/psf/black/blob/master/docs/compatible_configs/). 
+[found here](https://github.com/psf/black/blob/main/docs/compatible_configs/). -## isort +### isort [isort](https://pypi.org/p/isort/) helps to sort and format imports in your Python code. _Black_ also formats imports, but in a different way from isort's defaults which leads to conflicting changes. -### Configuration +#### Profile + +Since version 5.0.0, isort supports +[profiles](https://pycqa.github.io/isort/docs/configuration/profiles.html) to allow easy +interoperability with common code styles. You can set the black profile in any of the +[config files](https://pycqa.github.io/isort/docs/configuration/config_files.html) +supported by isort. Below, an example for `pyproject.toml`: + +```toml +[tool.isort] +profile = "black" +``` + +#### Custom Configuration + +If you're using an isort version that is older than 5.0.0 or you have some custom +configuration for _Black_, you can tweak your isort configuration to make it compatible +with _Black_. Below, an example for `.isort.cfg`: ``` multi_line_output = 3 @@ -30,12 +49,12 @@ ensure_newline_before_comments = True line_length = 88 ``` -### Why those options above? +#### Why those options above? _Black_ wraps imports that surpass `line-length` by moving identifiers into their own indented line. If that still doesn't fit the bill, it will put all of them in separate lines and put a trailing comma. A more detailed explanation of this behaviour can be -[found here](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#how-black-wraps-lines). +[found here](../the_black_code_style/current_style.md#how-black-wraps-lines). isort's default mode of wrapping imports that extend past the `line_length` limit is "Grid". @@ -72,23 +91,15 @@ works the same as with _Black_. **Please note** `ensure_newline_before_comments = True` only works since isort >= 5 but does not break older versions so you can keep it if you are running previous versions. -If only isort >= 5 is used you can add `profile = black` instead of all the options -since [profiles](https://timothycrosley.github.io/isort/docs/configuration/profiles/) -are available and do the configuring for you. -### Formats +#### Formats

.isort.cfg -```cfg +```ini [settings] -multi_line_output = 3 -include_trailing_comma = True -force_grid_wrap = 0 -use_parentheses = True -ensure_newline_before_comments = True -line_length = 88 +profile = black ```
@@ -96,14 +107,9 @@ line_length = 88
setup.cfg -```cfg +```ini [isort] -multi_line_output = 3 -include_trailing_comma = True -force_grid_wrap = 0 -use_parentheses = True -ensure_newline_before_comments = True -line_length = 88 +profile = black ```
@@ -113,12 +119,7 @@ line_length = 88 ```toml [tool.isort] -multi_line_output = 3 -include_trailing_comma = true -force_grid_wrap = 0 -use_parentheses = true -ensure_newline_before_comments = true -line_length = 88 +profile = 'black' ``` @@ -128,31 +129,26 @@ line_length = 88 ```ini [*.py] -multi_line_output = 3 -include_trailing_comma = True -force_grid_wrap = 0 -use_parentheses = True -ensure_newline_before_comments = True -line_length = 88 +profile = black ``` -## Flake8 +### Flake8 [Flake8](https://pypi.org/p/flake8/) is a code linter. It warns you of syntax errors, possible bugs, stylistic errors, etc. For the most part, Flake8 follows [PEP 8](https://www.python.org/dev/peps/pep-0008/) when warning about stylistic errors. There are a few deviations that cause incompatibilities with _Black_. -### Configuration +#### Configuration ``` max-line-length = 88 extend-ignore = E203 ``` -### Why those options above? +#### Why those options above? In some cases, as determined by PEP 8, _Black_ will enforce an equal amount of whitespace around slice operators. Due to this, Flake8 will raise @@ -169,7 +165,7 @@ in your configuration. Also, as like with isort, flake8 should be configured to allow lines up to the length limit of `88`, _Black_'s default. This explains `max-line-length = 88`. -### Formats +#### Formats
.flake8 @@ -185,7 +181,7 @@ extend-ignore = E203
setup.cfg -```cfg +```ini [flake8] max-line-length = 88 extend-ignore = E203 @@ -204,51 +200,33 @@ extend-ignore = E203
-## Pylint +### Pylint [Pylint](https://pypi.org/p/pylint/) is also a code linter like Flake8. It has the same checks as flake8 and more. In particular, it has more formatting checks regarding style conventions like variable naming. With so many checks, Pylint is bound to have some mixed feelings about _Black_'s formatting style. -### Configuration +#### Configuration ``` -disable = C0330, C0326 max-line-length = 88 ``` -### Why those options above? - -When _Black_ is folding very long expressions, the closing brackets will -[be dedented](https://github.com/psf/black/blob/master/docs/the_black_code_style.md#how-black-wraps-lines). +#### Why those options above? -```py3 -ImportantClass.important_method( - exc, limit, lookup_lines, capture_locals, callback -) -``` +Pylint should be configured to only complain about lines that surpass `88` characters +via `max-line-length = 88`. -Although, this style is PEP 8 compliant, Pylint will raise -`C0330: Wrong hanging indentation before block (add 4 spaces)` warnings. Since _Black_ -isn't configurable on this style, Pylint should be told to ignore these warnings via -`disable = C0330`. +If using `pylint<2.6.0`, also disable `C0326` and `C0330` as these are incompatible with +_Black_ formatting and have since been removed. -Also, since _Black_ deals with whitespace around operators and brackets, Pylint's -warning `C0326: Bad whitespace` should be disabled using `disable = C0326`. - -And as usual, Pylint should be configured to only complain about lines that surpass `88` -characters via `max-line-length = 88`. - -### Formats +#### Formats
pylintrc ```ini -[MESSAGES CONTROL] -disable = C0330, C0326 - [format] max-line-length = 88 ``` @@ -261,9 +239,6 @@ max-line-length = 88 ```cfg [pylint] max-line-length = 88 - -[pylint.messages_control] -disable = C0330, C0326 ```
@@ -272,9 +247,6 @@ disable = C0330, C0326

pyproject.toml

```toml
-[tool.pylint.messages_control]
-disable = "C0330, C0326"
-
[tool.pylint.format]
max-line-length = "88"
```

diff --git a/docs/ignoring_unmodified_files.md b/docs/ignoring_unmodified_files.md
deleted file mode 100644
index a915f4e..0000000
--- a/docs/ignoring_unmodified_files.md
+++ /dev/null
@@ -1,23 +0,0 @@
-[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md"
-
-# Ignoring unmodified files
-
-_Black_ remembers files it has already formatted, unless the `--diff` flag is used or
-code is passed via standard input. This information is stored per-user. The exact
-location of the file depends on the _Black_ version and the system on which _Black_ is
-run. The file is non-portable. The standard location on common operating systems is:
-
-- Windows:
-  `C:\\Users\\AppData\Local\black\black\Cache\\cache...pickle`
-- macOS:
-  `/Users//Library/Caches/black//cache...pickle`
-- Linux:
-  `/home//.cache/black//cache...pickle`
-
-`file-mode` is an int flag that determines whether the file was formatted as 3.6+ only,
-as .pyi, and whether string normalization was omitted.
-
-To override the location of these files on macOS or Linux, set the environment variable
-`XDG_CACHE_HOME` to your preferred location. For example, if you want to put the cache
-in the directory you're running _Black_ from, set `XDG_CACHE_HOME=.cache`. _Black_ will
-then write the above files to `.cache/black//`.
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..9d0db46
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,139 @@
+
+
+# The uncompromising code formatter
+
+> “Any color you like.”
+
+By using _Black_, you agree to cede control over minutiae of hand-formatting. In return,
+_Black_ gives you speed, determinism, and freedom from `pycodestyle` nagging about
+formatting. You will save time and mental energy for more important matters.
+
+_Black_ makes code review faster by producing the smallest diffs possible. Blackened
+code looks the same regardless of the project you're reading. Formatting becomes
+transparent after a while and you can focus on the content instead.
+
+Try it out now using the [Black Playground](https://black.vercel.app).
+
+```{admonition} Note - Black is now stable!
+*Black* is [successfully used](https://github.com/psf/black#used-by) by
+many projects, small and big. *Black* has a comprehensive test suite, with efficient
+parallel tests, our own auto formatting and parallel Continuous Integration runner.
+Now that we have become stable, you should not expect large changes to formatting in
+the future. Stylistic changes will mostly be responses to bug reports and support for
+new Python syntax.
+
+Also, as a safety measure which slows down processing, *Black* will check that the
+reformatted code still produces a valid AST that is effectively equivalent to the
+original (see the
+[Pragmatism](./the_black_code_style/current_style.md#pragmatism)
+section for details). If you're feeling confident, use `--fast`.
+```
+
+```{note}
+{doc}`Black is licensed under the MIT license `.
+```
+
+## Testimonials
+
+**Mike Bayer**, author of [SQLAlchemy](https://www.sqlalchemy.org/):
+
+> _I can't think of any single tool in my entire programming career that has given me a
+> bigger productivity increase by its introduction.
I can now do refactorings in about +> 1% of the keystrokes that it would have taken me previously when we had no way for +> code to format itself._ + +**Dusty Phillips**, +[writer](https://smile.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=dusty+phillips): + +> _Black is opinionated so you don't have to be._ + +**Hynek Schlawack**, creator of [attrs](https://www.attrs.org/), core developer of +Twisted and CPython: + +> _An auto-formatter that doesn't suck is all I want for Xmas!_ + +**Carl Meyer**, [Django](https://www.djangoproject.com/) core developer: + +> _At least the name is good._ + +**Kenneth Reitz**, creator of [requests](http://python-requests.org/) and +[pipenv](https://docs.pipenv.org/): + +> _This vastly improves the formatting of our code. Thanks a ton!_ + +## Show your style + +Use the badge in your project's README.md: + +```md +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +``` + +Using the badge in README.rst: + +```rst +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black +``` + +Looks like this: + +```{image} https://img.shields.io/badge/code%20style-black-000000.svg +:target: https://github.com/psf/black +``` + +## Contents + +```{toctree} +--- +maxdepth: 3 +includehidden: +--- + +the_black_code_style/index +``` + +```{toctree} +--- +maxdepth: 3 +includehidden: +caption: User Guide +--- + +getting_started +usage_and_configuration/index +integrations/index +guides/index +faq +``` + +```{toctree} +--- +maxdepth: 2 +includehidden: +caption: Development +--- + +contributing/index +change_log +authors +``` + +```{toctree} +--- +hidden: +caption: Project Links +--- + +GitHub +PyPI +Chat +``` + +# Indices and tables + +- {ref}`genindex` +- {ref}`search` diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index f03d247..0000000 --- a/docs/index.rst +++ /dev/null @@ -1,71 +0,0 @@ -.. black documentation master file, created by - sphinx-quickstart on Fri Mar 23 10:53:30 2018. - -The uncompromising code formatter -================================= - -By using *Black*, you agree to cede control over minutiae of -hand-formatting. In return, *Black* gives you speed, determinism, and -freedom from `pycodestyle` nagging about formatting. You will save time -and mental energy for more important matters. - -*Black* makes code review faster by producing the smallest diffs -possible. Blackened code looks the same regardless of the project -you're reading. Formatting becomes transparent after a while and you -can focus on the content instead. - -Try it out now using the `Black Playground `_. - -.. note:: - - `Black is beta `_. - - -Testimonials ------------- - -**Dusty Phillips**, `writer `_: - - *Black is opinionated so you don't have to be.* - -**Hynek Schlawack**, creator of `attrs `_, core -developer of Twisted and CPython: - - *An auto-formatter that doesn't suck is all I want for Xmas!* - -**Carl Meyer**, `Django `_ core developer: - - *At least the name is good.* - -**Kenneth Reitz**, creator of `requests `_ -and `pipenv `_: - - *This vastly improves the formatting of our code. Thanks a ton!* - -Contents --------- - -.. 
toctree:: - :maxdepth: 2 - - installation_and_usage - the_black_code_style - pyproject_toml - compatible_configs - editor_integration - blackd - black_primer - version_control_integration - github_actions - ignoring_unmodified_files - contributing_to_black - show_your_style - change_log - reference/reference_summary - authors - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/docs/installation_and_usage.md b/docs/installation_and_usage.md deleted file mode 100644 index fcde49f..0000000 --- a/docs/installation_and_usage.md +++ /dev/null @@ -1,195 +0,0 @@ -[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md" - -# Installation and usage - -## Installation - -_Black_ can be installed by running `pip install black`. It requires Python 3.6.0+ to -run but you can reformat Python 2 code with it, too. - -### Install from GitHub - -If you can't wait for the latest _hotness_ and want to install from GitHub, use: - -`pip install git+git://github.com/psf/black` - -## Usage - -To get started right away with sensible defaults: - -```sh -black {source_file_or_directory} -``` - -You can run _Black_ as a package if running it as a script doesn't work: - -```sh -python -m black {source_file_or_directory} -``` - -## Command line options - -_Black_ doesn't provide many options. You can list them by running `black --help`: - -```text -Usage: black [OPTIONS] [SRC]... - - The uncompromising code formatter. - -Options: - -c, --code TEXT Format the code passed in as a string. - -l, --line-length INTEGER How many characters per line to allow. - [default: 88] - - -t, --target-version [py27|py33|py34|py35|py36|py37|py38|py39] - Python versions that should be supported by - Black's output. [default: per-file auto- - detection] - - --pyi Format all input files like typing stubs - regardless of file extension (useful when - piping source on standard input). - - -S, --skip-string-normalization - Don't normalize string quotes or prefixes. - -C, --skip-magic-trailing-comma - Don't use trailing commas as a reason to - split lines. - - --check Don't write the files back, just return the - status. Return code 0 means nothing would - change. Return code 1 means some files - would be reformatted. Return code 123 means - there was an internal error. - - --diff Don't write the files back, just output a - diff for each file on stdout. - - --color / --no-color Show colored diff. Only applies when - `--diff` is given. - - --fast / --safe If --fast given, skip temporary sanity - checks. [default: --safe] - - --include TEXT A regular expression that matches files and - directories that should be included on - recursive searches. An empty value means - all files are included regardless of the - name. Use forward slashes for directories - on all platforms (Windows, too). Exclusions - are calculated first, inclusions later. - [default: \.pyi?$] - - --exclude TEXT A regular expression that matches files and - directories that should be excluded on - recursive searches. An empty value means no - paths are excluded. Use forward slashes for - directories on all platforms (Windows, too). - Exclusions are calculated first, inclusions - later. [default: /(\.direnv|\.eggs|\.git|\. - hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.sv - n|_build|buck-out|build|dist)/] - - --force-exclude TEXT Like --exclude, but files and directories - matching this regex will be excluded even - when they are passed explicitly as - arguments. 
- - --extend-exclude TEXT Like --exclude, but adds additional files - and directories on top of the excluded - ones. (useful if you simply want to add to - the default) - - --stdin-filename TEXT The name of the file when passing it through - stdin. Useful to make sure Black will - respect --force-exclude option on some - editors that rely on using stdin. - - -q, --quiet Don't emit non-error messages to stderr. - Errors are still emitted; silence those with - 2>/dev/null. - - -v, --verbose Also emit messages to stderr about files - that were not changed or were ignored due to - exclusion patterns. - - --version Show the version and exit. - --config FILE Read configuration from FILE path. - -h, --help Show this message and exit. -``` - -_Black_ is a well-behaved Unix-style command-line tool: - -- it does nothing if no sources are passed to it; -- it will read from standard input and write to standard output if `-` is used as the - filename; -- it only outputs messages to users on standard error; -- exits with code 0 unless an internal error occurred (or `--check` was used). - -## Using _Black_ with other tools - -While _Black_ enforces formatting that conforms to PEP 8, other tools may raise warnings -about _Black_'s changes or will overwrite _Black_'s changes. A good example of this is -[isort](https://pypi.org/p/isort). Since _Black_ is barely configurable, these tools -should be configured to neither warn about nor overwrite _Black_'s changes. - -Actual details on _Black_ compatible configurations for various tools can be found in -[compatible_configs](https://github.com/psf/black/blob/master/docs/compatible_configs.md#black-compatible-configurations). - -## Migrating your code style without ruining git blame - -A long-standing argument against moving to automated code formatters like _Black_ is -that the migration will clutter up the output of `git blame`. This was a valid argument, -but since Git version 2.23, Git natively supports -[ignoring revisions in blame](https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revltrevgt) -with the `--ignore-rev` option. You can also pass a file listing the revisions to ignore -using the `--ignore-revs-file` option. The changes made by the revision will be ignored -when assigning blame. Lines modified by an ignored revision will be blamed on the -previous revision that modified those lines. - -So when migrating your project's code style to _Black_, reformat everything and commit -the changes (preferably in one massive commit). Then put the full 40 characters commit -identifier(s) into a file. - -``` -# Migrate code style to Black -5b4ab991dede475d393e9d69ec388fd6bd949699 -``` - -Afterwards, you can pass that file to `git blame` and see clean and meaningful blame -information. - -```console -$ git blame important.py --ignore-revs-file .git-blame-ignore-revs -7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 1) def very_important_function(text, file): -abdfd8b0 (Alice Doe 2019-09-23 11:39:32 -0400 2) text = text.lstrip() -7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 3) with open(file, "r+") as f: -7a1ae265 (John Smith 2019-04-15 15:55:13 -0400 4) f.write(formatted) -``` - -You can even configure `git` to automatically ignore revisions listed in a file on every -call to `git blame`. 
-
-```console
-$ git config blame.ignoreRevsFile .git-blame-ignore-revs
-```
-
-**The one caveat is that GitHub and GitLab do not yet support ignoring revisions using
-their native UI of blame.** So blame information will be cluttered with a reformatting
-commit on those platforms. (If you'd like this feature, there's an open issue for
-[GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/31423) and please let GitHub
-know!)
-
-## NOTE: This is a beta product
-
-_Black_ is already [successfully used](https://github.com/psf/black#used-by) by many
-projects, small and big. It also sports a decent test suite. However, it is still very
-new. Things will probably be wonky for a while. This is made explicit by the "Beta"
-trove classifier, as well as by the "b" in the version number. What this means for you
-is that **until the formatter becomes stable, you should expect some formatting to
-change in the future**. That being said, no drastic stylistic changes are planned,
-mostly responses to bug reports.
-
-Also, as a temporary safety measure, _Black_ will check that the reformatted code still
-produces a valid AST that is equivalent to the original. This slows it down. If you're
-feeling confident, use `--fast`.
diff --git a/docs/integrations/editors.md b/docs/integrations/editors.md
new file mode 100644
index 0000000..28c9f48
--- /dev/null
+++ b/docs/integrations/editors.md
@@ -0,0 +1,317 @@
+# Editor integration
+
+## Emacs
+
+Options include the following:
+
+- [wbolster/emacs-python-black](https://github.com/wbolster/emacs-python-black)
+- [proofit404/blacken](https://github.com/pythonic-emacs/blacken)
+- [Elpy](https://github.com/jorgenschaefer/elpy).
+
+## PyCharm/IntelliJ IDEA
+
+1. Install _Black_ with the `d` extra.
+
+   ```console
+   $ pip install 'black[d]'
+   ```
+
+1. Install
+   [BlackConnect IntelliJ IDEs plugin](https://plugins.jetbrains.com/plugin/14321-blackconnect).
+
+1. Open plugin configuration in PyCharm/IntelliJ IDEA
+
+   On macOS:
+
+   `PyCharm -> Preferences -> Tools -> BlackConnect`
+
+   On Windows / Linux / BSD:
+
+   `File -> Settings -> Tools -> BlackConnect`
+
+1. In `Local Instance (shared between projects)` section:
+
+   1. Check `Start local blackd instance when plugin loads`.
+   1. Press the `Detect` button near `Path` input. The plugin should detect the `blackd`
+      executable.
+
+1. In `Trigger Settings` section check `Trigger on code reformat` to enable code
+   reformatting with _Black_.
+
+1. Format the currently opened file by selecting `Code -> Reformat Code` or using a
+   shortcut.
+
+1. Optionally, to run _Black_ on every file save:
+
+   - In `Trigger Settings` section of plugin configuration check
+     `Trigger when saving changed files`.
+
+## Wing IDE
+
+Wing IDE supports `black` via **Preference Settings** for system-wide settings and
+**Project Properties** for per-project or workspace-specific settings, as explained in
+the Wing documentation on
+[Auto-Reformatting](https://wingware.com/doc/edit/auto-reformatting). The detailed
+procedure is:
+
+### Prerequisites
+
+- Wing IDE version 8.0+
+
+- Install `black`.
+
+  ```console
+  $ pip install black
+  ```
+
+- Make sure it runs from the command line, e.g.
+
+  ```console
+  $ black --help
+  ```
+
+### Preference Settings
+
+If you want Wing IDE to always reformat with `black` for every project, follow these
+steps:
+
+1. In menubar navigate to `Edit -> Preferences -> Editor -> Reformatting`.
+
+1. Set **Auto-Reformat** from `disable` (default) to `Line after edit` or
+   `Whole files before save`.
+
+1. Set **Reformatter** from `PEP8` (default) to `Black`.
+
+### Project Properties
+
+If you want to reformat just a specific project without changing Wing IDE's global
+settings, follow these steps:
+
+1. In menubar navigate to `Project -> Project Properties -> Options`.
+
+1. Set **Auto-Reformat** from `Use Preferences setting` (default) to `Line after edit`
+   or `Whole files before save`.
+
+1. Set **Reformatter** from `Use Preferences setting` (default) to `Black`.
+
+## Vim
+
+### Official plugin
+
+Commands and shortcuts:
+
+- `:Black` to format the entire file (ranges not supported);
+  - you can optionally pass `target_version=<version>` with the same values as in the
+    command line.
+- `:BlackUpgrade` to upgrade _Black_ inside the virtualenv;
+- `:BlackVersion` to get the current version of _Black_ inside the virtualenv.
+
+Configuration:
+
+- `g:black_fast` (defaults to `0`)
+- `g:black_linelength` (defaults to `88`)
+- `g:black_skip_string_normalization` (defaults to `0`)
+- `g:black_virtualenv` (defaults to `~/.vim/black` or `~/.local/share/nvim/black`)
+- `g:black_quiet` (defaults to `0`)
+- `g:black_preview` (defaults to `0`)
+
+To install with [vim-plug](https://github.com/junegunn/vim-plug):
+
+```
+Plug 'psf/black', { 'branch': 'stable' }
+```
+
+or with [Vundle](https://github.com/VundleVim/Vundle.vim):
+
+```
+Plugin 'psf/black'
+```
+
+and execute the following in a terminal:
+
+```console
+$ cd ~/.vim/bundle/black
+$ git checkout origin/stable -b stable
+```
+
+or you can copy the plugin files from
+[plugin/black.vim](https://github.com/psf/black/blob/stable/plugin/black.vim) and
+[autoload/black.vim](https://github.com/psf/black/blob/stable/autoload/black.vim).
+
+```
+mkdir -p ~/.vim/pack/python/start/black/plugin
+mkdir -p ~/.vim/pack/python/start/black/autoload
+curl https://raw.githubusercontent.com/psf/black/stable/plugin/black.vim -o ~/.vim/pack/python/start/black/plugin/black.vim
+curl https://raw.githubusercontent.com/psf/black/stable/autoload/black.vim -o ~/.vim/pack/python/start/black/autoload/black.vim
+```
+
+Let me know if this requires any changes to work with Vim 8's builtin `packadd`, or
+Pathogen, and so on.
+
+This plugin **requires Vim 7.0+ built with Python 3.7+ support**. It needs Python 3.7 to
+be able to run _Black_ inside the Vim process which is much faster than calling an
+external command.
+
+On first run, the plugin creates its own virtualenv using the right Python version and
+automatically installs _Black_. You can upgrade it later by calling `:BlackUpgrade` and
+restarting Vim.
+
+If you need to do anything special to make your virtualenv work and install _Black_ (for
+example you want to run a version from main), create a virtualenv manually and point
+`g:black_virtualenv` to it. The plugin will use it.
+
+To run _Black_ on save, add the following lines to `.vimrc` or `init.vim`:
+
+```
+augroup black_on_save
+  autocmd!
+  autocmd BufWritePre *.py Black
+augroup end
+```
+
+To run _Black_ on a key press (e.g. F9 below), add this:
+
+```
+nnoremap <F9> :Black<CR>
+```
+
+**How to get Vim with Python 3.6?** On Ubuntu 17.10 Vim comes with Python 3.6 by
+default. On macOS with Homebrew run: `brew install vim`. When building Vim from source,
+use: `./configure --enable-python3interp=yes`. There are many guides online on how to do
+this.
+
+**I get an import error when using _Black_ from a virtual environment**: If you get an
+error message like this:
+
+```text
+Traceback (most recent call last):
+  File "<string>", line 63, in <module>
+  File "/home/gui/.vim/black/lib/python3.7/site-packages/black.py", line 45, in <module>
+    from typed_ast import ast3, ast27
+  File "/home/gui/.vim/black/lib/python3.7/site-packages/typed_ast/ast3.py", line 40, in <module>
+    from typed_ast import _ast3
+ImportError: /home/gui/.vim/black/lib/python3.7/site-packages/typed_ast/_ast3.cpython-37m-x86_64-linux-gnu.so: undefined symbol: PyExc_KeyboardInterrupt
+```
+
+Then you need to install `typed_ast` directly from the source code. The error happens
+because `pip` will download [Python wheels](https://pythonwheels.com/) if they are
+available. Python wheels are a standard for distributing Python packages, and packages
+that have Cython and extensions written in C are already compiled, so the installation
+is much faster. The problem here is that somehow the Python environment inside Vim does
+not match with those already compiled C extensions and these kinds of errors are the
+result. Luckily there is an easy fix: installing the packages from the source code.
+
+The package that causes problems is:
+
+- [typed-ast](https://pypi.org/project/typed-ast/)
+
+Now remove that package:
+
+```console
+$ pip uninstall typed-ast -y
+```
+
+And now you can install it with:
+
+```console
+$ pip install --no-binary :all: typed-ast
+```
+
+The C extensions will be compiled and now Vim's Python environment will match. Note that
+you need to have the GCC compiler and the Python development files installed (on
+Ubuntu/Debian do `sudo apt-get install build-essential python3-dev`).
+
+If you later want to update _Black_, you should do it like this:
+
+```console
+$ pip install -U black --no-binary typed-ast
+```
+
+### With ALE
+
+1. Install [`ale`](https://github.com/dense-analysis/ale)
+
+1. Install `black`
+
+1. Add this to your vimrc:
+
+   ```vim
+   let g:ale_fixers = {}
+   let g:ale_fixers.python = ['black']
+   ```
+
+## Gedit
+
+gedit is the default text editor of the GNOME desktop environment on Unix-like
+operating systems. Open gedit as
+
+```console
+$ gedit <file_name>
+```
+
+1. `Go to edit > preferences > plugins`
+1. Search for `external tools` and activate it.
+1. In `Tools menu -> Manage external tools`
+1. Add a new tool using the `+` button.
+1. Copy the below content to the code window.
+
+```console
+#!/bin/bash
+Name=$GEDIT_CURRENT_DOCUMENT_NAME
+black $Name
+```
+
+- Set a keyboard shortcut if you like, e.g. `ctrl-B`
+- Save: `Nothing`
+- Input: `Nothing`
+- Output: `Display in bottom pane` if you like.
+- Change the name of the tool if you like.
+
+Use your keyboard shortcut or `Tools -> External Tools` to use your new tool. When you
+close and reopen your file, _Black_ will be done with its job.
+
+## Visual Studio Code
+
+- Use the
+  [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python)
+  ([instructions](https://code.visualstudio.com/docs/python/editing#_formatting)).
+
+- Alternatively the pre-release
+  [Black Formatter](https://marketplace.visualstudio.com/items?itemName=ms-python.black-formatter)
+  extension can be used which runs a [Language Server Protocol](https://langserver.org/)
+  server for Black. Formatting is much more responsive using this extension, **but the
+  minimum supported version of Black is 22.3.0**.
+
+## SublimeText 3
+
+Use [sublack plugin](https://github.com/jgirardet/sublack).
+
+## Python LSP Server
+
+If your editor supports the [Language Server Protocol](https://langserver.org/) (Atom,
+Sublime Text, Visual Studio Code and many more), you can use the
+[Python LSP Server](https://github.com/python-lsp/python-lsp-server) with the
+[python-lsp-black](https://github.com/python-lsp/python-lsp-black) plugin.
+
+## Atom/Nuclide
+
+Use [python-black](https://atom.io/packages/python-black) or
+[formatters-python](https://atom.io/packages/formatters-python).
+
+## Gradle (the build tool)
+
+Use the [Spotless](https://github.com/diffplug/spotless/tree/main/plugin-gradle) plugin.
+
+## Kakoune
+
+Add the following hook to your kakrc, then run _Black_ with `:format`.
+
+```
+hook global WinSetOption filetype=python %{
+    set-option window formatcmd 'black -q -'
+}
+```
+
+## Thonny
+
+Use [Thonny-black-code-format](https://github.com/Franccisco/thonny-black-code-format).
diff --git a/docs/integrations/github_actions.md b/docs/integrations/github_actions.md
new file mode 100644
index 0000000..12bcb21
--- /dev/null
+++ b/docs/integrations/github_actions.md
@@ -0,0 +1,70 @@
+# GitHub Actions integration
+
+You can use _Black_ within a GitHub Actions workflow without setting up your own Python
+environment. Great for enforcing that your code matches the _Black_ code style.
+
+## Compatibility
+
+This action is known to support all GitHub-hosted runner OSes. In addition, only
+published versions of _Black_ are supported (i.e. whatever is available on PyPI).
+
+Finally, this action installs _Black_ with the `colorama` extra so the `--color` flag
+should work fine.
+
+## Usage
+
+Create a file named `.github/workflows/black.yml` inside your repository with:
+
+```yaml
+name: Lint
+
+on: [push, pull_request]
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: psf/black@stable
+```
+
+We recommend the use of the `@stable` tag, but per-version tags also exist if you prefer
+that. Note that the action's version you select is independent of the version of _Black_
+the action will use.
+
+The version of _Black_ the action will use can be configured via `version`. This can be
+any
+[valid version specifier](https://packaging.python.org/en/latest/glossary/#term-Version-Specifier)
+or just the version number if you want an exact version. The action defaults to the
+latest release available on PyPI. Only versions available from PyPI are supported, so no
+commit SHAs or branch names.
+
+If you want to include Jupyter Notebooks, _Black_ must be installed with the `jupyter`
+extra. Installing the extra and including Jupyter Notebook files can be configured via
+`jupyter` (default is `false`).
+
+You can also configure the arguments passed to _Black_ via `options` (defaults to
+`'--check --diff'`) and `src` (default is `'.'`).
+
+Here's an example configuration:
+
+```yaml
+- uses: psf/black@stable
+  with:
+    options: "--check --verbose"
+    src: "./src"
+    jupyter: true
+    version: "21.5b1"
+```
+
+If you want to match versions covered by Black's
+[stability policy](labels/stability-policy), you can use the compatible release operator
+(`~=`):
+
+```yaml
+- uses: psf/black@stable
+  with:
+    options: "--check --verbose"
+    src: "./src"
+    version: "~= 22.0"
+```
diff --git a/docs/integrations/index.md b/docs/integrations/index.md
new file mode 100644
index 0000000..33135d0
--- /dev/null
+++ b/docs/integrations/index.md
@@ -0,0 +1,31 @@
+# Integrations
+
+```{toctree}
+---
+hidden:
+---
+
+editors
+github_actions
+source_version_control
+```
+
+_Black_ can be integrated into many environments, providing a better and smoother
+experience. Documentation for integrating _Black_ with a tool can be found for the
+following areas:
+
+- {doc}`Editor / IDE <./editors>`
+- {doc}`GitHub Actions <./github_actions>`
+- {doc}`Source version control <./source_version_control>`
+
+Editors and tools not listed will require external contributions.
+
+Patches welcome! ✨ 🍰 ✨
+
+Any tool can pipe code through _Black_ using its stdio mode (just
+[use `-` as the file name](https://www.tldp.org/LDP/abs/html/special-chars.html#DASHREF2)).
+The formatted code will be returned on stdout (unless `--check` was passed). _Black_
+will still emit messages on stderr, but that shouldn't affect your use case.
+
+This can be used, for example, with PyCharm's or IntelliJ's
+[File Watchers](https://www.jetbrains.com/help/pycharm/file-watchers.html).
diff --git a/docs/integrations/source_version_control.md b/docs/integrations/source_version_control.md
new file mode 100644
index 0000000..4189b5c
--- /dev/null
+++ b/docs/integrations/source_version_control.md
@@ -0,0 +1,34 @@
+# Version control integration
+
+Use [pre-commit](https://pre-commit.com/). Once you
+[have it installed](https://pre-commit.com/#install), add this to the
+`.pre-commit-config.yaml` in your repository:
+
+```yaml
+repos:
+  - repo: https://github.com/psf/black
+    rev: 22.10.0
+    hooks:
+      - id: black
+        # It is recommended to specify the latest version of Python
+        # supported by your project here, or alternatively use
+        # pre-commit's default_language_version, see
+        # https://pre-commit.com/#top_level-default_language_version
+        language_version: python3.9
+```
+
+Feel free to switch out the `rev` value to something else, like another
+[tag/version][black-tags] or even a specific commit, although we discourage the use of
+branches or other mutable refs since the hook [won't auto-update as you may
+expect][pre-commit-mutable-rev].
+
+If you want support for Jupyter Notebooks as well, then replace `id: black` with
+`id: black-jupyter`.
+
+```{note}
+The `black-jupyter` hook is only available from version 21.8b0 and onwards.
+``` + +[black-tags]: https://github.com/psf/black/tags +[pre-commit-mutable-rev]: + https://pre-commit.com/#using-the-latest-version-for-a-repository diff --git a/docs/license.md b/docs/license.md new file mode 100644 index 0000000..132c95b --- /dev/null +++ b/docs/license.md @@ -0,0 +1,9 @@ +--- +orphan: true +--- + +# License + +```{include} ../LICENSE + +``` diff --git a/docs/pyproject_toml.md b/docs/pyproject_toml.md deleted file mode 100644 index ed88f37..0000000 --- a/docs/pyproject_toml.md +++ /dev/null @@ -1,91 +0,0 @@ -[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md" - -# pyproject.toml - -_Black_ is able to read project-specific default values for its command line options -from a `pyproject.toml` file. This is especially useful for specifying custom -`--include` and `--exclude`/`--force-exclude`/`--extend-exclude` patterns for your -project. - -**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is -"No". _Black_ is all about sensible defaults. - -## What on Earth is a `pyproject.toml` file? - -[PEP 518](https://www.python.org/dev/peps/pep-0518/) defines `pyproject.toml` as a -configuration file to store build system requirements for Python projects. With the help -of tools like [Poetry](https://python-poetry.org/) or -[Flit](https://flit.readthedocs.io/en/latest/) it can fully replace the need for -`setup.py` and `setup.cfg` files. - -## Where _Black_ looks for the file - -By default _Black_ looks for `pyproject.toml` starting from the common base directory of -all files and directories passed on the command line. If it's not there, it looks in -parent directories. It stops looking when it finds the file, or a `.git` directory, or a -`.hg` directory, or the root of the file system, whichever comes first. - -If you're formatting standard input, _Black_ will look for configuration starting from -the current working directory. - -You can use a "global" configuration, stored in a specific location in your home -directory. This will be used as a fallback configuration, that is, it will be used if -and only if _Black_ doesn't find any configuration as mentioned above. Depending on your -operating system, this configuration file should be stored as: - -- Windows: `~\.black` -- Unix-like (Linux, MacOS, etc.): `$XDG_CONFIG_HOME/black` (`~/.config/black` if the - `XDG_CONFIG_HOME` environment variable is not set) - -Note that these are paths to the TOML file itself (meaning that they shouldn't be named -as `pyproject.toml`), not directories where you store the configuration. Here, `~` -refers to the path to your home directory. On Windows, this will be something like -`C:\\Users\UserName`. - -You can also explicitly specify the path to a particular file that you want with -`--config`. In this situation _Black_ will not look for any other file. - -If you're running with `--verbose`, you will see a blue message if a file was found and -used. - -Files listed within a projects `.gitignore` file will not be formatted by _Black_. - -Please note `blackd` will not use `pyproject.toml` configuration. - -## Configuration format - -As the file extension suggests, `pyproject.toml` is a -[TOML](https://github.com/toml-lang/toml) file. It contains separate sections for -different tools. _Black_ is using the `[tool.black]` section. The option keys are the -same as long names of options on the command line. - -Note that you have to use single-quoted strings in TOML for regular expressions. It's -the equivalent of r-strings in Python. 
Multiline strings are treated as verbose regular -expressions by Black. Use `[ ]` to denote a significant space character. - -
-Example pyproject.toml - -```toml -[tool.black] -line-length = 88 -target-version = ['py37'] -include = '\.pyi?$' -extend-exclude = ''' -# A regex preceded with ^/ will apply only to files and directories -# in the root of the project. -^/foo.py # exclude a file named foo.py in the root of the project (in addition to the defaults) -''' -``` - -
- -## Lookup hierarchy - -Command-line options have defaults that you can see in `--help`. A `pyproject.toml` can -override those defaults. Finally, options provided by the user on the command line -override both. - -_Black_ will only ever use one `pyproject.toml` file during an entire run. It doesn't -look for multiple files, and doesn't compose configuration from different levels of the -file hierarchy. diff --git a/docs/reference/reference_functions.rst b/docs/reference/reference_functions.rst deleted file mode 100644 index bffce7e..0000000 --- a/docs/reference/reference_functions.rst +++ /dev/null @@ -1,180 +0,0 @@ -*Black* functions -================= - -*Contents are subject to change.* - -.. currentmodule:: black - -Assertions and checks ---------------------- - -.. autofunction:: black.assert_equivalent - -.. autofunction:: black.assert_stable - -.. autofunction:: black.can_be_split - -.. autofunction:: black.can_omit_invisible_parens - -.. autofunction:: black.is_empty_tuple - -.. autofunction:: black.is_import - -.. autofunction:: black.is_line_short_enough - -.. autofunction:: black.is_multiline_string - -.. autofunction:: black.is_one_tuple - -.. autofunction:: black.is_split_after_delimiter - -.. autofunction:: black.is_split_before_delimiter - -.. autofunction:: black.is_stub_body - -.. autofunction:: black.is_stub_suite - -.. autofunction:: black.is_vararg - -.. autofunction:: black.is_yield - - -Formatting ----------- - -.. autofunction:: black.format_file_contents - -.. autofunction:: black.format_file_in_place - -.. autofunction:: black.format_stdin_to_stdout - -.. autofunction:: black.format_str - -.. autofunction:: black.reformat_one - -.. autofunction:: black.schedule_formatting - -File operations ---------------- - -.. autofunction:: black.dump_to_file - -.. autofunction:: black.find_project_root - -.. autofunction:: black.gen_python_files - -.. autofunction:: black.read_pyproject_toml - -Parsing -------- - -.. autofunction:: black.decode_bytes - -.. autofunction:: black.lib2to3_parse - -.. autofunction:: black.lib2to3_unparse - -Split functions ---------------- - -.. autofunction:: black.bracket_split_build_line - -.. autofunction:: black.bracket_split_succeeded_or_raise - -.. autofunction:: black.delimiter_split - -.. autofunction:: black.left_hand_split - -.. autofunction:: black.right_hand_split - -.. autofunction:: black.standalone_comment_split - -.. autofunction:: black.transform_line - -Caching -------- - -.. autofunction:: black.filter_cached - -.. autofunction:: black.get_cache_file - -.. autofunction:: black.get_cache_info - -.. autofunction:: black.read_cache - -.. autofunction:: black.write_cache - -Utilities ---------- - -.. py:function:: black.DebugVisitor.show(code: str) -> None - - Pretty-print the lib2to3 AST of a given string of `code`. - -.. autofunction:: black.cancel - -.. autofunction:: black.child_towards - -.. autofunction:: black.container_of - -.. autofunction:: black.convert_one_fmt_off_pair - -.. autofunction:: black.diff - -.. autofunction:: black.dont_increase_indentation - -.. autofunction:: black.format_float_or_int_string - -.. autofunction:: black.ensure_visible - -.. autofunction:: black.enumerate_reversed - -.. autofunction:: black.enumerate_with_length - -.. autofunction:: black.generate_comments - -.. autofunction:: black.generate_ignored_nodes - -.. autofunction:: black.is_fmt_on - -.. autofunction:: black.contains_fmt_on_at_column - -.. autofunction:: black.first_leaf_column - -.. 
autofunction:: black.generate_trailers_to_omit - -.. autofunction:: black.get_future_imports - -.. autofunction:: black.list_comments - -.. autofunction:: black.make_comment - -.. autofunction:: black.maybe_make_parens_invisible_in_atom - -.. autofunction:: black.max_delimiter_priority_in_atom - -.. autofunction:: black.normalize_fmt_off - -.. autofunction:: black.normalize_numeric_literal - -.. autofunction:: black.normalize_prefix - -.. autofunction:: black.normalize_string_prefix - -.. autofunction:: black.normalize_string_quotes - -.. autofunction:: black.normalize_invisible_parens - -.. autofunction:: black.patch_click - -.. autofunction:: black.preceding_leaf - -.. autofunction:: black.re_compile_maybe_verbose - -.. autofunction:: black.should_split_line - -.. autofunction:: black.shutdown - -.. autofunction:: black.sub_twice - -.. autofunction:: black.whitespace diff --git a/docs/requirements.txt b/docs/requirements.txt index fcb6809..3c4b435 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,3 +1,9 @@ -recommonmark==0.6.0 -Sphinx==3.2.1 -Pygments==2.7.4 \ No newline at end of file +# Used by ReadTheDocs; pinned requirements for stability. + +myst-parser==0.18.1 +Sphinx==5.2.3 +# Older versions break Sphinx even though they're declared to be supported. +docutils==0.19 +sphinxcontrib-programoutput==0.17 +sphinx_copybutton==0.5.0 +furo==2022.9.29 diff --git a/docs/show_your_style.md b/docs/show_your_style.md deleted file mode 100644 index 67b213c..0000000 --- a/docs/show_your_style.md +++ /dev/null @@ -1,19 +0,0 @@ -[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md" - -# Show your style - -Use the badge in your project's README.md: - -```md -[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) -``` - -Using the badge in README.rst: - -``` -.. image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black -``` - -Looks like this: -[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) diff --git a/docs/the_black_code_style.md b/docs/the_black_code_style/current_style.md similarity index 84% rename from docs/the_black_code_style.md rename to docs/the_black_code_style/current_style.md index 39a452f..59d79c4 100644 --- a/docs/the_black_code_style.md +++ b/docs/the_black_code_style/current_style.md @@ -2,18 +2,28 @@ ## Code style -_Black_ reformats entire files in place. It is not configurable. It doesn't take -previous formatting into account. It doesn't reformat blocks that start with -`# fmt: off` and end with `# fmt: on`. `# fmt: on/off` have to be on the same level of -indentation. It also recognizes [YAPF](https://github.com/google/yapf)'s block comments -to the same effect, as a courtesy for straddling code. +_Black_ aims for consistency, generality, readability and reducing git diffs. Similar +language constructs are formatted with similar rules. Style configuration options are +deliberately limited and rarely added. Previous formatting is taken into account as +little as possible, with rare exceptions like the magic trailing comma. The coding style +used by _Black_ can be viewed as a strict subset of PEP 8. + +_Black_ reformats entire files in place. It doesn't reformat lines that end with +`# fmt: skip` or blocks that start with `# fmt: off` and end with `# fmt: on`. 
+`# fmt: on/off` must be on the same level of indentation and in the same block, meaning +no unindents beyond the initial indentation level between them. It also recognizes +[YAPF](https://github.com/google/yapf)'s block comments to the same effect, as a +courtesy for straddling code. + +The rest of this document describes the current formatting style. If you're interested +in trying out where the style is heading, see [future style](./future_style.md) and try +running `black --preview`. ### How _Black_ wraps lines _Black_ ignores previous formatting and applies uniform horizontal and vertical whitespace to your code. The rules for horizontal whitespace can be summarized as: do -whatever makes `pycodestyle` happy. The coding style used by _Black_ can be viewed as a -strict subset of PEP 8. +whatever makes `pycodestyle` happy. As for vertical whitespace, _Black_ tries to render one full expression or simple statement per line. If this fits the allotted line length, great. @@ -75,6 +85,21 @@ def very_important_function( ... ``` +If a data structure literal (tuple, list, set, dict) or a line of "from" imports cannot +fit in the allotted length, it's always split into one element per line. This minimizes +diffs as well as enables readers of code to find which commit introduced a particular +entry. This also makes _Black_ compatible with +[isort](../guides/using_black_with_other_tools.md#isort) with the ready-made `black` +profile or manual configuration. + +You might have noticed that closing brackets are always dedented and that a trailing +comma is always added. Such formatting produces smaller diffs; when you add or remove an +element, it's always just one line. Also, having the closing bracket dedented provides a +clear delimiter between two distinct sections of the code that otherwise share the same +indentation level (like the arguments list and the docstring in the example above). + +(labels/why-no-backslashes)= + _Black_ prefers parentheses over backslashes, and will remove backslashes if found. ```py3 @@ -115,62 +140,6 @@ If you're reaching for backslashes, that's a clear signal that you can do better slightly refactor your code. I hope some of the examples above show you that there are many ways in which you can do it. -However there is one exception: `with` statements using multiple context managers. -Python's grammar does not allow organizing parentheses around the series of context -managers. - -We don't want formatting like: - -```py3 -with make_context_manager1() as cm1, make_context_manager2() as cm2, make_context_manager3() as cm3, make_context_manager4() as cm4: - ... # nothing to split on - line too long -``` - -So _Black_ will now format it like this: - -```py3 -with \ - make_context_manager(1) as cm1, \ - make_context_manager(2) as cm2, \ - make_context_manager(3) as cm3, \ - make_context_manager(4) as cm4 \ -: - ... # backslashes and an ugly stranded colon -``` - -You might have noticed that closing brackets are always dedented and that a trailing -comma is always added. Such formatting produces smaller diffs; when you add or remove an -element, it's always just one line. Also, having the closing bracket dedented provides a -clear delimiter between two distinct sections of the code that otherwise share the same -indentation level (like the arguments list and the docstring in the example above). - -If a data structure literal (tuple, list, set, dict) or a line of "from" imports cannot -fit in the allotted length, it's always split into one element per line. 
This minimizes -diffs as well as enables readers of code to find which commit introduced a particular -entry. This also makes _Black_ compatible with [isort](https://pypi.org/p/isort/) with -the following configuration. - -
-A compatible `.isort.cfg` - -```cfg -[settings] -multi_line_output = 3 -include_trailing_comma = True -force_grid_wrap = 0 -use_parentheses = True -ensure_newline_before_comments = True -line_length = 88 -``` - -The equivalent command line is: - -``` -$ isort --multi-line=3 --trailing-comma --force-grid-wrap=0 --use-parentheses --line-width=88 [ file.py ] -``` - -
-
### Line length

You probably noticed the peculiar default line length. _Black_ defaults to 88 characters
@@ -239,6 +208,16 @@ following field or method. This conforms to
_Black_ won't insert empty lines after function docstrings unless that empty line is
required due to an inner function starting immediately after.

+### Comments
+
+_Black_ does not format comment contents, but it enforces two spaces between code and a
+comment on the same line, and a space before the comment text begins. Some types of
+comments that require specific spacing rules are respected: doc comments (`#: comment`),
+section comments with long runs of hashes, and Spyder cells. Non-breaking spaces after
+hashes are also preserved. Comments may sometimes be moved because of formatting
+changes, which can break tools that assign special meaning to them. See
+[AST before and after formatting](#ast-before-and-after-formatting) for more discussion.
+
### Trailing commas

_Black_ will add trailing commas to expressions that are split by comma where each
@@ -262,10 +241,10 @@ _Black_ prefers double quotes (`"` and `"""`) over single quotes (`'` and `'''`)
will replace the latter with the former as long as it does not result in more backslash
escapes than before.

-_Black_ also standardizes string prefixes, making them always lowercase. On top of that,
-if your code is already Python 3.6+ only or it's using the `unicode_literals` future
-import, _Black_ will remove `u` from the string prefix as it is meaningless in those
-scenarios.
+_Black_ also standardizes string prefixes. Prefix characters are made lowercase with the
+exception of [capital "R" prefixes](#rstrings-and-rstrings); unicode literal markers
+(`u`) are removed because they are meaningless in Python 3; and in the case of multiple
+characters, "r" is put first, as in spoken language: "raw f-string".

The main reason to standardize on a single form of quotes is aesthetics. Having one kind
of quotes everywhere reduces reader distraction. It will also enable a future version of
@@ -289,27 +268,18 @@ If you are adopting _Black_ in a large project with pre-existing string conventi
you can pass `--skip-string-normalization` on the command line. This is meant as an
adoption helper; avoid using it for new projects.

-As an experimental option, _Black_ splits long strings (using parentheses where
-appropriate) and merges short ones. When split, parts of f-strings that don't need
-formatting are converted to plain strings. User-made splits are respected when they do
-not exceed the line length limit. Line continuation backslashes are converted into
-parenthesized strings. Unnecessary parentheses are stripped. To enable experimental
-string processing, pass `--experimental-string-processing` on the command line. Because
-the functionality is experimental, feedback and issue reports are highly encouraged!
-
_Black_ also processes docstrings. Firstly, the indentation of docstrings is corrected
for both quotations and the text within, although relative indentation in the text is
preserved. Superfluous trailing whitespace on each line and unnecessary new lines at the
end of the docstring are removed. All leading tabs are converted to spaces, but tabs
inside text are preserved. Whitespace leading and trailing one-line docstrings is
-removed. The quotations of an empty docstring are separated with one space.
+removed.
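As an illustration of the docstring handling just described, here is a small
before/after sketch (exact output can vary between _Black_ versions):

```python
# Before: single quotes, stray whitespace around the text, and a
# needless newline before the closing quotes
def add_one(x):
    '''   Return x plus one.
    '''
    return x + 1


# After formatting with Black: one line, double quotes, no extra whitespace
def add_one(x):
    """Return x plus one."""
    return x + 1
```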
### Numeric literals

_Black_ standardizes most numeric literals to use lowercase letters for the syntactic
parts and uppercase letters for the digits themselves: `0xAB` instead of `0XAB` and
-`1e10` instead of `1E10`. Python 2 long literals are styled as `2L` instead of `2l` to
-avoid confusion between `l` and `1`.
+`1e10` instead of `1E10`.

### Line breaks & binary operators

@@ -318,6 +288,26 @@ multiple lines. This is so that _Black_ is compliant with the recent changes in
[PEP 8](https://www.python.org/dev/peps/pep-0008/#should-a-line-break-before-or-after-a-binary-operator)
style guide, which emphasizes that this approach improves readability.

+Almost all operators will be surrounded by single spaces; the only exceptions are unary
+operators (`+`, `-`, and `~`) and power operators when both operands are simple. For
+powers, an operand is considered simple if it's only a NAME, numeric CONSTANT, or
+attribute access (chained attribute access is allowed), with or without a preceding
+unary operator.
+
+```python
+# For example, these won't be surrounded by whitespace
+a = x**y
+b = config.base**5.2
+c = config.base**runtime.config.exponent
+d = 2**5
+e = 2**~5
+
+# ... but these will be surrounded by whitespace
+f = 2 ** get_exponent()
+g = get_x() ** get_y()
+h = config['base'] ** 2
+```
+
### Slices

PEP 8
@@ -354,7 +344,7 @@ pair of parentheses to form an atom. There are a few interesting cases:
In those cases, parentheses are removed when the entire statement fits in one line, or
if the inner expression doesn't have any delimiters to further split on. If there is
only a single delimiter and the expression starts or ends with a bracket, the
-parenthesis can also be successfully omitted since the existing bracket pair will
+parentheses can also be successfully omitted since the existing bracket pair will
organize the expression neatly anyway. Otherwise, the parentheses are added.

Please note that _Black_ does not add or remove any additional nested parentheses that
@@ -410,16 +400,16 @@ recommended code style for those files is more terse than PEP 8:

_Black_ enforces the above rules. There are additional guidelines for formatting `.pyi`
files that are not enforced yet but might be in a future version of the formatter:

-- all function bodies should be empty (contain `...` instead of the body);
-- do not use docstrings;
- prefer `...` over `pass`;
-- for arguments with a default, use `...` instead of the actual default;
- avoid using string literals in type annotations; stub files support forward references
  natively (like Python 3.7 code with `from __future__ import annotations`);
- use variable annotations instead of type comments, even for stubs that target older
-  versions of Python;
-- for arguments that default to `None`, use `Optional[]` explicitly;
-- use `float` instead of `Union[int, float]`.
+  versions of Python.
+
+### Line endings
+
+_Black_ will normalize line endings (`\n` or `\r\n`) based on the first line ending of
+the file.

## Pragmatism

@@ -453,7 +443,7 @@ into one item per line.

How do you make it stop? Just delete that trailing comma and _Black_ will collapse your
collection into one line if it fits.

-If you must, you can recover the behaviour of early versions of Black with the option
+If you must, you can recover the behaviour of early versions of _Black_ with the option
`--skip-magic-trailing-comma` / `-C`.

### r"strings" and R"strings"

@@ -478,11 +468,11 @@ target.
There are three limited cases in which the AST does differ:
of docstrings that we're aware of sanitizes indentation and leading/trailing
whitespace anyway.

-2. _Black_ manages optional parentheses for some statements. In the case of the `del`
+1. _Black_ manages optional parentheses for some statements. In the case of the `del`
   statement, presence of wrapping parentheses or lack thereof changes the resulting AST
   but is semantically equivalent in the interpreter.

-3. _Black_ might move comments around, which includes type comments. Those are part of
+1. _Black_ might move comments around, which includes type comments. Those are part of
   the AST as of Python 3.8. While the tool implements a number of special cases for those
   comments, there is no guarantee they will remain where they were in the source. Note
   that this doesn't change runtime behavior of the source code.
diff --git a/docs/the_black_code_style/future_style.md b/docs/the_black_code_style/future_style.md
new file mode 100644
index 0000000..a028a28
--- /dev/null
+++ b/docs/the_black_code_style/future_style.md
@@ -0,0 +1,133 @@
+# The (future of the) Black code style
+
+```{warning}
+Changes to this document often aren't tied to releases of _Black_ and don't correspond
+to them. It's recommended that you read the latest version available.
+```
+
+## Using backslashes for with statements
+
+[Backslashes are bad and should never be used](labels/why-no-backslashes); however,
+there is one exception: `with` statements using multiple context managers. Before
+Python 3.9, Python's grammar does not allow organizing parentheses around the series of
+context managers.
+
+We don't want formatting like:
+
+```py3
+with make_context_manager1() as cm1, make_context_manager2() as cm2, make_context_manager3() as cm3, make_context_manager4() as cm4:
+    ...  # nothing to split on - line too long
+```
+
+So _Black_ will eventually format it like this:
+
+```py3
+with \
+     make_context_manager1() as cm1, \
+     make_context_manager2() as cm2, \
+     make_context_manager3() as cm3, \
+     make_context_manager4() as cm4 \
+:
+    ...  # backslashes and an ugly stranded colon
+```
+
+When the target version is Python 3.9 or higher, however, _Black_ will use parentheses
+instead since they're allowed in Python 3.9 and higher.
+
+An alternative to consider if the backslashes in the above formatting are undesirable is
+to use {external:py:obj}`contextlib.ExitStack` to combine context managers in the
+following way:
+
+```python
+with contextlib.ExitStack() as exit_stack:
+    cm1 = exit_stack.enter_context(make_context_manager1())
+    cm2 = exit_stack.enter_context(make_context_manager2())
+    cm3 = exit_stack.enter_context(make_context_manager3())
+    cm4 = exit_stack.enter_context(make_context_manager4())
+    ...
+```
+
+## Preview style
+
+Experimental, potentially disruptive style changes are gathered under the `--preview`
+CLI flag. At the end of each year, these changes may be adopted into the default style,
+as described in [The Black Code Style](./index.md). Because the functionality is
+experimental, feedback and issue reports are highly encouraged!
+
+### Improved string processing
+
+_Black_ will split long string literals and merge short ones. Parentheses are used where
+appropriate. When split, parts of f-strings that don't need formatting are converted to
+plain strings. User-made splits are respected when they do not exceed the line length
+limit. Line continuation backslashes are converted into parenthesized strings.
+Unnecessary parentheses are stripped.
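As a sketch of what this string processing looks like (illustrative only; the exact
split points depend on the line length and may change while the feature is
experimental):

```python
# Before: the literal overflows the default 88-column limit
raise ValueError("This is a very long error message that would overflow the default eighty-eight character line length limit.")

# After `black --preview`: the literal is split into implicitly concatenated parts
raise ValueError(
    "This is a very long error message that would overflow the default"
    " eighty-eight character line length limit."
)
```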
The stability and status of this feature are
+tracked in [this issue](https://github.com/psf/black/issues/2188).
+
+### Removing newlines at the beginning of code blocks
+
+_Black_ will remove newlines at the beginning of new code blocks, i.e. when the
+indentation level is increased. For example:
+
+```python
+def my_func():
+
+    print("The line above me will be deleted!")
+```
+
+will be changed to:
+
+```python
+def my_func():
+    print("The line above me will be deleted!")
+```
+
+This new feature will be applied to **all code blocks**: `def`, `class`, `if`, `for`,
+`while`, `with`, `case` and `match`.
+
+### Improved parentheses management
+
+_Black_ will format parentheses around return annotations similarly to other sets of
+parentheses. For example:
+
+```python
+def foo() -> (int):
+    ...
+
+def foo() -> looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong:
+    ...
+```
+
+will be changed to:
+
+```python
+def foo() -> int:
+    ...
+
+
+def foo() -> (
+    looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong
+):
+    ...
+```
+
+Also, extra parentheses in `await` expressions and `with` statements are removed. For
+example:
+
+```python
+with ((open("bla.txt")) as f, open("x")):
+    ...
+
+async def main():
+    await (asyncio.sleep(1))
+```
+
+will be changed to:
+
+```python
+with open("bla.txt") as f, open("x"):
+    ...
+
+
+async def main():
+    await asyncio.sleep(1)
+```
diff --git a/docs/the_black_code_style/index.md b/docs/the_black_code_style/index.md
new file mode 100644
index 0000000..e5967be
--- /dev/null
+++ b/docs/the_black_code_style/index.md
@@ -0,0 +1,52 @@
+# The Black Code Style
+
+```{toctree}
+---
+hidden:
+---
+
+Current style <current_style>
+Future style <future_style>
+```
+
+_Black_ is a PEP 8 compliant opinionated formatter with its own style.
+
+While keeping the style unchanged throughout releases has always been a goal, the
+_Black_ code style isn't set in stone. It evolves to accommodate new features in the
+Python language and, occasionally, in response to user feedback. Large-scale style
+preferences presented in {doc}`current_style` are very unlikely to change, but minor
+style aspects and details might change according to the stability policy presented
+below. Ongoing style considerations are tracked on GitHub with the
+[design](https://github.com/psf/black/labels/T%3A%20design) issue label.
+
+(labels/stability-policy)=
+
+## Stability Policy
+
+The following policy applies to the _Black_ code style in non-prerelease versions of
+_Black_:
+
+- If code has been formatted with _Black_, it will remain unchanged when formatted with
+  the same options using any other release in the same calendar year.
+
+  This means projects can safely use `black ~= 22.0` without worrying about formatting
+  changes disrupting their project in 2022. We may still fix bugs where _Black_ crashes
+  on some code, and make other improvements that do not affect formatting.
+
+  In rare cases, we may make changes affecting code that has not been previously
+  formatted with _Black_. For example, we have had bugs where we accidentally removed
+  some comments. Such bugs can be fixed without breaking the stability policy.
+
+- The first release in a new calendar year _may_ contain formatting changes, although
+  these will be minimised as much as possible. This is to allow for improved formatting
+  enabled by newer Python language syntax as well as improvements in the formatting
+  logic.
+
+- The `--preview` flag is exempt from this policy.
There are no guarantees around the
+  stability of the output with that flag passed into _Black_. This flag is intended to
+  allow experimentation with the proposed changes to the _Black_ code style.
+
+Documentation for both the current and future styles can be found here:
+
+- {doc}`current_style`
+- {doc}`future_style`
diff --git a/docs/blackd.md b/docs/usage_and_configuration/black_as_a_server.md
similarity index 80%
rename from docs/blackd.md
rename to docs/usage_and_configuration/black_as_a_server.md
index c8058ee..f24fb34 100644
--- a/docs/blackd.md
+++ b/docs/usage_and_configuration/black_as_a_server.md
@@ -1,13 +1,18 @@
-## blackd
+# Black as a server (blackd)

`blackd` is a small HTTP server that exposes _Black_'s functionality over a simple
protocol. The main benefit of using it is to avoid the cost of starting up a new _Black_
process every time you want to blacken a file.

-### Usage
+```{warning}
+`blackd` should not be run as a publicly accessible server as there are no security
+precautions in place to prevent abuse. **It is intended for local use only**.
+```
+
+## Usage

`blackd` is not packaged alongside _Black_ by default because it has additional
-dependencies. You will need to execute `pip install black[d]` to install it.
+dependencies. You will need to execute `pip install 'black[d]'` to install it.

You can start the server on the default port, binding only to the local interface, by
running `blackd`. You will see a single line mentioning the server's version, and the
@@ -18,14 +23,8 @@ formatting requests.

`blackd` provides even fewer options than _Black_. You can see them by running
`blackd --help`:

-```text
-Usage: blackd [OPTIONS]
+```{program-output} blackd --help

-Options:
-  --bind-host TEXT    Address to bind the server to.
-  --bind-port INTEGER Port to listen on
-  --version           Show the version and exit.
-  -h, --help          Show this message and exit.
```

There is no official `blackd` client tool (yet!). You can test that blackd is working
@@ -36,7 +35,7 @@ blackd --bind-port 9090 &  # or let blackd choose a port
curl -s -XPOST "localhost:9090" -d "print('valid')"
```

-### Protocol
+## Protocol

`blackd` only accepts `POST` requests at the `/` path. The body of the request should
contain the python source code to be formatted, encoded according to the `charset` field
@@ -51,12 +50,18 @@ is rejected with `HTTP 501` (Not Implemented).

The headers controlling how source code is formatted are:

- `X-Line-Length`: corresponds to the `--line-length` command line flag.
+- `X-Skip-Source-First-Line`: corresponds to the `--skip-source-first-line` command line
+  flag. If present and its value is not an empty string, the first line of the source
+  code will be ignored.
- `X-Skip-String-Normalization`: corresponds to the `--skip-string-normalization`
  command line flag. If present and its value is not the empty string, no string
  normalization will be performed.
- `X-Skip-Magic-Trailing-Comma`: corresponds to the `--skip-magic-trailing-comma`
-  command line flag. If present and its value is not the empty string, trailing commas
+  command line flag. If present and its value is not an empty string, trailing commas
  will not be used as a reason to split lines.
+- `X-Preview`: corresponds to the `--preview` command line flag. If present and its
+  value is not an empty string, experimental and potentially disruptive style changes
+  will be used.
- `X-Fast-Or-Safe`: if set to `fast`, `blackd` will act as _Black_ does when passed the
  `--fast` command line flag.
- `X-Python-Variant`: if set to `pyi`, `blackd` will act as _Black_ does when passed the
diff --git a/docs/usage_and_configuration/black_docker_image.md b/docs/usage_and_configuration/black_docker_image.md
new file mode 100644
index 0000000..8de566e
--- /dev/null
+++ b/docs/usage_and_configuration/black_docker_image.md
@@ -0,0 +1,46 @@
+# Black Docker image
+
+Official _Black_ Docker images are available on
+[Docker Hub](https://hub.docker.com/r/pyfound/black).
+
+_Black_ images with the following tags are available:
+
+- release numbers, e.g. `21.5b2`, `21.6b0`, `21.7b0` etc.\
+  ℹ Recommended for users who want to use a particular version of _Black_.
+- `latest_release` - tag created when a new version of _Black_ is released.\
+  ℹ Recommended for users who want to use released versions of _Black_. It maps to [the latest release](https://github.com/psf/black/releases/latest)
+  of _Black_.
+- `latest` - tag used for the newest image of _Black_.\
+  ℹ Recommended for users who always want to use the latest version of _Black_, even before
+  it is released.
+
+There is one more tag used for _Black_ Docker images: `latest_non_release`. It is
+created for all unreleased
+[commits on the `main` branch](https://github.com/psf/black/commits/main). This tag is
+not meant to be used by external users.
+
+## Usage
+
+You don't have to create a permanent container to use _Black_ as a Docker image. It's
+enough to run _Black_ commands against the chosen image, denoted as `:tag`. In the
+examples below, the `latest_release` tag is used. If `:tag` is omitted, the `latest` tag
+will be used.
+
+More about _Black_ usage can be found in
+[Usage and Configuration: The basics](./the_basics.md).
+
+### Check Black version
+
+```console
+$ docker run --rm pyfound/black:latest_release black --version
+```
+
+### Check code
+
+```console
+$ docker run --rm --volume $(pwd):/src --workdir /src pyfound/black:latest_release black --check .
+```
+
+_Remark_: besides [regular _Black_ exit codes](./the_basics.md) returned by the
+`--check` option, [Docker exit codes](https://docs.docker.com/engine/reference/run/#exit-status)
+should also be considered.
diff --git a/docs/usage_and_configuration/file_collection_and_discovery.md b/docs/usage_and_configuration/file_collection_and_discovery.md
new file mode 100644
index 0000000..de1d5e6
--- /dev/null
+++ b/docs/usage_and_configuration/file_collection_and_discovery.md
@@ -0,0 +1,38 @@
+# File collection and discovery
+
+You can directly pass _Black_ files, but you can also pass directories and _Black_ will
+walk them, collecting files to format. It determines what files to format or skip
+automatically, using the inclusion and exclusion regexes as well as the files'
+modification time.
+
+## Ignoring unmodified files
+
+_Black_ remembers files it has already formatted, unless the `--diff` flag is used or
+code is passed via standard input. This information is stored per-user. The exact
+location of the file depends on the _Black_ version and the system on which _Black_ is
+run. The file is non-portable. The standard location on common operating systems is:
+
+- Windows:
+  `C:\\Users\<username>\AppData\Local\black\black\Cache\<version>\cache.<line-length>.<file-mode>.pickle`
+- macOS:
+  `/Users/<username>/Library/Caches/black/<version>/cache.<line-length>.<file-mode>.pickle`
+- Linux:
+  `/home/<username>/.cache/black/<version>/cache.<line-length>.<file-mode>.pickle`
+
+`file-mode` is an int flag that determines whether the file was formatted as 3.6+ only,
+as .pyi, and whether string normalization was omitted.
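One way to see the cache at work is to run _Black_ twice on the same file; the second
run detects that nothing changed on disk and skips the file (a sketch; `slow_module.py`
is a stand-in name and the output is abbreviated):

```console
$ black slow_module.py
reformatted slow_module.py
All done! ✨ 🍰 ✨
1 file reformatted.

$ black slow_module.py
All done! ✨ 🍰 ✨
1 file left unchanged.
```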
+
+To override the location of these files on all systems, set the environment variable
+`BLACK_CACHE_DIR` to the preferred location. Alternatively on macOS and Linux, set
+`XDG_CACHE_HOME` to your preferred location. For example, if you want to put the cache
+in the directory you're running _Black_ from, set `BLACK_CACHE_DIR=.cache/black`.
+_Black_ will then write the above files to `.cache/black`. Note that `BLACK_CACHE_DIR`
+will take precedence over `XDG_CACHE_HOME` if both are set.
+
+## .gitignore
+
+If `--exclude` is not set, _Black_ will automatically ignore files and directories in
+`.gitignore` file(s), if present.
+
+If you want _Black_ to continue using `.gitignore` while also configuring the exclusion
+rules, please use `--extend-exclude`.
diff --git a/docs/usage_and_configuration/index.md b/docs/usage_and_configuration/index.md
new file mode 100644
index 0000000..1c86a49
--- /dev/null
+++ b/docs/usage_and_configuration/index.md
@@ -0,0 +1,28 @@
+# Usage and Configuration
+
+```{toctree}
+---
+hidden:
+---
+
+the_basics
+file_collection_and_discovery
+black_as_a_server
+black_docker_image
+```
+
+Sometimes, running _Black_ with its defaults and passing filepaths to it just won't cut
+it. Passing each file using paths will become burdensome, and maybe you would like
+_Black_ to not touch your files and just output diffs. And yes, you _can_ tweak certain
+parts of _Black_'s style, but please know that configurability in this area is
+purposefully limited.
+
+Using many of these more advanced features of _Black_ will require some configuration,
+which will either live on the command line or in a TOML configuration file.
+
+This section covers features of _Black_ and configuring _Black_ in detail:
+
+- {doc}`The basics <./the_basics>`
+- {doc}`File collection and discovery <./file_collection_and_discovery>`
+- {doc}`Black as a server (blackd) <./black_as_a_server>`
+- {doc}`Black Docker image <./black_docker_image>`
diff --git a/docs/usage_and_configuration/the_basics.md b/docs/usage_and_configuration/the_basics.md
new file mode 100644
index 0000000..20aa956
--- /dev/null
+++ b/docs/usage_and_configuration/the_basics.md
@@ -0,0 +1,297 @@
+# The basics
+
+Foundational knowledge on using and configuring Black.
+
+_Black_ is a well-behaved Unix-style command-line tool:
+
+- it does nothing if it finds no sources to format;
+- it will read from standard input and write to standard output if `-` is used as the
+  filename;
+- it only outputs messages to users on standard error;
+- it exits with code 0 unless an internal error occurred or a CLI option prompted it.
+
+## Usage
+
+To get started right away with sensible defaults:
+
+```sh
+black {source_file_or_directory}
+```
+
+You can run _Black_ as a package if running it as a script doesn't work:
+
+```sh
+python -m black {source_file_or_directory}
+```
+
+### Command line options
+
+The CLI options of _Black_ can be displayed by expanding the view below or by running
+`black --help`. While _Black_ has quite a few knobs these days, it is still opinionated,
+so style options are deliberately limited and rarely added.
+<details>
+
+<summary>CLI reference</summary>
+
+```{program-output} black --help
+
+```
+
+</details>
+
+### Code input alternatives
+
+#### Standard Input
+
+_Black_ supports formatting code via stdin, with the result being printed to stdout.
+Just let _Black_ know with `-` as the path.
+
+```console
+$ echo "print ( 'hello, world' )" | black -
+print("hello, world")
+reformatted -
+All done! ✨ 🍰 ✨
+1 file reformatted.
+```
+
+**Tip:** if you need _Black_ to treat stdin input as a file passed directly via the CLI,
+use `--stdin-filename`. This is useful to make sure _Black_ will respect the
+`--force-exclude` option on some editors that rely on using stdin.
+
+#### As a string
+
+You can also pass code as a string using the `-c` / `--code` option.
+
+```console
+$ black --code "print ( 'hello, world' )"
+print("hello, world")
+```
+
+### Writeback and reporting
+
+By default _Black_ reformats the files given and/or found in place. Sometimes you need
+_Black_ to just tell you what it _would_ do without actually rewriting the Python files.
+
+There are two variations to this mode that are independently enabled by their respective
+flags. Both variations can be enabled at once.
+
+#### Exit code
+
+Passing `--check` will make _Black_ exit with:
+
+- code 0 if nothing would change;
+- code 1 if some files would be reformatted; or
+- code 123 if there was an internal error.
+
+```console
+$ black test.py --check
+All done! ✨ 🍰 ✨
+1 file would be left unchanged.
+$ echo $?
+0
+
+$ black test.py --check
+would reformat test.py
+Oh no! 💥 💔 💥
+1 file would be reformatted.
+$ echo $?
+1
+
+$ black test.py --check
+error: cannot format test.py: INTERNAL ERROR: Black produced code that is not equivalent to the source. Please report a bug on https://github.com/psf/black/issues. This diff might be helpful: /tmp/blk_kjdr1oog.log
+Oh no! 💥 💔 💥
+1 file would fail to reformat.
+$ echo $?
+123
+```
+
+#### Diffs
+
+Passing `--diff` will make _Black_ print out diffs that indicate what changes _Black_
+would've made. They are printed to stdout, so capturing them is simple.
+
+If you'd like colored diffs, you can enable them with the `--color` flag.
+
+```console
+$ black test.py --diff
+--- test.py 2021-03-08 22:23:40.848954 +0000
++++ test.py 2021-03-08 22:23:47.126319 +0000
+@@ -1 +1 @@
+-print ( 'hello, world' )
++print("hello, world")
+would reformat test.py
+All done! ✨ 🍰 ✨
+1 file would be reformatted.
+```
+
+### Output verbosity
+
+_Black_ in general tries to produce the right amount of output, balancing usefulness and
+conciseness. By default, _Black_ emits the files it modified and error messages, plus a
+short summary.
+
+```console
+$ black src/
+error: cannot format src/black_primer/cli.py: Cannot parse: 5:6: mport asyncio
+reformatted src/black_primer/lib.py
+reformatted src/blackd/__init__.py
+reformatted src/black/__init__.py
+Oh no! 💥 💔 💥
+3 files reformatted, 2 files left unchanged, 1 file failed to reformat.
+```
+
+Passing `-v` / `--verbose` will cause _Black_ to also emit messages about files that
+were not changed or were ignored due to exclusion patterns. If _Black_ is using a
+configuration file, a blue message detailing which one it is using will be emitted.
+
+```console
+$ black src/ -v
+Using configuration from /tmp/pyproject.toml.
+src/blib2to3 ignored: matches the --extend-exclude regular expression
+src/_black_version.py wasn't modified on disk since last run.
+src/black/__main__.py wasn't modified on disk since last run.
+error: cannot format src/black_primer/cli.py: Cannot parse: 5:6: mport asyncio
+reformatted src/black_primer/lib.py
+reformatted src/blackd/__init__.py
+reformatted src/black/__init__.py
+Oh no! 💥 💔 💥
+3 files reformatted, 2 files left unchanged, 1 file failed to reformat.
+```
+
+Passing `-q` / `--quiet` will cause _Black_ to stop emitting all non-critical output.
+Error messages will still be emitted (which can be silenced by `2>/dev/null`).
+
+```console
+$ black src/ -q
+error: cannot format src/black_primer/cli.py: Cannot parse: 5:6: mport asyncio
+```
+
+### Versions
+
+You can check the version of _Black_ you have installed using the `--version` flag.
+
+```console
+$ black --version
+black, version 22.10.0
+```
+
+An option to require a specific version to be running is also provided.
+
+```console
+$ black --required-version 21.9b0 -c "format = 'this'"
+format = "this"
+$ black --required-version 31.5b2 -c "still = 'beta?!'"
+Oh no! 💥 💔 💥 The required version does not match the running version!
+```
+
+This is useful, for example, when running _Black_ in multiple environments that haven't
+necessarily installed the correct version. This option can be set in a configuration
+file for consistent results across environments.
+
+## Configuration via a file
+
+_Black_ is able to read project-specific default values for its command line options
+from a `pyproject.toml` file. This is especially useful for specifying custom
+`--include` and `--exclude`/`--force-exclude`/`--extend-exclude` patterns for your
+project.
+
+**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is
+"No". _Black_ is all about sensible defaults. Applying those defaults will have your
+code in compliance with many other _Black_ formatted projects.
+
+### What on Earth is a `pyproject.toml` file?
+
+[PEP 518](https://www.python.org/dev/peps/pep-0518/) defines `pyproject.toml` as a
+configuration file to store build system requirements for Python projects. With the help
+of tools like [Poetry](https://python-poetry.org/),
+[Flit](https://flit.readthedocs.io/en/latest/), or
+[Hatch](https://hatch.pypa.io/latest/) it can fully replace the need for `setup.py` and
+`setup.cfg` files.
+
+### Where _Black_ looks for the file
+
+By default _Black_ looks for `pyproject.toml` starting from the common base directory of
+all files and directories passed on the command line. If it's not there, it looks in
+parent directories. It stops looking when it finds the file, or a `.git` directory, or a
+`.hg` directory, or the root of the file system, whichever comes first.
+
+If you're formatting standard input, _Black_ will look for configuration starting from
+the current working directory.
+
+You can use a "global" configuration, stored in a specific location in your home
+directory. This will be used as a fallback configuration, that is, it will be used if
+and only if _Black_ doesn't find any configuration as mentioned above. Depending on your
+operating system, this configuration file should be stored as:
+
+- Windows: `~\.black`
+- Unix-like (Linux, macOS, etc.): `$XDG_CONFIG_HOME/black` (`~/.config/black` if the
+  `XDG_CONFIG_HOME` environment variable is not set)
+
+Note that these are paths to the TOML file itself (meaning that they shouldn't be named
+as `pyproject.toml`), not directories where you store the configuration. Here, `~`
+refers to the path to your home directory. On Windows, this will be something like
+`C:\\Users\UserName`.
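For example, a minimal global configuration file saved at one of the paths above might
contain nothing more than this (a sketch; it assumes the file uses the same
`[tool.black]` table that a `pyproject.toml` would):

```toml
# Sketch of a fallback user-level configuration; both values are illustrative.
[tool.black]
line-length = 88
target-version = ['py38']
```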
+ +You can also explicitly specify the path to a particular file that you want with +`--config`. In this situation _Black_ will not look for any other file. + +If you're running with `--verbose`, you will see a blue message if a file was found and +used. + +Please note `blackd` will not use `pyproject.toml` configuration. + +### Configuration format + +As the file extension suggests, `pyproject.toml` is a +[TOML](https://github.com/toml-lang/toml) file. It contains separate sections for +different tools. _Black_ is using the `[tool.black]` section. The option keys are the +same as long names of options on the command line. + +Note that you have to use single-quoted strings in TOML for regular expressions. It's +the equivalent of r-strings in Python. Multiline strings are treated as verbose regular +expressions by Black. Use `[ ]` to denote a significant space character. + +
+<details>
+
+<summary>Example pyproject.toml</summary>
+
+```toml
+[tool.black]
+line-length = 88
+target-version = ['py37']
+include = '\.pyi?$'
+# 'extend-exclude' excludes files or directories in addition to the defaults
+extend-exclude = '''
+# A regex preceded with ^/ will apply only to files and directories
+# in the root of the project.
+(
+  ^/foo.py    # exclude a file named foo.py in the root of the project
+  | .*_pb2.py  # exclude autogenerated Protocol Buffer files anywhere in the project
+)
+'''
+```
+
+</details>
+
+### Lookup hierarchy
+
+Command-line options have defaults that you can see in `--help`. A `pyproject.toml` can
+override those defaults. Finally, options provided by the user on the command line
+override both.
+
+_Black_ will only ever use one `pyproject.toml` file during an entire run. It doesn't
+look for multiple files, and doesn't compose configuration from different levels of the
+file hierarchy.
+
+## Next steps
+
+You've probably noted that not all of the options you can pass to _Black_ have been
+covered. Don't worry, the rest will be covered in a later section.
+
+A good next step would be configuring auto-discovery so `black .` is all you need
+instead of laboriously listing every file or directory. You can get started by heading
+over to [File collection and discovery](./file_collection_and_discovery.md).
+
+Another good choice would be setting up an
+[integration with your editor](../integrations/editors.md) of choice or with
+[pre-commit for source version control](../integrations/source_version_control.md).
diff --git a/docs/version_control_integration.md b/docs/version_control_integration.md
deleted file mode 100644
index 2d8bc17..0000000
--- a/docs/version_control_integration.md
+++ /dev/null
@@ -1,28 +0,0 @@
-[//]: # "NOTE: THIS FILE WAS AUTOGENERATED FROM README.md"
-
-# Version control integration
-
-Use [pre-commit](https://pre-commit.com/). Once you
-[have it installed](https://pre-commit.com/#install), add this to the
-`.pre-commit-config.yaml` in your repository:
-
-```yaml
-repos:
-  - repo: https://github.com/psf/black
-    rev: 20.8b1 # Replace by any tag/version: https://github.com/psf/black/tags
-    hooks:
-      - id: black
-        language_version: python3 # Should be a command that runs python3.6+
-```
-
-Then run `pre-commit install` and you're ready to go.
-
-Avoid using `args` in the hook. Instead, store necessary configuration in
-`pyproject.toml` so that editors and command-line usage of Black all behave consistently
-for your project. See _Black_'s own
-[pyproject.toml](https://github.com/psf/black/blob/master/pyproject.toml) for an
-example.
-
-If you're already using Python 3.7, switch the `language_version` accordingly. Finally,
-`stable` is a branch that tracks the latest release on PyPI. If you'd rather run on
-master, this is also an option.
diff --git a/gallery/gallery.py b/gallery/gallery.py index 6b42ec3..38e52e3 100755 --- a/gallery/gallery.py +++ b/gallery/gallery.py @@ -10,26 +10,16 @@ from concurrent.futures import ThreadPoolExecutor from functools import lru_cache, partial from pathlib import Path -from typing import ( # type: ignore # typing can't see Literal - Generator, - List, - Literal, - NamedTuple, - Optional, - Tuple, - Union, - cast, -) +from typing import Generator, List, NamedTuple, Optional, Tuple, Union, cast from urllib.request import urlopen, urlretrieve PYPI_INSTANCE = "https://pypi.org/pypi" PYPI_TOP_PACKAGES = ( - "https://hugovk.github.io/top-pypi-packages/top-pypi-packages-{days}-days.json" + "https://hugovk.github.io/top-pypi-packages/top-pypi-packages-30-days.min.json" ) INTERNAL_BLACK_REPO = f"{tempfile.gettempdir()}/__black" ArchiveKind = Union[tarfile.TarFile, zipfile.ZipFile] -Days = Union[Literal[30], Literal[365]] subprocess.run = partial(subprocess.run, check=True) # type: ignore # https://github.com/python/mypy/issues/1484 @@ -64,8 +54,8 @@ def get_pypi_download_url(package: str, version: Optional[str]) -> str: return cast(str, source["url"]) -def get_top_packages(days: Days) -> List[str]: - with urlopen(PYPI_TOP_PACKAGES.format(days=days)) as page: +def get_top_packages() -> List[str]: + with urlopen(PYPI_TOP_PACKAGES) as page: result = json.load(page) return [package["project"] for package in result["rows"]] @@ -74,7 +64,7 @@ def get_top_packages(days: Days) -> List[str]: def get_package_source(package: str, version: Optional[str]) -> str: if package == "cpython": if version is None: - version = "master" + version = "main" return f"https://github.com/python/cpython/archive/{version}.zip" elif package == "pypy": if version is None: @@ -128,13 +118,12 @@ def get_package( def download_and_extract_top_packages( directory: Path, - days: Days = 365, workers: int = 8, limit: slice = DEFAULT_SLICE, ) -> Generator[Path, None, None]: with ThreadPoolExecutor(max_workers=workers) as executor: bound_downloader = partial(get_package, version=None, directory=directory) - for package in executor.map(bound_downloader, get_top_packages(days)[limit]): + for package in executor.map(bound_downloader, get_top_packages()[limit]): if package is not None: yield package @@ -248,9 +237,9 @@ def format_repos(repos: Tuple[Path, ...], options: Namespace) -> None: black_version=black_version, input_directory=options.input, ) - git_switch_branch("master", repo=repo) + git_switch_branch("main", repo=repo) - git_switch_branch("master", repo=options.black_repo) + git_switch_branch("main", repo=options.black_repo) def main() -> None: @@ -296,7 +285,7 @@ def main() -> None: type=Path, help="Output directory to download and put result artifacts.", ) - parser.add_argument("versions", nargs="*", default=("master",), help="") + parser.add_argument("versions", nargs="*", default=("main",), help="") options = parser.parse_args() repos = init_repos(options) diff --git a/mypy.ini b/mypy.ini index 589fbf6..4811cc0 100644 --- a/mypy.ini +++ b/mypy.ini @@ -3,35 +3,44 @@ # free to run mypy on Windows, Linux, or macOS and get consistent # results. python_version=3.6 -platform=linux + +mypy_path=src show_column_numbers=True +show_error_codes=True -# show error messages from unrelated files -follow_imports=normal +# be strict +strict=True -# suppress errors about unsatisfied imports -ignore_missing_imports=True +# except for... 
+no_implicit_reexport = False -# be strict -disallow_untyped_calls=True -warn_return_any=True -strict_optional=True -warn_no_return=True -warn_redundant_casts=True -warn_unused_ignores=True -# Until we're not supporting 3.6 primer needs this -disallow_any_generics=False - -# The following are off by default. Flip them on if you feel -# adventurous. -disallow_untyped_defs=True -check_untyped_defs=True - -# No incremental mode -cache_dir=/dev/null - -[mypy-aiohttp.*] -follow_imports=skip -[mypy-_version] -follow_imports=skip +# Unreachable blocks have been an issue when compiling mypyc, let's try +# to avoid 'em in the first place. +warn_unreachable=True + +[mypy-black] +# The following is because of `patch_click()`. Remove when +# we drop Python 3.6 support. +warn_unused_ignores=False + +[mypy-blib2to3.driver.*] +ignore_missing_imports = True + +[mypy-IPython.*] +ignore_missing_imports = True + +[mypy-colorama.*] +ignore_missing_imports = True + +[mypy-pathspec.*] +ignore_missing_imports = True + +[mypy-tokenize_rt.*] +ignore_missing_imports = True + +[mypy-uvloop.*] +ignore_missing_imports = True + +[mypy-_black_version.*] +ignore_missing_imports = True diff --git a/plugin/black.vim b/plugin/black.vim index b5edb2a..fb70424 100644 --- a/plugin/black.vim +++ b/plugin/black.vim @@ -15,6 +15,10 @@ " 1.2: " - use autoload script +if exists("g:load_black") + finish +endif + if v:version < 700 || !has('python3') func! __BLACK_MISSING() echo "The black.vim plugin requires vim7.0+ with Python 3.6 support." @@ -25,10 +29,6 @@ if v:version < 700 || !has('python3') finish endif -if exists("g:load_black") - finish -endif - let g:load_black = "py1.0" if !exists("g:black_virtualenv") if has("nvim") @@ -43,18 +43,41 @@ endif if !exists("g:black_linelength") let g:black_linelength = 88 endif -if !exists("g:black_string_normalization") - if exists("g:black_skip_string_normalization") - let g:black_string_normalization = !g:black_skip_string_normalization +if !exists("g:black_skip_string_normalization") + if exists("g:black_string_normalization") + let g:black_skip_string_normalization = !g:black_string_normalization + else + let g:black_skip_string_normalization = 0 + endif +endif +if !exists("g:black_skip_magic_trailing_comma") + if exists("g:black_magic_trailing_comma") + let g:black_skip_magic_trailing_comma = !g:black_magic_trailing_comma else - let g:black_string_normalization = 1 + let g:black_skip_magic_trailing_comma = 0 endif endif if !exists("g:black_quiet") let g:black_quiet = 0 endif +if !exists("g:black_target_version") + let g:black_target_version = "" +endif +if !exists("g:black_preview") + let g:black_preview = 0 +endif +function BlackComplete(ArgLead, CmdLine, CursorPos) + return [ +\ 'target_version=py27', +\ 'target_version=py36', +\ 'target_version=py37', +\ 'target_version=py38', +\ 'target_version=py39', +\ 'target_version=py310', +\ ] +endfunction -command! Black :call black#Black() +command! -nargs=* -complete=customlist,BlackComplete Black :call black#Black() command! BlackUpgrade :call black#BlackUpgrade() command! BlackVersion :call black#BlackVersion() diff --git a/pyproject.toml b/pyproject.toml index 7f632f2..554d7d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ [tool.black] line-length = 88 -target-version = ['py36', 'py37', 'py38'] +target-version = ['py37', 'py38'] include = '\.pyi?$' extend-exclude = ''' /( @@ -17,11 +17,200 @@ extend-exclude = ''' | profiling )/ ''' +# We use preview style for formatting Black itself. 
If you +# want stable formatting across releases, you should keep +# this off. +preview = true - -# Build system information below. +# Build system information and other project-specific configuration below. # NOTE: You don't need this in your own Black configuration. [build-system] -requires = ["setuptools>=41.0", "setuptools-scm", "wheel"] -build-backend = "setuptools.build_meta" +requires = ["hatchling>=1.8.0", "hatch-vcs", "hatch-fancy-pypi-readme"] +build-backend = "hatchling.build" + +[project] +name = "black" +description = "The uncompromising code formatter." +license = "MIT" +requires-python = ">=3.7" +authors = [ + { name = "Łukasz Langa", email = "lukasz@langa.pl" }, +] +keywords = [ + "automation", + "autopep8", + "formatter", + "gofmt", + "pyfmt", + "rustfmt", + "yapf", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: Software Development :: Quality Assurance", +] +dependencies = [ + "click>=8.0.0", + "mypy_extensions>=0.4.3", + "pathspec>=0.9.0", + "platformdirs>=2", + "tomli>=1.1.0; python_full_version < '3.11.0a7'", + "typed-ast>=1.4.2; python_version < '3.8' and implementation_name == 'cpython'", + "typing_extensions>=3.10.0.0; python_version < '3.10'", +] +dynamic = ["readme", "version"] + +[project.optional-dependencies] +colorama = ["colorama>=0.4.3"] +uvloop = ["uvloop>=0.15.2"] +d = [ + "aiohttp>=3.7.4", +] +jupyter = [ + "ipython>=7.8.0", + "tokenize-rt>=3.2.0", +] + +[project.scripts] +black = "black:patched_main" +blackd = "blackd:patched_main [d]" + +[project.urls] +Changelog = "https://github.com/psf/black/blob/main/CHANGES.md" +Homepage = "https://github.com/psf/black" + +[tool.hatch.metadata.hooks.fancy-pypi-readme] +content-type = "text/markdown" +fragments = [ + { path = "README.md" }, + { path = "CHANGES.md" }, +] + +[tool.hatch.version] +source = "vcs" + +[tool.hatch.build.hooks.vcs] +version-file = "src/_black_version.py" +template = ''' +version = "{version}" +''' + +[tool.hatch.build.targets.sdist] +exclude = ["/profiling"] + +[tool.hatch.build.targets.wheel] +only-include = ["src"] +sources = ["src"] + +[tool.hatch.build.targets.wheel.hooks.mypyc] +enable-by-default = false +dependencies = [ + "hatch-mypyc>=0.13.0", + "mypy==0.971", + # Required stubs to be removed when the packages support PEP 561 themselves + "types-typed-ast>=1.4.2", +] +require-runtime-dependencies = true +exclude = [ + # There's no good reason for blackd to be compiled. 
+    "/src/blackd",
+    # Not performance sensitive, so save bytes + compilation time:
+    "/src/blib2to3/__init__.py",
+    "/src/blib2to3/pgen2/__init__.py",
+    "/src/black/output.py",
+    "/src/black/concurrency.py",
+    "/src/black/files.py",
+    "/src/black/report.py",
+    # Breaks the test suite when compiled (and is also useless):
+    "/src/black/debug.py",
+    # Compiled modules can't be run directly and that's a problem here:
+    "/src/black/__main__.py",
+]
+options = { debug_level = "0" }
+
+[tool.cibuildwheel]
+build-verbosity = 1
+# So these are the environments we target:
+# - Python: CPython 3.7+ only
+# - Architecture (64-bit only): amd64 / x86_64, universal2, and arm64
+# - OS: Linux (no musl), Windows, and macOS
+build = "cp3*-*"
+skip = ["*-manylinux_i686", "*-musllinux_*", "*-win32", "pp-*"]
+# This is the bare minimum needed to run the test suite. Pulling in the full
+# test_requirements.txt would download a bunch of other packages not necessary
+# here and would slow down the testing step a fair bit.
+test-requires = ["pytest>=6.1.1"]
+test-command = 'pytest {project} -k "not incompatible_with_mypyc"'
+test-extras = ["d", "jupyter"]
+# Skip trying to test arm64 builds on Intel Macs (so cross-compilation doesn't
+# straight up crash).
+test-skip = ["*-macosx_arm64", "*-macosx_universal2:arm64"]
+
+[tool.cibuildwheel.environment]
+HATCH_BUILD_HOOKS_ENABLE = "1"
+MYPYC_OPT_LEVEL = "3"
+MYPYC_DEBUG_LEVEL = "0"
+# CPython 3.11 wheels aren't available for aiohttp and building a Cython extension
+# from source also doesn't work.
+AIOHTTP_NO_EXTENSIONS = "1"
+
+[tool.cibuildwheel.linux]
+before-build = [
+    "yum install -y clang gcc",
+]
+
+[tool.cibuildwheel.linux.environment]
+HATCH_BUILD_HOOKS_ENABLE = "1"
+MYPYC_OPT_LEVEL = "3"
+MYPYC_DEBUG_LEVEL = "0"
+# Black needs Clang to compile successfully on Linux.
+CC = "clang"
+AIOHTTP_NO_EXTENSIONS = "1"
+
+[tool.isort]
+atomic = true
+profile = "black"
+line_length = 88
+skip_gitignore = true
+skip_glob = ["src/blib2to3", "tests/data", "profiling"]
+known_first_party = ["black", "blib2to3", "blackd", "_black_version"]
+
+[tool.pytest.ini_options]
+# Option below requires `tests/optional.py`
+addopts = "--strict-config --strict-markers"
+optional-tests = [
+    "no_blackd: run when `d` extra NOT installed",
+    "no_jupyter: run when `jupyter` extra NOT installed",
+]
+markers = [
+    "incompatible_with_mypyc: run when testing mypyc compiled black"
+]
+xfail_strict = true
+filterwarnings = [
+    "error",
+    # this is mitigated by a try/catch in https://github.com/psf/black/pull/2974/
+    # this ignore can be removed when support for aiohttp 3.7 is dropped.
+    '''ignore:Decorator `@unittest_run_loop` is no longer needed in aiohttp 3\.8\+:DeprecationWarning''',
+    # this is mitigated by a try/catch in https://github.com/psf/black/pull/3198/
+    # this ignore can be removed when support for aiohttp 3.x is dropped.
+    '''ignore:Middleware decorator is deprecated since 4\.0 and its behaviour is default, you can simply remove this decorator:DeprecationWarning''',
+    # this is mitigated by https://github.com/python/cpython/issues/79071 in python 3.8+
+    # this ignore can be removed when support for 3.7 is dropped.
+ '''ignore:Bare functions are deprecated, use async ones:DeprecationWarning''', + # aiohttp is using deprecated cgi modules - Safe to remove when fixed: + # https://github.com/aio-libs/aiohttp/issues/6905 + '''ignore:'cgi' is deprecated and slated for removal in Python 3.13:DeprecationWarning''', +] diff --git a/readthedocs.yml b/readthedocs.yml deleted file mode 100644 index 1506503..0000000 --- a/readthedocs.yml +++ /dev/null @@ -1,7 +0,0 @@ -version: 2 -python: - version: 3.8 - install: - - requirements: docs/requirements.txt - - method: setuptools - path: . diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/scripts/check_pre_commit_rev_in_example.py b/scripts/check_pre_commit_rev_in_example.py new file mode 100644 index 0000000..9560b3b --- /dev/null +++ b/scripts/check_pre_commit_rev_in_example.py @@ -0,0 +1,54 @@ +""" +Check that the rev value in the example pre-commit configuration matches +the latest version of Black. This saves us from forgetting to update that +during the release process. + +Why can't we just use `rev: stable` and call it a day? Well pre-commit +won't auto update the hook as you may expect (and for good reasons, some +technical and some pragmatic). Encouraging bad practice is also just +not ideal. xref: https://github.com/psf/black/issues/420 +""" + +import os +import sys + +import commonmark +import yaml +from bs4 import BeautifulSoup + + +def main(changes: str, source_version_control: str) -> None: + changes_html = commonmark.commonmark(changes) + changes_soup = BeautifulSoup(changes_html, "html.parser") + headers = changes_soup.find_all("h2") + latest_tag, *_ = [ + header.string for header in headers if header.string != "Unreleased" + ] + + source_version_control_html = commonmark.commonmark(source_version_control) + source_version_control_soup = BeautifulSoup( + source_version_control_html, "html.parser" + ) + pre_commit_repos = yaml.safe_load( + source_version_control_soup.find(class_="language-yaml").string + )["repos"] + + for repo in pre_commit_repos: + pre_commit_rev = repo["rev"] + if not pre_commit_rev == latest_tag: + print( + "Please set the rev in ``source_version_control.md`` to be the latest " + f"one.\nExpected {latest_tag}, got {pre_commit_rev}.\n" + ) + sys.exit(1) + + +if __name__ == "__main__": + with open("CHANGES.md", encoding="utf-8") as fd: + changes = fd.read() + with open( + os.path.join("docs", "integrations", "source_version_control.md"), + encoding="utf-8", + ) as fd: + source_version_control = fd.read() + main(changes, source_version_control) diff --git a/scripts/check_version_in_basics_example.py b/scripts/check_version_in_basics_example.py new file mode 100644 index 0000000..c62780d --- /dev/null +++ b/scripts/check_version_in_basics_example.py @@ -0,0 +1,47 @@ +""" +Check that the rev value in the example from ``the_basics.md`` matches +the latest version of Black. This saves us from forgetting to update that +during the release process. 
+"""
+
+import os
+import sys
+
+import commonmark
+from bs4 import BeautifulSoup
+
+
+def main(changes: str, the_basics: str) -> None:
+    changes_html = commonmark.commonmark(changes)
+    changes_soup = BeautifulSoup(changes_html, "html.parser")
+    headers = changes_soup.find_all("h2")
+    tags = [header.string for header in headers if header.string != "Unreleased"]
+    latest_tag = tags[0]
+
+    the_basics_html = commonmark.commonmark(the_basics)
+    the_basics_soup = BeautifulSoup(the_basics_html, "html.parser")
+    (version_example,) = [
+        code_block.string
+        for code_block in the_basics_soup.find_all(class_="language-console")
+        if "$ black --version" in code_block.string
+    ]
+
+    for tag in tags:
+        if tag in version_example and tag != latest_tag:
+            print(
+                "Please set the version in the ``black --version`` "
+                "example from ``the_basics.md`` to be the latest one.\n"
+                f"Expected {latest_tag}, got {tag}.\n"
+            )
+            sys.exit(1)
+
+
+if __name__ == "__main__":
+    with open("CHANGES.md", encoding="utf-8") as fd:
+        changes = fd.read()
+    with open(
+        os.path.join("docs", "usage_and_configuration", "the_basics.md"),
+        encoding="utf-8",
+    ) as fd:
+        the_basics = fd.read()
+    main(changes, the_basics)
diff --git a/scripts/diff_shades_gha_helper.py b/scripts/diff_shades_gha_helper.py
new file mode 100644
index 0000000..b5fea5a
--- /dev/null
+++ b/scripts/diff_shades_gha_helper.py
@@ -0,0 +1,227 @@
+"""Helper script for psf/black's diff-shades GitHub Actions integration.
+
+diff-shades is a tool for analyzing what happens when you run Black on
+OSS code, capturing it for comparisons or other usage. It's used here to
+help measure the impact of a change *before* landing it (in particular
+posting a comment on completion for PRs).
+
+This script exists as a more maintainable alternative to using inline
+JavaScript in the workflow YAML files. The revision configuration and
+resolution, caching, and PR comment logic are contained here.
+ +For more information, please see the developer docs: + +https://black.readthedocs.io/en/latest/contributing/gauging_changes.html#diff-shades +""" + +import json +import os +import platform +import pprint +import subprocess +import sys +import zipfile +from io import BytesIO +from pathlib import Path +from typing import Any + +import click +import urllib3 +from packaging.version import Version + +if sys.version_info >= (3, 8): + from typing import Final, Literal +else: + from typing_extensions import Final, Literal + +COMMENT_FILE: Final = ".pr-comment.json" +DIFF_STEP_NAME: Final = "Generate HTML diff report" +DOCS_URL: Final = ( + "https://black.readthedocs.io/en/latest/" + "contributing/gauging_changes.html#diff-shades" +) +USER_AGENT: Final = f"psf/black diff-shades workflow via urllib3/{urllib3.__version__}" +SHA_LENGTH: Final = 10 +GH_API_TOKEN: Final = os.getenv("GITHUB_TOKEN") +REPO: Final = os.getenv("GITHUB_REPOSITORY", default="psf/black") +http = urllib3.PoolManager() + + +def set_output(name: str, value: str) -> None: + if len(value) < 200: + print(f"[INFO]: setting '{name}' to '{value}'") + else: + print(f"[INFO]: setting '{name}' to [{len(value)} chars]") + print(f"::set-output name={name}::{value}") + + +def http_get(url: str, *, is_json: bool = True, **kwargs: Any) -> Any: + headers = kwargs.get("headers") or {} + headers["User-Agent"] = USER_AGENT + if "github" in url: + if GH_API_TOKEN: + headers["Authorization"] = f"token {GH_API_TOKEN}" + headers["Accept"] = "application/vnd.github.v3+json" + kwargs["headers"] = headers + + r = http.request("GET", url, **kwargs) + if is_json: + data = json.loads(r.data.decode("utf-8")) + else: + data = r.data + print(f"[INFO]: issued GET request for {r.geturl()}") + if not (200 <= r.status < 300): + pprint.pprint(dict(r.info())) + pprint.pprint(data) + raise RuntimeError(f"unexpected status code: {r.status}") + + return data + + +def get_main_revision() -> str: + data = http_get( + f"https://api.github.com/repos/{REPO}/commits", + fields={"per_page": "1", "sha": "main"}, + ) + assert isinstance(data[0]["sha"], str) + return data[0]["sha"] + + +def get_pr_revision(pr: int) -> str: + data = http_get(f"https://api.github.com/repos/{REPO}/pulls/{pr}") + assert isinstance(data["head"]["sha"], str) + return data["head"]["sha"] + + +def get_pypi_version() -> Version: + data = http_get("https://pypi.org/pypi/black/json") + versions = [Version(v) for v in data["releases"]] + sorted_versions = sorted(versions, reverse=True) + return sorted_versions[0] + + +@click.group() +def main() -> None: + pass + + +@main.command("config", help="Acquire run configuration and metadata.") +@click.argument("event", type=click.Choice(["push", "pull_request"])) +def config(event: Literal["push", "pull_request"]) -> None: + import diff_shades + + if event == "push": + jobs = [{"mode": "preview-changes", "force-flag": "--force-preview-style"}] + # Push on main, let's use PyPI Black as the baseline. + baseline_name = str(get_pypi_version()) + baseline_cmd = f"git checkout {baseline_name}" + target_rev = os.getenv("GITHUB_SHA") + assert target_rev is not None + target_name = "main-" + target_rev[:SHA_LENGTH] + target_cmd = f"git checkout {target_rev}" + + elif event == "pull_request": + jobs = [ + {"mode": "preview-changes", "force-flag": "--force-preview-style"}, + {"mode": "assert-no-changes", "force-flag": "--force-stable-style"}, + ] + # PR, let's use main as the baseline. 
+ baseline_rev = get_main_revision() + baseline_name = "main-" + baseline_rev[:SHA_LENGTH] + baseline_cmd = f"git checkout {baseline_rev}" + pr_ref = os.getenv("GITHUB_REF") + assert pr_ref is not None + pr_num = int(pr_ref[10:-6]) + pr_rev = get_pr_revision(pr_num) + target_name = f"pr-{pr_num}-{pr_rev[:SHA_LENGTH]}" + target_cmd = f"gh pr checkout {pr_num} && git merge origin/main" + + env = f"{platform.system()}-{platform.python_version()}-{diff_shades.__version__}" + for entry in jobs: + entry["baseline-analysis"] = f"{entry['mode']}-{baseline_name}.json" + entry["baseline-setup-cmd"] = baseline_cmd + entry["target-analysis"] = f"{entry['mode']}-{target_name}.json" + entry["target-setup-cmd"] = target_cmd + entry["baseline-cache-key"] = f"{env}-{baseline_name}-{entry['mode']}" + if event == "pull_request": + # These are only needed for the PR comment. + entry["baseline-sha"] = baseline_rev + entry["target-sha"] = pr_rev + + set_output("matrix", json.dumps(jobs, indent=None)) + pprint.pprint(jobs) + + +@main.command("comment-body", help="Generate the body for a summary PR comment.") +@click.argument("baseline", type=click.Path(exists=True, path_type=Path)) +@click.argument("target", type=click.Path(exists=True, path_type=Path)) +@click.argument("baseline-sha") +@click.argument("target-sha") +@click.argument("pr-num", type=int) +def comment_body( + baseline: Path, target: Path, baseline_sha: str, target_sha: str, pr_num: int +) -> None: + # fmt: off + cmd = [ + sys.executable, "-m", "diff_shades", "--no-color", + "compare", str(baseline), str(target), "--quiet", "--check" + ] + # fmt: on + proc = subprocess.run(cmd, stdout=subprocess.PIPE, encoding="utf-8") + if not proc.returncode: + body = ( + f"**diff-shades** reports zero changes comparing this PR ({target_sha}) to" + f" main ({baseline_sha}).\n\n---\n\n" + ) + else: + body = ( + f"**diff-shades** results comparing this PR ({target_sha}) to main" + f" ({baseline_sha}). The full diff is [available in the logs]" + f'($job-diff-url) under the "{DIFF_STEP_NAME}" step.' 
+ ) + body += "\n```text\n" + proc.stdout.strip() + "\n```\n" + body += ( + f"[**What is this?**]({DOCS_URL}) | [Workflow run]($workflow-run-url) |" + " [diff-shades documentation](https://github.com/ichard26/diff-shades#readme)" + ) + print(f"[INFO]: writing comment details to {COMMENT_FILE}") + with open(COMMENT_FILE, "w", encoding="utf-8") as f: + json.dump({"body": body, "pr-number": pr_num}, f) + + +@main.command("comment-details", help="Get PR comment resources from a workflow run.") +@click.argument("run-id") +def comment_details(run_id: str) -> None: + data = http_get(f"https://api.github.com/repos/{REPO}/actions/runs/{run_id}") + if data["event"] != "pull_request" or data["conclusion"] == "cancelled": + set_output("needs-comment", "false") + return + + set_output("needs-comment", "true") + jobs = http_get(data["jobs_url"])["jobs"] + job = next(j for j in jobs if j["name"] == "analysis / preview-changes") + diff_step = next(s for s in job["steps"] if s["name"] == DIFF_STEP_NAME) + diff_url = job["html_url"] + f"#step:{diff_step['number']}:1" + + artifacts = http_get(data["artifacts_url"])["artifacts"] + comment_artifact = next(a for a in artifacts if a["name"] == COMMENT_FILE) + comment_url = comment_artifact["archive_download_url"] + comment_zip = BytesIO(http_get(comment_url, is_json=False)) + with zipfile.ZipFile(comment_zip) as zfile: + with zfile.open(COMMENT_FILE) as rf: + comment_data = json.loads(rf.read().decode("utf-8")) + + set_output("pr-number", str(comment_data["pr-number"])) + body = comment_data["body"] + # It's more convenient to fill in these fields after the first workflow is done + # since this command can access the workflows API (doing it in the main workflow + # while it's still in progress seems impossible). + body = body.replace("$workflow-run-url", data["html_url"]) + body = body.replace("$job-diff-url", diff_url) + # https://github.community/t/set-output-truncates-multiline-strings/16852/3 + escaped = body.replace("%", "%25").replace("\n", "%0A").replace("\r", "%0D") + set_output("comment-body", escaped) + + +if __name__ == "__main__": + main() diff --git a/fuzz.py b/scripts/fuzz.py similarity index 89% rename from fuzz.py rename to scripts/fuzz.py index a9ca8ef..25362c9 100644 --- a/fuzz.py +++ b/scripts/fuzz.py @@ -8,7 +8,8 @@ import re import hypothesmith -from hypothesis import HealthCheck, given, settings, strategies as st +from hypothesis import HealthCheck, given, settings +from hypothesis import strategies as st import black from blib2to3.pgen2.tokenize import TokenError @@ -32,7 +33,9 @@ black.FileMode, line_length=st.just(88) | st.integers(0, 200), string_normalization=st.booleans(), + preview=st.booleans(), is_pyi=st.booleans(), + magic_trailing_comma=st.booleans(), ), ) def test_idempotent_any_syntatically_valid_python( @@ -46,7 +49,7 @@ def test_idempotent_any_syntatically_valid_python( dst_contents = black.format_str(src_contents, mode=mode) except black.InvalidInput: # This is a bug - if it's valid Python code, as above, Black should be - # able to cope with it. See issues #970, #1012, #1358, and #1557. + # able to cope with it. See issues #970, #1012 # TODO: remove this try-except block when issues are resolved. 
return except TokenError as e: @@ -76,10 +79,14 @@ def test_idempotent_any_syntatically_valid_python( # (if you want only bounded fuzzing, just use `pytest fuzz.py`) try: import sys + import atheris except ImportError: pass else: test = test_idempotent_any_syntatically_valid_python - atheris.Setup(sys.argv, test.hypothesis.fuzz_one_input) + atheris.Setup( + sys.argv, + test.hypothesis.fuzz_one_input, # type: ignore[attr-defined] + ) atheris.Fuzz() diff --git a/scripts/migrate-black.py b/scripts/migrate-black.py new file mode 100755 index 0000000..ff52939 --- /dev/null +++ b/scripts/migrate-black.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +# check out every commit added by the current branch, blackify them, +# and generate diffs to reconstruct the original commits, but then +# blackified +import logging +import os +import sys +from subprocess import PIPE, Popen, check_output, run + + +def git(*args: str) -> str: + return check_output(["git"] + list(args)).decode("utf8").strip() + + +def blackify(base_branch: str, black_command: str, logger: logging.Logger) -> int: + current_branch = git("branch", "--show-current") + + if not current_branch or base_branch == current_branch: + logger.error("You need to check out a feature branch to work on") + return 1 + + if not os.path.exists(".git"): + logger.error("Run me in the root of your repo") + return 1 + + merge_base = git("merge-base", "HEAD", base_branch) + if not merge_base: + logger.error( + "Could not find a common commit for current head and %s" % base_branch + ) + return 1 + + commits = git( + "log", "--reverse", "--pretty=format:%H", "%s~1..HEAD" % merge_base + ).split() + for commit in commits: + git("checkout", commit, "-b%s-black" % commit) + check_output(black_command, shell=True) + git("commit", "-aqm", "blackify") + + git("checkout", base_branch, "-b%s-black" % current_branch) + + for last_commit, commit in zip(commits, commits[1:]): + allow_empty = ( + b"--allow-empty" in run(["git", "apply", "-h"], stdout=PIPE).stdout + ) + quiet = b"--quiet" in run(["git", "apply", "-h"], stdout=PIPE).stdout + git_diff = Popen( + [ + "git", + "diff", + "--binary", + "--find-copies", + "%s-black..%s-black" % (last_commit, commit), + ], + stdout=PIPE, + ) + git_apply = Popen( + [ + "git", + "apply", + ] + + (["--quiet"] if quiet else []) + + [ + "-3", + "--intent-to-add", + ] + + (["--allow-empty"] if allow_empty else []) + + [ + "-", + ], + stdin=git_diff.stdout, + ) + if git_diff.stdout is not None: + git_diff.stdout.close() + git_apply.communicate() + git("commit", "--allow-empty", "-aqC", commit) + + for commit in commits: + git("branch", "-qD", "%s-black" % commit) + + return 0 + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("base_branch") + parser.add_argument("--black_command", default="black -q .") + parser.add_argument("--logfile", type=argparse.FileType("w"), default=sys.stdout) + args = parser.parse_args() + logger = logging.getLogger(__name__) + logger.addHandler(logging.StreamHandler(args.logfile)) + logger.setLevel(logging.INFO) + sys.exit(blackify(args.base_branch, args.black_command, logger)) diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 55c66ad..0000000 --- a/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[options] -setup_requires = setuptools_scm diff --git a/setup.py b/setup.py deleted file mode 100644 index f1792a4..0000000 --- a/setup.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (C) 2020 Łukasz Langa -from setuptools import setup -import sys -import 
os - -assert sys.version_info >= (3, 6, 2), "black requires Python 3.6.2+" -from pathlib import Path # noqa E402 - -CURRENT_DIR = Path(__file__).parent -sys.path.insert(0, str(CURRENT_DIR)) # for setuptools.build_meta - - -def get_long_description() -> str: - return ( - (CURRENT_DIR / "README.md").read_text(encoding="utf8") - + "\n\n" - + (CURRENT_DIR / "CHANGES.md").read_text(encoding="utf8") - ) - - -USE_MYPYC = False -# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH -if len(sys.argv) > 1 and sys.argv[1] == "--use-mypyc": - sys.argv.pop(1) - USE_MYPYC = True -if os.getenv("BLACK_USE_MYPYC", None) == "1": - USE_MYPYC = True - -if USE_MYPYC: - mypyc_targets = [ - "src/black/__init__.py", - "src/blib2to3/pytree.py", - "src/blib2to3/pygram.py", - "src/blib2to3/pgen2/parse.py", - "src/blib2to3/pgen2/grammar.py", - "src/blib2to3/pgen2/token.py", - "src/blib2to3/pgen2/driver.py", - "src/blib2to3/pgen2/pgen.py", - ] - - from mypyc.build import mypycify - - opt_level = os.getenv("MYPYC_OPT_LEVEL", "3") - ext_modules = mypycify(mypyc_targets, opt_level=opt_level) -else: - ext_modules = [] - -setup( - name="black", - use_scm_version={ - "write_to": "src/_black_version.py", - "write_to_template": 'version = "{version}"\n', - }, - description="The uncompromising code formatter.", - long_description=get_long_description(), - long_description_content_type="text/markdown", - keywords="automation formatter yapf autopep8 pyfmt gofmt rustfmt", - author="Łukasz Langa", - author_email="lukasz@langa.pl", - url="https://github.com/psf/black", - project_urls={"Changelog": "https://github.com/psf/black/blob/master/CHANGES.md"}, - license="MIT", - py_modules=["_black_version"], - ext_modules=ext_modules, - packages=["blackd", "black", "blib2to3", "blib2to3.pgen2", "black_primer"], - package_dir={"": "src"}, - package_data={ - "blib2to3": ["*.txt"], - "black": ["py.typed"], - "black_primer": ["primer.json"], - }, - python_requires=">=3.6.2", - zip_safe=False, - install_requires=[ - "click>=7.1.2", - "appdirs", - "toml>=0.10.1", - "typed-ast>=1.4.2; python_version < '3.8'", - "regex>=2020.1.8", - "pathspec>=0.8.1, <1", - "dataclasses>=0.6; python_version < '3.7'", - "typing_extensions>=3.7.4; python_version < '3.8'", - "mypy_extensions>=0.4.3", - ], - extras_require={ - "d": ["aiohttp>=3.3.2", "aiohttp-cors"], - "colorama": ["colorama>=0.4.3"], - "python2": ["typed-ast>=1.4.2"], - }, - test_suite="tests.test_black", - classifiers=[ - "Development Status :: 4 - Beta", - "Environment :: Console", - "Intended Audience :: Developers", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - "Programming Language :: Python", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3 :: Only", - "Topic :: Software Development :: Libraries :: Python Modules", - "Topic :: Software Development :: Quality Assurance", - ], - entry_points={ - "console_scripts": [ - "black=black:patched_main", - "blackd=blackd:patched_main [d]", - "black-primer=black_primer.cli:main", - ] - }, -) diff --git a/src/black/__init__.py b/src/black/__init__.py index 1c69cc4..afd71e5 100644 --- a/src/black/__init__.py +++ b/src/black/__init__.py @@ -1,165 +1,96 @@ -import ast -import asyncio -from abc import ABC, abstractmethod -from collections import defaultdict -from concurrent.futures import Executor, ThreadPoolExecutor, 
ProcessPoolExecutor -from contextlib import contextmanager -from datetime import datetime -from enum import Enum -from functools import lru_cache, partial, wraps import io -import itertools -import logging -from multiprocessing import Manager, freeze_support -import os -from pathlib import Path -import pickle -import regex as re -import signal +import json +import platform +import re import sys -import tempfile import tokenize import traceback +from contextlib import contextmanager +from dataclasses import replace +from datetime import datetime +from enum import Enum +from json.decoder import JSONDecodeError +from pathlib import Path from typing import ( Any, - Callable, - Collection, Dict, Generator, - Generic, - Iterable, Iterator, List, + MutableMapping, Optional, Pattern, Sequence, Set, Sized, Tuple, - Type, - TypeVar, Union, - cast, - TYPE_CHECKING, ) -from mypy_extensions import mypyc_attr -from appdirs import user_cache_dir -from dataclasses import dataclass, field, replace import click -import toml - -try: - from typed_ast import ast3, ast27 -except ImportError: - if sys.version_info < (3, 8): - print( - "The typed_ast package is not installed.\n" - "You can install it with `python3 -m pip install typed-ast`.", - file=sys.stderr, - ) - sys.exit(1) - else: - ast3 = ast27 = ast - -from pathspec import PathSpec - -# lib2to3 fork -from blib2to3.pytree import Node, Leaf, type_repr -from blib2to3 import pygram, pytree -from blib2to3.pgen2 import driver, token -from blib2to3.pgen2.grammar import Grammar -from blib2to3.pgen2.parse import ParseError +from click.core import ParameterSource +from mypy_extensions import mypyc_attr +from pathspec.patterns.gitwildmatch import GitWildMatchPatternError from _black_version import version as __version__ +from black.cache import Cache, get_cache_info, read_cache, write_cache +from black.comments import normalize_fmt_off +from black.const import ( + DEFAULT_EXCLUDES, + DEFAULT_INCLUDES, + DEFAULT_LINE_LENGTH, + STDIN_PLACEHOLDER, +) +from black.files import ( + find_project_root, + find_pyproject_toml, + find_user_pyproject_toml, + gen_python_files, + get_gitignore, + normalize_path_maybe_ignore, + parse_pyproject_toml, + wrap_stream_for_windows, +) +from black.handle_ipynb_magics import ( + PYTHON_CELL_MAGICS, + TRANSFORMED_MAGICS, + jupyter_dependencies_are_installed, + mask_cell, + put_trailing_semicolon_back, + remove_trailing_semicolon, + unmask_cell, +) +from black.linegen import LN, LineGenerator, transform_line +from black.lines import EmptyLineTracker, Line +from black.mode import ( + FUTURE_FLAG_TO_FEATURE, + VERSION_TO_FEATURES, + Feature, + Mode, + TargetVersion, + supports_feature, +) +from black.nodes import ( + STARS, + is_number_token, + is_simple_decorator_expression, + is_string_token, + syms, +) +from black.output import color_diff, diff, dump_to_file, err, ipynb_diff, out +from black.parsing import InvalidInput # noqa F401 +from black.parsing import lib2to3_parse, parse_ast, stringify_ast +from black.report import Changed, NothingChanged, Report +from black.trans import iter_fexpr_spans +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node -if sys.version_info < (3, 8): - from typing_extensions import Final -else: - from typing import Final - -if TYPE_CHECKING: - import colorama # noqa: F401 - -DEFAULT_LINE_LENGTH = 88 -DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.svn|_build|buck-out|build|dist)/" # noqa: B950 -DEFAULT_INCLUDES = r"\.pyi?$" -CACHE_DIR = 
Path(user_cache_dir("black", version=__version__)) -STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__" - -STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters. - +COMPILED = Path(__file__).suffix in (".pyd", ".so") # types FileContent = str Encoding = str NewLine = str -Depth = int -NodeType = int -ParserState = int -LeafID = int -StringID = int -Priority = int -Index = int -LN = Union[Leaf, Node] -Transformer = Callable[["Line", Collection["Feature"]], Iterator["Line"]] -Timestamp = float -FileSize = int -CacheInfo = Tuple[Timestamp, FileSize] -Cache = Dict[str, CacheInfo] -out = partial(click.secho, bold=True, err=True) -err = partial(click.secho, fg="red", err=True) - -pygram.initialize(CACHE_DIR) -syms = pygram.python_symbols - - -class NothingChanged(UserWarning): - """Raised when reformatted code is the same as source.""" - - -class CannotTransform(Exception): - """Base class for errors raised by Transformers.""" - - -class CannotSplit(CannotTransform): - """A readable split that fits the allotted line length is impossible.""" - - -class InvalidInput(ValueError): - """Raised when input source code fails all parse attempts.""" - - -class BracketMatchError(KeyError): - """Raised when an opening bracket is unable to be matched to a closing bracket.""" - - -T = TypeVar("T") -E = TypeVar("E", bound=Exception) - - -class Ok(Generic[T]): - def __init__(self, value: T) -> None: - self._value = value - - def ok(self) -> T: - return self._value - - -class Err(Generic[E]): - def __init__(self, e: E) -> None: - self._e = e - - def err(self) -> E: - return self._e - - -# The 'Result' return type is used to implement an error-handling model heavily -# influenced by that used by the Rust programming language -# (see https://doc.rust-lang.org/book/ch09-00-error-handling.html). -Result = Union[Ok[T], Err[E]] -TResult = Result[T, CannotTransform] # (T)ransform Result -TMatchResult = TResult[Index] class WriteBack(Enum): @@ -182,158 +113,10 @@ def from_configuration( return cls.DIFF if diff else cls.YES -class Changed(Enum): - NO = 0 - CACHED = 1 - YES = 2 - - -class TargetVersion(Enum): - PY27 = 2 - PY33 = 3 - PY34 = 4 - PY35 = 5 - PY36 = 6 - PY37 = 7 - PY38 = 8 - PY39 = 9 - - def is_python2(self) -> bool: - return self is TargetVersion.PY27 - - -class Feature(Enum): - # All string literals are unicode - UNICODE_LITERALS = 1 - F_STRINGS = 2 - NUMERIC_UNDERSCORES = 3 - TRAILING_COMMA_IN_CALL = 4 - TRAILING_COMMA_IN_DEF = 5 - # The following two feature-flags are mutually exclusive, and exactly one should be - # set for every version of python. 
- ASYNC_IDENTIFIERS = 6 - ASYNC_KEYWORDS = 7 - ASSIGNMENT_EXPRESSIONS = 8 - POS_ONLY_ARGUMENTS = 9 - RELAXED_DECORATORS = 10 - FORCE_OPTIONAL_PARENTHESES = 50 - - -VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = { - TargetVersion.PY27: {Feature.ASYNC_IDENTIFIERS}, - TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS}, - TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS}, - TargetVersion.PY35: { - Feature.UNICODE_LITERALS, - Feature.TRAILING_COMMA_IN_CALL, - Feature.ASYNC_IDENTIFIERS, - }, - TargetVersion.PY36: { - Feature.UNICODE_LITERALS, - Feature.F_STRINGS, - Feature.NUMERIC_UNDERSCORES, - Feature.TRAILING_COMMA_IN_CALL, - Feature.TRAILING_COMMA_IN_DEF, - Feature.ASYNC_IDENTIFIERS, - }, - TargetVersion.PY37: { - Feature.UNICODE_LITERALS, - Feature.F_STRINGS, - Feature.NUMERIC_UNDERSCORES, - Feature.TRAILING_COMMA_IN_CALL, - Feature.TRAILING_COMMA_IN_DEF, - Feature.ASYNC_KEYWORDS, - }, - TargetVersion.PY38: { - Feature.UNICODE_LITERALS, - Feature.F_STRINGS, - Feature.NUMERIC_UNDERSCORES, - Feature.TRAILING_COMMA_IN_CALL, - Feature.TRAILING_COMMA_IN_DEF, - Feature.ASYNC_KEYWORDS, - Feature.ASSIGNMENT_EXPRESSIONS, - Feature.POS_ONLY_ARGUMENTS, - }, - TargetVersion.PY39: { - Feature.UNICODE_LITERALS, - Feature.F_STRINGS, - Feature.NUMERIC_UNDERSCORES, - Feature.TRAILING_COMMA_IN_CALL, - Feature.TRAILING_COMMA_IN_DEF, - Feature.ASYNC_KEYWORDS, - Feature.ASSIGNMENT_EXPRESSIONS, - Feature.RELAXED_DECORATORS, - Feature.POS_ONLY_ARGUMENTS, - }, -} - - -@dataclass -class Mode: - target_versions: Set[TargetVersion] = field(default_factory=set) - line_length: int = DEFAULT_LINE_LENGTH - string_normalization: bool = True - is_pyi: bool = False - magic_trailing_comma: bool = True - experimental_string_processing: bool = False - - def get_cache_key(self) -> str: - if self.target_versions: - version_str = ",".join( - str(version.value) - for version in sorted(self.target_versions, key=lambda v: v.value) - ) - else: - version_str = "-" - parts = [ - version_str, - str(self.line_length), - str(int(self.string_normalization)), - str(int(self.is_pyi)), - str(int(self.magic_trailing_comma)), - str(int(self.experimental_string_processing)), - ] - return ".".join(parts) - - # Legacy name, left for integrations. FileMode = Mode -def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool: - return all(feature in VERSION_TO_FEATURES[version] for version in target_versions) - - -def find_pyproject_toml(path_search_start: Tuple[str, ...]) -> Optional[str]: - """Find the absolute filepath to a pyproject.toml if it exists""" - path_project_root = find_project_root(path_search_start) - path_pyproject_toml = path_project_root / "pyproject.toml" - if path_pyproject_toml.is_file(): - return str(path_pyproject_toml) - - try: - path_user_pyproject_toml = find_user_pyproject_toml() - return ( - str(path_user_pyproject_toml) - if path_user_pyproject_toml.is_file() - else None - ) - except PermissionError as e: - # We do not have access to the user-level config directory, so ignore it. 
- err(f"Ignoring user configuration directory due to {e!r}") - return None - - -def parse_pyproject_toml(path_config: str) -> Dict[str, Any]: - """Parse a pyproject toml file, pulling out relevant parts for Black - - If parsing fails, will raise a toml.TomlDecodeError - """ - pyproject_toml = toml.load(path_config) - config = pyproject_toml.get("tool", {}).get("black", {}) - return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()} - - def read_pyproject_toml( ctx: click.Context, param: click.Parameter, value: Optional[str] ) -> Optional[str]: @@ -349,10 +132,10 @@ def read_pyproject_toml( try: config = parse_pyproject_toml(value) - except (toml.TomlDecodeError, OSError) as e: + except (OSError, ValueError) as e: raise click.FileError( filename=value, hint=f"Error reading configuration file: {e}" - ) + ) from None if not config: return None @@ -391,18 +174,34 @@ def target_version_option_callback( return [TargetVersion[val.upper()] for val in v] +def re_compile_maybe_verbose(regex: str) -> Pattern[str]: + """Compile a regular expression string in `regex`. + + If it contains newlines, use verbose mode. + """ + if "\n" in regex: + regex = "(?x)" + regex + compiled: Pattern[str] = re.compile(regex) + return compiled + + def validate_regex( ctx: click.Context, param: click.Parameter, value: Optional[str], -) -> Optional[Pattern]: +) -> Optional[Pattern[str]]: try: return re_compile_maybe_verbose(value) if value is not None else None - except re.error: - raise click.BadParameter("Not a valid regular expression") + except re.error as e: + raise click.BadParameter(f"Not a valid regular expression: {e}") from None -@click.command(context_settings=dict(help_option_names=["-h", "--help"])) +@click.command( + context_settings={"help_option_names": ["-h", "--help"]}, + # While Click does set this field automatically using the docstring, mypyc + # (annoyingly) strips 'em so we need to set it here too. + help="The uncompromising code formatter.", +) @click.option("-c", "--code", type=str, help="Format the code passed in as a string.") @click.option( "-l", @@ -431,6 +230,30 @@ def validate_regex( " when piping source on standard input)." ), ) +@click.option( + "--ipynb", + is_flag=True, + help=( + "Format all input files like Jupyter Notebooks regardless of file extension " + "(useful when piping source on standard input)." + ), +) +@click.option( + "--python-cell-magics", + multiple=True, + help=( + "When processing Jupyter Notebooks, add the given magic to the list" + f" of known python-magics ({', '.join(PYTHON_CELL_MAGICS)})." + " Useful for formatting cells with custom python magics." + ), + default=[], +) +@click.option( + "-x", + "--skip-source-first-line", + is_flag=True, + help="Skip the first line of the source code.", +) @click.option( "-S", "--skip-string-normalization", @@ -447,9 +270,14 @@ def validate_regex( "--experimental-string-processing", is_flag=True, hidden=True, + help="(DEPRECATED and now included in --preview) Normalize string literals.", +) +@click.option( + "--preview", + is_flag=True, help=( - "Experimental option that performs more normalization on string literals." - " Currently disabled because it leads to some crashes." + "Enable potentially disruptive style changes that may be added to Black's main" + " functionality in the next major release." ), ) @click.option( @@ -476,6 +304,15 @@ def validate_regex( is_flag=True, help="If --fast given, skip temporary sanity checks. 
[default: --safe]", ) +@click.option( + "--required-version", + type=str, + help=( + "Require a specific version of Black to be running (useful for unifying results" + " across many environments e.g. with a pyproject.toml file). It can be" + " either a major version number or an exact version." + ), +) @click.option( "--include", type=str, @@ -492,15 +329,15 @@ def validate_regex( @click.option( "--exclude", type=str, - default=DEFAULT_EXCLUDES, callback=validate_regex, help=( "A regular expression that matches files and directories that should be" " excluded on recursive searches. An empty value means no paths are excluded." " Use forward slashes for directories on all platforms (Windows, too)." - " Exclusions are calculated first, inclusions later." + " Exclusions are calculated first, inclusions later. [default:" + f" {DEFAULT_EXCLUDES}]" ), - show_default=True, + show_default=False, ) @click.option( "--extend-exclude", @@ -529,6 +366,13 @@ def validate_regex( "editors that rely on using stdin." ), ) +@click.option( + "-W", + "--workers", + type=click.IntRange(min=1), + default=None, + help="Number of parallel workers [default: number of CPUs in the system]", +) @click.option( "-q", "--quiet", @@ -547,7 +391,13 @@ def validate_regex( " due to exclusion patterns." ), ) -@click.version_option(version=__version__) +@click.version_option( + version=__version__, + message=( + f"%(prog)s, %(version)s (compiled: {'yes' if COMPILED else 'no'})\n" + f"Python ({platform.python_implementation()}) {platform.python_version()}" + ), +) @click.argument( "src", nargs=-1, @@ -555,6 +405,7 @@ def validate_regex( exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True ), is_eager=True, + metavar="SRC ...", ) @click.option( "--config", @@ -571,7 +422,7 @@ def validate_regex( help="Read configuration from FILE path.", ) @click.pass_context -def main( +def main( # noqa: C901 ctx: click.Context, code: Optional[str], line_length: int, @@ -581,20 +432,98 @@ def main( color: bool, fast: bool, pyi: bool, + ipynb: bool, + python_cell_magics: Sequence[str], + skip_source_first_line: bool, skip_string_normalization: bool, skip_magic_trailing_comma: bool, experimental_string_processing: bool, + preview: bool, quiet: bool, verbose: bool, - include: Pattern, - exclude: Pattern, - extend_exclude: Optional[Pattern], - force_exclude: Optional[Pattern], + required_version: Optional[str], + include: Pattern[str], + exclude: Optional[Pattern[str]], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], stdin_filename: Optional[str], + workers: Optional[int], src: Tuple[str, ...], config: Optional[str], ) -> None: """The uncompromising code formatter.""" + ctx.ensure_object(dict) + + if src and code is not None: + out( + main.get_usage(ctx) + + "\n\n'SRC' and 'code' cannot be passed simultaneously." 
+ ) + ctx.exit(1) + if not src and code is None: + out(main.get_usage(ctx) + "\n\nOne of 'SRC' or 'code' is required.") + ctx.exit(1) + + root, method = ( + find_project_root(src, stdin_filename) if code is None else (None, None) + ) + ctx.obj["root"] = root + + if verbose: + if root: + out( + f"Identified `{root}` as project root containing a {method}.", + fg="blue", + ) + + normalized = [ + (source, source) + if source == "-" + else (normalize_path_maybe_ignore(Path(source), root), source) + for source in src + ] + srcs_string = ", ".join( + [ + f'"{_norm}"' + if _norm + else f'\033[31m"{source} (skipping - invalid)"\033[34m' + for _norm, source in normalized + ] + ) + out(f"Sources to be formatted: {srcs_string}", fg="blue") + + if config: + config_source = ctx.get_parameter_source("config") + user_level_config = str(find_user_pyproject_toml()) + if config == user_level_config: + out( + "Using configuration from user-level config at " + f"'{user_level_config}'.", + fg="blue", + ) + elif config_source in ( + ParameterSource.DEFAULT, + ParameterSource.DEFAULT_MAP, + ): + out("Using configuration from project root.", fg="blue") + else: + out(f"Using configuration in '{config}'.", fg="blue") + + error_msg = "Oh no! 💥 💔 💥" + if ( + required_version + and required_version != __version__ + and required_version != __version__.split(".")[0] + ): + err( + f"{error_msg} The required version `{required_version}` does not match" + f" the running version `{__version__}`!" + ) + ctx.exit(1) + if ipynb and pyi: + err("Cannot pass both `pyi` and `ipynb` flags!") + ctx.exit(1) + write_back = WriteBack.from_configuration(check=check, diff=diff, color=color) if target_version: versions = set(target_version) @@ -605,53 +534,77 @@ def main( target_versions=versions, line_length=line_length, is_pyi=pyi, + is_ipynb=ipynb, + skip_source_first_line=skip_source_first_line, string_normalization=not skip_string_normalization, magic_trailing_comma=not skip_magic_trailing_comma, experimental_string_processing=experimental_string_processing, + preview=preview, + python_cell_magics=set(python_cell_magics), ) - if config and verbose: - out(f"Using configuration from {config}.", bold=False, fg="blue") + if code is not None: - print(format_str(code, mode=mode)) - ctx.exit(0) - report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose) - sources = get_sources( - ctx=ctx, - src=src, - quiet=quiet, - verbose=verbose, - include=include, - exclude=exclude, - extend_exclude=extend_exclude, - force_exclude=force_exclude, - report=report, - stdin_filename=stdin_filename, - ) + # Run in quiet mode by default with -c; the extra output isn't useful. + # You can still pass -v to get verbose output. + quiet = True - path_empty( - sources, - "No Python files are present to be formatted. 
Nothing to do 😴", - quiet, - verbose, - ctx, - ) + report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose) - if len(sources) == 1: - reformat_one( - src=sources.pop(), - fast=fast, - write_back=write_back, - mode=mode, - report=report, + if code is not None: + reformat_code( + content=code, fast=fast, write_back=write_back, mode=mode, report=report ) else: - reformat_many( - sources=sources, fast=fast, write_back=write_back, mode=mode, report=report + try: + sources = get_sources( + ctx=ctx, + src=src, + quiet=quiet, + verbose=verbose, + include=include, + exclude=exclude, + extend_exclude=extend_exclude, + force_exclude=force_exclude, + report=report, + stdin_filename=stdin_filename, + ) + except GitWildMatchPatternError: + ctx.exit(1) + + path_empty( + sources, + "No Python files are present to be formatted. Nothing to do 😴", + quiet, + verbose, + ctx, ) + if len(sources) == 1: + reformat_one( + src=sources.pop(), + fast=fast, + write_back=write_back, + mode=mode, + report=report, + ) + else: + from black.concurrency import reformat_many + + reformat_many( + sources=sources, + fast=fast, + write_back=write_back, + mode=mode, + report=report, + workers=workers, + ) + if verbose or not quiet: - out("Oh no! 💥 💔 💥" if report.return_code else "All done! ✨ 🍰 ✨") - click.secho(str(report), err=True) + if code is None and (verbose or report.change_count or report.failure_count): + out() + out(error_msg if report.return_code else "All done! ✨ 🍰 ✨") + if code is None: + click.echo(str(report), err=True) ctx.exit(report.return_code) @@ -662,18 +615,15 @@ def get_sources( quiet: bool, verbose: bool, include: Pattern[str], - exclude: Pattern[str], + exclude: Optional[Pattern[str]], extend_exclude: Optional[Pattern[str]], force_exclude: Optional[Pattern[str]], report: "Report", stdin_filename: Optional[str], ) -> Set[Path]: """Compute the set of files to be formatted.""" - - root = find_project_root(src) sources: Set[Path] = set() - path_empty(src, "No Path provided. Nothing to do 😴", quiet, verbose, ctx) - gitignore = get_gitignore(root) + root = ctx.obj["root"] for s in src: if s == "-" and stdin_filename: @@ -684,7 +634,7 @@ def get_sources( is_stdin = False if is_stdin or p.is_file(): - normalized_path = normalize_path_maybe_ignore(p, root, report) + normalized_path = normalize_path_maybe_ignore(p, ctx.obj["root"], report) if normalized_path is None: continue @@ -701,18 +651,35 @@ def get_sources( if is_stdin: p = Path(f"{STDIN_PLACEHOLDER}{str(p)}") + if p.suffix == ".ipynb" and not jupyter_dependencies_are_installed( + verbose=verbose, quiet=quiet + ): + continue + sources.add(p) elif p.is_dir(): + if exclude is None: + exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) + gitignore = get_gitignore(root) + p_gitignore = get_gitignore(p) + # No need to use p's gitignore if it is identical to root's gitignore + # (i.e. root and p point to the same directory). 
+                if gitignore != p_gitignore:
+                    gitignore += p_gitignore
+            else:
+                gitignore = None
             sources.update(
                 gen_python_files(
                     p.iterdir(),
-                    root,
+                    ctx.obj["root"],
                     include,
                     exclude,
                     extend_exclude,
                     force_exclude,
                     report,
                     gitignore,
+                    verbose=verbose,
+                    quiet=quiet,
                 )
             )
         elif s == "-":
@@ -728,11 +695,39 @@ def path_empty(
     """
     Exit if there is no `src` provided for formatting
     """
-    if not src and (verbose or not quiet):
-        out(msg)
+    if not src:
+        if verbose or not quiet:
+            out(msg)
         ctx.exit(0)
 
 
+def reformat_code(
+    content: str, fast: bool, write_back: WriteBack, mode: Mode, report: Report
+) -> None:
+    """
+    Reformat and print out `content` without spawning child processes.
+    Similar to `reformat_one`, but for string content.
+
+    `fast`, `write_back`, and `mode` options are passed to
+    :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
+    """
+    path = Path("<string>")
+    try:
+        changed = Changed.NO
+        if format_stdin_to_stdout(
+            content=content, fast=fast, write_back=write_back, mode=mode
+        ):
+            changed = Changed.YES
+        report.done(path, changed)
+    except Exception as exc:
+        if report.verbose:
+            traceback.print_exc()
+        report.failed(path, str(exc))
+
+
+# diff-shades depends on being able to monkeypatch this function to operate. I know
+# it's not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
+@mypyc_attr(patchable=True)
 def reformat_one(
     src: Path, fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
 ) -> None:
@@ -755,6 +750,10 @@ def reformat_one(
         is_stdin = False
 
     if is_stdin:
+        if src.suffix == ".pyi":
+            mode = replace(mode, is_pyi=True)
+        elif src.suffix == ".ipynb":
+            mode = replace(mode, is_ipynb=True)
         if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode):
             changed = Changed.YES
         else:
@@ -780,114 +779,6 @@ def reformat_one(
         report.failed(src, str(exc))
 
 
-def reformat_many(
-    sources: Set[Path], fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
-) -> None:
-    """Reformat multiple files using a ProcessPoolExecutor."""
-    executor: Executor
-    loop = asyncio.get_event_loop()
-    worker_count = os.cpu_count()
-    if sys.platform == "win32":
-        # Work around https://bugs.python.org/issue26903
-        worker_count = min(worker_count, 60)
-    try:
-        executor = ProcessPoolExecutor(max_workers=worker_count)
-    except (ImportError, OSError):
-        # we arrive here if the underlying system does not support multi-processing
-        # like in AWS Lambda or Termux, in which case we gracefully fallback to
-        # a ThreadPoolExecutor with just a single worker (more workers would not do us
-        # any good due to the Global Interpreter Lock)
-        executor = ThreadPoolExecutor(max_workers=1)
-
-    try:
-        loop.run_until_complete(
-            schedule_formatting(
-                sources=sources,
-                fast=fast,
-                write_back=write_back,
-                mode=mode,
-                report=report,
-                loop=loop,
-                executor=executor,
-            )
-        )
-    finally:
-        shutdown(loop)
-        if executor is not None:
-            executor.shutdown()
-
-
-async def schedule_formatting(
-    sources: Set[Path],
-    fast: bool,
-    write_back: WriteBack,
-    mode: Mode,
-    report: "Report",
-    loop: asyncio.AbstractEventLoop,
-    executor: Executor,
-) -> None:
-    """Run formatting of `sources` in parallel using the provided `executor`.
-
-    (Use ProcessPoolExecutors for actual parallelism.)
-
-    `write_back`, `fast`, and `mode` options are passed to
-    :func:`format_file_in_place`.
- """ - cache: Cache = {} - if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF): - cache = read_cache(mode) - sources, cached = filter_cached(cache, sources) - for src in sorted(cached): - report.done(src, Changed.CACHED) - if not sources: - return - - cancelled = [] - sources_to_cache = [] - lock = None - if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): - # For diff output, we need locks to ensure we don't interleave output - # from different processes. - manager = Manager() - lock = manager.Lock() - tasks = { - asyncio.ensure_future( - loop.run_in_executor( - executor, format_file_in_place, src, fast, mode, write_back, lock - ) - ): src - for src in sorted(sources) - } - pending = tasks.keys() - try: - loop.add_signal_handler(signal.SIGINT, cancel, pending) - loop.add_signal_handler(signal.SIGTERM, cancel, pending) - except NotImplementedError: - # There are no good alternatives for these on Windows. - pass - while pending: - done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) - for task in done: - src = tasks.pop(task) - if task.cancelled(): - cancelled.append(task) - elif task.exception(): - report.failed(src, str(task.exception())) - else: - changed = Changed.YES if task.result() else Changed.NO - # If the file was written back or was successfully checked as - # well-formatted, store this information in the cache. - if write_back is WriteBack.YES or ( - write_back is WriteBack.CHECK and changed is Changed.NO - ): - sources_to_cache.append(src) - report.done(src, changed) - if cancelled: - await asyncio.gather(*cancelled, loop=loop, return_exceptions=True) - if sources_to_cache: - write_cache(cache, sources_to_cache, mode) - - def format_file_in_place( src: Path, fast: bool, @@ -903,14 +794,25 @@ def format_file_in_place( """ if src.suffix == ".pyi": mode = replace(mode, is_pyi=True) + elif src.suffix == ".ipynb": + mode = replace(mode, is_ipynb=True) then = datetime.utcfromtimestamp(src.stat().st_mtime) + header = b"" with open(src, "rb") as buf: + if mode.skip_source_first_line: + header = buf.readline() src_contents, encoding, newline = decode_bytes(buf.read()) try: dst_contents = format_file_contents(src_contents, fast=fast, mode=mode) except NothingChanged: return False + except JSONDecodeError: + raise ValueError( + f"File '{src}' cannot be parsed as valid Jupyter notebook." 
+ ) from None + src_contents = header.decode(encoding) + src_contents + dst_contents = header.decode(encoding) + dst_contents if write_back == WriteBack.YES: with open(src, "w", encoding=encoding, newline=newline) as f: @@ -919,7 +821,10 @@ def format_file_in_place( now = datetime.utcnow() src_name = f"{src}\t{then} +0000" dst_name = f"{src}\t{now} +0000" - diff_contents = diff(src_contents, dst_contents, src_name, dst_name) + if mode.is_ipynb: + diff_contents = ipynb_diff(src_contents, dst_contents, src_name, dst_name) + else: + diff_contents = diff(src_contents, dst_contents, src_name, dst_name) if write_back == WriteBack.COLOR_DIFF: diff_contents = color_diff(diff_contents) @@ -938,53 +843,28 @@ def format_file_in_place( return True -def color_diff(contents: str) -> str: - """Inject the ANSI color codes to the diff.""" - lines = contents.split("\n") - for i, line in enumerate(lines): - if line.startswith("+++") or line.startswith("---"): - line = "\033[1;37m" + line + "\033[0m" # bold white, reset - elif line.startswith("@@"): - line = "\033[36m" + line + "\033[0m" # cyan, reset - elif line.startswith("+"): - line = "\033[32m" + line + "\033[0m" # green, reset - elif line.startswith("-"): - line = "\033[31m" + line + "\033[0m" # red, reset - lines[i] = line - return "\n".join(lines) - - -def wrap_stream_for_windows( - f: io.TextIOWrapper, -) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]: - """ - Wrap stream with colorama's wrap_stream so colors are shown on Windows. - - If `colorama` is unavailable, the original stream is returned unmodified. - Otherwise, the `wrap_stream()` function determines whether the stream needs - to be wrapped for a Windows environment and will accordingly either return - an `AnsiToWin32` wrapper or the original stream. - """ - try: - from colorama.initialise import wrap_stream - except ImportError: - return f - else: - # Set `strip=False` to avoid needing to modify test_express_diff_with_color. - return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True) - - def format_stdin_to_stdout( - fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: Mode + fast: bool, + *, + content: Optional[str] = None, + write_back: WriteBack = WriteBack.NO, + mode: Mode, ) -> bool: """Format file on stdin. Return True if changed. + If content is None, it's read from sys.stdin. + If `write_back` is YES, write reformatted code back to stdout. If it is DIFF, write a diff to stdout. The `mode` argument is passed to :func:`format_file_contents`. """ then = datetime.utcnow() - src, encoding, newline = decode_bytes(sys.stdin.buffer.read()) + + if content is None: + src, encoding, newline = decode_bytes(sys.stdin.buffer.read()) + else: + src, encoding, newline = content, "utf-8", "" + dst = src try: dst = format_file_contents(src, fast=fast, mode=mode) @@ -998,6 +878,9 @@ def format_stdin_to_stdout( sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True ) if write_back == WriteBack.YES: + # Make sure there's a newline after the content + if dst and dst[-1] != "\n": + dst += "\n" f.write(dst) elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF): now = datetime.utcnow() @@ -1011,6 +894,19 @@ def format_stdin_to_stdout( f.detach() +def check_stability_and_equivalence( + src_contents: str, dst_contents: str, *, mode: Mode +) -> None: + """Perform stability and equivalence checks. + + Raise AssertionError if source and destination contents are not + equivalent, or if a second pass of the formatter would format the + content differently. 
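# ---- editor's sketch (illustrative only, not part of the patch) ----------
# The two safety nets named above are observable through the public API:
# equivalence means the reformatted code parses to the same AST, stability
# means a second run is a no-op. A quick check one can run by hand, assuming
# black is installed:
import ast

import black

mode = black.Mode()
src = "x = ( 1, )"
dst = black.format_str(src, mode=mode)
assert black.format_str(dst, mode=mode) == dst  # stability: a fixed point
assert ast.dump(ast.parse(src)) == ast.dump(ast.parse(dst))  # equivalence
# ---- end sketch -----------------------------------------------------------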
+ """ + assert_equivalent(src_contents, dst_contents) + assert_stable(src_contents, dst_contents, mode=mode) + + def format_file_contents(src_contents: str, *, fast: bool, mode: Mode) -> FileContent: """Reformat contents of a file and return new contents. @@ -1021,27 +917,124 @@ def format_file_contents(src_contents: str, *, fast: bool, mode: Mode) -> FileCo if not src_contents.strip(): raise NothingChanged - dst_contents = format_str(src_contents, mode=mode) + if mode.is_ipynb: + dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode) + else: + dst_contents = format_str(src_contents, mode=mode) if src_contents == dst_contents: raise NothingChanged - if not fast: - assert_equivalent(src_contents, dst_contents) - - # Forced second pass to work around optional trailing commas (becoming - # forced trailing commas on pass 2) interacting differently with optional - # parentheses. Admittedly ugly. - dst_contents_pass2 = format_str(dst_contents, mode=mode) - if dst_contents != dst_contents_pass2: - dst_contents = dst_contents_pass2 - assert_equivalent(src_contents, dst_contents, pass_num=2) - assert_stable(src_contents, dst_contents, mode=mode) - # Note: no need to explicitly call `assert_stable` if `dst_contents` was - # the same as `dst_contents_pass2`. + if not fast and not mode.is_ipynb: + # Jupyter notebooks will already have been checked above. + check_stability_and_equivalence(src_contents, dst_contents, mode=mode) return dst_contents -def format_str(src_contents: str, *, mode: Mode) -> FileContent: +def validate_cell(src: str, mode: Mode) -> None: + """Check that cell does not already contain TransformerManager transformations, + or non-Python cell magics, which might cause tokenizer_rt to break because of + indentations. + + If a cell contains ``!ls``, then it'll be transformed to + ``get_ipython().system('ls')``. However, if the cell originally contained + ``get_ipython().system('ls')``, then it would get transformed in the same way: + + >>> TransformerManager().transform_cell("get_ipython().system('ls')") + "get_ipython().system('ls')\n" + >>> TransformerManager().transform_cell("!ls") + "get_ipython().system('ls')\n" + + Due to the impossibility of safely roundtripping in such situations, cells + containing transformed magics will be ignored. + """ + if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS): + raise NothingChanged + if ( + src[:2] == "%%" + and src.split()[0][2:] not in PYTHON_CELL_MAGICS | mode.python_cell_magics + ): + raise NothingChanged + + +def format_cell(src: str, *, fast: bool, mode: Mode) -> str: + """Format code in given cell of Jupyter notebook. + + General idea is: + + - if cell has trailing semicolon, remove it; + - if cell has IPython magics, mask them; + - format cell; + - reinstate IPython magics; + - reinstate trailing semicolon (if originally present); + - strip trailing newlines. + + Cells with syntax errors will not be processed, as they + could potentially be automagics or multi-line magics, which + are currently not supported. 
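# ---- editor's sketch (illustrative only, not part of the patch) ----------
# The mask/reinstate steps in miniature. IPython magics are not valid Python,
# so each magic line is swapped for a unique placeholder name before
# format_str runs, then swapped back. `mask`/`unmask` below are hypothetical
# stand-ins for the mask_cell/unmask_cell helpers format_cell uses (see the
# body just below):
import secrets
from typing import Dict, Tuple


def mask(src: str) -> Tuple[str, Dict[str, str]]:
    replacements: Dict[str, str] = {}
    masked_lines = []
    for line in src.splitlines():
        if line.lstrip().startswith(("%", "!")):
            placeholder = f"magic_{secrets.token_hex(4)}"
            replacements[placeholder] = line
            masked_lines.append(placeholder)
        else:
            masked_lines.append(line)
    return "\n".join(masked_lines), replacements


def unmask(src: str, replacements: Dict[str, str]) -> str:
    for placeholder, line in replacements.items():
        src = src.replace(placeholder, line)
    return src


masked, found = mask("%time x = 1\ny=2")
# ... run the formatter over `masked` here ...
assert unmask(masked, found) == "%time x = 1\ny=2"
# ---- end sketch -----------------------------------------------------------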
+ """ + validate_cell(src, mode) + src_without_trailing_semicolon, has_trailing_semicolon = remove_trailing_semicolon( + src + ) + try: + masked_src, replacements = mask_cell(src_without_trailing_semicolon) + except SyntaxError: + raise NothingChanged from None + masked_dst = format_str(masked_src, mode=mode) + if not fast: + check_stability_and_equivalence(masked_src, masked_dst, mode=mode) + dst_without_trailing_semicolon = unmask_cell(masked_dst, replacements) + dst = put_trailing_semicolon_back( + dst_without_trailing_semicolon, has_trailing_semicolon + ) + dst = dst.rstrip("\n") + if dst == src: + raise NothingChanged from None + return dst + + +def validate_metadata(nb: MutableMapping[str, Any]) -> None: + """If notebook is marked as non-Python, don't format it. + + All notebook metadata fields are optional, see + https://nbformat.readthedocs.io/en/latest/format_description.html. So + if a notebook has empty metadata, we will try to parse it anyway. + """ + language = nb.get("metadata", {}).get("language_info", {}).get("name", None) + if language is not None and language != "python": + raise NothingChanged from None + + +def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent: + """Format Jupyter notebook. + + Operate cell-by-cell, only on code cells, only for Python notebooks. + If the ``.ipynb`` originally had a trailing newline, it'll be preserved. + """ + trailing_newline = src_contents[-1] == "\n" + modified = False + nb = json.loads(src_contents) + validate_metadata(nb) + for cell in nb["cells"]: + if cell.get("cell_type", None) == "code": + try: + src = "".join(cell["source"]) + dst = format_cell(src, fast=fast, mode=mode) + except NothingChanged: + pass + else: + cell["source"] = dst.splitlines(keepends=True) + modified = True + if modified: + dst_contents = json.dumps(nb, indent=1, ensure_ascii=False) + if trailing_newline: + dst_contents = dst_contents + "\n" + return dst_contents + else: + raise NothingChanged + + +def format_str(src_contents: str, *, mode: Mode) -> str: """Reformat a string and return new contents. `mode` determines formatting options, such as how many characters per line are @@ -1071,19 +1064,26 @@ def f( hey """ + dst_contents = _format_str_once(src_contents, mode=mode) + # Forced second pass to work around optional trailing commas (becoming + # forced trailing commas on pass 2) interacting differently with optional + # parentheses. Admittedly ugly. 
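    # (editor's note, not part of the patch) In other words: pass 1 may
    # introduce a magic trailing comma inside brackets it just exploded, and
    # that comma changes how optional parentheses are chosen; re-running
    # _format_str_once on the changed output is intended to land on a fixed
    # point, roughly:
    #     once = _format_str_once(src_contents, mode=mode)
    #     twice = _format_str_once(once, mode=mode)  # what gets returned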
+ if src_contents != dst_contents: + return _format_str_once(dst_contents, mode=mode) + return dst_contents + + +def _format_str_once(src_contents: str, *, mode: Mode) -> str: src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions) dst_contents = [] - future_imports = get_future_imports(src_node) if mode.target_versions: versions = mode.target_versions else: - versions = detect_target_versions(src_node) - normalize_fmt_off(src_node) - lines = LineGenerator( - mode=mode, - remove_u_prefix="unicode_literals" in future_imports - or supports_feature(versions, Feature.UNICODE_LITERALS), - ) + future_imports = get_future_imports(src_node) + versions = detect_target_versions(src_node, future_imports=future_imports) + + normalize_fmt_off(src_node, preview=mode.preview) + lines = LineGenerator(mode=mode) elt = EmptyLineTracker(is_pyi=mode.is_pyi) empty_line = Line(mode=mode) after = 0 @@ -1120,4881 +1120,51 @@ def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]: return tiow.read(), encoding, newline -def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: - if not target_versions: - # No target_version specified, so try all grammars. - return [ - # Python 3.7+ - pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords, - # Python 3.0-3.6 - pygram.python_grammar_no_print_statement_no_exec_statement, - # Python 2.7 with future print_function import - pygram.python_grammar_no_print_statement, - # Python 2.7 - pygram.python_grammar, - ] - - if all(version.is_python2() for version in target_versions): - # Python 2-only code, so try Python 2 grammars. - return [ - # Python 2.7 with future print_function import - pygram.python_grammar_no_print_statement, - # Python 2.7 - pygram.python_grammar, - ] - - # Python 3-compatible code, so only try Python 3 grammar. - grammars = [] - # If we have to parse both, try to parse async as a keyword first - if not supports_feature(target_versions, Feature.ASYNC_IDENTIFIERS): - # Python 3.7+ - grammars.append( - pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords - ) - if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS): - # Python 3.0-3.6 - grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement) - # At least one of the above branches must have been taken, because every Python - # version has exactly one of the two 'ASYNC_*' flags - return grammars - - -def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node: - """Given a string with source, return the lib2to3 Node.""" - if not src_txt.endswith("\n"): - src_txt += "\n" - - for grammar in get_grammars(set(target_versions)): - drv = driver.Driver(grammar, pytree.convert) - try: - result = drv.parse_string(src_txt, True) - break - - except ParseError as pe: - lineno, column = pe.context[1] - lines = src_txt.splitlines() - try: - faulty_line = lines[lineno - 1] - except IndexError: - faulty_line = "" - exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}") - else: - raise exc from None - - if isinstance(result, Leaf): - result = Node(syms.file_input, [result]) - return result - - -def lib2to3_unparse(node: Node) -> str: - """Given a lib2to3 node, return its string representation.""" - code = str(node) - return code - - -class Visitor(Generic[T]): - """Basic lib2to3 visitor that yields things of type `T` on `visit()`.""" - - def visit(self, node: LN) -> Iterator[T]: - """Main method to visit `node` and its children. 
- - It tries to find a `visit_*()` method for the given `node.type`, like - `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects. - If no dedicated `visit_*()` method is found, chooses `visit_default()` - instead. - - Then yields objects of type `T` from the selected visitor. - """ - if node.type < 256: - name = token.tok_name[node.type] - else: - name = str(type_repr(node.type)) - # We explicitly branch on whether a visitor exists (instead of - # using self.visit_default as the default arg to getattr) in order - # to save needing to create a bound method object and so mypyc can - # generate a native call to visit_default. - visitf = getattr(self, f"visit_{name}", None) - if visitf: - yield from visitf(node) - else: - yield from self.visit_default(node) - - def visit_default(self, node: LN) -> Iterator[T]: - """Default `visit_*()` implementation. Recurses to children of `node`.""" - if isinstance(node, Node): - for child in node.children: - yield from self.visit(child) - - -@dataclass -class DebugVisitor(Visitor[T]): - tree_depth: int = 0 - - def visit_default(self, node: LN) -> Iterator[T]: - indent = " " * (2 * self.tree_depth) - if isinstance(node, Node): - _type = type_repr(node.type) - out(f"{indent}{_type}", fg="yellow") - self.tree_depth += 1 - for child in node.children: - yield from self.visit(child) - - self.tree_depth -= 1 - out(f"{indent}/{_type}", fg="yellow", bold=False) - else: - _type = token.tok_name.get(node.type, str(node.type)) - out(f"{indent}{_type}", fg="blue", nl=False) - if node.prefix: - # We don't have to handle prefixes for `Node` objects since - # that delegates to the first child anyway. - out(f" {node.prefix!r}", fg="green", bold=False, nl=False) - out(f" {node.value!r}", fg="blue", bold=False) - - @classmethod - def show(cls, code: Union[str, Leaf, Node]) -> None: - """Pretty-print the lib2to3 AST of a given string of `code`. - - Convenience method for debugging. 
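# ---- editor's sketch (illustrative only, not part of the patch) ----------
# The getattr-based dispatch described in Visitor.visit, in miniature
# (Python type names here stand in for blib2to3 token/symbol names):
from typing import Iterator


class MiniVisitor:
    def visit(self, node: object) -> Iterator[str]:
        visitf = getattr(self, f"visit_{type(node).__name__}", None)
        if visitf:
            yield from visitf(node)
        else:
            yield from self.visit_default(node)

    def visit_int(self, node: object) -> Iterator[str]:
        yield f"int leaf: {node}"

    def visit_default(self, node: object) -> Iterator[str]:
        yield f"other node: {node!r}"


print(list(MiniVisitor().visit(5)))    # ['int leaf: 5']
print(list(MiniVisitor().visit("a")))  # ["other node: 'a'"]
# ---- end sketch -----------------------------------------------------------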
- """ - v: DebugVisitor[None] = DebugVisitor() - if isinstance(code, str): - code = lib2to3_parse(code) - list(v.visit(code)) - - -WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE} -STATEMENT: Final = { - syms.if_stmt, - syms.while_stmt, - syms.for_stmt, - syms.try_stmt, - syms.except_clause, - syms.with_stmt, - syms.funcdef, - syms.classdef, -} -STANDALONE_COMMENT: Final = 153 -token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT" -LOGIC_OPERATORS: Final = {"and", "or"} -COMPARATORS: Final = { - token.LESS, - token.GREATER, - token.EQEQUAL, - token.NOTEQUAL, - token.LESSEQUAL, - token.GREATEREQUAL, -} -MATH_OPERATORS: Final = { - token.VBAR, - token.CIRCUMFLEX, - token.AMPER, - token.LEFTSHIFT, - token.RIGHTSHIFT, - token.PLUS, - token.MINUS, - token.STAR, - token.SLASH, - token.DOUBLESLASH, - token.PERCENT, - token.AT, - token.TILDE, - token.DOUBLESTAR, -} -STARS: Final = {token.STAR, token.DOUBLESTAR} -VARARGS_SPECIALS: Final = STARS | {token.SLASH} -VARARGS_PARENTS: Final = { - syms.arglist, - syms.argument, # double star in arglist - syms.trailer, # single argument to call - syms.typedargslist, - syms.varargslist, # lambdas -} -UNPACKING_PARENTS: Final = { - syms.atom, # single element of a list or set literal - syms.dictsetmaker, - syms.listmaker, - syms.testlist_gexp, - syms.testlist_star_expr, -} -TEST_DESCENDANTS: Final = { - syms.test, - syms.lambdef, - syms.or_test, - syms.and_test, - syms.not_test, - syms.comparison, - syms.star_expr, - syms.expr, - syms.xor_expr, - syms.and_expr, - syms.shift_expr, - syms.arith_expr, - syms.trailer, - syms.term, - syms.power, -} -ASSIGNMENTS: Final = { - "=", - "+=", - "-=", - "*=", - "@=", - "/=", - "%=", - "&=", - "|=", - "^=", - "<<=", - ">>=", - "**=", - "//=", -} -COMPREHENSION_PRIORITY: Final = 20 -COMMA_PRIORITY: Final = 18 -TERNARY_PRIORITY: Final = 16 -LOGIC_PRIORITY: Final = 14 -STRING_PRIORITY: Final = 12 -COMPARATOR_PRIORITY: Final = 10 -MATH_PRIORITIES: Final = { - token.VBAR: 9, - token.CIRCUMFLEX: 8, - token.AMPER: 7, - token.LEFTSHIFT: 6, - token.RIGHTSHIFT: 6, - token.PLUS: 5, - token.MINUS: 5, - token.STAR: 4, - token.SLASH: 4, - token.DOUBLESLASH: 4, - token.PERCENT: 4, - token.AT: 4, - token.TILDE: 3, - token.DOUBLESTAR: 2, -} -DOT_PRIORITY: Final = 1 - - -@dataclass -class BracketTracker: - """Keeps track of brackets on a line.""" - - depth: int = 0 - bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict) - delimiters: Dict[LeafID, Priority] = field(default_factory=dict) - previous: Optional[Leaf] = None - _for_loop_depths: List[int] = field(default_factory=list) - _lambda_argument_depths: List[int] = field(default_factory=list) - invisible: List[Leaf] = field(default_factory=list) - - def mark(self, leaf: Leaf) -> None: - """Mark `leaf` with bracket-related metadata. Keep track of delimiters. - - All leaves receive an int `bracket_depth` field that stores how deep - within brackets a given leaf is. 0 means there are no enclosing brackets - that started on this line. - - If a leaf is itself a closing bracket, it receives an `opening_bracket` - field that it forms a pair with. This is a one-directional link to - avoid reference cycles. - - If a leaf is a delimiter (a token on which Black can split the line if - needed) and it's on depth 0, its `id()` is stored in the tracker's - `delimiters` field. 
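# ---- editor's sketch (illustrative only, not part of the patch) ----------
# The bracket_depth bookkeeping mark() describes, in miniature: a closing
# bracket is recorded at the depth of its opener, and everything between
# them sits one level deeper:
PAIRS = {"(": ")", "[": "]", "{": "}"}


def depths(source: str) -> list:
    depth = 0
    result = []
    for ch in source:
        if ch in PAIRS.values():
            depth -= 1  # closers drop back to their opener's depth first
        result.append((ch, depth))
        if ch in PAIRS:
            depth += 1
    return result


print(depths("f(a,[b])"))
# [('f', 0), ('(', 0), ('a', 1), (',', 1), ('[', 1), ('b', 2), (']', 1), (')', 0)]
# ---- end sketch -----------------------------------------------------------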
- """ - if leaf.type == token.COMMENT: - return - - self.maybe_decrement_after_for_loop_variable(leaf) - self.maybe_decrement_after_lambda_arguments(leaf) - if leaf.type in CLOSING_BRACKETS: - self.depth -= 1 - try: - opening_bracket = self.bracket_match.pop((self.depth, leaf.type)) - except KeyError as e: - raise BracketMatchError( - "Unable to match a closing bracket to the following opening" - f" bracket: {leaf}" - ) from e - leaf.opening_bracket = opening_bracket - if not leaf.value: - self.invisible.append(leaf) - leaf.bracket_depth = self.depth - if self.depth == 0: - delim = is_split_before_delimiter(leaf, self.previous) - if delim and self.previous is not None: - self.delimiters[id(self.previous)] = delim - else: - delim = is_split_after_delimiter(leaf, self.previous) - if delim: - self.delimiters[id(leaf)] = delim - if leaf.type in OPENING_BRACKETS: - self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf - self.depth += 1 - if not leaf.value: - self.invisible.append(leaf) - self.previous = leaf - self.maybe_increment_lambda_arguments(leaf) - self.maybe_increment_for_loop_variable(leaf) - - def any_open_brackets(self) -> bool: - """Return True if there is an yet unmatched open bracket on the line.""" - return bool(self.bracket_match) - - def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority: - """Return the highest priority of a delimiter found on the line. - - Values are consistent with what `is_split_*_delimiter()` return. - Raises ValueError on no delimiters. - """ - return max(v for k, v in self.delimiters.items() if k not in exclude) - - def delimiter_count_with_priority(self, priority: Priority = 0) -> int: - """Return the number of delimiters with the given `priority`. - - If no `priority` is passed, defaults to max priority on the line. - """ - if not self.delimiters: - return 0 - - priority = priority or self.max_delimiter_priority() - return sum(1 for p in self.delimiters.values() if p == priority) - - def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool: - """In a for loop, or comprehension, the variables are often unpacks. - - To avoid splitting on the comma in this situation, increase the depth of - tokens between `for` and `in`. - """ - if leaf.type == token.NAME and leaf.value == "for": - self.depth += 1 - self._for_loop_depths.append(self.depth) - return True - - return False - - def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool: - """See `maybe_increment_for_loop_variable` above for explanation.""" - if ( - self._for_loop_depths - and self._for_loop_depths[-1] == self.depth - and leaf.type == token.NAME - and leaf.value == "in" - ): - self.depth -= 1 - self._for_loop_depths.pop() - return True - - return False - - def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool: - """In a lambda expression, there might be more than one argument. - - To avoid splitting on the comma in this situation, increase the depth of - tokens between `lambda` and `:`. 
- """ - if leaf.type == token.NAME and leaf.value == "lambda": - self.depth += 1 - self._lambda_argument_depths.append(self.depth) - return True - - return False - - def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool: - """See `maybe_increment_lambda_arguments` above for explanation.""" - if ( - self._lambda_argument_depths - and self._lambda_argument_depths[-1] == self.depth - and leaf.type == token.COLON - ): - self.depth -= 1 - self._lambda_argument_depths.pop() - return True - - return False - - def get_open_lsqb(self) -> Optional[Leaf]: - """Return the most recent opening square bracket (if any).""" - return self.bracket_match.get((self.depth - 1, token.RSQB)) - - -@dataclass -class Line: - """Holds leaves and comments. Can be printed with `str(line)`.""" - - mode: Mode - depth: int = 0 - leaves: List[Leaf] = field(default_factory=list) - # keys ordered like `leaves` - comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict) - bracket_tracker: BracketTracker = field(default_factory=BracketTracker) - inside_brackets: bool = False - should_split_rhs: bool = False - magic_trailing_comma: Optional[Leaf] = None - - def append(self, leaf: Leaf, preformatted: bool = False) -> None: - """Add a new `leaf` to the end of the line. - - Unless `preformatted` is True, the `leaf` will receive a new consistent - whitespace prefix and metadata applied by :class:`BracketTracker`. - Trailing commas are maybe removed, unpacked for loop variables are - demoted from being delimiters. - - Inline comments are put aside. - """ - has_value = leaf.type in BRACKETS or bool(leaf.value.strip()) - if not has_value: - return - - if token.COLON == leaf.type and self.is_class_paren_empty: - del self.leaves[-2:] - if self.leaves and not preformatted: - # Note: at this point leaf.prefix should be empty except for - # imports, for which we only preserve newlines. - leaf.prefix += whitespace( - leaf, complex_subscript=self.is_complex_subscript(leaf) - ) - if self.inside_brackets or not preformatted: - self.bracket_tracker.mark(leaf) - if self.mode.magic_trailing_comma: - if self.has_magic_trailing_comma(leaf): - self.magic_trailing_comma = leaf - elif self.has_magic_trailing_comma(leaf, ensure_removable=True): - self.remove_trailing_comma() - if not self.append_comment(leaf): - self.leaves.append(leaf) - - def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None: - """Like :func:`append()` but disallow invalid standalone comment structure. - - Raises ValueError when any `leaf` is appended after a standalone comment - or when a standalone comment is not the first leaf on the line. 
- """ - if self.bracket_tracker.depth == 0: - if self.is_comment: - raise ValueError("cannot append to standalone comments") - - if self.leaves and leaf.type == STANDALONE_COMMENT: - raise ValueError( - "cannot append standalone comments to a populated line" - ) - - self.append(leaf, preformatted=preformatted) - - @property - def is_comment(self) -> bool: - """Is this line a standalone comment?""" - return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT - - @property - def is_decorator(self) -> bool: - """Is this line a decorator?""" - return bool(self) and self.leaves[0].type == token.AT - - @property - def is_import(self) -> bool: - """Is this an import line?""" - return bool(self) and is_import(self.leaves[0]) - - @property - def is_class(self) -> bool: - """Is this line a class definition?""" - return ( - bool(self) - and self.leaves[0].type == token.NAME - and self.leaves[0].value == "class" - ) - - @property - def is_stub_class(self) -> bool: - """Is this line a class definition with a body consisting only of "..."?""" - return self.is_class and self.leaves[-3:] == [ - Leaf(token.DOT, ".") for _ in range(3) - ] - - @property - def is_def(self) -> bool: - """Is this a function definition? (Also returns True for async defs.)""" - try: - first_leaf = self.leaves[0] - except IndexError: - return False - - try: - second_leaf: Optional[Leaf] = self.leaves[1] - except IndexError: - second_leaf = None - return (first_leaf.type == token.NAME and first_leaf.value == "def") or ( - first_leaf.type == token.ASYNC - and second_leaf is not None - and second_leaf.type == token.NAME - and second_leaf.value == "def" - ) - - @property - def is_class_paren_empty(self) -> bool: - """Is this a class with no base classes but using parentheses? - - Those are unnecessary and should be removed. - """ - return ( - bool(self) - and len(self.leaves) == 4 - and self.is_class - and self.leaves[2].type == token.LPAR - and self.leaves[2].value == "(" - and self.leaves[3].type == token.RPAR - and self.leaves[3].value == ")" - ) - - @property - def is_triple_quoted_string(self) -> bool: - """Is the line a triple quoted string?""" - return ( - bool(self) - and self.leaves[0].type == token.STRING - and self.leaves[0].value.startswith(('"""', "'''")) - ) - - def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool: - """If so, needs to be split before emitting.""" - for leaf in self.leaves: - if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit: - return True - - return False - - def contains_uncollapsable_type_comments(self) -> bool: - ignored_ids = set() - try: - last_leaf = self.leaves[-1] - ignored_ids.add(id(last_leaf)) - if last_leaf.type == token.COMMA or ( - last_leaf.type == token.RPAR and not last_leaf.value - ): - # When trailing commas or optional parens are inserted by Black for - # consistency, comments after the previous last element are not moved - # (they don't have to, rendering will still be correct). So we ignore - # trailing commas and invisible. - last_leaf = self.leaves[-2] - ignored_ids.add(id(last_leaf)) - except IndexError: - return False - - # A type comment is uncollapsable if it is attached to a leaf - # that isn't at the end of the line (since that could cause it - # to get associated to a different argument) or if there are - # comments before it (since that could cause it to get hidden - # behind a comment. 
- comment_seen = False - for leaf_id, comments in self.comments.items(): - for comment in comments: - if is_type_comment(comment): - if comment_seen or ( - not is_type_comment(comment, " ignore") - and leaf_id not in ignored_ids - ): - return True - - comment_seen = True - - return False - - def contains_unsplittable_type_ignore(self) -> bool: - if not self.leaves: - return False - - # If a 'type: ignore' is attached to the end of a line, we - # can't split the line, because we can't know which of the - # subexpressions the ignore was meant to apply to. - # - # We only want this to apply to actual physical lines from the - # original source, though: we don't want the presence of a - # 'type: ignore' at the end of a multiline expression to - # justify pushing it all onto one line. Thus we - # (unfortunately) need to check the actual source lines and - # only report an unsplittable 'type: ignore' if this line was - # one line in the original code. - - # Grab the first and last line numbers, skipping generated leaves - first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0) - last_line = next( - (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0 - ) - - if first_line == last_line: - # We look at the last two leaves since a comma or an - # invisible paren could have been added at the end of the - # line. - for node in self.leaves[-2:]: - for comment in self.comments.get(id(node), []): - if is_type_comment(comment, " ignore"): - return True - - return False - - def contains_multiline_strings(self) -> bool: - return any(is_multiline_string(leaf) for leaf in self.leaves) - - def has_magic_trailing_comma( - self, closing: Leaf, ensure_removable: bool = False - ) -> bool: - """Return True if we have a magic trailing comma, that is when: - - there's a trailing comma here - - it's not a one-tuple - Additionally, if ensure_removable: - - it's not from square bracket indexing - """ - if not ( - closing.type in CLOSING_BRACKETS - and self.leaves - and self.leaves[-1].type == token.COMMA - ): - return False - - if closing.type == token.RBRACE: - return True - - if closing.type == token.RSQB: - if not ensure_removable: - return True - comma = self.leaves[-1] - return bool(comma.parent and comma.parent.type == syms.listmaker) - - if self.is_import: - return True - - if not is_one_tuple_between(closing.opening_bracket, closing, self.leaves): - return True - - return False - - def append_comment(self, comment: Leaf) -> bool: - """Add an inline or standalone comment to the line.""" - if ( - comment.type == STANDALONE_COMMENT - and self.bracket_tracker.any_open_brackets() - ): - comment.prefix = "" - return False - - if comment.type != token.COMMENT: - return False - - if not self.leaves: - comment.type = STANDALONE_COMMENT - comment.prefix = "" - return False - - last_leaf = self.leaves[-1] - if ( - last_leaf.type == token.RPAR - and not last_leaf.value - and last_leaf.parent - and len(list(last_leaf.parent.leaves())) <= 3 - and not is_type_comment(comment) - ): - # Comments on an optional parens wrapping a single leaf should belong to - # the wrapped node except if it's a type comment. Pinning the comment like - # this avoids unstable formatting caused by comment migration. 
- if len(self.leaves) < 2: - comment.type = STANDALONE_COMMENT - comment.prefix = "" - return False - - last_leaf = self.leaves[-2] - self.comments.setdefault(id(last_leaf), []).append(comment) - return True - - def comments_after(self, leaf: Leaf) -> List[Leaf]: - """Generate comments that should appear directly after `leaf`.""" - return self.comments.get(id(leaf), []) - - def remove_trailing_comma(self) -> None: - """Remove the trailing comma and moves the comments attached to it.""" - trailing_comma = self.leaves.pop() - trailing_comma_comments = self.comments.pop(id(trailing_comma), []) - self.comments.setdefault(id(self.leaves[-1]), []).extend( - trailing_comma_comments - ) - - def is_complex_subscript(self, leaf: Leaf) -> bool: - """Return True iff `leaf` is part of a slice with non-trivial exprs.""" - open_lsqb = self.bracket_tracker.get_open_lsqb() - if open_lsqb is None: - return False - - subscript_start = open_lsqb.next_sibling - - if isinstance(subscript_start, Node): - if subscript_start.type == syms.listmaker: - return False - - if subscript_start.type == syms.subscriptlist: - subscript_start = child_towards(subscript_start, leaf) - return subscript_start is not None and any( - n.type in TEST_DESCENDANTS for n in subscript_start.pre_order() - ) - - def clone(self) -> "Line": - return Line( - mode=self.mode, - depth=self.depth, - inside_brackets=self.inside_brackets, - should_split_rhs=self.should_split_rhs, - magic_trailing_comma=self.magic_trailing_comma, - ) - - def __str__(self) -> str: - """Render the line.""" - if not self: - return "\n" - - indent = " " * self.depth - leaves = iter(self.leaves) - first = next(leaves) - res = f"{first.prefix}{indent}{first.value}" - for leaf in leaves: - res += str(leaf) - for comment in itertools.chain.from_iterable(self.comments.values()): - res += str(comment) - - return res + "\n" - - def __bool__(self) -> bool: - """Return True if the line has leaves or comments.""" - return bool(self.leaves or self.comments) - - -@dataclass -class EmptyLineTracker: - """Provides a stateful method that returns the number of potential extra - empty lines needed before and after the currently processed line. - - Note: this tracker works on lines that haven't been split yet. It assumes - the prefix of the first leaf consists of optional newlines. Those newlines - are consumed by `maybe_empty_lines()` and included in the computation. - """ - - is_pyi: bool = False - previous_line: Optional[Line] = None - previous_after: int = 0 - previous_defs: List[int] = field(default_factory=list) - - def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: - """Return the number of extra empty lines before and after the `current_line`. - - This is for separating `def`, `async def` and `class` with extra empty - lines (two on module-level). - """ - before, after = self._maybe_empty_lines(current_line) - before = ( - # Black should not insert empty lines at the beginning - # of the file - 0 - if self.previous_line is None - else before - self.previous_after - ) - self.previous_after = after - self.previous_line = current_line - return before, after - - def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: - max_allowed = 1 - if current_line.depth == 0: - max_allowed = 1 if self.is_pyi else 2 - if current_line.leaves: - # Consume the first leaf's extra newlines. 
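        # (editor's note, summary, not part of the patch) The clamp below
        # caps user-authored blank runs at max_allowed (two at module level,
        # one when nested; .pyi stubs allow only one at module level). After
        # that, dedenting past a tracked def and the class/def rules further
        # down can still override the figure.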
- first_leaf = current_line.leaves[0] - before = first_leaf.prefix.count("\n") - before = min(before, max_allowed) - first_leaf.prefix = "" - else: - before = 0 - depth = current_line.depth - while self.previous_defs and self.previous_defs[-1] >= depth: - self.previous_defs.pop() - if self.is_pyi: - before = 0 if depth else 1 - else: - before = 1 if depth else 2 - if current_line.is_decorator or current_line.is_def or current_line.is_class: - return self._maybe_empty_lines_for_class_or_def(current_line, before) - - if ( - self.previous_line - and self.previous_line.is_import - and not current_line.is_import - and depth == self.previous_line.depth - ): - return (before or 1), 0 - - if ( - self.previous_line - and self.previous_line.is_class - and current_line.is_triple_quoted_string - ): - return before, 1 - - return before, 0 - - def _maybe_empty_lines_for_class_or_def( - self, current_line: Line, before: int - ) -> Tuple[int, int]: - if not current_line.is_decorator: - self.previous_defs.append(current_line.depth) - if self.previous_line is None: - # Don't insert empty lines before the first line in the file. - return 0, 0 - - if self.previous_line.is_decorator: - if self.is_pyi and current_line.is_stub_class: - # Insert an empty line after a decorated stub class - return 0, 1 - - return 0, 0 - - if self.previous_line.depth < current_line.depth and ( - self.previous_line.is_class or self.previous_line.is_def - ): - return 0, 0 - - if ( - self.previous_line.is_comment - and self.previous_line.depth == current_line.depth - and before == 0 - ): - return 0, 0 - - if self.is_pyi: - if self.previous_line.depth > current_line.depth: - newlines = 1 - elif current_line.is_class or self.previous_line.is_class: - if current_line.is_stub_class and self.previous_line.is_stub_class: - # No blank line between classes with an empty body - newlines = 0 - else: - newlines = 1 - elif ( - current_line.is_def or current_line.is_decorator - ) and not self.previous_line.is_def: - # Blank line between a block of functions (maybe with preceding - # decorators) and a block of non-functions - newlines = 1 - else: - newlines = 0 - else: - newlines = 2 - if current_line.depth and newlines: - newlines -= 1 - return newlines, 0 - - -@dataclass -class LineGenerator(Visitor[Line]): - """Generates reformatted Line objects. Empty lines are not emitted. - - Note: destroys the tree it's visiting by mutating prefixes of its leaves - in ways that will no longer stringify to valid Python code on the tree. - """ - - mode: Mode - remove_u_prefix: bool = False - current_line: Line = field(init=False) - - def line(self, indent: int = 0) -> Iterator[Line]: - """Generate a line. - - If the line is empty, only emit if it makes sense. - If the line is too long, split it first and then generate. - - If any lines were generated, set up a new current_line. - """ - if not self.current_line: - self.current_line.depth += indent - return # Line is empty, don't emit. Creating a new one unnecessary. - - complete_line = self.current_line - self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent) - yield complete_line - - def visit_default(self, node: LN) -> Iterator[Line]: - """Default `visit_*()` implementation. 
Recurses to children of `node`.""" - if isinstance(node, Leaf): - any_open_brackets = self.current_line.bracket_tracker.any_open_brackets() - for comment in generate_comments(node): - if any_open_brackets: - # any comment within brackets is subject to splitting - self.current_line.append(comment) - elif comment.type == token.COMMENT: - # regular trailing comment - self.current_line.append(comment) - yield from self.line() - - else: - # regular standalone comment - yield from self.line() - - self.current_line.append(comment) - yield from self.line() - - normalize_prefix(node, inside_brackets=any_open_brackets) - if self.mode.string_normalization and node.type == token.STRING: - normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix) - normalize_string_quotes(node) - if node.type == token.NUMBER: - normalize_numeric_literal(node) - if node.type not in WHITESPACE: - self.current_line.append(node) - yield from super().visit_default(node) - - def visit_INDENT(self, node: Leaf) -> Iterator[Line]: - """Increase indentation level, maybe yield a line.""" - # In blib2to3 INDENT never holds comments. - yield from self.line(+1) - yield from self.visit_default(node) - - def visit_DEDENT(self, node: Leaf) -> Iterator[Line]: - """Decrease indentation level, maybe yield a line.""" - # The current line might still wait for trailing comments. At DEDENT time - # there won't be any (they would be prefixes on the preceding NEWLINE). - # Emit the line then. - yield from self.line() - - # While DEDENT has no value, its prefix may contain standalone comments - # that belong to the current indentation level. Get 'em. - yield from self.visit_default(node) - - # Finally, emit the dedent. - yield from self.line(-1) - - def visit_stmt( - self, node: Node, keywords: Set[str], parens: Set[str] - ) -> Iterator[Line]: - """Visit a statement. - - This implementation is shared for `if`, `while`, `for`, `try`, `except`, - `def`, `with`, `class`, `assert` and assignments. - - The relevant Python language `keywords` for a given statement will be - NAME leaves within it. This methods puts those on a separate line. - - `parens` holds a set of string leaf values immediately after which - invisible parens should be put. 
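# ---- editor's sketch (illustrative only, not part of the patch) ----------
# The invisible parentheses inserted after `parens` keywords are what let a
# long condition wrap without the user ever writing them; roughly:
import black

src = (
    "if aaaaaaaaaaaaaaaaaaaa and bbbbbbbbbbbbbbbbbbbb and cccccccccccccccccccc:\n"
    "    pass\n"
)
print(black.format_str(src, mode=black.Mode(line_length=40)))
# if (
#     aaaaaaaaaaaaaaaaaaaa
#     and bbbbbbbbbbbbbbbbbbbb
#     and cccccccccccccccccccc
# ):
#     pass
# ---- end sketch -----------------------------------------------------------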
- """ - normalize_invisible_parens(node, parens_after=parens) - for child in node.children: - if child.type == token.NAME and child.value in keywords: # type: ignore - yield from self.line() - - yield from self.visit(child) - - def visit_suite(self, node: Node) -> Iterator[Line]: - """Visit a suite.""" - if self.mode.is_pyi and is_stub_suite(node): - yield from self.visit(node.children[2]) - else: - yield from self.visit_default(node) - - def visit_simple_stmt(self, node: Node) -> Iterator[Line]: - """Visit a statement without nested statements.""" - if first_child_is_arith(node): - wrap_in_parentheses(node, node.children[0], visible=False) - is_suite_like = node.parent and node.parent.type in STATEMENT - if is_suite_like: - if self.mode.is_pyi and is_stub_body(node): - yield from self.visit_default(node) - else: - yield from self.line(+1) - yield from self.visit_default(node) - yield from self.line(-1) - - else: - if ( - not self.mode.is_pyi - or not node.parent - or not is_stub_suite(node.parent) - ): - yield from self.line() - yield from self.visit_default(node) - - def visit_async_stmt(self, node: Node) -> Iterator[Line]: - """Visit `async def`, `async for`, `async with`.""" - yield from self.line() - - children = iter(node.children) - for child in children: - yield from self.visit(child) - - if child.type == token.ASYNC: - break - - internal_stmt = next(children) - for child in internal_stmt.children: - yield from self.visit(child) - - def visit_decorators(self, node: Node) -> Iterator[Line]: - """Visit decorators.""" - for child in node.children: - yield from self.line() - yield from self.visit(child) - - def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]: - """Remove a semicolon and put the other statement on a separate line.""" - yield from self.line() - - def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]: - """End of file. Process outstanding comments and end with a newline.""" - yield from self.visit_default(leaf) - yield from self.line() - - def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]: - if not self.current_line.bracket_tracker.any_open_brackets(): - yield from self.line() - yield from self.visit_default(leaf) - - def visit_factor(self, node: Node) -> Iterator[Line]: - """Force parentheses between a unary op and a binary power: - - -2 ** 8 -> -(2 ** 8) - """ - _operator, operand = node.children - if ( - operand.type == syms.power - and len(operand.children) == 3 - and operand.children[1].type == token.DOUBLESTAR - ): - lpar = Leaf(token.LPAR, "(") - rpar = Leaf(token.RPAR, ")") - index = operand.remove() or 0 - node.insert_child(index, Node(syms.atom, [lpar, operand, rpar])) - yield from self.visit_default(node) - - def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: - if is_docstring(leaf) and "\\\n" not in leaf.value: - # We're ignoring docstrings with backslash newline escapes because changing - # indentation of those changes the AST representation of the code. - prefix = get_string_prefix(leaf.value) - docstring = leaf.value[len(prefix) :] # Remove the prefix - quote_char = docstring[0] - # A natural way to remove the outer quotes is to do: - # docstring = docstring.strip(quote_char) - # but that breaks on """""x""" (which is '""x'). - # So we actually need to remove the first character and the next two - # characters but only if they are the same as the first. 
- quote_len = 1 if docstring[1] != quote_char else 3 - docstring = docstring[quote_len:-quote_len] - - if is_multiline_string(leaf): - indent = " " * 4 * self.current_line.depth - docstring = fix_docstring(docstring, indent) - else: - docstring = docstring.strip() - - if docstring: - # Add some padding if the docstring starts / ends with a quote mark. - if docstring[0] == quote_char: - docstring = " " + docstring - if docstring[-1] == quote_char: - docstring += " " - if docstring[-1] == "\\": - backslash_count = len(docstring) - len(docstring.rstrip("\\")) - if backslash_count % 2: - # Odd number of tailing backslashes, add some padding to - # avoid escaping the closing string quote. - docstring += " " - else: - # Add some padding if the docstring is empty. - docstring = " " - - # We could enforce triple quotes at this point. - quote = quote_char * quote_len - leaf.value = prefix + quote + docstring + quote - - yield from self.visit_default(leaf) - - def __post_init__(self) -> None: - """You are in a twisty little maze of passages.""" - self.current_line = Line(mode=self.mode) - - v = self.visit_stmt - Ø: Set[str] = set() - self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","}) - self.visit_if_stmt = partial( - v, keywords={"if", "else", "elif"}, parens={"if", "elif"} - ) - self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"}) - self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"}) - self.visit_try_stmt = partial( - v, keywords={"try", "except", "else", "finally"}, parens=Ø - ) - self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø) - self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø) - self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø) - self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) - self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS) - self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"}) - self.visit_import_from = partial(v, keywords=Ø, parens={"import"}) - self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"}) - self.visit_async_funcdef = self.visit_async_stmt - self.visit_decorated = self.visit_decorators - - -IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist} -BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE} -OPENING_BRACKETS = set(BRACKET.keys()) -CLOSING_BRACKETS = set(BRACKET.values()) -BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS -ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT} - - -def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901 - """Return whitespace prefix if needed for the given `leaf`. - - `complex_subscript` signals whether the given leaf is part of a subscription - which has non-trivial arguments, like arithmetic expressions or function calls. 
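# ---- editor's sketch (illustrative only, not part of the patch) ----------
# `complex_subscript` is visible in the output: trivial slice bounds keep
# tight colons, non-trivial ones get surrounding spaces (per PEP 8):
import black

mode = black.Mode()
print(black.format_str("x[1:2]", mode=mode), end="")    # x[1:2]
print(black.format_str("x[a+1:b]", mode=mode), end="")  # x[a + 1 : b]
# ---- end sketch -----------------------------------------------------------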
- """ - NO = "" - SPACE = " " - DOUBLESPACE = " " - t = leaf.type - p = leaf.parent - v = leaf.value - if t in ALWAYS_NO_SPACE: - return NO - - if t == token.COMMENT: - return DOUBLESPACE - - assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}" - if t == token.COLON and p.type not in { - syms.subscript, - syms.subscriptlist, - syms.sliceop, - }: - return NO - - prev = leaf.prev_sibling - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type in OPENING_BRACKETS: - return NO - - if t == token.COLON: - if prevp.type == token.COLON: - return NO - - elif prevp.type != token.COMMA and not complex_subscript: - return NO - - return SPACE - - if prevp.type == token.EQUAL: - if prevp.parent: - if prevp.parent.type in { - syms.arglist, - syms.argument, - syms.parameters, - syms.varargslist, - }: - return NO - - elif prevp.parent.type == syms.typedargslist: - # A bit hacky: if the equal sign has whitespace, it means we - # previously found it's a typed argument. So, we're using - # that, too. - return prevp.prefix - - elif prevp.type in VARARGS_SPECIALS: - if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS): - return NO - - elif prevp.type == token.COLON: - if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}: - return SPACE if complex_subscript else NO - - elif ( - prevp.parent - and prevp.parent.type == syms.factor - and prevp.type in MATH_OPERATORS - ): - return NO - - elif ( - prevp.type == token.RIGHTSHIFT - and prevp.parent - and prevp.parent.type == syms.shift_expr - and prevp.prev_sibling - and prevp.prev_sibling.type == token.NAME - and prevp.prev_sibling.value == "print" # type: ignore - ): - # Python 2 print chevron - return NO - elif prevp.type == token.AT and p.parent and p.parent.type == syms.decorator: - # no space in decorators - return NO - - elif prev.type in OPENING_BRACKETS: - return NO - - if p.type in {syms.parameters, syms.arglist}: - # untyped function signatures or calls - if not prev or prev.type != token.COMMA: - return NO - - elif p.type == syms.varargslist: - # lambdas - if prev and prev.type != token.COMMA: - return NO - - elif p.type == syms.typedargslist: - # typed function signatures - if not prev: - return NO - - if t == token.EQUAL: - if prev.type != syms.tname: - return NO - - elif prev.type == token.EQUAL: - # A bit hacky: if the equal sign has whitespace, it means we - # previously found it's a typed argument. So, we're using that, too. 
- return prev.prefix - - elif prev.type != token.COMMA: - return NO - - elif p.type == syms.tname: - # type names - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type != token.COMMA: - return NO - - elif p.type == syms.trailer: - # attributes and calls - if t == token.LPAR or t == token.RPAR: - return NO - - if not prev: - if t == token.DOT: - prevp = preceding_leaf(p) - if not prevp or prevp.type != token.NUMBER: - return NO - - elif t == token.LSQB: - return NO - - elif prev.type != token.COMMA: - return NO - - elif p.type == syms.argument: - # single argument - if t == token.EQUAL: - return NO - - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type == token.LPAR: - return NO - - elif prev.type in {token.EQUAL} | VARARGS_SPECIALS: - return NO - - elif p.type == syms.decorator: - # decorators - return NO - - elif p.type == syms.dotted_name: - if prev: - return NO - - prevp = preceding_leaf(p) - if not prevp or prevp.type == token.AT or prevp.type == token.DOT: - return NO - - elif p.type == syms.classdef: - if t == token.LPAR: - return NO - - if prev and prev.type == token.LPAR: - return NO - - elif p.type in {syms.subscript, syms.sliceop}: - # indexing - if not prev: - assert p.parent is not None, "subscripts are always parented" - if p.parent.type == syms.subscriptlist: - return SPACE - - return NO - - elif not complex_subscript: - return NO - - elif p.type == syms.atom: - if prev and t == token.DOT: - # dots, but not the first one. - return NO - - elif p.type == syms.dictsetmaker: - # dict unpacking - if prev and prev.type == token.DOUBLESTAR: - return NO - - elif p.type in {syms.factor, syms.star_expr}: - # unary ops - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type in OPENING_BRACKETS: - return NO - - prevp_parent = prevp.parent - assert prevp_parent is not None - if prevp.type == token.COLON and prevp_parent.type in { - syms.subscript, - syms.sliceop, - }: - return NO - - elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument: - return NO - - elif t in {token.NAME, token.NUMBER, token.STRING}: - return NO - - elif p.type == syms.import_from: - if t == token.DOT: - if prev and prev.type == token.DOT: - return NO - - elif t == token.NAME: - if v == "import": - return SPACE - - if prev and prev.type == token.DOT: - return NO - - elif p.type == syms.sliceop: - return NO - - return SPACE - - -def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]: - """Return the first leaf that precedes `node`, if any.""" - while node: - res = node.prev_sibling - if res: - if isinstance(res, Leaf): - return res - - try: - return list(res.leaves())[-1] - - except IndexError: - return None - - node = node.parent - return None - - -def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool: - """Return if the `node` and its previous siblings match types against the provided - list of tokens; the provided `node`has its type matched against the last element in - the list. 
`None` can be used as the first element to declare that the start of the - list is anchored at the start of its parent's children.""" - if not tokens: - return True - if tokens[-1] is None: - return node is None - if not node: - return False - if node.type != tokens[-1]: - return False - return prev_siblings_are(node.prev_sibling, tokens[:-1]) - - -def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]: - """Return the child of `ancestor` that contains `descendant`.""" - node: Optional[LN] = descendant - while node and node.parent != ancestor: - node = node.parent - return node - - -def container_of(leaf: Leaf) -> LN: - """Return `leaf` or one of its ancestors that is the topmost container of it. - - By "container" we mean a node where `leaf` is the very first child. - """ - same_prefix = leaf.prefix - container: LN = leaf - while container: - parent = container.parent - if parent is None: - break - - if parent.children[0].prefix != same_prefix: - break - - if parent.type == syms.file_input: - break - - if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS: - break - - container = parent - return container - - -def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: - """Return the priority of the `leaf` delimiter, given a line break after it. - - The delimiter priorities returned here are from those delimiters that would - cause a line break after themselves. - - Higher numbers are higher priority. - """ - if leaf.type == token.COMMA: - return COMMA_PRIORITY - - return 0 - - -def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: - """Return the priority of the `leaf` delimiter, given a line break before it. - - The delimiter priorities returned here are from those delimiters that would - cause a line break before themselves. - - Higher numbers are higher priority. - """ - if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS): - # * and ** might also be MATH_OPERATORS but in this case they are not. - # Don't treat them as a delimiter. 
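    # (editor's note, summary, not part of the patch) Priority order for the
    # checks that follow, highest first: comprehension for/if (20), comma
    # (18), ternary (16), and/or (14), implicit string concatenation (12),
    # comparators (10), math operators (9 down to 2, per MATH_PRIORITIES),
    # attribute dots (1). Higher numbers win when picking the delimiter to
    # split on.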
- return 0 - - if ( - leaf.type == token.DOT - and leaf.parent - and leaf.parent.type not in {syms.import_from, syms.dotted_name} - and (previous is None or previous.type in CLOSING_BRACKETS) - ): - return DOT_PRIORITY - - if ( - leaf.type in MATH_OPERATORS - and leaf.parent - and leaf.parent.type not in {syms.factor, syms.star_expr} - ): - return MATH_PRIORITIES[leaf.type] - - if leaf.type in COMPARATORS: - return COMPARATOR_PRIORITY - - if ( - leaf.type == token.STRING - and previous is not None - and previous.type == token.STRING - ): - return STRING_PRIORITY - - if leaf.type not in {token.NAME, token.ASYNC}: - return 0 - - if ( - leaf.value == "for" - and leaf.parent - and leaf.parent.type in {syms.comp_for, syms.old_comp_for} - or leaf.type == token.ASYNC - ): - if ( - not isinstance(leaf.prev_sibling, Leaf) - or leaf.prev_sibling.value != "async" - ): - return COMPREHENSION_PRIORITY - - if ( - leaf.value == "if" - and leaf.parent - and leaf.parent.type in {syms.comp_if, syms.old_comp_if} - ): - return COMPREHENSION_PRIORITY - - if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test: - return TERNARY_PRIORITY - - if leaf.value == "is": - return COMPARATOR_PRIORITY - - if ( - leaf.value == "in" - and leaf.parent - and leaf.parent.type in {syms.comp_op, syms.comparison} - and not ( - previous is not None - and previous.type == token.NAME - and previous.value == "not" - ) - ): - return COMPARATOR_PRIORITY - - if ( - leaf.value == "not" - and leaf.parent - and leaf.parent.type == syms.comp_op - and not ( - previous is not None - and previous.type == token.NAME - and previous.value == "is" - ) - ): - return COMPARATOR_PRIORITY - - if leaf.value in LOGIC_OPERATORS and leaf.parent: - return LOGIC_PRIORITY - - return 0 - - -FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"} -FMT_SKIP = {"# fmt: skip", "# fmt:skip"} -FMT_PASS = {*FMT_OFF, *FMT_SKIP} -FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"} - - -def generate_comments(leaf: LN) -> Iterator[Leaf]: - """Clean the prefix of the `leaf` and generate comments from it, if any. - - Comments in lib2to3 are shoved into the whitespace prefix. This happens - in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation - move because it does away with modifying the grammar to include all the - possible places in which comments can be placed. - - The sad consequence for us though is that comments don't "belong" anywhere. - This is why this function generates simple parentless Leaf objects for - comments. We simply don't know what the correct parent should be. - - No matter though, we can live without this. We really only need to - differentiate between inline and standalone comments. The latter don't - share the line with any code. - - Inline comments are emitted as regular token.COMMENT leaves. Standalone - are emitted with a fake STANDALONE_COMMENT token identifier. - """ - for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER): - yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines) - - -@dataclass -class ProtoComment: - """Describes a piece of syntax that is a comment. - - It's not a :class:`blib2to3.pytree.Leaf` so that: - - * it can be cached (`Leaf` objects should not be reused more than once as - they store their lineno, column, prefix, and parent information); - * `newlines` and `consumed` fields are kept separate from the `value`. This - simplifies handling of special marker comments like ``# fmt: off/on``. 
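    # (editor's note, behavior summary, not part of the patch) What the
    # normalization in make_comment (defined below) does to a few inputs:
    #     "#comment"   -> "# comment"   (a space is inserted after the hash)
    #     "#!shebang"  -> "#!shebang"   ("!", ":", "#", "'", "%" prefixes kept)
    #     ""           -> "#"           (empty content becomes a bare hash)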
- """ - - type: int # token.COMMENT or STANDALONE_COMMENT - value: str # content of the comment - newlines: int # how many newlines before the comment - consumed: int # how many characters of the original leaf's prefix did we consume - - -@lru_cache(maxsize=4096) -def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]: - """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`.""" - result: List[ProtoComment] = [] - if not prefix or "#" not in prefix: - return result - - consumed = 0 - nlines = 0 - ignored_lines = 0 - for index, line in enumerate(re.split("\r?\n", prefix)): - consumed += len(line) + 1 # adding the length of the split '\n' - line = line.lstrip() - if not line: - nlines += 1 - if not line.startswith("#"): - # Escaped newlines outside of a comment are not really newlines at - # all. We treat a single-line comment following an escaped newline - # as a simple trailing comment. - if line.endswith("\\"): - ignored_lines += 1 - continue - - if index == ignored_lines and not is_endmarker: - comment_type = token.COMMENT # simple trailing comment - else: - comment_type = STANDALONE_COMMENT - comment = make_comment(line) - result.append( - ProtoComment( - type=comment_type, value=comment, newlines=nlines, consumed=consumed - ) - ) - nlines = 0 - return result - - -def make_comment(content: str) -> str: - """Return a consistently formatted comment from the given `content` string. - - All comments (except for "##", "#!", "#:", '#'", "#%%") should have a single - space between the hash sign and the content. - - If `content` didn't start with a hash sign, one is provided. - """ - content = content.rstrip() - if not content: - return "#" - - if content[0] == "#": - content = content[1:] - NON_BREAKING_SPACE = " " - if ( - content - and content[0] == NON_BREAKING_SPACE - and not content.lstrip().startswith("type:") - ): - content = " " + content[1:] # Replace NBSP by a simple space - if content and content[0] not in " !:#'%": - content = " " + content - return "#" + content - - -def transform_line( - line: Line, mode: Mode, features: Collection[Feature] = () -) -> Iterator[Line]: - """Transform a `line`, potentially splitting it into many lines. - - They should fit in the allotted `line_length` but might not be able to. - - `features` are syntactical features that may be used in the output. - """ - if line.is_comment: - yield line - return - - line_str = line_to_string(line) - - def init_st(ST: Type[StringTransformer]) -> StringTransformer: - """Initialize StringTransformer""" - return ST(mode.line_length, mode.string_normalization) - - string_merge = init_st(StringMerger) - string_paren_strip = init_st(StringParenStripper) - string_split = init_st(StringSplitter) - string_paren_wrap = init_st(StringParenWrapper) - - transformers: List[Transformer] - if ( - not line.contains_uncollapsable_type_comments() - and not line.should_split_rhs - and not line.magic_trailing_comma - and ( - is_line_short_enough(line, line_length=mode.line_length, line_str=line_str) - or line.contains_unsplittable_type_ignore() - ) - and not (line.inside_brackets and line.contains_standalone_comments()) - ): - # Only apply basic string preprocessing, since lines shouldn't be split here. 
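        # (editor's note, not part of the patch) This is the fast path: the
        # line already fits (or ends in an unsplittable `# type: ignore`), so
        # only the two non-splitting string cleanups may run here; every
        # splitting transformer is reserved for the branches below.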
- if mode.experimental_string_processing: - transformers = [string_merge, string_paren_strip] - else: - transformers = [] - elif line.is_def: - transformers = [left_hand_split] - else: - - def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]: - """Wraps calls to `right_hand_split`. - - The calls increasingly `omit` right-hand trailers (bracket pairs with - content), meaning the trailers get glued together to split on another - bracket pair instead. - """ - for omit in generate_trailers_to_omit(line, mode.line_length): - lines = list( - right_hand_split(line, mode.line_length, features, omit=omit) - ) - # Note: this check is only able to figure out if the first line of the - # *current* transformation fits in the line length. This is true only - # for simple cases. All others require running more transforms via - # `transform_line()`. This check doesn't know if those would succeed. - if is_line_short_enough(lines[0], line_length=mode.line_length): - yield from lines - return - - # All splits failed, best effort split with no omits. - # This mostly happens to multiline strings that are by definition - # reported as not fitting a single line, as well as lines that contain - # trailing commas (those have to be exploded). - yield from right_hand_split( - line, line_length=mode.line_length, features=features - ) - - if mode.experimental_string_processing: - if line.inside_brackets: - transformers = [ - string_merge, - string_paren_strip, - string_split, - delimiter_split, - standalone_comment_split, - string_paren_wrap, - rhs, - ] - else: - transformers = [ - string_merge, - string_paren_strip, - string_split, - string_paren_wrap, - rhs, - ] - else: - if line.inside_brackets: - transformers = [delimiter_split, standalone_comment_split, rhs] - else: - transformers = [rhs] - - for transform in transformers: - # We are accumulating lines in `result` because we might want to abort - # mission and return the original line in the end, or attempt a different - # split altogether. - try: - result = run_transformer(line, transform, mode, features, line_str=line_str) - except CannotTransform: - continue - else: - yield from result - break - - else: - yield line - - -@dataclass # type: ignore -class StringTransformer(ABC): - """ - An implementation of the Transformer protocol that relies on its - subclasses overriding the template methods `do_match(...)` and - `do_transform(...)`. - - This Transformer works exclusively on strings (for example, by merging - or splitting them). - - The following sections can be found among the docstrings of each concrete - StringTransformer subclass. - - Requirements: - Which requirements must be met of the given Line for this - StringTransformer to be applied? - - Transformations: - If the given Line meets all of the above requirements, which string - transformations can you expect to be applied to it by this - StringTransformer? - - Collaborations: - What contractual agreements does this StringTransformer have with other - StringTransfomers? Such collaborations should be eliminated/minimized - as much as possible. - """ - - line_length: int - normalize_strings: bool - __name__ = "StringTransformer" - - @abstractmethod - def do_match(self, line: Line) -> TMatchResult: - """ - Returns: - * Ok(string_idx) such that `line.leaves[string_idx]` is our target - string, if a match was able to be made. - OR - * Err(CannotTransform), if a match was not able to be made. 
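# --- Illustration: the transformer pipeline in transform_line() above -----
# A minimal sketch of the control flow (names are illustrative stand-ins,
# not Black's API): try each transformer in order, treat CannotTransform as
# "does not apply", and stop at the first one that produces a result.
from typing import Callable, Iterator, List, Sequence

class ToyCannotTransform(Exception):
    """Raised by a transform that does not apply to the given line."""

ToyTransformer = Callable[[str], Iterator[str]]

def apply_first(line: str, transformers: Sequence[ToyTransformer]) -> List[str]:
    for transform in transformers:
        try:
            result = list(transform(line))
        except ToyCannotTransform:
            continue  # this transform does not apply; try the next one
        return result
    return [line]  # nothing applied: emit the line unchanged

def shout(line: str) -> Iterator[str]:
    if not line.islower():
        raise ToyCannotTransform("nothing to do")
    yield line.upper()

assert apply_first("abc", [shout]) == ["ABC"]
assert apply_first("ABC", [shout]) == ["ABC"]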
- """ - - @abstractmethod - def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: - """ - Yields: - * Ok(new_line) where new_line is the new transformed line. - OR - * Err(CannotTransform) if the transformation failed for some reason. The - `do_match(...)` template method should usually be used to reject - the form of the given Line, but in some cases it is difficult to - know whether or not a Line meets the StringTransformer's - requirements until the transformation is already midway. - - Side Effects: - This method should NOT mutate @line directly, but it MAY mutate the - Line's underlying Node structure. (WARNING: If the underlying Node - structure IS altered, then this method should NOT be allowed to - yield an CannotTransform after that point.) - """ - - def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]: - """ - StringTransformer instances have a call signature that mirrors that of - the Transformer type. - - Raises: - CannotTransform(...) if the concrete StringTransformer class is unable - to transform @line. - """ - # Optimization to avoid calling `self.do_match(...)` when the line does - # not contain any string. - if not any(leaf.type == token.STRING for leaf in line.leaves): - raise CannotTransform("There are no strings in this line.") - - match_result = self.do_match(line) - - if isinstance(match_result, Err): - cant_transform = match_result.err() - raise CannotTransform( - f"The string transformer {self.__class__.__name__} does not recognize" - " this line as one that it can transform." - ) from cant_transform - - string_idx = match_result.ok() - - for line_result in self.do_transform(line, string_idx): - if isinstance(line_result, Err): - cant_transform = line_result.err() - raise CannotTransform( - "StringTransformer failed while attempting to transform string." - ) from cant_transform - line = line_result.ok() - yield line - - -@dataclass -class CustomSplit: - """A custom (i.e. manual) string split. - - A single CustomSplit instance represents a single substring. - - Examples: - Consider the following string: - ``` - "Hi there friend." - " This is a custom" - f" string {split}." - ``` - - This string will correspond to the following three CustomSplit instances: - ``` - CustomSplit(False, 16) - CustomSplit(False, 17) - CustomSplit(True, 16) - ``` - """ - - has_prefix: bool - break_idx: int - - -class CustomSplitMapMixin: - """ - This mixin class is used to map merged strings to a sequence of - CustomSplits, which will then be used to re-split the strings iff none of - the resultant substrings go over the configured max line length. - """ - - _Key = Tuple[StringID, str] - _CUSTOM_SPLIT_MAP: Dict[_Key, Tuple[CustomSplit, ...]] = defaultdict(tuple) - - @staticmethod - def _get_key(string: str) -> "CustomSplitMapMixin._Key": - """ - Returns: - A unique identifier that is used internally to map @string to a - group of custom splits. - """ - return (id(string), string) - - def add_custom_splits( - self, string: str, custom_splits: Iterable[CustomSplit] - ) -> None: - """Custom Split Map Setter Method - - Side Effects: - Adds a mapping from @string to the custom splits @custom_splits. - """ - key = self._get_key(string) - self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits) - - def pop_custom_splits(self, string: str) -> List[CustomSplit]: - """Custom Split Map Getter Method - - Returns: - * A list of the custom splits that are mapped to @string, if any - exist. - OR - * [], otherwise. 
- - Side Effects: - Deletes the mapping between @string and its associated custom - splits (which are returned to the caller). - """ - key = self._get_key(string) - - custom_splits = self._CUSTOM_SPLIT_MAP[key] - del self._CUSTOM_SPLIT_MAP[key] - - return list(custom_splits) - - def has_custom_splits(self, string: str) -> bool: - """ - Returns: - True iff @string is associated with a set of custom splits. - """ - key = self._get_key(string) - return key in self._CUSTOM_SPLIT_MAP - - -class StringMerger(CustomSplitMapMixin, StringTransformer): - """StringTransformer that merges strings together. - - Requirements: - (A) The line contains adjacent strings such that ALL of the validation checks - listed in StringMerger.__validate_msg(...)'s docstring pass. - OR - (B) The line contains a string which uses line continuation backslashes. - - Transformations: - Depending on which of the two requirements above where met, either: - - (A) The string group associated with the target string is merged. - OR - (B) All line-continuation backslashes are removed from the target string. - - Collaborations: - StringMerger provides custom split information to StringSplitter. - """ - - def do_match(self, line: Line) -> TMatchResult: - LL = line.leaves - - is_valid_index = is_valid_index_factory(LL) - - for (i, leaf) in enumerate(LL): - if ( - leaf.type == token.STRING - and is_valid_index(i + 1) - and LL[i + 1].type == token.STRING - ): - return Ok(i) - - if leaf.type == token.STRING and "\\\n" in leaf.value: - return Ok(i) - - return TErr("This line has no strings that need merging.") - - def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: - new_line = line - rblc_result = self.__remove_backslash_line_continuation_chars( - new_line, string_idx - ) - if isinstance(rblc_result, Ok): - new_line = rblc_result.ok() - - msg_result = self.__merge_string_group(new_line, string_idx) - if isinstance(msg_result, Ok): - new_line = msg_result.ok() - - if isinstance(rblc_result, Err) and isinstance(msg_result, Err): - msg_cant_transform = msg_result.err() - rblc_cant_transform = rblc_result.err() - cant_transform = CannotTransform( - "StringMerger failed to merge any strings in this line." - ) - - # Chain the errors together using `__cause__`. - msg_cant_transform.__cause__ = rblc_cant_transform - cant_transform.__cause__ = msg_cant_transform - - yield Err(cant_transform) - else: - yield Ok(new_line) - - @staticmethod - def __remove_backslash_line_continuation_chars( - line: Line, string_idx: int - ) -> TResult[Line]: - """ - Merge strings that were split across multiple lines using - line-continuation backslashes. - - Returns: - Ok(new_line), if @line contains backslash line-continuation - characters. - OR - Err(CannotTransform), otherwise. - """ - LL = line.leaves - - string_leaf = LL[string_idx] - if not ( - string_leaf.type == token.STRING - and "\\\n" in string_leaf.value - and not has_triple_quotes(string_leaf.value) - ): - return TErr( - f"String leaf {string_leaf} does not contain any backslash line" - " continuation characters." - ) - - new_line = line.clone() - new_line.comments = line.comments.copy() - append_leaves(new_line, line, LL) - - new_string_leaf = new_line.leaves[string_idx] - new_string_leaf.value = new_string_leaf.value.replace("\\\n", "") - - return Ok(new_line) - - def __merge_string_group(self, line: Line, string_idx: int) -> TResult[Line]: - """ - Merges string group (i.e. set of adjacent strings) where the first - string in the group is `line.leaves[string_idx]`. 
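# --- Illustration: the custom-split cache above ----------------------------
# Splits are keyed by (id(string), string) so that only the exact merged
# string object written by StringMerger retrieves its own records. A sketch
# with plain tuples standing in for CustomSplit objects.
from collections import defaultdict
from typing import Dict, Tuple

_SPLITS: Dict[Tuple[int, str], Tuple[int, ...]] = defaultdict(tuple)

def _key(s: str) -> Tuple[int, str]:
    return (id(s), s)

n = 50
merged = "x" * n                    # built at runtime, like a merged leaf value
_SPLITS[_key(merged)] = (20, 40)    # record two break indices
assert _SPLITS[_key(merged)] == (20, 40)
assert _SPLITS.pop(_key(merged)) == (20, 40)  # pop-style retrieval empties the cache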
-
-        Returns:
-            Ok(new_line), if ALL of the validation checks found in
-            __validate_msg(...) pass.
-                OR
-            Err(CannotTransform), otherwise.
-        """
-        LL = line.leaves
-
-        is_valid_index = is_valid_index_factory(LL)
-
-        vresult = self.__validate_msg(line, string_idx)
-        if isinstance(vresult, Err):
-            return vresult
-
-        # If the string group is wrapped inside an Atom node, we must make sure
-        # to later replace that Atom with our new (merged) string leaf.
-        atom_node = LL[string_idx].parent
-
-        # We will place BREAK_MARK in between every two substrings that we
-        # merge. We will then later go through our final result and use the
-        # various instances of BREAK_MARK we find to add the right values to
-        # the custom split map.
-        BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
-
-        QUOTE = LL[string_idx].value[-1]
-
-        def make_naked(string: str, string_prefix: str) -> str:
-            """Strip @string (i.e. make it a "naked" string)
-
-            Pre-conditions:
-                * assert_is_leaf_string(@string)
-
-            Returns:
-                A string that is identical to @string except that
-                @string_prefix has been stripped, the surrounding QUOTE
-                characters have been removed, and any remaining QUOTE
-                characters have been escaped.
-            """
-            assert_is_leaf_string(string)
-
-            RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
-            naked_string = string[len(string_prefix) + 1 : -1]
-            naked_string = re.sub(
-                "(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
-            )
-            return naked_string
-
-        # Holds the CustomSplit objects that will later be added to the custom
-        # split map.
-        custom_splits = []
-
-        # Temporary storage for the 'has_prefix' part of the CustomSplit objects.
-        prefix_tracker = []
-
-        # Sets the 'prefix' variable. This is the prefix that the final merged
-        # string will have.
-        next_str_idx = string_idx
-        prefix = ""
-        while (
-            not prefix
-            and is_valid_index(next_str_idx)
-            and LL[next_str_idx].type == token.STRING
-        ):
-            prefix = get_string_prefix(LL[next_str_idx].value)
-            next_str_idx += 1
-
-        # The next loop merges the string group. The final string will be
-        # contained in 'S'.
-        #
-        # The following convenience variables are used:
-        #
-        #   S: The final string.
-        #   NS: naked S (i.e. S with no quotes or prefix).
-        #   SS: the next string to be merged (i.e. LL[next_str_idx].value).
-        #   NSS: naked SS.
-        #
-        S = ""
-        NS = ""
-        num_of_strings = 0
-        next_str_idx = string_idx
-        while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
-            num_of_strings += 1
-
-            SS = LL[next_str_idx].value
-            next_prefix = get_string_prefix(SS)
-
-            # If this is an f-string group but this substring is not prefixed
-            # with 'f'...
-            if "f" in prefix and "f" not in next_prefix:
-                # Then we must escape any braces contained in this substring.
-                SS = re.sub(r"(\{|\})", r"\1\1", SS)
-
-            NSS = make_naked(SS, next_prefix)
-
-            has_prefix = bool(next_prefix)
-            prefix_tracker.append(has_prefix)
-
-            S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
-            NS = make_naked(S, prefix)
-
-            next_str_idx += 1
-
-        S_leaf = Leaf(token.STRING, S)
-        if self.normalize_strings:
-            normalize_string_quotes(S_leaf)
-
-        # Fill the 'custom_splits' list with the appropriate CustomSplit objects.
-        temp_string = S_leaf.value[len(prefix) + 1 : -1]
-        for has_prefix in prefix_tracker:
-            mark_idx = temp_string.find(BREAK_MARK)
-            assert (
-                mark_idx >= 0
-            ), "Logic error while filling the custom string breakpoint cache."
-
-            temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
-            breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
-            custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))
-
-        string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))
-
-        if atom_node is not None:
-            replace_child(atom_node, string_leaf)
-
-        # Build the final line ('new_line') that this method will later return.
-        new_line = line.clone()
-        for (i, leaf) in enumerate(LL):
-            if i == string_idx:
-                new_line.append(string_leaf)
-
-            if string_idx <= i < string_idx + num_of_strings:
-                for comment_leaf in line.comments_after(LL[i]):
-                    new_line.append(comment_leaf, preformatted=True)
-                continue
-
-            append_leaves(new_line, line, [leaf])
-
-        self.add_custom_splits(string_leaf.value, custom_splits)
-        return Ok(new_line)
-
-    @staticmethod
-    def __validate_msg(line: Line, string_idx: int) -> TResult[None]:
-        """Validate (M)erge (S)tring (G)roup
-
-        Transform-time string validation logic for __merge_string_group(...).
-
-        Returns:
-            * Ok(None), if ALL validation checks (listed below) pass.
-                OR
-            * Err(CannotTransform), if any of the following are true:
-                - The target string group does not contain ANY stand-alone comments.
-                - The target string is not in a string group (i.e. it has no
-                  adjacent strings).
-                - The string group has more than one inline comment.
-                - The string group has an inline comment that appears to be a pragma.
-                - The set of all string prefixes in the string group is of
-                  length greater than one and is not equal to {"", "f"}.
-                - The string group consists of raw strings.
-        """
-        # We first check for "inner" stand-alone comments (i.e. stand-alone
-        # comments that have a string leaf before them AND after them).
-        for inc in [1, -1]:
-            i = string_idx
-            found_sa_comment = False
-            is_valid_index = is_valid_index_factory(line.leaves)
-            while is_valid_index(i) and line.leaves[i].type in [
-                token.STRING,
-                STANDALONE_COMMENT,
-            ]:
-                if line.leaves[i].type == STANDALONE_COMMENT:
-                    found_sa_comment = True
-                elif found_sa_comment:
-                    return TErr(
-                        "StringMerger does NOT merge string groups which contain "
-                        "stand-alone comments."
- ) - - i += inc - - num_of_inline_string_comments = 0 - set_of_prefixes = set() - num_of_strings = 0 - for leaf in line.leaves[string_idx:]: - if leaf.type != token.STRING: - # If the string group is trailed by a comma, we count the - # comments trailing the comma to be one of the string group's - # comments. - if leaf.type == token.COMMA and id(leaf) in line.comments: - num_of_inline_string_comments += 1 - break - - if has_triple_quotes(leaf.value): - return TErr("StringMerger does NOT merge multiline strings.") - - num_of_strings += 1 - prefix = get_string_prefix(leaf.value) - if "r" in prefix: - return TErr("StringMerger does NOT merge raw strings.") - - set_of_prefixes.add(prefix) - - if id(leaf) in line.comments: - num_of_inline_string_comments += 1 - if contains_pragma_comment(line.comments[id(leaf)]): - return TErr("Cannot merge strings which have pragma comments.") - - if num_of_strings < 2: - return TErr( - f"Not enough strings to merge (num_of_strings={num_of_strings})." - ) - - if num_of_inline_string_comments > 1: - return TErr( - f"Too many inline string comments ({num_of_inline_string_comments})." - ) - - if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}: - return TErr(f"Too many different prefixes ({set_of_prefixes}).") - - return Ok(None) - - -class StringParenStripper(StringTransformer): - """StringTransformer that strips surrounding parentheses from strings. - - Requirements: - The line contains a string which is surrounded by parentheses and: - - The target string is NOT the only argument to a function call. - - The target string is NOT a "pointless" string. - - If the target string contains a PERCENT, the brackets are not - preceeded or followed by an operator with higher precedence than - PERCENT. - - Transformations: - The parentheses mentioned in the 'Requirements' section are stripped. - - Collaborations: - StringParenStripper has its own inherent usefulness, but it is also - relied on to clean up the parentheses created by StringParenWrapper (in - the event that they are no longer needed). - """ - - def do_match(self, line: Line) -> TMatchResult: - LL = line.leaves - - is_valid_index = is_valid_index_factory(LL) - - for (idx, leaf) in enumerate(LL): - # Should be a string... - if leaf.type != token.STRING: - continue - - # If this is a "pointless" string... - if ( - leaf.parent - and leaf.parent.parent - and leaf.parent.parent.type == syms.simple_stmt - ): - continue - - # Should be preceded by a non-empty LPAR... - if ( - not is_valid_index(idx - 1) - or LL[idx - 1].type != token.LPAR - or is_empty_lpar(LL[idx - 1]) - ): - continue - - # That LPAR should NOT be preceded by a function name or a closing - # bracket (which could be a function which returns a function or a - # list/dictionary that contains a function)... - if is_valid_index(idx - 2) and ( - LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS - ): - continue - - string_idx = idx - - # Skip the string trailer, if one exists. 
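# --- Illustration: the BREAK_MARK bookkeeping in __merge_string_group -----
# On plain data: a sentinel marks every merge point, its offsets are then
# harvested to record where the original substrings began, and the sentinel
# is stripped from the final value. Sketch only.
BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
merged = "one " + BREAK_MARK + "two " + BREAK_MARK + "three"
breakpoints = []
temp = merged
while BREAK_MARK in temp:
    idx = temp.find(BREAK_MARK)
    breakpoints.append(idx)  # offset relative to the previous mark
    temp = temp[idx + len(BREAK_MARK) :]
assert breakpoints == [4, 4]
assert merged.replace(BREAK_MARK, "") == "one two three"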
- string_parser = StringParser() - next_idx = string_parser.parse(LL, string_idx) - - # if the leaves in the parsed string include a PERCENT, we need to - # make sure the initial LPAR is NOT preceded by an operator with - # higher or equal precedence to PERCENT - if is_valid_index(idx - 2): - # mypy can't quite follow unless we name this - before_lpar = LL[idx - 2] - if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and ( - ( - before_lpar.type - in { - token.STAR, - token.AT, - token.SLASH, - token.DOUBLESLASH, - token.PERCENT, - token.TILDE, - token.DOUBLESTAR, - token.AWAIT, - token.LSQB, - token.LPAR, - } - ) - or ( - # only unary PLUS/MINUS - before_lpar.parent - and before_lpar.parent.type == syms.factor - and (before_lpar.type in {token.PLUS, token.MINUS}) - ) - ): - continue - - # Should be followed by a non-empty RPAR... - if ( - is_valid_index(next_idx) - and LL[next_idx].type == token.RPAR - and not is_empty_rpar(LL[next_idx]) - ): - # That RPAR should NOT be followed by anything with higher - # precedence than PERCENT - if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in { - token.DOUBLESTAR, - token.LSQB, - token.LPAR, - token.DOT, - }: - continue - - return Ok(string_idx) - - return TErr("This line has no strings wrapped in parens.") - - def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: - LL = line.leaves - - string_parser = StringParser() - rpar_idx = string_parser.parse(LL, string_idx) - - for leaf in (LL[string_idx - 1], LL[rpar_idx]): - if line.comments_after(leaf): - yield TErr( - "Will not strip parentheses which have comments attached to them." - ) - return - - new_line = line.clone() - new_line.comments = line.comments.copy() - try: - append_leaves(new_line, line, LL[: string_idx - 1]) - except BracketMatchError: - # HACK: I believe there is currently a bug somewhere in - # right_hand_split() that is causing brackets to not be tracked - # properly by a shared BracketTracker. - append_leaves(new_line, line, LL[: string_idx - 1], preformatted=True) - - string_leaf = Leaf(token.STRING, LL[string_idx].value) - LL[string_idx - 1].remove() - replace_child(LL[string_idx], string_leaf) - new_line.append(string_leaf) - - append_leaves( - new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :] - ) - - LL[rpar_idx].remove() - - yield Ok(new_line) - - -class BaseStringSplitter(StringTransformer): - """ - Abstract class for StringTransformers which transform a Line's strings by splitting - them or placing them on their own lines where necessary to avoid going over - the configured line length. - - Requirements: - * The target string value is responsible for the line going over the - line length limit. It follows that after all of black's other line - split methods have been exhausted, this line (or one of the resulting - lines after all line splits are performed) would still be over the - line_length limit unless we split this string. - AND - * The target string is NOT a "pointless" string (i.e. a string that has - no parent or siblings). - AND - * The target string is not followed by an inline comment that appears - to be a pragma. - AND - * The target string is not a multiline (i.e. triple-quote) string. - """ - - @abstractmethod - def do_splitter_match(self, line: Line) -> TMatchResult: - """ - BaseStringSplitter asks its clients to override this method instead of - `StringTransformer.do_match(...)`. - - Follows the same protocol as `StringTransformer.do_match(...)`. 
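# --- Illustration: why StringParenStripper's PERCENT guard above matters --
# '*' has the same precedence as '%' and both are left-associative, so the
# parentheses around a '%'-formatting expression can be load-bearing.
ok = 2 * ("%d" % 3)        # format first, then repeat -> "33"
assert ok == "33"
try:
    bad = 2 * "%d" % 3     # parses as (2 * "%d") % 3, i.e. "%d%d" % 3
except TypeError:
    bad = None             # one value for two placeholders -> TypeError
assert bad is None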
- - Refer to `help(StringTransformer.do_match)` for more information. - """ - - def do_match(self, line: Line) -> TMatchResult: - match_result = self.do_splitter_match(line) - if isinstance(match_result, Err): - return match_result - - string_idx = match_result.ok() - vresult = self.__validate(line, string_idx) - if isinstance(vresult, Err): - return vresult - - return match_result - - def __validate(self, line: Line, string_idx: int) -> TResult[None]: - """ - Checks that @line meets all of the requirements listed in this classes' - docstring. Refer to `help(BaseStringSplitter)` for a detailed - description of those requirements. - - Returns: - * Ok(None), if ALL of the requirements are met. - OR - * Err(CannotTransform), if ANY of the requirements are NOT met. - """ - LL = line.leaves - - string_leaf = LL[string_idx] - - max_string_length = self.__get_max_string_length(line, string_idx) - if len(string_leaf.value) <= max_string_length: - return TErr( - "The string itself is not what is causing this line to be too long." - ) - - if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [ - token.STRING, - token.NEWLINE, - ]: - return TErr( - f"This string ({string_leaf.value}) appears to be pointless (i.e. has" - " no parent)." - ) - - if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment( - line.comments[id(line.leaves[string_idx])] - ): - return TErr( - "Line appears to end with an inline pragma comment. Splitting the line" - " could modify the pragma's behavior." - ) - - if has_triple_quotes(string_leaf.value): - return TErr("We cannot split multiline strings.") - - return Ok(None) - - def __get_max_string_length(self, line: Line, string_idx: int) -> int: - """ - Calculates the max string length used when attempting to determine - whether or not the target string is responsible for causing the line to - go over the line length limit. - - WARNING: This method is tightly coupled to both StringSplitter and - (especially) StringParenWrapper. There is probably a better way to - accomplish what is being done here. - - Returns: - max_string_length: such that `line.leaves[string_idx].value > - max_string_length` implies that the target string IS responsible - for causing this line to exceed the line length limit. - """ - LL = line.leaves - - is_valid_index = is_valid_index_factory(LL) - - # We use the shorthand "WMA4" in comments to abbreviate "We must - # account for". When giving examples, we use STRING to mean some/any - # valid string. - # - # Finally, we use the following convenience variables: - # - # P: The leaf that is before the target string leaf. - # N: The leaf that is after the target string leaf. - # NN: The leaf that is after N. - - # WMA4 the whitespace at the beginning of the line. - offset = line.depth * 4 - - if is_valid_index(string_idx - 1): - p_idx = string_idx - 1 - if ( - LL[string_idx - 1].type == token.LPAR - and LL[string_idx - 1].value == "" - and string_idx >= 2 - ): - # If the previous leaf is an empty LPAR placeholder, we should skip it. - p_idx -= 1 - - P = LL[p_idx] - if P.type == token.PLUS: - # WMA4 a space and a '+' character (e.g. `+ STRING`). - offset += 2 - - if P.type == token.COMMA: - # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`]. - offset += 3 - - if P.type in [token.COLON, token.EQUAL, token.NAME]: - # This conditional branch is meant to handle dictionary keys, - # variable assignments, 'return STRING' statement lines, and - # 'else STRING' ternary expression lines. 
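# --- Illustration: the "WMA4" ("we must account for") bookkeeping above ---
# A toy version of the budget accounting (toy_offset is hypothetical):
# spend line-length budget on indentation and on the leaves surrounding the
# string before measuring the string itself.
def toy_offset(depth: int, prev: str, trailing_comma: bool) -> int:
    offset = depth * 4          # leading whitespace of the line
    if prev == "+":
        offset += 2             # WMA4 a space and a '+' (`+ STRING`)
    elif prev == ",":
        offset += 3             # WMA4 a space, comma, and bracket (`), STRING`)
    elif prev in (":", "=", "name"):
        offset += 1             # WMA4 the single space after them
    if trailing_comma:
        offset += 1             # WMA4 the comma after the string
    return offset

assert toy_offset(1, "+", True) == 4 + 2 + 1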
-
-                # WMA4 a single space.
-                offset += 1
-
-                # WMA4 the lengths of any leaves that came before that space,
-                # but after any closing bracket before that space.
-                for leaf in reversed(LL[: p_idx + 1]):
-                    offset += len(str(leaf))
-                    if leaf.type in CLOSING_BRACKETS:
-                        break
-
-        if is_valid_index(string_idx + 1):
-            N = LL[string_idx + 1]
-            if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
-                # If the next leaf is an empty RPAR placeholder, we should skip it.
-                N = LL[string_idx + 2]
-
-            if N.type == token.COMMA:
-                # WMA4 a single comma at the end of the string (e.g `STRING,`).
-                offset += 1
-
-            if is_valid_index(string_idx + 2):
-                NN = LL[string_idx + 2]
-
-                if N.type == token.DOT and NN.type == token.NAME:
-                    # This conditional branch is meant to handle method calls invoked
-                    # off of a string literal up to and including the LPAR character.
-
-                    # WMA4 the '.' character.
-                    offset += 1
-
-                    if (
-                        is_valid_index(string_idx + 3)
-                        and LL[string_idx + 3].type == token.LPAR
-                    ):
-                        # WMA4 the left parenthesis character.
-                        offset += 1
-
-                    # WMA4 the length of the method's name.
-                    offset += len(NN.value)
-
-        has_comments = False
-        for comment_leaf in line.comments_after(LL[string_idx]):
-            if not has_comments:
-                has_comments = True
-                # WMA4 two spaces before the '#' character.
-                offset += 2
-
-            # WMA4 the length of the inline comment.
-            offset += len(comment_leaf.value)
-
-        max_string_length = self.line_length - offset
-        return max_string_length
-
-
-class StringSplitter(CustomSplitMapMixin, BaseStringSplitter):
-    """
-    StringTransformer that splits "atom" strings (i.e. strings which exist on
-    lines by themselves).
-
-    Requirements:
-        * The line consists ONLY of a single string (with the exception of a
-          '+' symbol which MAY exist at the start of the line), MAYBE a string
-          trailer, and MAYBE a trailing comma.
-            AND
-        * All of the requirements listed in BaseStringSplitter's docstring.
-
-    Transformations:
-        The string mentioned in the 'Requirements' section is split into as
-        many substrings as necessary to adhere to the configured line length.
-
-        In the final set of substrings, no substring should be smaller than
-        MIN_SUBSTR_SIZE characters.
-
-        The string will ONLY be split on spaces (i.e. each new substring should
-        start with a space). Note that the string will NOT be split on a space
-        which is escaped with a backslash.
-
-        If the string is an f-string, it will NOT be split in the middle of an
-        f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x
-        else bar()} is an f-expression).
-
-        If the string that is being split has an associated set of custom split
-        records and those custom splits will NOT result in any line going over
-        the configured line length, those custom splits are used. Otherwise the
-        string is split as late as possible (from left-to-right) while still
-        adhering to the transformation rules listed above.
-
-    Collaborations:
-        StringSplitter relies on StringMerger to construct the appropriate
-        CustomSplit objects and add them to the custom split map.
-    """
-
-    MIN_SUBSTR_SIZE = 6
-    # Matches an "f-expression" (e.g. {var}) that might be found in an f-string.
-    RE_FEXPR = r"""
-    (?<!\{) (?:\{\{)* \{ (?!\{)
-        (?:
-            [^\{\}]
-            | \{\{
-            | \}\}
-        )+?
-    (?<!\}) \} (?:\}\})* (?!\})
-    """
-
-    def do_splitter_match(self, line: Line) -> TMatchResult:
-        LL = line.leaves
-
-        is_valid_index = is_valid_index_factory(LL)
-
-        idx = 0
-
-        # The first leaf MAY be a '+' symbol...
-        if is_valid_index(idx) and LL[idx].type == token.PLUS:
-            idx += 1
-
-        # The next/first leaf MAY be an empty LPAR...
-        if is_valid_index(idx) and is_empty_lpar(LL[idx]):
-            idx += 1
-
-        # The next/first leaf MUST be a string...
- if not is_valid_index(idx) or LL[idx].type != token.STRING: - return TErr("Line does not start with a string.") - - string_idx = idx - - # Skip the string trailer, if one exists. - string_parser = StringParser() - idx = string_parser.parse(LL, string_idx) - - # That string MAY be followed by an empty RPAR... - if is_valid_index(idx) and is_empty_rpar(LL[idx]): - idx += 1 - - # That string / empty RPAR leaf MAY be followed by a comma... - if is_valid_index(idx) and LL[idx].type == token.COMMA: - idx += 1 - - # But no more leaves are allowed... - if is_valid_index(idx): - return TErr("This line does not end with a string.") - - return Ok(string_idx) - - def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: - LL = line.leaves - - QUOTE = LL[string_idx].value[-1] - - is_valid_index = is_valid_index_factory(LL) - insert_str_child = insert_str_child_factory(LL[string_idx]) - - prefix = get_string_prefix(LL[string_idx].value) - - # We MAY choose to drop the 'f' prefix from substrings that don't - # contain any f-expressions, but ONLY if the original f-string - # contains at least one f-expression. Otherwise, we will alter the AST - # of the program. - drop_pointless_f_prefix = ("f" in prefix) and re.search( - self.RE_FEXPR, LL[string_idx].value, re.VERBOSE - ) - - first_string_line = True - starts_with_plus = LL[0].type == token.PLUS - - def line_needs_plus() -> bool: - return first_string_line and starts_with_plus - - def maybe_append_plus(new_line: Line) -> None: - """ - Side Effects: - If @line starts with a plus and this is the first line we are - constructing, this function appends a PLUS leaf to @new_line - and replaces the old PLUS leaf in the node structure. Otherwise - this function does nothing. - """ - if line_needs_plus(): - plus_leaf = Leaf(token.PLUS, "+") - replace_child(LL[0], plus_leaf) - new_line.append(plus_leaf) - - ends_with_comma = ( - is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA - ) - - def max_last_string() -> int: - """ - Returns: - The max allowed length of the string value used for the last - line we will construct. - """ - result = self.line_length - result -= line.depth * 4 - result -= 1 if ends_with_comma else 0 - result -= 2 if line_needs_plus() else 0 - return result - - # --- Calculate Max Break Index (for string value) - # We start with the line length limit - max_break_idx = self.line_length - # The last index of a string of length N is N-1. - max_break_idx -= 1 - # Leading whitespace is not present in the string value (e.g. Leaf.value). - max_break_idx -= line.depth * 4 - if max_break_idx < 0: - yield TErr( - f"Unable to split {LL[string_idx].value} at such high of a line depth:" - f" {line.depth}" - ) - return - - # Check if StringMerger registered any custom splits. - custom_splits = self.pop_custom_splits(LL[string_idx].value) - # We use them ONLY if none of them would produce lines that exceed the - # line limit. - use_custom_breakpoints = bool( - custom_splits - and all(csplit.break_idx <= max_break_idx for csplit in custom_splits) - ) - - # Temporary storage for the remaining chunk of the string line that - # can't fit onto the line currently being constructed. - rest_value = LL[string_idx].value - - def more_splits_should_be_made() -> bool: - """ - Returns: - True iff `rest_value` (the remaining string value from the last - split), should be split again. 
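# --- Illustration: the RE_FEXPR matcher restored above, in action ---------
# Assuming the pattern as reconstructed in this patch: a real replacement
# field matches, while doubled (escaped) braces do not.
import re

RE_FEXPR = r"""
(?<!\{) (?:\{\{)* \{ (?!\{)
    (?:
        [^\{\}]
        | \{\{
        | \}\}
    )+?
(?<!\}) \} (?:\}\})* (?!\})
"""

assert re.search(RE_FEXPR, "prefix {var} suffix", re.VERBOSE)
assert not re.search(RE_FEXPR, "no fields here: {{escaped}}", re.VERBOSE)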
- """ - if use_custom_breakpoints: - return len(custom_splits) > 1 - else: - return len(rest_value) > max_last_string() - - string_line_results: List[Ok[Line]] = [] - while more_splits_should_be_made(): - if use_custom_breakpoints: - # Custom User Split (manual) - csplit = custom_splits.pop(0) - break_idx = csplit.break_idx - else: - # Algorithmic Split (automatic) - max_bidx = max_break_idx - 2 if line_needs_plus() else max_break_idx - maybe_break_idx = self.__get_break_idx(rest_value, max_bidx) - if maybe_break_idx is None: - # If we are unable to algorithmically determine a good split - # and this string has custom splits registered to it, we - # fall back to using them--which means we have to start - # over from the beginning. - if custom_splits: - rest_value = LL[string_idx].value - string_line_results = [] - first_string_line = True - use_custom_breakpoints = True - continue - - # Otherwise, we stop splitting here. - break - - break_idx = maybe_break_idx - - # --- Construct `next_value` - next_value = rest_value[:break_idx] + QUOTE - if ( - # Are we allowed to try to drop a pointless 'f' prefix? - drop_pointless_f_prefix - # If we are, will we be successful? - and next_value != self.__normalize_f_string(next_value, prefix) - ): - # If the current custom split did NOT originally use a prefix, - # then `csplit.break_idx` will be off by one after removing - # the 'f' prefix. - break_idx = ( - break_idx + 1 - if use_custom_breakpoints and not csplit.has_prefix - else break_idx - ) - next_value = rest_value[:break_idx] + QUOTE - next_value = self.__normalize_f_string(next_value, prefix) - - # --- Construct `next_leaf` - next_leaf = Leaf(token.STRING, next_value) - insert_str_child(next_leaf) - self.__maybe_normalize_string_quotes(next_leaf) - - # --- Construct `next_line` - next_line = line.clone() - maybe_append_plus(next_line) - next_line.append(next_leaf) - string_line_results.append(Ok(next_line)) - - rest_value = prefix + QUOTE + rest_value[break_idx:] - first_string_line = False - - yield from string_line_results - - if drop_pointless_f_prefix: - rest_value = self.__normalize_f_string(rest_value, prefix) - - rest_leaf = Leaf(token.STRING, rest_value) - insert_str_child(rest_leaf) - - # NOTE: I could not find a test case that verifies that the following - # line is actually necessary, but it seems to be. Otherwise we risk - # not normalizing the last substring, right? - self.__maybe_normalize_string_quotes(rest_leaf) - - last_line = line.clone() - maybe_append_plus(last_line) - - # If there are any leaves to the right of the target string... - if is_valid_index(string_idx + 1): - # We use `temp_value` here to determine how long the last line - # would be if we were to append all the leaves to the right of the - # target string to the last string line. - temp_value = rest_value - for leaf in LL[string_idx + 1 :]: - temp_value += str(leaf) - if leaf.type == token.LPAR: - break - - # Try to fit them all on the same line with the last substring... - if ( - len(temp_value) <= max_last_string() - or LL[string_idx + 1].type == token.COMMA - ): - last_line.append(rest_leaf) - append_leaves(last_line, line, LL[string_idx + 1 :]) - yield Ok(last_line) - # Otherwise, place the last substring on one line and everything - # else on a line below that... - else: - last_line.append(rest_leaf) - yield Ok(last_line) - - non_string_line = line.clone() - append_leaves(non_string_line, line, LL[string_idx + 1 :]) - yield Ok(non_string_line) - # Else the target string was the last leaf... 
- else: - last_line.append(rest_leaf) - last_line.comments = line.comments.copy() - yield Ok(last_line) - - def __get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]: - """ - This method contains the algorithm that StringSplitter uses to - determine which character to split each string at. - - Args: - @string: The substring that we are attempting to split. - @max_break_idx: The ideal break index. We will return this value if it - meets all the necessary conditions. In the likely event that it - doesn't we will try to find the closest index BELOW @max_break_idx - that does. If that fails, we will expand our search by also - considering all valid indices ABOVE @max_break_idx. - - Pre-Conditions: - * assert_is_leaf_string(@string) - * 0 <= @max_break_idx < len(@string) - - Returns: - break_idx, if an index is able to be found that meets all of the - conditions listed in the 'Transformations' section of this classes' - docstring. - OR - None, otherwise. - """ - is_valid_index = is_valid_index_factory(string) - - assert is_valid_index(max_break_idx) - assert_is_leaf_string(string) - - _fexpr_slices: Optional[List[Tuple[Index, Index]]] = None - - def fexpr_slices() -> Iterator[Tuple[Index, Index]]: - """ - Yields: - All ranges of @string which, if @string were to be split there, - would result in the splitting of an f-expression (which is NOT - allowed). - """ - nonlocal _fexpr_slices - - if _fexpr_slices is None: - _fexpr_slices = [] - for match in re.finditer(self.RE_FEXPR, string, re.VERBOSE): - _fexpr_slices.append(match.span()) - - yield from _fexpr_slices - - is_fstring = "f" in get_string_prefix(string) - - def breaks_fstring_expression(i: Index) -> bool: - """ - Returns: - True iff returning @i would result in the splitting of an - f-expression (which is NOT allowed). - """ - if not is_fstring: - return False - - for (start, end) in fexpr_slices(): - if start <= i < end: - return True - - return False - - def passes_all_checks(i: Index) -> bool: - """ - Returns: - True iff ALL of the conditions listed in the 'Transformations' - section of this classes' docstring would be be met by returning @i. - """ - is_space = string[i] == " " - - is_not_escaped = True - j = i - 1 - while is_valid_index(j) and string[j] == "\\": - is_not_escaped = not is_not_escaped - j -= 1 - - is_big_enough = ( - len(string[i:]) >= self.MIN_SUBSTR_SIZE - and len(string[:i]) >= self.MIN_SUBSTR_SIZE - ) - return ( - is_space - and is_not_escaped - and is_big_enough - and not breaks_fstring_expression(i) - ) - - # First, we check all indices BELOW @max_break_idx. - break_idx = max_break_idx - while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx): - break_idx -= 1 - - if not passes_all_checks(break_idx): - # If that fails, we check all indices ABOVE @max_break_idx. - # - # If we are able to find a valid index here, the next line is going - # to be longer than the specified line length, but it's probably - # better than doing nothing at all. 
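# --- Illustration: the two-phase search of __get_break_idx above ----------
# Simplified to plain strings (toy_break_idx is hypothetical): scan left
# from the ideal index for a usable break, then scan right as a fallback.
# "Usable" here is just an unescaped space; the real check also enforces
# MIN_SUBSTR_SIZE and f-expression boundaries.
from typing import Optional

def toy_break_idx(s: str, max_break_idx: int) -> Optional[int]:
    def ok(i: int) -> bool:
        return 0 <= i < len(s) and s[i] == " " and (i == 0 or s[i - 1] != "\\")

    i = max_break_idx
    while i > 0 and not ok(i):  # phase 1: at or below the ideal index
        i -= 1
    if not ok(i):
        i = max_break_idx + 1
        while i < len(s) and not ok(i):  # phase 2: overshoot rather than give up
            i += 1
    return i if ok(i) else None

assert toy_break_idx("hello world", 8) == 5
assert toy_break_idx("helloworld hi", 5) == 10  # found above the ideal index
assert toy_break_idx("nospaceshere", 5) is None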
-            break_idx = max_break_idx + 1
-            while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
-                break_idx += 1
-
-            if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
-                return None
-
-        return break_idx
-
-    def __maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
-        if self.normalize_strings:
-            normalize_string_quotes(leaf)
-
-    def __normalize_f_string(self, string: str, prefix: str) -> str:
-        """
-        Pre-Conditions:
-            * assert_is_leaf_string(@string)
-
-        Returns:
-            * If @string is an f-string that contains no f-expressions, we
-            return a string identical to @string except that the 'f' prefix
-            has been stripped and all double braces (i.e. '{{' or '}}') have
-            been normalized (i.e. turned into '{' or '}').
-                OR
-            * Otherwise, we return @string.
-        """
-        assert_is_leaf_string(string)
-
-        if "f" in prefix and not re.search(self.RE_FEXPR, string, re.VERBOSE):
-            new_prefix = prefix.replace("f", "")
-
-            temp = string[len(prefix) :]
-            temp = re.sub(r"\{\{", "{", temp)
-            temp = re.sub(r"\}\}", "}", temp)
-            new_string = temp
-
-            return f"{new_prefix}{new_string}"
-        else:
-            return string
-
-
-class StringParenWrapper(CustomSplitMapMixin, BaseStringSplitter):
-    """
-    StringTransformer that splits non-"atom" strings (i.e. strings that do not
-    exist on lines by themselves).
-
-    Requirements:
-        All of the requirements listed in BaseStringSplitter's docstring in
-        addition to the requirements listed below:
-
-        * The line is a return/yield statement, which returns/yields a string.
-            OR
-        * The line is part of a ternary expression (e.g. `x = y if cond else
-          z`) such that the line starts with `else <string>`, where <string> is
-          some string.
-            OR
-        * The line is an assert statement, which ends with a string.
-            OR
-        * The line is an assignment statement (e.g. `x = <string>` or `x +=
-          <string>`) such that the variable is being assigned the value of some
-          string.
-            OR
-        * The line is a dictionary key assignment where some valid key is being
-          assigned the value of some string.
-
-    Transformations:
-        The chosen string is wrapped in parentheses and then split at the LPAR.
-
-        We then have one line which ends with an LPAR and another line that
-        starts with the chosen string. The latter line is then split again at
-        the RPAR. This results in the RPAR (and possibly a trailing comma)
-        being placed on its own line.
-
-        NOTE: If any leaves exist to the right of the chosen string (except
-        for a trailing comma, which would be placed after the RPAR), those
-        leaves are placed inside the parentheses. In effect, the chosen
-        string is not necessarily being "wrapped" by parentheses. We can,
-        however, count on the LPAR being placed directly before the chosen
-        string.
-
-        In other words, StringParenWrapper creates "atom" strings. These
-        can then be split again by StringSplitter, if necessary.
-
-    Collaborations:
-        In the event that a string line split by StringParenWrapper is
-        changed such that it no longer needs to be given its own line,
-        StringParenWrapper relies on StringParenStripper to clean up the
-        parentheses it created.
-    """
-
-    def do_splitter_match(self, line: Line) -> TMatchResult:
-        LL = line.leaves
-
-        string_idx = (
-            self._return_match(LL)
-            or self._else_match(LL)
-            or self._assert_match(LL)
-            or self._assign_match(LL)
-            or self._dict_match(LL)
-        )
-
-        if string_idx is not None:
-            string_value = line.leaves[string_idx].value
-            # If the string has no spaces...
-            if " " not in string_value:
-                # And will still violate the line length limit when split...
- max_string_length = self.line_length - ((line.depth + 1) * 4) - if len(string_value) > max_string_length: - # And has no associated custom splits... - if not self.has_custom_splits(string_value): - # Then we should NOT put this string on its own line. - return TErr( - "We do not wrap long strings in parentheses when the" - " resultant line would still be over the specified line" - " length and can't be split further by StringSplitter." - ) - return Ok(string_idx) - - return TErr("This line does not contain any non-atomic strings.") - - @staticmethod - def _return_match(LL: List[Leaf]) -> Optional[int]: - """ - Returns: - string_idx such that @LL[string_idx] is equal to our target (i.e. - matched) string, if this line matches the return/yield statement - requirements listed in the 'Requirements' section of this classes' - docstring. - OR - None, otherwise. - """ - # If this line is apart of a return/yield statement and the first leaf - # contains either the "return" or "yield" keywords... - if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[ - 0 - ].value in ["return", "yield"]: - is_valid_index = is_valid_index_factory(LL) - - idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1 - # The next visible leaf MUST contain a string... - if is_valid_index(idx) and LL[idx].type == token.STRING: - return idx - - return None - - @staticmethod - def _else_match(LL: List[Leaf]) -> Optional[int]: - """ - Returns: - string_idx such that @LL[string_idx] is equal to our target (i.e. - matched) string, if this line matches the ternary expression - requirements listed in the 'Requirements' section of this classes' - docstring. - OR - None, otherwise. - """ - # If this line is apart of a ternary expression and the first leaf - # contains the "else" keyword... - if ( - parent_type(LL[0]) == syms.test - and LL[0].type == token.NAME - and LL[0].value == "else" - ): - is_valid_index = is_valid_index_factory(LL) - - idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1 - # The next visible leaf MUST contain a string... - if is_valid_index(idx) and LL[idx].type == token.STRING: - return idx - - return None - - @staticmethod - def _assert_match(LL: List[Leaf]) -> Optional[int]: - """ - Returns: - string_idx such that @LL[string_idx] is equal to our target (i.e. - matched) string, if this line matches the assert statement - requirements listed in the 'Requirements' section of this classes' - docstring. - OR - None, otherwise. - """ - # If this line is apart of an assert statement and the first leaf - # contains the "assert" keyword... - if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert": - is_valid_index = is_valid_index_factory(LL) - - for (i, leaf) in enumerate(LL): - # We MUST find a comma... - if leaf.type == token.COMMA: - idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 - - # That comma MUST be followed by a string... - if is_valid_index(idx) and LL[idx].type == token.STRING: - string_idx = idx - - # Skip the string trailer, if one exists. - string_parser = StringParser() - idx = string_parser.parse(LL, string_idx) - - # But no more leaves are allowed... - if not is_valid_index(idx): - return string_idx - - return None - - @staticmethod - def _assign_match(LL: List[Leaf]) -> Optional[int]: - """ - Returns: - string_idx such that @LL[string_idx] is equal to our target (i.e. - matched) string, if this line matches the assignment statement - requirements listed in the 'Requirements' section of this classes' - docstring. - OR - None, otherwise. 
- """ - # If this line is apart of an expression statement or is a function - # argument AND the first leaf contains a variable name... - if ( - parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power] - and LL[0].type == token.NAME - ): - is_valid_index = is_valid_index_factory(LL) - - for (i, leaf) in enumerate(LL): - # We MUST find either an '=' or '+=' symbol... - if leaf.type in [token.EQUAL, token.PLUSEQUAL]: - idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 - - # That symbol MUST be followed by a string... - if is_valid_index(idx) and LL[idx].type == token.STRING: - string_idx = idx - - # Skip the string trailer, if one exists. - string_parser = StringParser() - idx = string_parser.parse(LL, string_idx) - - # The next leaf MAY be a comma iff this line is apart - # of a function argument... - if ( - parent_type(LL[0]) == syms.argument - and is_valid_index(idx) - and LL[idx].type == token.COMMA - ): - idx += 1 - - # But no more leaves are allowed... - if not is_valid_index(idx): - return string_idx - - return None - - @staticmethod - def _dict_match(LL: List[Leaf]) -> Optional[int]: - """ - Returns: - string_idx such that @LL[string_idx] is equal to our target (i.e. - matched) string, if this line matches the dictionary key assignment - statement requirements listed in the 'Requirements' section of this - classes' docstring. - OR - None, otherwise. - """ - # If this line is apart of a dictionary key assignment... - if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]: - is_valid_index = is_valid_index_factory(LL) - - for (i, leaf) in enumerate(LL): - # We MUST find a colon... - if leaf.type == token.COLON: - idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 - - # That colon MUST be followed by a string... - if is_valid_index(idx) and LL[idx].type == token.STRING: - string_idx = idx - - # Skip the string trailer, if one exists. - string_parser = StringParser() - idx = string_parser.parse(LL, string_idx) - - # That string MAY be followed by a comma... - if is_valid_index(idx) and LL[idx].type == token.COMMA: - idx += 1 - - # But no more leaves are allowed... - if not is_valid_index(idx): - return string_idx - - return None - - def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: - LL = line.leaves - - is_valid_index = is_valid_index_factory(LL) - insert_str_child = insert_str_child_factory(LL[string_idx]) - - comma_idx = -1 - ends_with_comma = False - if LL[comma_idx].type == token.COMMA: - ends_with_comma = True - - leaves_to_steal_comments_from = [LL[string_idx]] - if ends_with_comma: - leaves_to_steal_comments_from.append(LL[comma_idx]) - - # --- First Line - first_line = line.clone() - left_leaves = LL[:string_idx] - - # We have to remember to account for (possibly invisible) LPAR and RPAR - # leaves that already wrapped the target string. If these leaves do - # exist, we will replace them with our own LPAR and RPAR leaves. - old_parens_exist = False - if left_leaves and left_leaves[-1].type == token.LPAR: - old_parens_exist = True - leaves_to_steal_comments_from.append(left_leaves[-1]) - left_leaves.pop() - - append_leaves(first_line, line, left_leaves) - - lpar_leaf = Leaf(token.LPAR, "(") - if old_parens_exist: - replace_child(LL[string_idx - 1], lpar_leaf) - else: - insert_str_child(lpar_leaf) - first_line.append(lpar_leaf) - - # We throw inline comments that were originally to the right of the - # target string to the top line. They will now be shown to the right of - # the LPAR. 
- for leaf in leaves_to_steal_comments_from: - for comment_leaf in line.comments_after(leaf): - first_line.append(comment_leaf, preformatted=True) - - yield Ok(first_line) - - # --- Middle (String) Line - # We only need to yield one (possibly too long) string line, since the - # `StringSplitter` will break it down further if necessary. - string_value = LL[string_idx].value - string_line = Line( - mode=line.mode, - depth=line.depth + 1, - inside_brackets=True, - should_split_rhs=line.should_split_rhs, - magic_trailing_comma=line.magic_trailing_comma, - ) - string_leaf = Leaf(token.STRING, string_value) - insert_str_child(string_leaf) - string_line.append(string_leaf) - - old_rpar_leaf = None - if is_valid_index(string_idx + 1): - right_leaves = LL[string_idx + 1 :] - if ends_with_comma: - right_leaves.pop() - - if old_parens_exist: - assert ( - right_leaves and right_leaves[-1].type == token.RPAR - ), "Apparently, old parentheses do NOT exist?!" - old_rpar_leaf = right_leaves.pop() - - append_leaves(string_line, line, right_leaves) - - yield Ok(string_line) - - # --- Last Line - last_line = line.clone() - last_line.bracket_tracker = first_line.bracket_tracker - - new_rpar_leaf = Leaf(token.RPAR, ")") - if old_rpar_leaf is not None: - replace_child(old_rpar_leaf, new_rpar_leaf) - else: - insert_str_child(new_rpar_leaf) - last_line.append(new_rpar_leaf) - - # If the target string ended with a comma, we place this comma to the - # right of the RPAR on the last line. - if ends_with_comma: - comma_leaf = Leaf(token.COMMA, ",") - replace_child(LL[comma_idx], comma_leaf) - last_line.append(comma_leaf) - - yield Ok(last_line) - - -class StringParser: - """ - A state machine that aids in parsing a string's "trailer", which can be - either non-existent, an old-style formatting sequence (e.g. `% varX` or `% - (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX, - varY)`). - - NOTE: A new StringParser object MUST be instantiated for each string - trailer we need to parse. - - Examples: - We shall assume that `line` equals the `Line` object that corresponds - to the following line of python code: - ``` - x = "Some {}.".format("String") + some_other_string - ``` - - Furthermore, we will assume that `string_idx` is some index such that: - ``` - assert line.leaves[string_idx].value == "Some {}." - ``` - - The following code snippet then holds: - ``` - string_parser = StringParser() - idx = string_parser.parse(line.leaves, string_idx) - assert line.leaves[idx].type == token.PLUS - ``` - """ - - DEFAULT_TOKEN = -1 - - # String Parser States - START = 1 - DOT = 2 - NAME = 3 - PERCENT = 4 - SINGLE_FMT_ARG = 5 - LPAR = 6 - RPAR = 7 - DONE = 8 - - # Lookup Table for Next State - _goto: Dict[Tuple[ParserState, NodeType], ParserState] = { - # A string trailer may start with '.' OR '%'. - (START, token.DOT): DOT, - (START, token.PERCENT): PERCENT, - (START, DEFAULT_TOKEN): DONE, - # A '.' MUST be followed by an attribute or method name. - (DOT, token.NAME): NAME, - # A method name MUST be followed by an '(', whereas an attribute name - # is the last symbol in the string trailer. - (NAME, token.LPAR): LPAR, - (NAME, DEFAULT_TOKEN): DONE, - # A '%' symbol can be followed by an '(' or a single argument (e.g. a - # string or variable name). - (PERCENT, token.LPAR): LPAR, - (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG, - # If a '%' symbol is followed by a single argument, that argument is - # the last leaf in the string trailer. 
- (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE, - # If present, a ')' symbol is the last symbol in a string trailer. - # (NOTE: LPARS and nested RPARS are not included in this lookup table, - # since they are treated as a special case by the parsing logic in this - # classes' implementation.) - (RPAR, DEFAULT_TOKEN): DONE, - } - - def __init__(self) -> None: - self._state = self.START - self._unmatched_lpars = 0 - - def parse(self, leaves: List[Leaf], string_idx: int) -> int: - """ - Pre-conditions: - * @leaves[@string_idx].type == token.STRING - - Returns: - The index directly after the last leaf which is apart of the string - trailer, if a "trailer" exists. - OR - @string_idx + 1, if no string "trailer" exists. - """ - assert leaves[string_idx].type == token.STRING - - idx = string_idx + 1 - while idx < len(leaves) and self._next_state(leaves[idx]): - idx += 1 - return idx - - def _next_state(self, leaf: Leaf) -> bool: - """ - Pre-conditions: - * On the first call to this function, @leaf MUST be the leaf that - was directly after the string leaf in question (e.g. if our target - string is `line.leaves[i]` then the first call to this method must - be `line.leaves[i + 1]`). - * On the next call to this function, the leaf parameter passed in - MUST be the leaf directly following @leaf. - - Returns: - True iff @leaf is apart of the string's trailer. - """ - # We ignore empty LPAR or RPAR leaves. - if is_empty_par(leaf): - return True - - next_token = leaf.type - if next_token == token.LPAR: - self._unmatched_lpars += 1 - - current_state = self._state - - # The LPAR parser state is a special case. We will return True until we - # find the matching RPAR token. - if current_state == self.LPAR: - if next_token == token.RPAR: - self._unmatched_lpars -= 1 - if self._unmatched_lpars == 0: - self._state = self.RPAR - # Otherwise, we use a lookup table to determine the next state. - else: - # If the lookup table matches the current state to the next - # token, we use the lookup table. - if (current_state, next_token) in self._goto: - self._state = self._goto[current_state, next_token] - else: - # Otherwise, we check if a the current state was assigned a - # default. - if (current_state, self.DEFAULT_TOKEN) in self._goto: - self._state = self._goto[current_state, self.DEFAULT_TOKEN] - # If no default has been assigned, then this parser has a logic - # error. - else: - raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") - - if self._state == self.DONE: - return False - - return True - - -def TErr(err_msg: str) -> Err[CannotTransform]: - """(T)ransform Err - - Convenience function used when working with the TResult type. - """ - cant_transform = CannotTransform(err_msg) - return Err(cant_transform) - - -def contains_pragma_comment(comment_list: List[Leaf]) -> bool: - """ - Returns: - True iff one of the comments in @comment_list is a pragma used by one - of the more common static analysis tools for python (e.g. mypy, flake8, - pylint). - """ - for comment in comment_list: - if comment.value.startswith(("# type:", "# noqa", "# pylint:")): - return True - - return False - - -def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]: - """ - Factory for a convenience function that is used to orphan @string_leaf - and then insert multiple new leaves into the same part of the node - structure that @string_leaf had originally occupied. - - Examples: - Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N = - string_leaf.parent`. 
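# --- Illustration: the goto-table dispatch used by StringParser above -----
# States advance via (state, token) lookups with a DEFAULT_TOKEN fallback.
# A miniature, hypothetical version of the same pattern:
from typing import Dict, Tuple

DEFAULT = "<default>"
START, DOT, NAME, DONE = range(4)
goto: Dict[Tuple[int, str], int] = {
    (START, "."): DOT,
    (START, DEFAULT): DONE,
    (DOT, "name"): NAME,
    (NAME, DEFAULT): DONE,
}

def step(state: int, tok: str) -> int:
    return goto.get((state, tok), goto.get((state, DEFAULT), DONE))

assert step(step(START, "."), "name") == NAME
assert step(START, "%") == DONE  # no specific rule -> fall back to default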
Assume the node `N` has the following - original structure: - - Node( - expr_stmt, [ - Leaf(NAME, 'x'), - Leaf(EQUAL, '='), - Leaf(STRING, '"foo"'), - ] - ) - - We then run the code snippet shown below. - ``` - insert_str_child = insert_str_child_factory(string_leaf) - - lpar = Leaf(token.LPAR, '(') - insert_str_child(lpar) - - bar = Leaf(token.STRING, '"bar"') - insert_str_child(bar) - - rpar = Leaf(token.RPAR, ')') - insert_str_child(rpar) - ``` - - After which point, it follows that `string_leaf.parent is None` and - the node `N` now has the following structure: - - Node( - expr_stmt, [ - Leaf(NAME, 'x'), - Leaf(EQUAL, '='), - Leaf(LPAR, '('), - Leaf(STRING, '"bar"'), - Leaf(RPAR, ')'), - ] - ) - """ - string_parent = string_leaf.parent - string_child_idx = string_leaf.remove() - - def insert_str_child(child: LN) -> None: - nonlocal string_child_idx - - assert string_parent is not None - assert string_child_idx is not None - - string_parent.insert_child(string_child_idx, child) - string_child_idx += 1 - - return insert_str_child - - -def has_triple_quotes(string: str) -> bool: - """ - Returns: - True iff @string starts with three quotation characters. - """ - raw_string = string.lstrip(STRING_PREFIX_CHARS) - return raw_string[:3] in {'"""', "'''"} - - -def parent_type(node: Optional[LN]) -> Optional[NodeType]: - """ - Returns: - @node.parent.type, if @node is not None and has a parent. - OR - None, otherwise. - """ - if node is None or node.parent is None: - return None - - return node.parent.type - - -def is_empty_par(leaf: Leaf) -> bool: - return is_empty_lpar(leaf) or is_empty_rpar(leaf) - - -def is_empty_lpar(leaf: Leaf) -> bool: - return leaf.type == token.LPAR and leaf.value == "" - - -def is_empty_rpar(leaf: Leaf) -> bool: - return leaf.type == token.RPAR and leaf.value == "" - - -def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]: - """ - Examples: - ``` - my_list = [1, 2, 3] - - is_valid_index = is_valid_index_factory(my_list) - - assert is_valid_index(0) - assert is_valid_index(2) - - assert not is_valid_index(3) - assert not is_valid_index(-1) - ``` - """ - - def is_valid_index(idx: int) -> bool: - """ - Returns: - True iff @idx is positive AND seq[@idx] does NOT raise an - IndexError. - """ - return 0 <= idx < len(seq) - - return is_valid_index - - -def line_to_string(line: Line) -> str: - """Returns the string representation of @line. - - WARNING: This is known to be computationally expensive. - """ - return str(line).strip("\n") - - -def append_leaves( - new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False -) -> None: - """ - Append leaves (taken from @old_line) to @new_line, making sure to fix the - underlying Node structure where appropriate. - - All of the leaves in @leaves are duplicated. The duplicates are then - appended to @new_line and used to replace their originals in the underlying - Node structure. Any comments attached to the old leaves are reattached to - the new leaves. - - Pre-conditions: - set(@leaves) is a subset of set(@old_line.leaves). - """ - for old_leaf in leaves: - new_leaf = Leaf(old_leaf.type, old_leaf.value) - replace_child(old_leaf, new_leaf) - new_line.append(new_leaf, preformatted=preformatted) - - for comment_leaf in old_line.comments_after(old_leaf): - new_line.append(comment_leaf, preformatted=True) - - -def replace_child(old_child: LN, new_child: LN) -> None: - """ - Side Effects: - * If @old_child.parent is set, replace @old_child with @new_child in - @old_child's underlying Node structure. 
- OR - * Otherwise, this function does nothing. - """ - parent = old_child.parent - if not parent: - return - - child_idx = old_child.remove() - if child_idx is not None: - parent.insert_child(child_idx, new_child) - - -def get_string_prefix(string: str) -> str: - """ - Pre-conditions: - * assert_is_leaf_string(@string) - - Returns: - @string's prefix (e.g. '', 'r', 'f', or 'rf'). - """ - assert_is_leaf_string(string) - - prefix = "" - prefix_idx = 0 - while string[prefix_idx] in STRING_PREFIX_CHARS: - prefix += string[prefix_idx].lower() - prefix_idx += 1 - - return prefix - - -def assert_is_leaf_string(string: str) -> None: - """ - Checks the pre-condition that @string has the format that you would expect - of `leaf.value` where `leaf` is some Leaf such that `leaf.type == - token.STRING`. A more precise description of the pre-conditions that are - checked is given below. - - Pre-conditions: - * @string starts with either ', ", <prefix>', or <prefix>" where - `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`. - * @string ends with a quote character (' or "). - - Raises: - AssertionError(...) if the pre-conditions listed above are not - satisfied. - """ - dquote_idx = string.find('"') - squote_idx = string.find("'") - if -1 in [dquote_idx, squote_idx]: - quote_idx = max(dquote_idx, squote_idx) - else: - quote_idx = min(squote_idx, dquote_idx) - - assert ( - 0 <= quote_idx < len(string) - 1 - ), f"{string!r} is missing a starting quote character (' or \")." - assert string[-1] in ( - "'", - '"', - ), f"{string!r} is missing an ending quote character (' or \")." - assert set(string[:quote_idx]).issubset( - set(STRING_PREFIX_CHARS) - ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}." - - -def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]: - """Split line into many lines, starting with the first matching bracket pair. - - Note: this usually looks weird, only use this for function definitions. - Prefer RHS otherwise. This is why this function is not symmetrical with - :func:`right_hand_split` which also handles optional parentheses. - """ - tail_leaves: List[Leaf] = [] - body_leaves: List[Leaf] = [] - head_leaves: List[Leaf] = [] - current_leaves = head_leaves - matching_bracket: Optional[Leaf] = None - for leaf in line.leaves: - if ( - current_leaves is body_leaves - and leaf.type in CLOSING_BRACKETS - and leaf.opening_bracket is matching_bracket - ): - current_leaves = tail_leaves if body_leaves else head_leaves - current_leaves.append(leaf) - if current_leaves is head_leaves: - if leaf.type in OPENING_BRACKETS: - matching_bracket = leaf - current_leaves = body_leaves - if not matching_bracket: - raise CannotSplit("No brackets found") - - head = bracket_split_build_line(head_leaves, line, matching_bracket) - body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True) - tail = bracket_split_build_line(tail_leaves, line, matching_bracket) - bracket_split_succeeded_or_raise(head, body, tail) - for result in (head, body, tail): - if result: - yield result - - -def right_hand_split( - line: Line, - line_length: int, - features: Collection[Feature] = (), - omit: Collection[LeafID] = (), -) -> Iterator[Line]: - """Split line into many lines, starting with the last matching bracket pair. - - If the split was by optional parentheses, attempt splitting without them, too. - `omit` is a collection of closing bracket IDs that shouldn't be considered for - this split.
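The prefix scan in `get_string_prefix` can be run in isolation. In this sketch, `STRING_PREFIX_CHARS` is assumed to be the `"furbFURB"` character set Black uses:

```
STRING_PREFIX_CHARS = "furbFURB"  # assumption: Black's legal prefix characters


def string_prefix(string: str) -> str:
    prefix = ""
    prefix_idx = 0
    while string[prefix_idx] in STRING_PREFIX_CHARS:
        prefix += string[prefix_idx].lower()
        prefix_idx += 1
    return prefix


assert string_prefix('"foo"') == ""
assert string_prefix("Rb'foo'") == "rb"  # prefixes are lowercased
```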
- - Note: running this function modifies `bracket_depth` on the leaves of `line`. - """ - tail_leaves: List[Leaf] = [] - body_leaves: List[Leaf] = [] - head_leaves: List[Leaf] = [] - current_leaves = tail_leaves - opening_bracket: Optional[Leaf] = None - closing_bracket: Optional[Leaf] = None - for leaf in reversed(line.leaves): - if current_leaves is body_leaves: - if leaf is opening_bracket: - current_leaves = head_leaves if body_leaves else tail_leaves - current_leaves.append(leaf) - if current_leaves is tail_leaves: - if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: - opening_bracket = leaf.opening_bracket - closing_bracket = leaf - current_leaves = body_leaves - if not (opening_bracket and closing_bracket and head_leaves): - # If there is no opening or closing_bracket that means the split failed and - # all content is in the tail. Otherwise, if `head_leaves` are empty, it means - # the matching `opening_bracket` wasn't available on `line` anymore. - raise CannotSplit("No brackets found") - - tail_leaves.reverse() - body_leaves.reverse() - head_leaves.reverse() - head = bracket_split_build_line(head_leaves, line, opening_bracket) - body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True) - tail = bracket_split_build_line(tail_leaves, line, opening_bracket) - bracket_split_succeeded_or_raise(head, body, tail) - if ( - Feature.FORCE_OPTIONAL_PARENTHESES not in features - # the opening bracket is an optional paren - and opening_bracket.type == token.LPAR - and not opening_bracket.value - # the closing bracket is an optional paren - and closing_bracket.type == token.RPAR - and not closing_bracket.value - # it's not an import (optional parens are the only thing we can split on - # in this case; attempting a split without them is a waste of time) - and not line.is_import - # there are no standalone comments in the body - and not body.contains_standalone_comments(0) - # and we can actually remove the parens - and can_omit_invisible_parens(body, line_length, omit_on_explode=omit) - ): - omit = {id(closing_bracket), *omit} - try: - yield from right_hand_split(line, line_length, features=features, omit=omit) - return - - except CannotSplit: - if not ( - can_be_split(body) - or is_line_short_enough(body, line_length=line_length) - ): - raise CannotSplit( - "Splitting failed, body is still too long and can't be split." - ) - - elif head.contains_multiline_strings() or tail.contains_multiline_strings(): - raise CannotSplit( - "The current optional pair of parentheses is bound to fail to" - " satisfy the splitting algorithm because the head or the tail" - " contains multiline strings which by definition never fit one" - " line." - ) - - ensure_visible(opening_bracket) - ensure_visible(closing_bracket) - for result in (head, body, tail): - if result: - yield result - - -def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None: - """Raise :exc:`CannotSplit` if the last left- or right-hand split failed. - - Do nothing otherwise. - - A left- or right-hand split is based on a pair of brackets. Content before - (and including) the opening bracket is left on one line, content inside the - brackets is put on a separate line, and finally content starting with and - following the closing bracket is put on a separate line. - - Those are called `head`, `body`, and `tail`, respectively. If the split - produced the same line (all content in `head`) or ended up with an empty `body` - and the `tail` is just the closing bracket, then it's considered failed. 
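The head/body/tail decomposition `right_hand_split` performs is easier to see on plain strings. A toy illustration (not Black's `Leaf`/`Line` machinery) that splits on the last top-level bracket pair:

```
from typing import Tuple


def toy_right_hand_split(line: str) -> Tuple[str, str, str]:
    # Find the last closing bracket, then walk left to its matching opener.
    close = max(line.rfind(")"), line.rfind("]"), line.rfind("}"))
    pairs = {")": "(", "]": "[", "}": "{"}
    depth = 0
    for i in range(close, -1, -1):
        if line[i] == line[close]:
            depth += 1
        elif line[i] == pairs[line[close]]:
            depth -= 1
            if depth == 0:
                return line[: i + 1], line[i + 1 : close], line[close:]
    raise ValueError("No brackets found")


head, body, tail = toy_right_hand_split("result = frobnicate(a, b, c)")
assert (head, body, tail) == ("result = frobnicate(", "a, b, c", ")")
```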
- """ - tail_len = len(str(tail).strip()) - if not body: - if tail_len == 0: - raise CannotSplit("Splitting brackets produced the same line") - - elif tail_len < 3: - raise CannotSplit( - f"Splitting brackets on an empty body to save {tail_len} characters is" - " not worth it" - ) - - -def bracket_split_build_line( - leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False -) -> Line: - """Return a new line with given `leaves` and respective comments from `original`. - - If `is_body` is True, the result line is one-indented inside brackets and as such - has its first leaf's prefix normalized and a trailing comma added when expected. - """ - result = Line(mode=original.mode, depth=original.depth) - if is_body: - result.inside_brackets = True - result.depth += 1 - if leaves: - # Since body is a new indent level, remove spurious leading whitespace. - normalize_prefix(leaves[0], inside_brackets=True) - # Ensure a trailing comma for imports and standalone function arguments, but - # be careful not to add one after any comments or within type annotations. - no_commas = ( - original.is_def - and opening_bracket.value == "(" - and not any(leaf.type == token.COMMA for leaf in leaves) - ) - - if original.is_import or no_commas: - for i in range(len(leaves) - 1, -1, -1): - if leaves[i].type == STANDALONE_COMMENT: - continue - - if leaves[i].type != token.COMMA: - new_comma = Leaf(token.COMMA, ",") - leaves.insert(i + 1, new_comma) - break - - # Populate the line - for leaf in leaves: - result.append(leaf, preformatted=True) - for comment_after in original.comments_after(leaf): - result.append(comment_after, preformatted=True) - if is_body and should_split_line(result, opening_bracket): - result.should_split_rhs = True - return result - - -def dont_increase_indentation(split_func: Transformer) -> Transformer: - """Normalize prefix of the first leaf in every line returned by `split_func`. - - This is a decorator over relevant split functions. - """ - - @wraps(split_func) - def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: - for line in split_func(line, features): - normalize_prefix(line.leaves[0], inside_brackets=True) - yield line - - return split_wrapper - - -@dont_increase_indentation -def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: - """Split according to delimiters of the highest priority. - - If the appropriate Features are given, the split will add trailing commas - also in function signatures and calls that contain `*` and `**`. 
- """ - try: - last_leaf = line.leaves[-1] - except IndexError: - raise CannotSplit("Line empty") - - bt = line.bracket_tracker - try: - delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) - except ValueError: - raise CannotSplit("No delimiters found") - - if delimiter_priority == DOT_PRIORITY: - if bt.delimiter_count_with_priority(delimiter_priority) == 1: - raise CannotSplit("Splitting a single attribute from its owner looks wrong") - - current_line = Line( - mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets - ) - lowest_depth = sys.maxsize - trailing_comma_safe = True - - def append_to_line(leaf: Leaf) -> Iterator[Line]: - """Append `leaf` to current line or to new line if appending impossible.""" - nonlocal current_line - try: - current_line.append_safe(leaf, preformatted=True) - except ValueError: - yield current_line - - current_line = Line( - mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets - ) - current_line.append(leaf) - - for leaf in line.leaves: - yield from append_to_line(leaf) - - for comment_after in line.comments_after(leaf): - yield from append_to_line(comment_after) - - lowest_depth = min(lowest_depth, leaf.bracket_depth) - if leaf.bracket_depth == lowest_depth: - if is_vararg(leaf, within={syms.typedargslist}): - trailing_comma_safe = ( - trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features - ) - elif is_vararg(leaf, within={syms.arglist, syms.argument}): - trailing_comma_safe = ( - trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features - ) - - leaf_priority = bt.delimiters.get(id(leaf)) - if leaf_priority == delimiter_priority: - yield current_line - - current_line = Line( - mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets - ) - if current_line: - if ( - trailing_comma_safe - and delimiter_priority == COMMA_PRIORITY - and current_line.leaves[-1].type != token.COMMA - and current_line.leaves[-1].type != STANDALONE_COMMENT - ): - new_comma = Leaf(token.COMMA, ",") - current_line.append(new_comma) - yield current_line - - -@dont_increase_indentation -def standalone_comment_split( - line: Line, features: Collection[Feature] = () -) -> Iterator[Line]: - """Split standalone comments from the rest of the line.""" - if not line.contains_standalone_comments(0): - raise CannotSplit("Line does not have any standalone comments") - - current_line = Line( - mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets - ) - - def append_to_line(leaf: Leaf) -> Iterator[Line]: - """Append `leaf` to current line or to new line if appending impossible.""" - nonlocal current_line - try: - current_line.append_safe(leaf, preformatted=True) - except ValueError: - yield current_line - - current_line = Line( - line.mode, depth=line.depth, inside_brackets=line.inside_brackets - ) - current_line.append(leaf) - - for leaf in line.leaves: - yield from append_to_line(leaf) - - for comment_after in line.comments_after(leaf): - yield from append_to_line(comment_after) - - if current_line: - yield current_line - - -def is_import(leaf: Leaf) -> bool: - """Return True if the given leaf starts an import statement.""" - p = leaf.parent - t = leaf.type - v = leaf.value - return bool( - t == token.NAME - and ( - (v == "import" and p and p.type == syms.import_name) - or (v == "from" and p and p.type == syms.import_from) - ) - ) - - -def is_type_comment(leaf: Leaf, suffix: str = "") -> bool: - """Return True if the given leaf is a special comment. 
- Only returns true for type comments for now.""" - t = leaf.type - v = leaf.value - return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix) - - -def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: - """Leave existing extra newlines if not `inside_brackets`. Remove everything - else. - - Note: don't use backslashes for formatting or you'll lose your voting rights. - """ - if not inside_brackets: - spl = leaf.prefix.split("#") - if "\\" not in spl[0]: - nl_count = spl[-1].count("\n") - if len(spl) > 1: - nl_count -= 1 - leaf.prefix = "\n" * nl_count - return - - leaf.prefix = "" - - -def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None: - """Make all string prefixes lowercase. - - If remove_u_prefix is given, also removes any u prefix from the string. - - Note: Mutates its argument. - """ - match = re.match(r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", leaf.value, re.DOTALL) - assert match is not None, f"failed to match string {leaf.value!r}" - orig_prefix = match.group(1) - new_prefix = orig_prefix.replace("F", "f").replace("B", "b").replace("U", "u") - if remove_u_prefix: - new_prefix = new_prefix.replace("u", "") - leaf.value = f"{new_prefix}{match.group(2)}" - - -def normalize_string_quotes(leaf: Leaf) -> None: - """Prefer double quotes but only if it doesn't cause more escaping. - - Adds or removes backslashes as appropriate. Doesn't parse and fix - strings nested in f-strings (yet). - - Note: Mutates its argument. - """ - value = leaf.value.lstrip(STRING_PREFIX_CHARS) - if value[:3] == '"""': - return - - elif value[:3] == "'''": - orig_quote = "'''" - new_quote = '"""' - elif value[0] == '"': - orig_quote = '"' - new_quote = "'" - else: - orig_quote = "'" - new_quote = '"' - first_quote_pos = leaf.value.find(orig_quote) - if first_quote_pos == -1: - return # There's an internal error - - prefix = leaf.value[:first_quote_pos] - unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}") - escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") - escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") - body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)] - if "r" in prefix.casefold(): - if unescaped_new_quote.search(body): - # There's at least one unescaped new_quote in this raw string - # so converting is impossible - return - - # Do not introduce or remove backslashes in raw strings - new_body = body - else: - # remove unnecessary escapes - new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body) - if body != new_body: - # Consider the string without unnecessary escapes as the original - body = new_body - leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}" - new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body) - new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body) - if "f" in prefix.casefold(): - matches = re.findall( - r""" - (?:[^{]|^)\{ # start of the string or a non-{ followed by a single { - ([^{].*?) 
# contents of the brackets except if begins with {{ - \}(?:[^}]|$) # A } followed by end of the string or a non-} - """, - new_body, - re.VERBOSE, - ) - for m in matches: - if "\\" in str(m): - # Do not introduce backslashes in interpolated expressions - return - - if new_quote == '"""' and new_body[-1:] == '"': - # edge case: - new_body = new_body[:-1] + '\\"' - orig_escape_count = body.count("\\") - new_escape_count = new_body.count("\\") - if new_escape_count > orig_escape_count: - return # Do not introduce more escaping - - if new_escape_count == orig_escape_count and orig_quote == '"': - return # Prefer double quotes - - leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}" - - -def normalize_numeric_literal(leaf: Leaf) -> None: - """Normalizes numeric (float, int, and complex) literals. - - All letters used in the representation are normalized to lowercase (except - in Python 2 long literals). - """ - text = leaf.value.lower() - if text.startswith(("0o", "0b")): - # Leave octal and binary literals alone. - pass - elif text.startswith("0x"): - text = format_hex(text) - elif "e" in text: - text = format_scientific_notation(text) - elif text.endswith(("j", "l")): - text = format_long_or_complex_number(text) - else: - text = format_float_or_int_string(text) - leaf.value = text - - -def format_hex(text: str) -> str: - """ - Formats a hexadecimal string like "0x12B3". - """ - before, after = text[:2], text[2:] - return f"{before}{after.upper()}" - - -def format_scientific_notation(text: str) -> str: - """Formats a numeric string using scientific notation.""" - before, after = text.split("e") - sign = "" - if after.startswith("-"): - after = after[1:] - sign = "-" - elif after.startswith("+"): - after = after[1:] - before = format_float_or_int_string(before) - return f"{before}e{sign}{after}" - - -def format_long_or_complex_number(text: str) -> str: - """Formats a long or complex string like `10L` or `10j`.""" - number = text[:-1] - suffix = text[-1] - # Capitalize in "2L" because "l" looks too similar to "1". - if suffix == "l": - suffix = "L" - return f"{format_float_or_int_string(number)}{suffix}" - - -def format_float_or_int_string(text: str) -> str: - """Formats a float string like "1.0".""" - if "." not in text: - return text - - before, after = text.split(".") - return f"{before or 0}.{after or 0}" - - -def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None: - """Make existing optional parentheses invisible or create new ones. - - `parens_after` is a set of string leaf values immediately after which parens - should be put. - - Standardizes on visible parentheses for single-element tuples, and keeps - existing visible parentheses for other tuples and generator expressions. - """ - for pc in list_comments(node.prefix, is_endmarker=False): - if pc.value in FMT_OFF: - # This `node` has a prefix with `# fmt: off`, don't mess with parens. - return - check_lpar = False - for index, child in enumerate(list(node.children)): - # Fixes a bug where invisible parens are not properly stripped from - # assignment statements that contain type annotations. - if isinstance(child, Node) and child.type == syms.annassign: - normalize_invisible_parens(child, parens_after=parens_after) - - # Add parentheses around long tuple unpacking in assignments.
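A self-contained approximation of the numeric-literal pipeline above; it ignores signs in exponents and the Python 2 `L` suffix, so treat it as a sketch rather than the full rule set:

```
def normalize_numeric_literal(text: str) -> str:
    text = text.lower()
    if text.startswith(("0o", "0b")):
        return text  # leave octal and binary alone
    if text.startswith("0x"):
        return "0x" + text[2:].upper()  # hex digits are uppercased
    if "e" in text:
        before, _, after = text.partition("e")
        return f"{normalize_numeric_literal(before)}e{after}"
    if "." in text:
        before, after = text.split(".")
        return f"{before or 0}.{after or 0}"  # ".5" -> "0.5", "5." -> "5.0"
    return text


assert normalize_numeric_literal("0XAB") == "0xAB"
assert normalize_numeric_literal("1E3") == "1e3"
assert normalize_numeric_literal(".5") == "0.5"
```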
- if ( - index == 0 - and isinstance(child, Node) - and child.type == syms.testlist_star_expr - ): - check_lpar = True - - if check_lpar: - if child.type == syms.atom: - if maybe_make_parens_invisible_in_atom(child, parent=node): - wrap_in_parentheses(node, child, visible=False) - elif is_one_tuple(child): - wrap_in_parentheses(node, child, visible=True) - elif node.type == syms.import_from: - # "import from" nodes store parentheses directly as part of - # the statement - if child.type == token.LPAR: - # make parentheses invisible - child.value = "" # type: ignore - node.children[-1].value = "" # type: ignore - elif child.type != token.STAR: - # insert invisible parentheses - node.insert_child(index, Leaf(token.LPAR, "")) - node.append_child(Leaf(token.RPAR, "")) - break - - elif not (isinstance(child, Leaf) and is_multiline_string(child)): - wrap_in_parentheses(node, child, visible=False) - - check_lpar = isinstance(child, Leaf) and child.value in parens_after - - -def normalize_fmt_off(node: Node) -> None: - """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.""" - try_again = True - while try_again: - try_again = convert_one_fmt_off_pair(node) - - -def convert_one_fmt_off_pair(node: Node) -> bool: - """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment. - - Returns True if a pair was converted. - """ - for leaf in node.leaves(): - previous_consumed = 0 - for comment in list_comments(leaf.prefix, is_endmarker=False): - if comment.value not in FMT_PASS: - previous_consumed = comment.consumed - continue - # We only want standalone comments. If there's no previous leaf or - # the previous leaf is indentation, it's a standalone comment in - # disguise. - if comment.value in FMT_PASS and comment.type != STANDALONE_COMMENT: - prev = preceding_leaf(leaf) - if prev: - if comment.value in FMT_OFF and prev.type not in WHITESPACE: - continue - if comment.value in FMT_SKIP and prev.type in WHITESPACE: - continue - - ignored_nodes = list(generate_ignored_nodes(leaf, comment)) - if not ignored_nodes: - continue - - first = ignored_nodes[0] # Can be a container node with the `leaf`. - parent = first.parent - prefix = first.prefix - first.prefix = prefix[comment.consumed :] - hidden_value = "".join(str(n) for n in ignored_nodes) - if comment.value in FMT_OFF: - hidden_value = comment.value + "\n" + hidden_value - if comment.value in FMT_SKIP: - hidden_value += " " + comment.value - if hidden_value.endswith("\n"): - # That happens when one of the `ignored_nodes` ended with a NEWLINE - # leaf (possibly followed by a DEDENT). - hidden_value = hidden_value[:-1] - first_idx: Optional[int] = None - for ignored in ignored_nodes: - index = ignored.remove() - if first_idx is None: - first_idx = index - assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)" - assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)" - parent.insert_child( - first_idx, - Leaf( - STANDALONE_COMMENT, - hidden_value, - prefix=prefix[:previous_consumed] + "\n" * comment.newlines, - ), - ) - return True - - return False - - -def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]: - """Starting from the container of `leaf`, generate all leaves until `# fmt: on`. - - If comment is skip, returns leaf only. - Stops at the end of the block. 
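For reference, this is the user-facing behaviour the `# fmt: off`/`# fmt: on` machinery above implements: everything between the two comments is collapsed into a single standalone comment leaf internally and reproduced verbatim on output, so hand-aligned layouts like this one survive formatting:

```
custom_matrix = [
    # fmt: off
    1, 0, 0,
    0, 1, 0,
    0, 0, 1,
    # fmt: on
]
```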
- """ - container: Optional[LN] = container_of(leaf) - if comment.value in FMT_SKIP: - prev_sibling = leaf.prev_sibling - if comment.value in leaf.prefix and prev_sibling is not None: - leaf.prefix = leaf.prefix.replace(comment.value, "") - siblings = [prev_sibling] - while ( - "\n" not in prev_sibling.prefix - and prev_sibling.prev_sibling is not None - ): - prev_sibling = prev_sibling.prev_sibling - siblings.insert(0, prev_sibling) - for sibling in siblings: - yield sibling - elif leaf.parent is not None: - yield leaf.parent - return - while container is not None and container.type != token.ENDMARKER: - if is_fmt_on(container): - return - - # fix for fmt: on in children - if contains_fmt_on_at_column(container, leaf.column): - for child in container.children: - if contains_fmt_on_at_column(child, leaf.column): - return - yield child - else: - yield container - container = container.next_sibling - - -def is_fmt_on(container: LN) -> bool: - """Determine whether formatting is switched on within a container. - Determined by whether the last `# fmt:` comment is `on` or `off`. - """ - fmt_on = False - for comment in list_comments(container.prefix, is_endmarker=False): - if comment.value in FMT_ON: - fmt_on = True - elif comment.value in FMT_OFF: - fmt_on = False - return fmt_on - - -def contains_fmt_on_at_column(container: LN, column: int) -> bool: - """Determine if children at a given column have formatting switched on.""" - for child in container.children: - if ( - isinstance(child, Node) - and first_leaf_column(child) == column - or isinstance(child, Leaf) - and child.column == column - ): - if is_fmt_on(child): - return True - - return False - - -def first_leaf_column(node: Node) -> Optional[int]: - """Returns the column of the first leaf child of a node.""" - for child in node.children: - if isinstance(child, Leaf): - return child.column - return None - - -def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool: - """If it's safe, make the parens in the atom `node` invisible, recursively. - Additionally, remove repeated, adjacent invisible parens from the atom `node` - as they are redundant. - - Returns whether the node should itself be wrapped in invisible parentheses. - - """ - - if ( - node.type != syms.atom - or is_empty_tuple(node) - or is_one_tuple(node) - or (is_yield(node) and parent.type != syms.expr_stmt) - or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY - ): - return False - - if is_walrus_assignment(node): - if parent.type in [ - syms.annassign, - syms.expr_stmt, - syms.assert_stmt, - syms.return_stmt, - # these ones aren't useful to end users, but they do please fuzzers - syms.for_stmt, - syms.del_stmt, - ]: - return False - - first = node.children[0] - last = node.children[-1] - if first.type == token.LPAR and last.type == token.RPAR: - middle = node.children[1] - # make parentheses invisible - first.value = "" # type: ignore - last.value = "" # type: ignore - maybe_make_parens_invisible_in_atom(middle, parent=parent) - - if is_atom_with_invisible_parens(middle): - # Strip the invisible parens from `middle` by replacing - # it with the child in-between the invisible parens - middle.replace(middle.children[1]) - - return False - - return True - - -def is_atom_with_invisible_parens(node: LN) -> bool: - """Given a `LN`, determines whether it's an atom `node` with invisible - parens. Useful in dedupe-ing and normalizing parens. 
- """ - if isinstance(node, Leaf) or node.type != syms.atom: - return False - - first, last = node.children[0], node.children[-1] - return ( - isinstance(first, Leaf) - and first.type == token.LPAR - and first.value == "" - and isinstance(last, Leaf) - and last.type == token.RPAR - and last.value == "" - ) - - -def is_empty_tuple(node: LN) -> bool: - """Return True if `node` holds an empty tuple.""" - return ( - node.type == syms.atom - and len(node.children) == 2 - and node.children[0].type == token.LPAR - and node.children[1].type == token.RPAR - ) - - -def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]: - """Returns `wrapped` if `node` is of the shape ( wrapped ). - - Parenthesis can be optional. Returns None otherwise""" - if len(node.children) != 3: - return None - - lpar, wrapped, rpar = node.children - if not (lpar.type == token.LPAR and rpar.type == token.RPAR): - return None - - return wrapped - - -def first_child_is_arith(node: Node) -> bool: - """Whether first child is an arithmetic or a binary arithmetic expression""" - expr_types = { - syms.arith_expr, - syms.shift_expr, - syms.xor_expr, - syms.and_expr, - } - return bool(node.children and node.children[0].type in expr_types) - - -def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: - """Wrap `child` in parentheses. - - This replaces `child` with an atom holding the parentheses and the old - child. That requires moving the prefix. - - If `visible` is False, the leaves will be valueless (and thus invisible). - """ - lpar = Leaf(token.LPAR, "(" if visible else "") - rpar = Leaf(token.RPAR, ")" if visible else "") - prefix = child.prefix - child.prefix = "" - index = child.remove() or 0 - new_child = Node(syms.atom, [lpar, child, rpar]) - new_child.prefix = prefix - parent.insert_child(index, new_child) - - -def is_one_tuple(node: LN) -> bool: - """Return True if `node` holds a tuple with one element, with or without parens.""" - if node.type == syms.atom: - gexp = unwrap_singleton_parenthesis(node) - if gexp is None or gexp.type != syms.testlist_gexp: - return False - - return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA - - return ( - node.type in IMPLICIT_TUPLE - and len(node.children) == 2 - and node.children[1].type == token.COMMA - ) - - -def is_walrus_assignment(node: LN) -> bool: - """Return True iff `node` is of the shape ( test := test )""" - inner = unwrap_singleton_parenthesis(node) - return inner is not None and inner.type == syms.namedexpr_test - - -def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool: - """Return True iff `node` is a trailer valid in a simple decorator""" - return node.type == syms.trailer and ( - ( - len(node.children) == 2 - and node.children[0].type == token.DOT - and node.children[1].type == token.NAME - ) - # last trailer can be arguments - or ( - last - and len(node.children) == 3 - and node.children[0].type == token.LPAR - # and node.children[1].type == syms.argument - and node.children[2].type == token.RPAR - ) - ) - - -def is_simple_decorator_expression(node: LN) -> bool: - """Return True iff `node` could be a 'dotted name' decorator - - This function takes the node of the 'namedexpr_test' of the new decorator - grammar and test if it would be valid under the old decorator grammar. 
- - The old grammar was: decorator: @ dotted_name [arguments] NEWLINE - The new grammar is : decorator: @ namedexpr_test NEWLINE - """ - if node.type == token.NAME: - return True - if node.type == syms.power: - if node.children: - return ( - node.children[0].type == token.NAME - and all(map(is_simple_decorator_trailer, node.children[1:-1])) - and ( - len(node.children) < 2 - or is_simple_decorator_trailer(node.children[-1], last=True) - ) - ) - return False - - -def is_yield(node: LN) -> bool: - """Return True if `node` holds a `yield` or `yield from` expression.""" - if node.type == syms.yield_expr: - return True - - if node.type == token.NAME and node.value == "yield": # type: ignore - return True - - if node.type != syms.atom: - return False - - if len(node.children) != 3: - return False - - lpar, expr, rpar = node.children - if lpar.type == token.LPAR and rpar.type == token.RPAR: - return is_yield(expr) - - return False - - -def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool: - """Return True if `leaf` is a star or double star in a vararg or kwarg. - - If `within` includes VARARGS_PARENTS, this applies to function signatures. - If `within` includes UNPACKING_PARENTS, it applies to right hand-side - extended iterable unpacking (PEP 3132) and additional unpacking - generalizations (PEP 448). - """ - if leaf.type not in VARARGS_SPECIALS or not leaf.parent: - return False - - p = leaf.parent - if p.type == syms.star_expr: - # Star expressions are also used as assignment targets in extended - # iterable unpacking (PEP 3132). See what its parent is instead. - if not p.parent: - return False - - p = p.parent - - return p.type in within - - -def is_multiline_string(leaf: Leaf) -> bool: - """Return True if `leaf` is a multiline string that actually spans many lines.""" - return has_triple_quotes(leaf.value) and "\n" in leaf.value - - -def is_stub_suite(node: Node) -> bool: - """Return True if `node` is a suite with a stub body.""" - if ( - len(node.children) != 4 - or node.children[0].type != token.NEWLINE - or node.children[1].type != token.INDENT - or node.children[3].type != token.DEDENT - ): - return False - - return is_stub_body(node.children[2]) - - -def is_stub_body(node: LN) -> bool: - """Return True if `node` is a simple statement containing an ellipsis.""" - if not isinstance(node, Node) or node.type != syms.simple_stmt: - return False - - if len(node.children) != 2: - return False - - child = node.children[0] - return ( - child.type == syms.atom - and len(child.children) == 3 - and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) - ) - - -def max_delimiter_priority_in_atom(node: LN) -> Priority: - """Return maximum delimiter priority inside `node`. - - This is specific to atoms with contents contained in a pair of parentheses. - If `node` isn't an atom or there are no enclosing parentheses, returns 0. - """ - if node.type != syms.atom: - return 0 - - first = node.children[0] - last = node.children[-1] - if not (first.type == token.LPAR and last.type == token.RPAR): - return 0 - - bt = BracketTracker() - for c in node.children[1:-1]: - if isinstance(c, Leaf): - bt.mark(c) - else: - for leaf in c.leaves(): - bt.mark(leaf) - try: - return bt.max_delimiter_priority() - - except ValueError: - return 0 - - -def ensure_visible(leaf: Leaf) -> None: - """Make sure parentheses are visible. - - They could be invisible as part of some statements (see - :func:`normalize_invisible_parens` and :func:`visit_import_from`). 
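`is_stub_suite`/`is_stub_body` reduce to a simple idea: a stub body is a lone expression statement whose value is the `...` literal. A stdlib-`ast` analogue of that check:

```
import ast


def is_stub_body(stmt: ast.stmt) -> bool:
    return (
        isinstance(stmt, ast.Expr)
        and isinstance(stmt.value, ast.Constant)
        and stmt.value.value is ...
    )


fn = ast.parse("def f() -> int: ...").body[0]
assert isinstance(fn, ast.FunctionDef) and is_stub_body(fn.body[0])
```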
- """ - if leaf.type == token.LPAR: - leaf.value = "(" - elif leaf.type == token.RPAR: - leaf.value = ")" - - -def should_split_line(line: Line, opening_bracket: Leaf) -> bool: - """Should `line` be immediately split with `delimiter_split()` after RHS?""" - - if not (opening_bracket.parent and opening_bracket.value in "[{("): - return False - - # We're essentially checking if the body is delimited by commas and there's more - # than one of them (we're excluding the trailing comma and if the delimiter priority - # is still commas, that means there's more). - exclude = set() - trailing_comma = False - try: - last_leaf = line.leaves[-1] - if last_leaf.type == token.COMMA: - trailing_comma = True - exclude.add(id(last_leaf)) - max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude) - except (IndexError, ValueError): - return False - - return max_priority == COMMA_PRIORITY and ( - (line.mode.magic_trailing_comma and trailing_comma) - # always explode imports - or opening_bracket.parent.type in {syms.atom, syms.import_from} - ) - - -def is_one_tuple_between(opening: Leaf, closing: Leaf, leaves: List[Leaf]) -> bool: - """Return True if content between `opening` and `closing` looks like a one-tuple.""" - if opening.type != token.LPAR and closing.type != token.RPAR: - return False - - depth = closing.bracket_depth + 1 - for _opening_index, leaf in enumerate(leaves): - if leaf is opening: - break - - else: - raise LookupError("Opening paren not found in `leaves`") - - commas = 0 - _opening_index += 1 - for leaf in leaves[_opening_index:]: - if leaf is closing: - break - - bracket_depth = leaf.bracket_depth - if bracket_depth == depth and leaf.type == token.COMMA: - commas += 1 - if leaf.parent and leaf.parent.type in { - syms.arglist, - syms.typedargslist, - }: - commas += 1 - break - - return commas < 2 - - -def get_features_used(node: Node) -> Set[Feature]: +def get_features_used( # noqa: C901 + node: Node, *, future_imports: Optional[Set[str]] = None +) -> Set[Feature]: """Return a set of (relatively) new Python features used in this file. 
Currently looking for: - f-strings; + - self-documenting expressions in f-strings (f"{x=}"); - underscores in numeric literals; - trailing commas after * or ** in function signatures and calls; - positional only arguments in function signatures and lambdas; - assignment expression; - relaxed decorator syntax; + - usage of __future__ flags (annotations); + - print / exec statements; """ features: Set[Feature] = set() + if future_imports: + features |= { + FUTURE_FLAG_TO_FEATURE[future_import] + for future_import in future_imports + if future_import in FUTURE_FLAG_TO_FEATURE + } + for n in node.pre_order(): - if n.type == token.STRING: - value_head = n.value[:2] # type: ignore + if is_string_token(n): + value_head = n.value[:2] if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}: features.add(Feature.F_STRINGS) - - elif n.type == token.NUMBER: - if "_" in n.value: # type: ignore + if Feature.DEBUG_F_STRINGS not in features: + for span_beg, span_end in iter_fexpr_spans(n.value): + if n.value[span_beg : span_end - 1].rstrip().endswith("="): + features.add(Feature.DEBUG_F_STRINGS) + break + + elif is_number_token(n): + if "_" in n.value: features.add(Feature.NUMERIC_UNDERSCORES) elif n.type == token.SLASH: - if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}: + if n.parent and n.parent.type in { + syms.typedargslist, + syms.arglist, + syms.varargslist, + }: features.add(Feature.POS_ONLY_ARGUMENTS) elif n.type == token.COLONEQUAL: @@ -6025,89 +1195,51 @@ def get_features_used(node: Node) -> Set[Feature]: if argch.type in STARS: features.add(feature) - return features - - -def detect_target_versions(node: Node) -> Set[TargetVersion]: - """Detect the version to target based on the nodes used.""" - features = get_features_used(node) - return { - version for version in TargetVersion if features <= VERSION_TO_FEATURES[version] - } - - -def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]: - """Generate sets of closing bracket IDs that should be omitted in a RHS. - - Brackets can be omitted if the entire trailer up to and including - a preceding closing bracket fits in one line. + elif ( + n.type in {syms.return_stmt, syms.yield_expr} + and len(n.children) >= 2 + and n.children[1].type == syms.testlist_star_expr + and any(child.type == syms.star_expr for child in n.children[1].children) + ): + features.add(Feature.UNPACKING_ON_FLOW) - Yielded sets are cumulative (contain results of previous yields, too). First - set is empty, unless the line should explode, in which case bracket pairs until - the one that needs to explode are omitted. 
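A much-reduced analogue of `get_features_used`, walking the stdlib AST instead of Black's syntax tree and recording three of the markers listed above (illustrative only; numeric underscores are invisible at the AST level and would need token inspection):

```
import ast


def detect_features(src: str) -> set:
    features = set()
    for node in ast.walk(ast.parse(src)):
        if isinstance(node, ast.JoinedStr):
            features.add("F_STRINGS")
        elif isinstance(node, ast.NamedExpr):
            features.add("ASSIGNMENT_EXPRESSIONS")
        elif isinstance(node, ast.arguments) and node.posonlyargs:
            features.add("POS_ONLY_ARGUMENTS")
    return features


assert detect_features('f"{x}" if (x := 1) else 0') == {
    "F_STRINGS",
    "ASSIGNMENT_EXPRESSIONS",
}
```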
- """ + elif ( + n.type == syms.annassign + and len(n.children) >= 4 + and n.children[3].type == syms.testlist_star_expr + ): + features.add(Feature.ANN_ASSIGN_EXTENDED_RHS) - omit: Set[LeafID] = set() - if not line.magic_trailing_comma: - yield omit - - length = 4 * line.depth - opening_bracket: Optional[Leaf] = None - closing_bracket: Optional[Leaf] = None - inner_brackets: Set[LeafID] = set() - for index, leaf, leaf_length in enumerate_with_length(line, reversed=True): - length += leaf_length - if length > line_length: - break + elif ( + n.type == syms.except_clause + and len(n.children) >= 2 + and n.children[1].type == token.STAR + ): + features.add(Feature.EXCEPT_STAR) - has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix) - if leaf.type == STANDALONE_COMMENT or has_inline_comment: - break + elif n.type in {syms.subscriptlist, syms.trailer} and any( + child.type == syms.star_expr for child in n.children + ): + features.add(Feature.VARIADIC_GENERICS) - if opening_bracket: - if leaf is opening_bracket: - opening_bracket = None - elif leaf.type in CLOSING_BRACKETS: - prev = line.leaves[index - 1] if index > 0 else None - if ( - prev - and prev.type == token.COMMA - and not is_one_tuple_between( - leaf.opening_bracket, leaf, line.leaves - ) - ): - # Never omit bracket pairs with trailing commas. - # We need to explode on those. - break - - inner_brackets.add(id(leaf)) - elif leaf.type in CLOSING_BRACKETS: - prev = line.leaves[index - 1] if index > 0 else None - if prev and prev.type in OPENING_BRACKETS: - # Empty brackets would fail a split so treat them as "inner" - # brackets (e.g. only add them to the `omit` set if another - # pair of brackets was good enough. - inner_brackets.add(id(leaf)) - continue + elif ( + n.type == syms.tname_star + and len(n.children) == 3 + and n.children[2].type == syms.star_expr + ): + features.add(Feature.VARIADIC_GENERICS) - if closing_bracket: - omit.add(id(closing_bracket)) - omit.update(inner_brackets) - inner_brackets.clear() - yield omit + return features - if ( - prev - and prev.type == token.COMMA - and not is_one_tuple_between(leaf.opening_bracket, leaf, line.leaves) - ): - # Never omit bracket pairs with trailing commas. - # We need to explode on those. - break - if leaf.value: - opening_bracket = leaf.opening_bracket - closing_bracket = leaf +def detect_target_versions( + node: Node, *, future_imports: Optional[Set[str]] = None +) -> Set[TargetVersion]: + """Detect the version to target based on the nodes used.""" + features = get_features_used(node, future_imports=future_imports) + return { + version for version in TargetVersion if features <= VERSION_TO_FEATURES[version] + } def get_future_imports(node: Node) -> Set[str]: @@ -6160,399 +1292,45 @@ def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]: return imports -@lru_cache() -def get_gitignore(root: Path) -> PathSpec: - """Return a PathSpec matching gitignore content if present.""" - gitignore = root / ".gitignore" - lines: List[str] = [] - if gitignore.is_file(): - with gitignore.open() as gf: - lines = gf.readlines() - return PathSpec.from_lines("gitwildmatch", lines) - - -def normalize_path_maybe_ignore( - path: Path, root: Path, report: "Report" -) -> Optional[str]: - """Normalize `path`. May return `None` if `path` was ignored. - - `report` is where "path ignored" output goes. 
- """ - try: - abspath = path if path.is_absolute() else Path.cwd() / path - normalized_path = abspath.resolve().relative_to(root).as_posix() - except OSError as e: - report.path_ignored(path, f"cannot be read because {e}") - return None - - except ValueError: - if path.is_symlink(): - report.path_ignored(path, f"is a symbolic link that points outside {root}") - return None - - raise - - return normalized_path - - -def path_is_excluded( - normalized_path: str, - pattern: Optional[Pattern[str]], -) -> bool: - match = pattern.search(normalized_path) if pattern else None - return bool(match and match.group(0)) - - -def gen_python_files( - paths: Iterable[Path], - root: Path, - include: Optional[Pattern[str]], - exclude: Pattern[str], - extend_exclude: Optional[Pattern[str]], - force_exclude: Optional[Pattern[str]], - report: "Report", - gitignore: PathSpec, -) -> Iterator[Path]: - """Generate all files under `path` whose paths are not excluded by the - `exclude_regex`, `extend_exclude`, or `force_exclude` regexes, - but are included by the `include` regex. - - Symbolic links pointing outside of the `root` directory are ignored. - - `report` is where output about exclusions goes. - """ - assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" - for child in paths: - normalized_path = normalize_path_maybe_ignore(child, root, report) - if normalized_path is None: - continue - - # First ignore files matching .gitignore - if gitignore.match_file(normalized_path): - report.path_ignored(child, "matches the .gitignore file content") - continue - - # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options. - normalized_path = "/" + normalized_path - if child.is_dir(): - normalized_path += "/" - - if path_is_excluded(normalized_path, exclude): - report.path_ignored(child, "matches the --exclude regular expression") - continue - - if path_is_excluded(normalized_path, extend_exclude): - report.path_ignored( - child, "matches the --extend-exclude regular expression" - ) - continue - - if path_is_excluded(normalized_path, force_exclude): - report.path_ignored(child, "matches the --force-exclude regular expression") - continue - - if child.is_dir(): - yield from gen_python_files( - child.iterdir(), - root, - include, - exclude, - extend_exclude, - force_exclude, - report, - gitignore, - ) - - elif child.is_file(): - include_match = include.search(normalized_path) if include else True - if include_match: - yield child - - -@lru_cache() -def find_project_root(srcs: Tuple[str, ...]) -> Path: - """Return a directory containing .git, .hg, or pyproject.toml. - - That directory will be a common parent of all files and directories - passed in `srcs`. - - If no directory in the tree contains a marker that would specify it's the - project root, the root of the file system is returned. - """ - if not srcs: - return Path("/").resolve() - - path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs] - - # A list of lists of parents for each 'src'. 
'src' is included as a - # "parent" of itself if it is a directory - src_parents = [ - list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs - ] - - common_base = max( - set.intersection(*(set(parents) for parents in src_parents)), - key=lambda path: path.parts, - ) - - for directory in (common_base, *common_base.parents): - if (directory / ".git").exists(): - return directory - - if (directory / ".hg").is_dir(): - return directory - - if (directory / "pyproject.toml").is_file(): - return directory - - return directory - - -@lru_cache() -def find_user_pyproject_toml() -> Path: - r"""Return the path to the top-level user configuration for black. - - This looks for ~\.black on Windows and ~/.config/black on Linux and other - Unix systems. - """ - if sys.platform == "win32": - # Windows - user_config_path = Path.home() / ".black" - else: - config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config") - user_config_path = Path(config_root).expanduser() / "black" - return user_config_path.resolve() - - -@dataclass -class Report: - """Provides a reformatting counter. Can be rendered with `str(report)`.""" - - check: bool = False - diff: bool = False - quiet: bool = False - verbose: bool = False - change_count: int = 0 - same_count: int = 0 - failure_count: int = 0 - - def done(self, src: Path, changed: Changed) -> None: - """Increment the counter for successful reformatting. Write out a message.""" - if changed is Changed.YES: - reformatted = "would reformat" if self.check or self.diff else "reformatted" - if self.verbose or not self.quiet: - out(f"{reformatted} {src}") - self.change_count += 1 - else: - if self.verbose: - if changed is Changed.NO: - msg = f"{src} already well formatted, good job." - else: - msg = f"{src} wasn't modified on disk since last run." - out(msg, bold=False) - self.same_count += 1 - - def failed(self, src: Path, message: str) -> None: - """Increment the counter for failed reformatting. Write out a message.""" - err(f"error: cannot format {src}: {message}") - self.failure_count += 1 - - def path_ignored(self, path: Path, message: str) -> None: - if self.verbose: - out(f"{path} ignored: {message}", bold=False) - - @property - def return_code(self) -> int: - """Return the exit code that the app should use. - - This considers the current state of changed files and failures: - - if there were any failures, return 123; - - if any files were changed and --check is being used, return 1; - - otherwise return 0. - """ - # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with - # 126 we have special return codes reserved by the shell. - if self.failure_count: - return 123 - - elif self.change_count and self.check: - return 1 - - return 0 - - def __str__(self) -> str: - """Render a color report of the current state. - - Use `click.unstyle` to remove colors. 
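Condensed, `find_project_root` walks upward from a common base and stops at the first directory carrying a repository or configuration marker; the sketch below keeps only that walk:

```
from pathlib import Path


def find_project_root(path: Path) -> Path:
    path = path.resolve()
    for directory in (path, *path.parents):
        if (directory / ".git").exists():
            return directory
        if (directory / ".hg").is_dir():
            return directory
        if (directory / "pyproject.toml").is_file():
            return directory
    return directory  # filesystem root: no marker found anywhere


print(find_project_root(Path.cwd()))
```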
- """ - if self.check or self.diff: - reformatted = "would be reformatted" - unchanged = "would be left unchanged" - failed = "would fail to reformat" - else: - reformatted = "reformatted" - unchanged = "left unchanged" - failed = "failed to reformat" - report = [] - if self.change_count: - s = "s" if self.change_count > 1 else "" - report.append( - click.style(f"{self.change_count} file{s} {reformatted}", bold=True) - ) - if self.same_count: - s = "s" if self.same_count > 1 else "" - report.append(f"{self.same_count} file{s} {unchanged}") - if self.failure_count: - s = "s" if self.failure_count > 1 else "" - report.append( - click.style(f"{self.failure_count} file{s} {failed}", fg="red") - ) - return ", ".join(report) + "." - - -def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]: - filename = "" - if sys.version_info >= (3, 8): - # TODO: support Python 4+ ;) - for minor_version in range(sys.version_info[1], 4, -1): - try: - return ast.parse(src, filename, feature_version=(3, minor_version)) - except SyntaxError: - continue - else: - for feature_version in (7, 6): - try: - return ast3.parse(src, filename, feature_version=feature_version) - except SyntaxError: - continue - if ast27.__name__ == "ast": - raise SyntaxError( - "The requested source code has invalid Python 3 syntax.\n" - "If you are trying to format Python 2 files please reinstall Black" - " with the 'python2' extra: `python3 -m pip install black[python2]`." - ) - return ast27.parse(src) - - -def _fixup_ast_constants( - node: Union[ast.AST, ast3.AST, ast27.AST] -) -> Union[ast.AST, ast3.AST, ast27.AST]: - """Map ast nodes deprecated in 3.8 to Constant.""" - if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)): - return ast.Constant(value=node.s) - - if isinstance(node, (ast.Num, ast3.Num, ast27.Num)): - return ast.Constant(value=node.n) - - if isinstance(node, (ast.NameConstant, ast3.NameConstant)): - return ast.Constant(value=node.value) - - return node - - -def _stringify_ast( - node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0 -) -> Iterator[str]: - """Simple visitor generating strings to compare ASTs by content.""" - - node = _fixup_ast_constants(node) - - yield f"{' ' * depth}{node.__class__.__name__}(" - - for field in sorted(node._fields): # noqa: F402 - # TypeIgnore has only one field 'lineno' which breaks this comparison - type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore) - if sys.version_info >= (3, 8): - type_ignore_classes += (ast.TypeIgnore,) - if isinstance(node, type_ignore_classes): - break - - try: - value = getattr(node, field) - except AttributeError: - continue - - yield f"{' ' * (depth+1)}{field}=" - - if isinstance(value, list): - for item in value: - # Ignore nested tuples within del statements, because we may insert - # parentheses and they change the AST. - if ( - field == "targets" - and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete)) - and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple)) - ): - for item in item.elts: - yield from _stringify_ast(item, depth + 2) - - elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)): - yield from _stringify_ast(item, depth + 2) - - elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)): - yield from _stringify_ast(value, depth + 2) - - else: - # Constant strings may be indented across newlines, if they are - # docstrings; fold spaces after newlines when comparing. Similarly, - # trailing and leading space may be removed. 
- # Note that when formatting Python 2 code, at least with Windows - # line-endings, docstrings can end up here as bytes instead of - # str so make sure that we handle both cases. - if ( - isinstance(node, ast.Constant) - and field == "value" - and isinstance(value, (str, bytes)) - ): - lineend = "\n" if isinstance(value, str) else b"\n" - # To normalize, we strip any leading and trailing space from - # each line... - stripped = [line.strip() for line in value.splitlines()] - normalized = lineend.join(stripped) # type: ignore[attr-defined] - # ...and remove any blank lines at the beginning and end of - # the whole string - normalized = normalized.strip() - else: - normalized = value - yield f"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}" - - yield f"{' ' * depth}) # /{node.__class__.__name__}" - - -def assert_equivalent(src: str, dst: str, *, pass_num: int = 1) -> None: +def assert_equivalent(src: str, dst: str) -> None: """Raise AssertionError if `src` and `dst` aren't equivalent.""" try: src_ast = parse_ast(src) except Exception as exc: raise AssertionError( - "cannot use --safe with this file; failed to parse source file. AST" - f" error message: {exc}" - ) + "cannot use --safe with this file; failed to parse source file AST: " + f"{exc}\n" + "This could be caused by running Black with an older Python version " + "that does not support new syntax used in your source file." + ) from exc try: dst_ast = parse_ast(dst) except Exception as exc: log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst) raise AssertionError( - f"INTERNAL ERROR: Black produced invalid code on pass {pass_num}: {exc}. " + f"INTERNAL ERROR: Black produced invalid code: {exc}. " "Please report a bug on https://github.com/psf/black/issues. " f"This invalid output might be helpful: {log}" ) from None - src_ast_str = "\n".join(_stringify_ast(src_ast)) - dst_ast_str = "\n".join(_stringify_ast(dst_ast)) + src_ast_str = "\n".join(stringify_ast(src_ast)) + dst_ast_str = "\n".join(stringify_ast(dst_ast)) if src_ast_str != dst_ast_str: log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst")) raise AssertionError( "INTERNAL ERROR: Black produced code that is not equivalent to the" - f" source on pass {pass_num}. Please report a bug on " + " source. Please report a bug on " f"https://github.com/psf/black/issues. This diff might be helpful: {log}" ) from None def assert_stable(src: str, dst: str, mode: Mode) -> None: """Raise AssertionError if `dst` reformats differently the second time.""" - newdst = format_str(dst, mode=mode) + # We shouldn't call format_str() here, because that formats the string + # twice and may hide a bug where we bounce back and forth between two + # versions. + newdst = _format_str_once(dst, mode=mode) if dst != newdst: log = dump_to_file( str(mode), @@ -6566,19 +1344,6 @@ def assert_stable(src: str, dst: str, mode: Mode) -> None: ) from None -@mypyc_attr(patchable=True) -def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str: - """Dump `output` to a temporary file. Return path to the file.""" - with tempfile.NamedTemporaryFile( - mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8" - ) as f: - for lines in output: - f.write(lines) - if ensure_final_newline and lines and lines[-1] != "\n": - f.write("\n") - return f.name - - @contextmanager def nullcontext() -> Iterator[None]: """Return an empty context manager. 
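A cheap stand-in for the equivalence check that `assert_equivalent` performs: compare `ast.dump` output of both versions. `stringify_ast` is fuzzier (it folds docstring whitespace, as the comment above explains), but the idea is the same:

```
import ast


def is_equivalent(src: str, dst: str) -> bool:
    # Formatting-only changes leave the AST, and hence its dump, untouched.
    return ast.dump(ast.parse(src)) == ast.dump(ast.parse(dst))


assert is_equivalent("x=(1 , 2)", "x = (1, 2)")
assert not is_equivalent("x = 1", "x = 2")
```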
@@ -6588,403 +1353,8 @@ def nullcontext() -> Iterator[None]: yield -def diff(a: str, b: str, a_name: str, b_name: str) -> str: - """Return a unified diff string between strings `a` and `b`.""" - import difflib - - a_lines = [line for line in a.splitlines(keepends=True)] - b_lines = [line for line in b.splitlines(keepends=True)] - diff_lines = [] - for line in difflib.unified_diff( - a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5 - ): - # Work around https://bugs.python.org/issue2142 - # See https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html - if line[-1] == "\n": - diff_lines.append(line) - else: - diff_lines.append(line + "\n") - diff_lines.append("\\ No newline at end of file\n") - return "".join(diff_lines) - - -def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None: - """asyncio signal handler that cancels all `tasks` and reports to stderr.""" - err("Aborted!") - for task in tasks: - task.cancel() - - -def shutdown(loop: asyncio.AbstractEventLoop) -> None: - """Cancel all pending tasks on `loop`, wait for them, and close the loop.""" - try: - if sys.version_info[:2] >= (3, 7): - all_tasks = asyncio.all_tasks - else: - all_tasks = asyncio.Task.all_tasks - # This part is borrowed from asyncio/runners.py in Python 3.7b2. - to_cancel = [task for task in all_tasks(loop) if not task.done()] - if not to_cancel: - return - - for task in to_cancel: - task.cancel() - loop.run_until_complete( - asyncio.gather(*to_cancel, loop=loop, return_exceptions=True) - ) - finally: - # `concurrent.futures.Future` objects cannot be cancelled once they - # are already running. There might be some when the `shutdown()` happened. - # Silence their logger's spew about the event loop being closed. - cf_logger = logging.getLogger("concurrent.futures") - cf_logger.setLevel(logging.CRITICAL) - loop.close() - - -def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str: - """Replace `regex` with `replacement` twice on `original`. - - This is used by string normalization to perform replaces on - overlapping matches. - """ - return regex.sub(replacement, regex.sub(replacement, original)) - - -def re_compile_maybe_verbose(regex: str) -> Pattern[str]: - """Compile a regular expression string in `regex`. - - If it contains newlines, use verbose mode. - """ - if "\n" in regex: - regex = "(?x)" + regex - compiled: Pattern[str] = re.compile(regex) - return compiled - - -def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: - """Like `reversed(enumerate(sequence))` if that were possible.""" - index = len(sequence) - 1 - for element in reversed(sequence): - yield (index, element) - index -= 1 - - -def enumerate_with_length( - line: Line, reversed: bool = False -) -> Iterator[Tuple[Index, Leaf, int]]: - """Return an enumeration of leaves with their length. - - Stops prematurely on multiline strings and standalone comments. - """ - op = cast( - Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]], - enumerate_reversed if reversed else enumerate, - ) - for index, leaf in op(line.leaves): - length = len(leaf.prefix) + len(leaf.value) - if "\n" in leaf.value: - return # Multiline strings, we can't continue. - - for comment in line.comments_after(leaf): - length += len(comment.value) - - yield index, leaf, length - - -def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool: - """Return True if `line` is no longer than `line_length`. - - Uses the provided `line_str` rendering, if any, otherwise computes a new one. 
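`sub_twice` exists because the quote-normalization regexes consume the character *before* the quote, so back-to-back occurrences overlap and a single `re.sub` pass misses every second one:

```
import re

pattern = re.compile(r"([^\\]|^)'")  # an unescaped single quote
one_pass = pattern.sub(r'\1"', "a'b''c")
two_pass = pattern.sub(r'\1"', one_pass)
print(one_pass)  # a"b"'c  -- the second quote of '' was skipped
print(two_pass)  # a"b""c  -- fixed by the second pass
```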
- """ - if not line_str: - line_str = line_to_string(line) - return ( - len(line_str) <= line_length - and "\n" not in line_str # multiline strings - and not line.contains_standalone_comments() - ) - - -def can_be_split(line: Line) -> bool: - """Return False if the line cannot be split *for sure*. - - This is not an exhaustive search but a cheap heuristic that we can use to - avoid some unfortunate formattings (mostly around wrapping unsplittable code - in unnecessary parentheses). - """ - leaves = line.leaves - if len(leaves) < 2: - return False - - if leaves[0].type == token.STRING and leaves[1].type == token.DOT: - call_count = 0 - dot_count = 0 - next = leaves[-1] - for leaf in leaves[-2::-1]: - if leaf.type in OPENING_BRACKETS: - if next.type not in CLOSING_BRACKETS: - return False - - call_count += 1 - elif leaf.type == token.DOT: - dot_count += 1 - elif leaf.type == token.NAME: - if not (next.type == token.DOT or next.type in OPENING_BRACKETS): - return False - - elif leaf.type not in CLOSING_BRACKETS: - return False - - if dot_count > 1 and call_count > 1: - return False - - return True - - -def can_omit_invisible_parens( - line: Line, - line_length: int, - omit_on_explode: Collection[LeafID] = (), -) -> bool: - """Does `line` have a shape safe to reformat without optional parens around it? - - Returns True for only a subset of potentially nice looking formattings but - the point is to not return false positives that end up producing lines that - are too long. - """ - bt = line.bracket_tracker - if not bt.delimiters: - # Without delimiters the optional parentheses are useless. - return True - - max_priority = bt.max_delimiter_priority() - if bt.delimiter_count_with_priority(max_priority) > 1: - # With more than one delimiter of a kind the optional parentheses read better. - return False - - if max_priority == DOT_PRIORITY: - # A single stranded method call doesn't require optional parentheses. - return True - - assert len(line.leaves) >= 2, "Stranded delimiter" - - # With a single delimiter, omit if the expression starts or ends with - # a bracket. - first = line.leaves[0] - second = line.leaves[1] - if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: - if _can_omit_opening_paren(line, first=first, line_length=line_length): - return True - - # Note: we are not returning False here because a line might have *both* - # a leading opening bracket and a trailing closing bracket. If the - # opening bracket doesn't match our rule, maybe the closing will. - - penultimate = line.leaves[-2] - last = line.leaves[-1] - if line.magic_trailing_comma: - try: - penultimate, last = last_two_except(line.leaves, omit=omit_on_explode) - except LookupError: - # Turns out we'd omit everything. We cannot skip the optional parentheses. - return False - - if ( - last.type == token.RPAR - or last.type == token.RBRACE - or ( - # don't use indexing for omitting optional parentheses; - # it looks weird - last.type == token.RSQB - and last.parent - and last.parent.type != syms.trailer - ) - ): - if penultimate.type in OPENING_BRACKETS: - # Empty brackets don't help. - return False - - if is_multiline_string(first): - # Additional wrapping of a multiline string in this situation is - # unnecessary. - return True - - if line.magic_trailing_comma and penultimate.type == token.COMMA: - # The rightmost non-omitted bracket pair is the one we want to explode on. 
- return True - - if _can_omit_closing_paren(line, last=last, line_length=line_length): - return True - - return False - - -def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool: - """See `can_omit_invisible_parens`.""" - remainder = False - length = 4 * line.depth - _index = -1 - for _index, leaf, leaf_length in enumerate_with_length(line): - if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: - remainder = True - if remainder: - length += leaf_length - if length > line_length: - break - - if leaf.type in OPENING_BRACKETS: - # There are brackets we can further split on. - remainder = False - - else: - # checked the entire string and line length wasn't exceeded - if len(line.leaves) == _index + 1: - return True - - return False - - -def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool: - """See `can_omit_invisible_parens`.""" - length = 4 * line.depth - seen_other_brackets = False - for _index, leaf, leaf_length in enumerate_with_length(line): - length += leaf_length - if leaf is last.opening_bracket: - if seen_other_brackets or length <= line_length: - return True - - elif leaf.type in OPENING_BRACKETS: - # There are brackets we can further split on. - seen_other_brackets = True - - return False - - -def last_two_except(leaves: List[Leaf], omit: Collection[LeafID]) -> Tuple[Leaf, Leaf]: - """Return (penultimate, last) leaves skipping brackets in `omit` and contents.""" - stop_after = None - last = None - for leaf in reversed(leaves): - if stop_after: - if leaf is stop_after: - stop_after = None - continue - - if last: - return leaf, last - - if id(leaf) in omit: - stop_after = leaf.opening_bracket - else: - last = leaf - else: - raise LookupError("Last two leaves were also skipped") - - -def run_transformer( - line: Line, - transform: Transformer, - mode: Mode, - features: Collection[Feature], - *, - line_str: str = "", -) -> List[Line]: - if not line_str: - line_str = line_to_string(line) - result: List[Line] = [] - for transformed_line in transform(line, features): - if str(transformed_line).strip("\n") == line_str: - raise CannotTransform("Line transformer returned an unchanged result") - - result.extend(transform_line(transformed_line, mode=mode, features=features)) - - if not ( - transform.__name__ == "rhs" - and line.bracket_tracker.invisible - and not any(bracket.value for bracket in line.bracket_tracker.invisible) - and not line.contains_multiline_strings() - and not result[0].contains_uncollapsable_type_comments() - and not result[0].contains_unsplittable_type_ignore() - and not is_line_short_enough(result[0], line_length=mode.line_length) - ): - return result - - line_copy = line.clone() - append_leaves(line_copy, line, line.leaves) - features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES} - second_opinion = run_transformer( - line_copy, transform, mode, features_fop, line_str=line_str - ) - if all( - is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion - ): - result = second_opinion - return result - - -def get_cache_file(mode: Mode) -> Path: - return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle" - - -def read_cache(mode: Mode) -> Cache: - """Read the cache if it exists and is well formed. - - If it is not well formed, the call to write_cache later should resolve the issue. 
- """ - cache_file = get_cache_file(mode) - if not cache_file.exists(): - return {} - - with cache_file.open("rb") as fobj: - try: - cache: Cache = pickle.load(fobj) - except (pickle.UnpicklingError, ValueError): - return {} - - return cache - - -def get_cache_info(path: Path) -> CacheInfo: - """Return the information used to check if a file is already formatted or not.""" - stat = path.stat() - return stat.st_mtime, stat.st_size - - -def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]: - """Split an iterable of paths in `sources` into two sets. - - The first contains paths of files that modified on disk or are not in the - cache. The other contains paths to non-modified files. - """ - todo, done = set(), set() - for src in sources: - res_src = src.resolve() - if cache.get(str(res_src)) != get_cache_info(res_src): - todo.add(src) - else: - done.add(src) - return todo, done - - -def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None: - """Update the cache file.""" - cache_file = get_cache_file(mode) - try: - CACHE_DIR.mkdir(parents=True, exist_ok=True) - new_cache = { - **cache, - **{str(src.resolve()): get_cache_info(src) for src in sources}, - } - with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f: - pickle.dump(new_cache, f, protocol=4) - os.replace(f.name, cache_file) - except OSError: - pass - - def patch_click() -> None: - """Make Click not crash. + """Make Click not crash on Python 3.6 with LANG=C. On certain misconfigured environments, Python 3 selects the ASCII encoding as the default which restricts paths that it can access during the lifetime of the @@ -6994,82 +1364,39 @@ def patch_click() -> None: file paths is minimal since it's Python source code. Moreover, this crash was spurious on Python 3.7 thanks to PEP 538 and PEP 540. """ + modules: List[Any] = [] try: from click import core + except ImportError: + pass + else: + modules.append(core) + try: + # Removed in Click 8.1.0 and newer; we keep this around for users who have + # older versions installed. from click import _unicodefun # type: ignore - except ModuleNotFoundError: - return + except ImportError: + pass + else: + modules.append(_unicodefun) - for module in (core, _unicodefun): + for module in modules: if hasattr(module, "_verify_python3_env"): - module._verify_python3_env = lambda: None + module._verify_python3_env = lambda: None # type: ignore + if hasattr(module, "_verify_python_env"): + module._verify_python_env = lambda: None # type: ignore def patched_main() -> None: - freeze_support() - patch_click() - main() - - -def is_docstring(leaf: Leaf) -> bool: - if prev_siblings_are( - leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt] - ): - return True - - # Multiline docstring on the same line as the `def`. - if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]): - # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python - # grammar. We're safe to return True without further checks. - return True + # PyInstaller patches multiprocessing to need freeze_support() even in non-Windows + # environments so just assume we always need to call it if frozen. 
+ if getattr(sys, "frozen", False): + from multiprocessing import freeze_support - return False + freeze_support() - -def lines_with_leading_tabs_expanded(s: str) -> List[str]: - """ - Splits string into lines and expands only leading tabs (following the normal - Python rules) - """ - lines = [] - for line in s.splitlines(): - # Find the index of the first non-whitespace character after a string of - # whitespace that includes at least one tab - match = re.match(r"\s*\t+\s*(\S)", line) - if match: - first_non_whitespace_idx = match.start(1) - - lines.append( - line[:first_non_whitespace_idx].expandtabs() - + line[first_non_whitespace_idx:] - ) - else: - lines.append(line) - return lines - - -def fix_docstring(docstring: str, prefix: str) -> str: - # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation - if not docstring: - return "" - lines = lines_with_leading_tabs_expanded(docstring) - # Determine minimum indentation (first line doesn't count): - indent = sys.maxsize - for line in lines[1:]: - stripped = line.lstrip() - if stripped: - indent = min(indent, len(line) - len(stripped)) - # Remove indentation (first line is special): - trimmed = [lines[0].strip()] - if indent < sys.maxsize: - last_line_idx = len(lines) - 2 - for i, line in enumerate(lines[1:]): - stripped_line = line[indent:].rstrip() - if stripped_line or i == last_line_idx: - trimmed.append(prefix + stripped_line) - else: - trimmed.append("") - return "\n".join(trimmed) + patch_click() + main() if __name__ == "__main__": diff --git a/src/black/brackets.py b/src/black/brackets.py new file mode 100644 index 0000000..3566f5b --- /dev/null +++ b/src/black/brackets.py @@ -0,0 +1,342 @@ +"""Builds on top of nodes.py to track brackets.""" + +import sys +from dataclasses import dataclass, field +from typing import Dict, Iterable, List, Optional, Tuple, Union + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from black.nodes import ( + BRACKET, + CLOSING_BRACKETS, + COMPARATORS, + LOGIC_OPERATORS, + MATH_OPERATORS, + OPENING_BRACKETS, + UNPACKING_PARENTS, + VARARGS_PARENTS, + is_vararg, + syms, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LN = Union[Leaf, Node] +Depth = int +LeafID = int +NodeType = int +Priority = int + + +COMPREHENSION_PRIORITY: Final = 20 +COMMA_PRIORITY: Final = 18 +TERNARY_PRIORITY: Final = 16 +LOGIC_PRIORITY: Final = 14 +STRING_PRIORITY: Final = 12 +COMPARATOR_PRIORITY: Final = 10 +MATH_PRIORITIES: Final = { + token.VBAR: 9, + token.CIRCUMFLEX: 8, + token.AMPER: 7, + token.LEFTSHIFT: 6, + token.RIGHTSHIFT: 6, + token.PLUS: 5, + token.MINUS: 5, + token.STAR: 4, + token.SLASH: 4, + token.DOUBLESLASH: 4, + token.PERCENT: 4, + token.AT: 4, + token.TILDE: 3, + token.DOUBLESTAR: 2, +} +DOT_PRIORITY: Final = 1 + + +class BracketMatchError(Exception): + """Raised when an opening bracket is unable to be matched to a closing bracket.""" + + +@dataclass +class BracketTracker: + """Keeps track of brackets on a line.""" + + depth: int = 0 + bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict) + delimiters: Dict[LeafID, Priority] = field(default_factory=dict) + previous: Optional[Leaf] = None + _for_loop_depths: List[int] = field(default_factory=list) + _lambda_argument_depths: List[int] = field(default_factory=list) + invisible: List[Leaf] = field(default_factory=list) + + def mark(self, leaf: Leaf) -> None: + """Mark `leaf` with bracket-related metadata. 
Keep track of delimiters.
+
+        All leaves receive an int `bracket_depth` field that stores how deep
+        within brackets a given leaf is. 0 means there are no enclosing brackets
+        that started on this line.
+
+        If a leaf is itself a closing bracket, it receives an `opening_bracket`
+        field that it forms a pair with. This is a one-directional link to
+        avoid reference cycles.
+
+        If a leaf is a delimiter (a token on which Black can split the line if
+        needed) and it's on depth 0, its `id()` is stored in the tracker's
+        `delimiters` field.
+        """
+        if leaf.type == token.COMMENT:
+            return
+
+        self.maybe_decrement_after_for_loop_variable(leaf)
+        self.maybe_decrement_after_lambda_arguments(leaf)
+        if leaf.type in CLOSING_BRACKETS:
+            self.depth -= 1
+            try:
+                opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
+            except KeyError as e:
+                raise BracketMatchError(
+                    "Unable to match a closing bracket to the following opening"
+                    f" bracket: {leaf}"
+                ) from e
+            leaf.opening_bracket = opening_bracket
+            if not leaf.value:
+                self.invisible.append(leaf)
+        leaf.bracket_depth = self.depth
+        if self.depth == 0:
+            delim = is_split_before_delimiter(leaf, self.previous)
+            if delim and self.previous is not None:
+                self.delimiters[id(self.previous)] = delim
+            else:
+                delim = is_split_after_delimiter(leaf, self.previous)
+                if delim:
+                    self.delimiters[id(leaf)] = delim
+        if leaf.type in OPENING_BRACKETS:
+            self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
+            self.depth += 1
+            if not leaf.value:
+                self.invisible.append(leaf)
+        self.previous = leaf
+        self.maybe_increment_lambda_arguments(leaf)
+        self.maybe_increment_for_loop_variable(leaf)
+
+    def any_open_brackets(self) -> bool:
+        """Return True if there is an as-yet unmatched open bracket on the line."""
+        return bool(self.bracket_match)
+
+    def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
+        """Return the highest priority of a delimiter found on the line.
+
+        Values are consistent with what `is_split_*_delimiter()` return.
+        Raises ValueError on no delimiters.
+        """
+        return max(v for k, v in self.delimiters.items() if k not in exclude)
+
+    def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
+        """Return the number of delimiters with the given `priority`.
+
+        If no `priority` is passed, defaults to max priority on the line.
+        """
+        if not self.delimiters:
+            return 0
+
+        priority = priority or self.max_delimiter_priority()
+        return sum(1 for p in self.delimiters.values() if p == priority)
+
+    def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
+        """In a for loop, or comprehension, the variables are often unpacked.
+
+        To avoid splitting on the comma in this situation, increase the depth of
+        tokens between `for` and `in`.
+        """
+        if leaf.type == token.NAME and leaf.value == "for":
+            self.depth += 1
+            self._for_loop_depths.append(self.depth)
+            return True
+
+        return False
+
+    def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
+        """See `maybe_increment_for_loop_variable` above for explanation."""
+        if (
+            self._for_loop_depths
+            and self._for_loop_depths[-1] == self.depth
+            and leaf.type == token.NAME
+            and leaf.value == "in"
+        ):
+            self.depth -= 1
+            self._for_loop_depths.pop()
+            return True
+
+        return False
+
+    def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
+        """In a lambda expression, there might be more than one argument.
+
+        To avoid splitting on the comma in this situation, increase the depth of
+        tokens between `lambda` and `:`.
+ """ + if leaf.type == token.NAME and leaf.value == "lambda": + self.depth += 1 + self._lambda_argument_depths.append(self.depth) + return True + + return False + + def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool: + """See `maybe_increment_lambda_arguments` above for explanation.""" + if ( + self._lambda_argument_depths + and self._lambda_argument_depths[-1] == self.depth + and leaf.type == token.COLON + ): + self.depth -= 1 + self._lambda_argument_depths.pop() + return True + + return False + + def get_open_lsqb(self) -> Optional[Leaf]: + """Return the most recent opening square bracket (if any).""" + return self.bracket_match.get((self.depth - 1, token.RSQB)) + + +def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: + """Return the priority of the `leaf` delimiter, given a line break after it. + + The delimiter priorities returned here are from those delimiters that would + cause a line break after themselves. + + Higher numbers are higher priority. + """ + if leaf.type == token.COMMA: + return COMMA_PRIORITY + + return 0 + + +def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: + """Return the priority of the `leaf` delimiter, given a line break before it. + + The delimiter priorities returned here are from those delimiters that would + cause a line break before themselves. + + Higher numbers are higher priority. + """ + if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS): + # * and ** might also be MATH_OPERATORS but in this case they are not. + # Don't treat them as a delimiter. + return 0 + + if ( + leaf.type == token.DOT + and leaf.parent + and leaf.parent.type not in {syms.import_from, syms.dotted_name} + and (previous is None or previous.type in CLOSING_BRACKETS) + ): + return DOT_PRIORITY + + if ( + leaf.type in MATH_OPERATORS + and leaf.parent + and leaf.parent.type not in {syms.factor, syms.star_expr} + ): + return MATH_PRIORITIES[leaf.type] + + if leaf.type in COMPARATORS: + return COMPARATOR_PRIORITY + + if ( + leaf.type == token.STRING + and previous is not None + and previous.type == token.STRING + ): + return STRING_PRIORITY + + if leaf.type not in {token.NAME, token.ASYNC}: + return 0 + + if ( + leaf.value == "for" + and leaf.parent + and leaf.parent.type in {syms.comp_for, syms.old_comp_for} + or leaf.type == token.ASYNC + ): + if ( + not isinstance(leaf.prev_sibling, Leaf) + or leaf.prev_sibling.value != "async" + ): + return COMPREHENSION_PRIORITY + + if ( + leaf.value == "if" + and leaf.parent + and leaf.parent.type in {syms.comp_if, syms.old_comp_if} + ): + return COMPREHENSION_PRIORITY + + if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test: + return TERNARY_PRIORITY + + if leaf.value == "is": + return COMPARATOR_PRIORITY + + if ( + leaf.value == "in" + and leaf.parent + and leaf.parent.type in {syms.comp_op, syms.comparison} + and not ( + previous is not None + and previous.type == token.NAME + and previous.value == "not" + ) + ): + return COMPARATOR_PRIORITY + + if ( + leaf.value == "not" + and leaf.parent + and leaf.parent.type == syms.comp_op + and not ( + previous is not None + and previous.type == token.NAME + and previous.value == "is" + ) + ): + return COMPARATOR_PRIORITY + + if leaf.value in LOGIC_OPERATORS and leaf.parent: + return LOGIC_PRIORITY + + return 0 + + +def max_delimiter_priority_in_atom(node: LN) -> Priority: + """Return maximum delimiter priority inside `node`. 
+
+    This is specific to atoms with contents contained in a pair of parentheses.
+    If `node` isn't an atom or there are no enclosing parentheses, returns 0.
+    """
+    if node.type != syms.atom:
+        return 0
+
+    first = node.children[0]
+    last = node.children[-1]
+    if not (first.type == token.LPAR and last.type == token.RPAR):
+        return 0
+
+    bt = BracketTracker()
+    for c in node.children[1:-1]:
+        if isinstance(c, Leaf):
+            bt.mark(c)
+        else:
+            for leaf in c.leaves():
+                bt.mark(leaf)
+    try:
+        return bt.max_delimiter_priority()
+
+    except ValueError:
+        return 0
diff --git a/src/black/cache.py b/src/black/cache.py
new file mode 100644
index 0000000..9455ff4
--- /dev/null
+++ b/src/black/cache.py
@@ -0,0 +1,97 @@
+"""Caching of formatted files with feature-based invalidation."""
+
+import os
+import pickle
+import tempfile
+from pathlib import Path
+from typing import Dict, Iterable, Set, Tuple
+
+from platformdirs import user_cache_dir
+
+from _black_version import version as __version__
+from black.mode import Mode
+
+# types
+Timestamp = float
+FileSize = int
+CacheInfo = Tuple[Timestamp, FileSize]
+Cache = Dict[str, CacheInfo]
+
+
+def get_cache_dir() -> Path:
+    """Get the cache directory used by black.
+
+    Users can customize this directory on all systems using the `BLACK_CACHE_DIR`
+    environment variable. By default, the cache directory is the user cache directory
+    under the black application.
+
+    This result is immediately set to a constant `black.cache.CACHE_DIR` so as to avoid
+    repeated calls.
+    """
+    # NOTE: Function mostly exists as a clean way to test getting the cache directory.
+    default_cache_dir = user_cache_dir("black", version=__version__)
+    cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir))
+    return cache_dir
+
+
+CACHE_DIR = get_cache_dir()
+
+
+def read_cache(mode: Mode) -> Cache:
+    """Read the cache if it exists and is well formed.
+
+    If it is not well formed, the call to write_cache later should resolve the issue.
+    """
+    cache_file = get_cache_file(mode)
+    if not cache_file.exists():
+        return {}
+
+    with cache_file.open("rb") as fobj:
+        try:
+            cache: Cache = pickle.load(fobj)
+        except (pickle.UnpicklingError, ValueError, IndexError):
+            return {}
+
+    return cache
+
+
+def get_cache_file(mode: Mode) -> Path:
+    return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
+
+
+def get_cache_info(path: Path) -> CacheInfo:
+    """Return the information used to check if a file is already formatted or not."""
+    stat = path.stat()
+    return stat.st_mtime, stat.st_size
+
+
+def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
+    """Split an iterable of paths in `sources` into two sets.
+
+    The first contains paths of files that were modified on disk or are not in the
+    cache. The other contains paths to non-modified files.
+ """ + todo, done = set(), set() + for src in sources: + res_src = src.resolve() + if cache.get(str(res_src)) != get_cache_info(res_src): + todo.add(src) + else: + done.add(src) + return todo, done + + +def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None: + """Update the cache file.""" + cache_file = get_cache_file(mode) + try: + CACHE_DIR.mkdir(parents=True, exist_ok=True) + new_cache = { + **cache, + **{str(src.resolve()): get_cache_info(src) for src in sources}, + } + with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f: + pickle.dump(new_cache, f, protocol=4) + os.replace(f.name, cache_file) + except OSError: + pass diff --git a/src/black/comments.py b/src/black/comments.py new file mode 100644 index 0000000..dce83ab --- /dev/null +++ b/src/black/comments.py @@ -0,0 +1,332 @@ +import re +import sys +from dataclasses import dataclass +from functools import lru_cache +from typing import Iterator, List, Optional, Union + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + +from black.nodes import ( + CLOSING_BRACKETS, + STANDALONE_COMMENT, + WHITESPACE, + container_of, + first_leaf_of, + preceding_leaf, + syms, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LN = Union[Leaf, Node] + +FMT_OFF: Final = {"# fmt: off", "# fmt:off", "# yapf: disable"} +FMT_SKIP: Final = {"# fmt: skip", "# fmt:skip"} +FMT_PASS: Final = {*FMT_OFF, *FMT_SKIP} +FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"} + +COMMENT_EXCEPTIONS = {True: " !:#'", False: " !:#'%"} + + +@dataclass +class ProtoComment: + """Describes a piece of syntax that is a comment. + + It's not a :class:`blib2to3.pytree.Leaf` so that: + + * it can be cached (`Leaf` objects should not be reused more than once as + they store their lineno, column, prefix, and parent information); + * `newlines` and `consumed` fields are kept separate from the `value`. This + simplifies handling of special marker comments like ``# fmt: off/on``. + """ + + type: int # token.COMMENT or STANDALONE_COMMENT + value: str # content of the comment + newlines: int # how many newlines before the comment + consumed: int # how many characters of the original leaf's prefix did we consume + + +def generate_comments(leaf: LN, *, preview: bool) -> Iterator[Leaf]: + """Clean the prefix of the `leaf` and generate comments from it, if any. + + Comments in lib2to3 are shoved into the whitespace prefix. This happens + in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation + move because it does away with modifying the grammar to include all the + possible places in which comments can be placed. + + The sad consequence for us though is that comments don't "belong" anywhere. + This is why this function generates simple parentless Leaf objects for + comments. We simply don't know what the correct parent should be. + + No matter though, we can live without this. We really only need to + differentiate between inline and standalone comments. The latter don't + share the line with any code. + + Inline comments are emitted as regular token.COMMENT leaves. Standalone + are emitted with a fake STANDALONE_COMMENT token identifier. 
+ """ + for pc in list_comments( + leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER, preview=preview + ): + yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines) + + +@lru_cache(maxsize=4096) +def list_comments( + prefix: str, *, is_endmarker: bool, preview: bool +) -> List[ProtoComment]: + """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`.""" + result: List[ProtoComment] = [] + if not prefix or "#" not in prefix: + return result + + consumed = 0 + nlines = 0 + ignored_lines = 0 + for index, line in enumerate(re.split("\r?\n", prefix)): + consumed += len(line) + 1 # adding the length of the split '\n' + line = line.lstrip() + if not line: + nlines += 1 + if not line.startswith("#"): + # Escaped newlines outside of a comment are not really newlines at + # all. We treat a single-line comment following an escaped newline + # as a simple trailing comment. + if line.endswith("\\"): + ignored_lines += 1 + continue + + if index == ignored_lines and not is_endmarker: + comment_type = token.COMMENT # simple trailing comment + else: + comment_type = STANDALONE_COMMENT + comment = make_comment(line, preview=preview) + result.append( + ProtoComment( + type=comment_type, value=comment, newlines=nlines, consumed=consumed + ) + ) + nlines = 0 + return result + + +def make_comment(content: str, *, preview: bool) -> str: + """Return a consistently formatted comment from the given `content` string. + + All comments (except for "##", "#!", "#:", '#'") should have a single + space between the hash sign and the content. + + If `content` didn't start with a hash sign, one is provided. + """ + content = content.rstrip() + if not content: + return "#" + + if content[0] == "#": + content = content[1:] + NON_BREAKING_SPACE = " " + if ( + content + and content[0] == NON_BREAKING_SPACE + and not content.lstrip().startswith("type:") + ): + content = " " + content[1:] # Replace NBSP by a simple space + if content and content[0] not in COMMENT_EXCEPTIONS[preview]: + content = " " + content + return "#" + content + + +def normalize_fmt_off(node: Node, *, preview: bool) -> None: + """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.""" + try_again = True + while try_again: + try_again = convert_one_fmt_off_pair(node, preview=preview) + + +def convert_one_fmt_off_pair(node: Node, *, preview: bool) -> bool: + """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment. + + Returns True if a pair was converted. + """ + for leaf in node.leaves(): + previous_consumed = 0 + for comment in list_comments(leaf.prefix, is_endmarker=False, preview=preview): + if comment.value not in FMT_PASS: + previous_consumed = comment.consumed + continue + # We only want standalone comments. If there's no previous leaf or + # the previous leaf is indentation, it's a standalone comment in + # disguise. + if comment.value in FMT_PASS and comment.type != STANDALONE_COMMENT: + prev = preceding_leaf(leaf) + if prev: + if comment.value in FMT_OFF and prev.type not in WHITESPACE: + continue + if comment.value in FMT_SKIP and prev.type in WHITESPACE: + continue + + ignored_nodes = list(generate_ignored_nodes(leaf, comment, preview=preview)) + if not ignored_nodes: + continue + + first = ignored_nodes[0] # Can be a container node with the `leaf`. 
+            parent = first.parent
+            prefix = first.prefix
+            if comment.value in FMT_OFF:
+                first.prefix = prefix[comment.consumed :]
+            if comment.value in FMT_SKIP:
+                first.prefix = ""
+                standalone_comment_prefix = prefix
+            else:
+                standalone_comment_prefix = (
+                    prefix[:previous_consumed] + "\n" * comment.newlines
+                )
+            hidden_value = "".join(str(n) for n in ignored_nodes)
+            if comment.value in FMT_OFF:
+                hidden_value = comment.value + "\n" + hidden_value
+            if comment.value in FMT_SKIP:
+                hidden_value += " " + comment.value
+            if hidden_value.endswith("\n"):
+                # That happens when one of the `ignored_nodes` ended with a NEWLINE
+                # leaf (possibly followed by a DEDENT).
+                hidden_value = hidden_value[:-1]
+            first_idx: Optional[int] = None
+            for ignored in ignored_nodes:
+                index = ignored.remove()
+                if first_idx is None:
+                    first_idx = index
+            assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
+            assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
+            parent.insert_child(
+                first_idx,
+                Leaf(
+                    STANDALONE_COMMENT,
+                    hidden_value,
+                    prefix=standalone_comment_prefix,
+                ),
+            )
+            return True
+
+    return False
+
+
+def generate_ignored_nodes(
+    leaf: Leaf, comment: ProtoComment, *, preview: bool
+) -> Iterator[LN]:
+    """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
+
+    If the comment is `# fmt: skip`, returns `leaf` only.
+    Stops at the end of the block.
+    """
+    if comment.value in FMT_SKIP:
+        yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment, preview=preview)
+        return
+    container: Optional[LN] = container_of(leaf)
+    while container is not None and container.type != token.ENDMARKER:
+        if is_fmt_on(container, preview=preview):
+            return
+
+        # fix for fmt: on in children
+        if children_contains_fmt_on(container, preview=preview):
+            for child in container.children:
+                if isinstance(child, Leaf) and is_fmt_on(child, preview=preview):
+                    if child.type in CLOSING_BRACKETS:
+                        # This means `# fmt: on` is placed at a different bracket level
+                        # than `# fmt: off`. This is an invalid use, but as a courtesy,
+                        # we include this closing bracket in the ignored nodes.
+                        # The alternative is to fail the formatting.
+                        yield child
+                        return
+                if children_contains_fmt_on(child, preview=preview):
+                    return
+                yield child
+        else:
+            if container.type == token.DEDENT and container.next_sibling is None:
+                # This can happen when there is no matching `# fmt: on` comment at the
+                # same level as `# fmt: off`. We need to keep this DEDENT.
+                return
+            yield container
+            container = container.next_sibling
+
+
+def _generate_ignored_nodes_from_fmt_skip(
+    leaf: Leaf, comment: ProtoComment, *, preview: bool
+) -> Iterator[LN]:
+    """Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`."""
+    prev_sibling = leaf.prev_sibling
+    parent = leaf.parent
+    # Need to properly format the leaf prefix to compare it to comment.value,
+    # which is also formatted
+    comments = list_comments(leaf.prefix, is_endmarker=False, preview=preview)
+    if not comments or comment.value != comments[0].value:
+        return
+    if prev_sibling is not None:
+        leaf.prefix = ""
+        siblings = [prev_sibling]
+        while "\n" not in prev_sibling.prefix and prev_sibling.prev_sibling is not None:
+            prev_sibling = prev_sibling.prev_sibling
+            siblings.insert(0, prev_sibling)
+        yield from siblings
+    elif (
+        parent is not None and parent.type == syms.suite and leaf.type == token.NEWLINE
+    ):
+        # The `# fmt: skip` is on the colon line of the if/while/def/class/...
+        # statements.
The ignored nodes should be previous siblings of the + # parent suite node. + leaf.prefix = "" + ignored_nodes: List[LN] = [] + parent_sibling = parent.prev_sibling + while parent_sibling is not None and parent_sibling.type != syms.suite: + ignored_nodes.insert(0, parent_sibling) + parent_sibling = parent_sibling.prev_sibling + # Special case for `async_stmt` where the ASYNC token is on the + # grandparent node. + grandparent = parent.parent + if ( + grandparent is not None + and grandparent.prev_sibling is not None + and grandparent.prev_sibling.type == token.ASYNC + ): + ignored_nodes.insert(0, grandparent.prev_sibling) + yield from iter(ignored_nodes) + + +def is_fmt_on(container: LN, preview: bool) -> bool: + """Determine whether formatting is switched on within a container. + Determined by whether the last `# fmt:` comment is `on` or `off`. + """ + fmt_on = False + for comment in list_comments(container.prefix, is_endmarker=False, preview=preview): + if comment.value in FMT_ON: + fmt_on = True + elif comment.value in FMT_OFF: + fmt_on = False + return fmt_on + + +def children_contains_fmt_on(container: LN, *, preview: bool) -> bool: + """Determine if children have formatting switched on.""" + for child in container.children: + leaf = first_leaf_of(child) + if leaf is not None and is_fmt_on(leaf, preview=preview): + return True + + return False + + +def contains_pragma_comment(comment_list: List[Leaf]) -> bool: + """ + Returns: + True iff one of the comments in @comment_list is a pragma used by one + of the more common static analysis tools for python (e.g. mypy, flake8, + pylint). + """ + for comment in comment_list: + if comment.value.startswith(("# type:", "# noqa", "# pylint:")): + return True + + return False diff --git a/src/black/concurrency.py b/src/black/concurrency.py new file mode 100644 index 0000000..10e288f --- /dev/null +++ b/src/black/concurrency.py @@ -0,0 +1,191 @@ +""" +Formatting many files at once via multiprocessing. Contains entrypoint and utilities. + +NOTE: this module is only imported if we need to format several files at once. +""" + +import asyncio +import logging +import os +import signal +import sys +from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor +from multiprocessing import Manager +from pathlib import Path +from typing import Any, Iterable, Optional, Set + +from mypy_extensions import mypyc_attr + +from black import WriteBack, format_file_in_place +from black.cache import Cache, filter_cached, read_cache, write_cache +from black.mode import Mode +from black.output import err +from black.report import Changed, Report + + +def maybe_install_uvloop() -> None: + """If our environment has uvloop installed we use it. + + This is called only from command-line entry points to avoid + interfering with the parent process if Black is used as a library. + """ + try: + import uvloop + + uvloop.install() + except ImportError: + pass + + +def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None: + """asyncio signal handler that cancels all `tasks` and reports to stderr.""" + err("Aborted!") + for task in tasks: + task.cancel() + + +def shutdown(loop: asyncio.AbstractEventLoop) -> None: + """Cancel all pending tasks on `loop`, wait for them, and close the loop.""" + try: + if sys.version_info[:2] >= (3, 7): + all_tasks = asyncio.all_tasks + else: + all_tasks = asyncio.Task.all_tasks + # This part is borrowed from asyncio/runners.py in Python 3.7b2. 
+        to_cancel = [task for task in all_tasks(loop) if not task.done()]
+        if not to_cancel:
+            return
+
+        for task in to_cancel:
+            task.cancel()
+        loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
+    finally:
+        # `concurrent.futures.Future` objects cannot be cancelled once they
+        # are already running. There might be some when the `shutdown()` happened.
+        # Silence their logger's spew about the event loop being closed.
+        cf_logger = logging.getLogger("concurrent.futures")
+        cf_logger.setLevel(logging.CRITICAL)
+        loop.close()
+
+
+# diff-shades depends on being able to monkeypatch this function to operate. I know
+# it's not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
+@mypyc_attr(patchable=True)
+def reformat_many(
+    sources: Set[Path],
+    fast: bool,
+    write_back: WriteBack,
+    mode: Mode,
+    report: Report,
+    workers: Optional[int],
+) -> None:
+    """Reformat multiple files using a ProcessPoolExecutor."""
+    maybe_install_uvloop()
+
+    executor: Executor
+    if workers is None:
+        workers = os.cpu_count() or 1
+    if sys.platform == "win32":
+        # Work around https://bugs.python.org/issue26903
+        workers = min(workers, 60)
+    try:
+        executor = ProcessPoolExecutor(max_workers=workers)
+    except (ImportError, NotImplementedError, OSError):
+        # we arrive here if the underlying system does not support multi-processing
+        # like in AWS Lambda or Termux, in which case we gracefully fall back to
+        # a ThreadPoolExecutor with just a single worker (more workers would not do us
+        # any good due to the Global Interpreter Lock)
+        executor = ThreadPoolExecutor(max_workers=1)
+
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+    try:
+        loop.run_until_complete(
+            schedule_formatting(
+                sources=sources,
+                fast=fast,
+                write_back=write_back,
+                mode=mode,
+                report=report,
+                loop=loop,
+                executor=executor,
+            )
+        )
+    finally:
+        try:
+            shutdown(loop)
+        finally:
+            asyncio.set_event_loop(None)
+        if executor is not None:
+            executor.shutdown()
+
+
+async def schedule_formatting(
+    sources: Set[Path],
+    fast: bool,
+    write_back: WriteBack,
+    mode: Mode,
+    report: "Report",
+    loop: asyncio.AbstractEventLoop,
+    executor: "Executor",
+) -> None:
+    """Run formatting of `sources` in parallel using the provided `executor`.
+
+    (Use ProcessPoolExecutors for actual parallelism.)
+
+    `write_back`, `fast`, and `mode` options are passed to
+    :func:`format_file_in_place`.
+    """
+    cache: Cache = {}
+    if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
+        cache = read_cache(mode)
+        sources, cached = filter_cached(cache, sources)
+        for src in sorted(cached):
+            report.done(src, Changed.CACHED)
+    if not sources:
+        return
+
+    cancelled = []
+    sources_to_cache = []
+    lock = None
+    if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
+        # For diff output, we need locks to ensure we don't interleave output
+        # from different processes.
+        manager = Manager()
+        lock = manager.Lock()
+    tasks = {
+        asyncio.ensure_future(
+            loop.run_in_executor(
+                executor, format_file_in_place, src, fast, mode, write_back, lock
+            )
+        ): src
+        for src in sorted(sources)
+    }
+    pending = tasks.keys()
+    try:
+        loop.add_signal_handler(signal.SIGINT, cancel, pending)
+        loop.add_signal_handler(signal.SIGTERM, cancel, pending)
+    except NotImplementedError:
+        # There are no good alternatives for these on Windows.
+ pass + while pending: + done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) + for task in done: + src = tasks.pop(task) + if task.cancelled(): + cancelled.append(task) + elif task.exception(): + report.failed(src, str(task.exception())) + else: + changed = Changed.YES if task.result() else Changed.NO + # If the file was written back or was successfully checked as + # well-formatted, store this information in the cache. + if write_back is WriteBack.YES or ( + write_back is WriteBack.CHECK and changed is Changed.NO + ): + sources_to_cache.append(src) + report.done(src, changed) + if cancelled: + await asyncio.gather(*cancelled, return_exceptions=True) + if sources_to_cache: + write_cache(cache, sources_to_cache, mode) diff --git a/src/black/const.py b/src/black/const.py new file mode 100644 index 0000000..0e13f31 --- /dev/null +++ b/src/black/const.py @@ -0,0 +1,4 @@ +DEFAULT_LINE_LENGTH = 88 +DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.svn|\.ipynb_checkpoints|_build|buck-out|build|dist|__pypackages__)/" # noqa: B950 +DEFAULT_INCLUDES = r"(\.pyi?|\.ipynb)$" +STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__" diff --git a/src/black/debug.py b/src/black/debug.py new file mode 100644 index 0000000..150b448 --- /dev/null +++ b/src/black/debug.py @@ -0,0 +1,47 @@ +from dataclasses import dataclass +from typing import Iterator, TypeVar, Union + +from black.nodes import Visitor +from black.output import out +from black.parsing import lib2to3_parse +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node, type_repr + +LN = Union[Leaf, Node] +T = TypeVar("T") + + +@dataclass +class DebugVisitor(Visitor[T]): + tree_depth: int = 0 + + def visit_default(self, node: LN) -> Iterator[T]: + indent = " " * (2 * self.tree_depth) + if isinstance(node, Node): + _type = type_repr(node.type) + out(f"{indent}{_type}", fg="yellow") + self.tree_depth += 1 + for child in node.children: + yield from self.visit(child) + + self.tree_depth -= 1 + out(f"{indent}/{_type}", fg="yellow", bold=False) + else: + _type = token.tok_name.get(node.type, str(node.type)) + out(f"{indent}{_type}", fg="blue", nl=False) + if node.prefix: + # We don't have to handle prefixes for `Node` objects since + # that delegates to the first child anyway. + out(f" {node.prefix!r}", fg="green", bold=False, nl=False) + out(f" {node.value!r}", fg="blue", bold=False) + + @classmethod + def show(cls, code: Union[str, Leaf, Node]) -> None: + """Pretty-print the lib2to3 AST of a given string of `code`. + + Convenience method for debugging. 
+ """ + v: DebugVisitor[None] = DebugVisitor() + if isinstance(code, str): + code = lib2to3_parse(code) + list(v.visit(code)) diff --git a/src/black/files.py b/src/black/files.py new file mode 100644 index 0000000..ed503f5 --- /dev/null +++ b/src/black/files.py @@ -0,0 +1,287 @@ +import io +import os +import sys +from functools import lru_cache +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + Iterator, + List, + Optional, + Pattern, + Sequence, + Tuple, + Union, +) + +from mypy_extensions import mypyc_attr +from pathspec import PathSpec +from pathspec.patterns.gitwildmatch import GitWildMatchPatternError + +if sys.version_info >= (3, 11): + try: + import tomllib + except ImportError: + # Help users on older alphas + if not TYPE_CHECKING: + import tomli as tomllib +else: + import tomli as tomllib + +from black.handle_ipynb_magics import jupyter_dependencies_are_installed +from black.output import err +from black.report import Report + +if TYPE_CHECKING: + import colorama # noqa: F401 + + +@lru_cache() +def find_project_root( + srcs: Sequence[str], stdin_filename: Optional[str] = None +) -> Tuple[Path, str]: + """Return a directory containing .git, .hg, or pyproject.toml. + + That directory will be a common parent of all files and directories + passed in `srcs`. + + If no directory in the tree contains a marker that would specify it's the + project root, the root of the file system is returned. + + Returns a two-tuple with the first element as the project root path and + the second element as a string describing the method by which the + project root was discovered. + """ + if stdin_filename is not None: + srcs = tuple(stdin_filename if s == "-" else s for s in srcs) + if not srcs: + srcs = [str(Path.cwd().resolve())] + + path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs] + + # A list of lists of parents for each 'src'. 'src' is included as a + # "parent" of itself if it is a directory + src_parents = [ + list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs + ] + + common_base = max( + set.intersection(*(set(parents) for parents in src_parents)), + key=lambda path: path.parts, + ) + + for directory in (common_base, *common_base.parents): + if (directory / ".git").exists(): + return directory, ".git directory" + + if (directory / ".hg").is_dir(): + return directory, ".hg directory" + + if (directory / "pyproject.toml").is_file(): + return directory, "pyproject.toml" + + return directory, "file system root" + + +def find_pyproject_toml(path_search_start: Tuple[str, ...]) -> Optional[str]: + """Find the absolute filepath to a pyproject.toml if it exists""" + path_project_root, _ = find_project_root(path_search_start) + path_pyproject_toml = path_project_root / "pyproject.toml" + if path_pyproject_toml.is_file(): + return str(path_pyproject_toml) + + try: + path_user_pyproject_toml = find_user_pyproject_toml() + return ( + str(path_user_pyproject_toml) + if path_user_pyproject_toml.is_file() + else None + ) + except (PermissionError, RuntimeError) as e: + # We do not have access to the user-level config directory, so ignore it. 
+ err(f"Ignoring user configuration directory due to {e!r}") + return None + + +@mypyc_attr(patchable=True) +def parse_pyproject_toml(path_config: str) -> Dict[str, Any]: + """Parse a pyproject toml file, pulling out relevant parts for Black + + If parsing fails, will raise a tomllib.TOMLDecodeError + """ + with open(path_config, "rb") as f: + pyproject_toml = tomllib.load(f) + config = pyproject_toml.get("tool", {}).get("black", {}) + return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()} + + +@lru_cache() +def find_user_pyproject_toml() -> Path: + r"""Return the path to the top-level user configuration for black. + + This looks for ~\.black on Windows and ~/.config/black on Linux and other + Unix systems. + + May raise: + - RuntimeError: if the current user has no homedir + - PermissionError: if the current process cannot access the user's homedir + """ + if sys.platform == "win32": + # Windows + user_config_path = Path.home() / ".black" + else: + config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config") + user_config_path = Path(config_root).expanduser() / "black" + return user_config_path.resolve() + + +@lru_cache() +def get_gitignore(root: Path) -> PathSpec: + """Return a PathSpec matching gitignore content if present.""" + gitignore = root / ".gitignore" + lines: List[str] = [] + if gitignore.is_file(): + with gitignore.open(encoding="utf-8") as gf: + lines = gf.readlines() + try: + return PathSpec.from_lines("gitwildmatch", lines) + except GitWildMatchPatternError as e: + err(f"Could not parse {gitignore}: {e}") + raise + + +def normalize_path_maybe_ignore( + path: Path, + root: Path, + report: Optional[Report] = None, +) -> Optional[str]: + """Normalize `path`. May return `None` if `path` was ignored. + + `report` is where "path ignored" output goes. + """ + try: + abspath = path if path.is_absolute() else Path.cwd() / path + normalized_path = abspath.resolve() + try: + root_relative_path = normalized_path.relative_to(root).as_posix() + except ValueError: + if report: + report.path_ignored( + path, f"is a symbolic link that points outside {root}" + ) + return None + + except OSError as e: + if report: + report.path_ignored(path, f"cannot be read because {e}") + return None + + return root_relative_path + + +def path_is_excluded( + normalized_path: str, + pattern: Optional[Pattern[str]], +) -> bool: + match = pattern.search(normalized_path) if pattern else None + return bool(match and match.group(0)) + + +def gen_python_files( + paths: Iterable[Path], + root: Path, + include: Pattern[str], + exclude: Pattern[str], + extend_exclude: Optional[Pattern[str]], + force_exclude: Optional[Pattern[str]], + report: Report, + gitignore: Optional[PathSpec], + *, + verbose: bool, + quiet: bool, +) -> Iterator[Path]: + """Generate all files under `path` whose paths are not excluded by the + `exclude_regex`, `extend_exclude`, or `force_exclude` regexes, + but are included by the `include` regex. + + Symbolic links pointing outside of the `root` directory are ignored. + + `report` is where output about exclusions goes. 
+ """ + assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" + for child in paths: + normalized_path = normalize_path_maybe_ignore(child, root, report) + if normalized_path is None: + continue + + # First ignore files matching .gitignore, if passed + if gitignore is not None and gitignore.match_file(normalized_path): + report.path_ignored(child, "matches the .gitignore file content") + continue + + # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options. + normalized_path = "/" + normalized_path + if child.is_dir(): + normalized_path += "/" + + if path_is_excluded(normalized_path, exclude): + report.path_ignored(child, "matches the --exclude regular expression") + continue + + if path_is_excluded(normalized_path, extend_exclude): + report.path_ignored( + child, "matches the --extend-exclude regular expression" + ) + continue + + if path_is_excluded(normalized_path, force_exclude): + report.path_ignored(child, "matches the --force-exclude regular expression") + continue + + if child.is_dir(): + # If gitignore is None, gitignore usage is disabled, while a Falsey + # gitignore is when the directory doesn't have a .gitignore file. + yield from gen_python_files( + child.iterdir(), + root, + include, + exclude, + extend_exclude, + force_exclude, + report, + gitignore + get_gitignore(child) if gitignore is not None else None, + verbose=verbose, + quiet=quiet, + ) + + elif child.is_file(): + if child.suffix == ".ipynb" and not jupyter_dependencies_are_installed( + verbose=verbose, quiet=quiet + ): + continue + include_match = include.search(normalized_path) if include else True + if include_match: + yield child + + +def wrap_stream_for_windows( + f: io.TextIOWrapper, +) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]: + """ + Wrap stream with colorama's wrap_stream so colors are shown on Windows. + + If `colorama` is unavailable, the original stream is returned unmodified. + Otherwise, the `wrap_stream()` function determines whether the stream needs + to be wrapped for a Windows environment and will accordingly either return + an `AnsiToWin32` wrapper or the original stream. + """ + try: + from colorama.initialise import wrap_stream + except ImportError: + return f + else: + # Set `strip=False` to avoid needing to modify test_express_diff_with_color. 
+ return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True) diff --git a/src/black/handle_ipynb_magics.py b/src/black/handle_ipynb_magics.py new file mode 100644 index 0000000..693f1a6 --- /dev/null +++ b/src/black/handle_ipynb_magics.py @@ -0,0 +1,459 @@ +"""Functions to process IPython magics with.""" + +import ast +import collections +import dataclasses +import secrets +import sys +from functools import lru_cache +from typing import Dict, List, Optional, Tuple + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +from black.output import out +from black.report import NothingChanged + +TRANSFORMED_MAGICS = frozenset( + ( + "get_ipython().run_cell_magic", + "get_ipython().system", + "get_ipython().getoutput", + "get_ipython().run_line_magic", + ) +) +TOKENS_TO_IGNORE = frozenset( + ( + "ENDMARKER", + "NL", + "NEWLINE", + "COMMENT", + "DEDENT", + "UNIMPORTANT_WS", + "ESCAPED_NL", + ) +) +PYTHON_CELL_MAGICS = frozenset( + ( + "capture", + "prun", + "pypy", + "python", + "python3", + "time", + "timeit", + ) +) +TOKEN_HEX = secrets.token_hex + + +@dataclasses.dataclass(frozen=True) +class Replacement: + mask: str + src: str + + +@lru_cache() +def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool: + try: + import IPython # noqa:F401 + import tokenize_rt # noqa:F401 + except ModuleNotFoundError: + if verbose or not quiet: + msg = ( + "Skipping .ipynb files as Jupyter dependencies are not installed.\n" + "You can fix this by running ``pip install black[jupyter]``" + ) + out(msg) + return False + else: + return True + + +def remove_trailing_semicolon(src: str) -> Tuple[str, bool]: + """Remove trailing semicolon from Jupyter notebook cell. + + For example, + + fig, ax = plt.subplots() + ax.plot(x_data, y_data); # plot data + + would become + + fig, ax = plt.subplots() + ax.plot(x_data, y_data) # plot data + + Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses + ``tokenize_rt`` so that round-tripping works fine. + """ + from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src + + tokens = src_to_tokens(src) + trailing_semicolon = False + for idx, token in reversed_enumerate(tokens): + if token.name in TOKENS_TO_IGNORE: + continue + if token.name == "OP" and token.src == ";": + del tokens[idx] + trailing_semicolon = True + break + if not trailing_semicolon: + return src, False + return tokens_to_src(tokens), True + + +def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str: + """Put trailing semicolon back if cell originally had it. + + Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses + ``tokenize_rt`` so that round-tripping works fine. + """ + if not has_trailing_semicolon: + return src + from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src + + tokens = src_to_tokens(src) + for idx, token in reversed_enumerate(tokens): + if token.name in TOKENS_TO_IGNORE: + continue + tokens[idx] = token._replace(src=token.src + ";") + break + else: # pragma: nocover + raise AssertionError( + "INTERNAL ERROR: Was not able to reinstate trailing semicolon. " + "Please report a bug on https://github.com/psf/black/issues. " + ) from None + return str(tokens_to_src(tokens)) + + +def mask_cell(src: str) -> Tuple[str, List[Replacement]]: + """Mask IPython magics so content becomes parseable Python code. 
+ + For example, + + %matplotlib inline + 'foo' + + becomes + + "25716f358c32750e" + 'foo' + + The replacements are returned, along with the transformed code. + """ + replacements: List[Replacement] = [] + try: + ast.parse(src) + except SyntaxError: + # Might have IPython magics, will process below. + pass + else: + # Syntax is fine, nothing to mask, early return. + return src, replacements + + from IPython.core.inputtransformer2 import TransformerManager + + transformer_manager = TransformerManager() + transformed = transformer_manager.transform_cell(src) + transformed, cell_magic_replacements = replace_cell_magics(transformed) + replacements += cell_magic_replacements + transformed = transformer_manager.transform_cell(transformed) + transformed, magic_replacements = replace_magics(transformed) + if len(transformed.splitlines()) != len(src.splitlines()): + # Multi-line magic, not supported. + raise NothingChanged + replacements += magic_replacements + return transformed, replacements + + +def get_token(src: str, magic: str) -> str: + """Return randomly generated token to mask IPython magic with. + + For example, if 'magic' was `%matplotlib inline`, then a possible + token to mask it with would be `"43fdd17f7e5ddc83"`. The token + will be the same length as the magic, and we make sure that it was + not already present anywhere else in the cell. + """ + assert magic + nbytes = max(len(magic) // 2 - 1, 1) + token = TOKEN_HEX(nbytes) + counter = 0 + while token in src: + token = TOKEN_HEX(nbytes) + counter += 1 + if counter > 100: + raise AssertionError( + "INTERNAL ERROR: Black was not able to replace IPython magic. " + "Please report a bug on https://github.com/psf/black/issues. " + f"The magic might be helpful: {magic}" + ) from None + if len(token) + 2 < len(magic): + token = f"{token}." + return f'"{token}"' + + +def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]: + """Replace cell magic with token. + + Note that 'src' will already have been processed by IPython's + TransformerManager().transform_cell. + + Example, + + get_ipython().run_cell_magic('t', '-n1', 'ls =!ls\\n') + + becomes + + "a794." + ls =!ls + + The replacement, along with the transformed code, is returned. + """ + replacements: List[Replacement] = [] + + tree = ast.parse(src) + + cell_magic_finder = CellMagicFinder() + cell_magic_finder.visit(tree) + if cell_magic_finder.cell_magic is None: + return src, replacements + header = cell_magic_finder.cell_magic.header + mask = get_token(src, header) + replacements.append(Replacement(mask=mask, src=header)) + return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements + + +def replace_magics(src: str) -> Tuple[str, List[Replacement]]: + """Replace magics within body of cell. + + Note that 'src' will already have been processed by IPython's + TransformerManager().transform_cell. + + Example, this + + get_ipython().run_line_magic('matplotlib', 'inline') + 'foo' + + becomes + + "5e67db56d490fd39" + 'foo' + + The replacement, along with the transformed code, are returned. + """ + replacements = [] + magic_finder = MagicFinder() + magic_finder.visit(ast.parse(src)) + new_srcs = [] + for i, line in enumerate(src.splitlines(), start=1): + if i in magic_finder.magics: + offsets_and_magics = magic_finder.magics[i] + if len(offsets_and_magics) != 1: # pragma: nocover + raise AssertionError( + f"Expecting one magic per line, got: {offsets_and_magics}\n" + "Please report a bug on https://github.com/psf/black/issues." 
+ ) + col_offset, magic = ( + offsets_and_magics[0].col_offset, + offsets_and_magics[0].magic, + ) + mask = get_token(src, magic) + replacements.append(Replacement(mask=mask, src=magic)) + line = line[:col_offset] + mask + new_srcs.append(line) + return "\n".join(new_srcs), replacements + + +def unmask_cell(src: str, replacements: List[Replacement]) -> str: + """Remove replacements from cell. + + For example + + "9b20" + foo = bar + + becomes + + %%time + foo = bar + """ + for replacement in replacements: + src = src.replace(replacement.mask, replacement.src) + return src + + +def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]: + """Check if attribute is IPython magic. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + """ + return ( + isinstance(node, ast.Attribute) + and isinstance(node.value, ast.Call) + and isinstance(node.value.func, ast.Name) + and node.value.func.id == "get_ipython" + ) + + +def _get_str_args(args: List[ast.expr]) -> List[str]: + str_args = [] + for arg in args: + assert isinstance(arg, ast.Str) + str_args.append(arg.s) + return str_args + + +@dataclasses.dataclass(frozen=True) +class CellMagic: + name: str + params: Optional[str] + body: str + + @property + def header(self) -> str: + if self.params: + return f"%%{self.name} {self.params}" + return f"%%{self.name}" + + +# ast.NodeVisitor + dataclass = breakage under mypyc. +class CellMagicFinder(ast.NodeVisitor): + """Find cell magics. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + + For example, + + %%time\nfoo() + + would have been transformed to + + get_ipython().run_cell_magic('time', '', 'foo()\\n') + + and we look for instances of the latter. + """ + + def __init__(self, cell_magic: Optional[CellMagic] = None) -> None: + self.cell_magic = cell_magic + + def visit_Expr(self, node: ast.Expr) -> None: + """Find cell magic, extract header and body.""" + if ( + isinstance(node.value, ast.Call) + and _is_ipython_magic(node.value.func) + and node.value.func.attr == "run_cell_magic" + ): + args = _get_str_args(node.value.args) + self.cell_magic = CellMagic(name=args[0], params=args[1], body=args[2]) + self.generic_visit(node) + + +@dataclasses.dataclass(frozen=True) +class OffsetAndMagic: + col_offset: int + magic: str + + +# Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here +# as mypyc will generate broken code. +class MagicFinder(ast.NodeVisitor): + """Visit cell to look for get_ipython calls. + + Note that the source of the abstract syntax tree + will already have been processed by IPython's + TransformerManager().transform_cell. + + For example, + + %matplotlib inline + + would have been transformed to + + get_ipython().run_line_magic('matplotlib', 'inline') + + and we look for instances of the latter (and likewise for other + types of magics). + """ + + def __init__(self) -> None: + self.magics: Dict[int, List[OffsetAndMagic]] = collections.defaultdict(list) + + def visit_Assign(self, node: ast.Assign) -> None: + """Look for system assign magics. + + For example, + + black_version = !black --version + env = %env var + + would have been (respectively) transformed to + + black_version = get_ipython().getoutput('black --version') + env = get_ipython().run_line_magic('env', 'var') + + and we look for instances of any of the latter. 
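+
+        For the cell above, `self.magics` would then hold roughly (keyed by
+        line number in the transformed source):
+
+            {1: [OffsetAndMagic(col_offset=16, magic='!black --version')],
+             2: [OffsetAndMagic(col_offset=6, magic='%env var')]}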
+ """ + if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func): + args = _get_str_args(node.value.args) + if node.value.func.attr == "getoutput": + src = f"!{args[0]}" + elif node.value.func.attr == "run_line_magic": + src = f"%{args[0]}" + if args[1]: + src += f" {args[1]}" + else: + raise AssertionError( + f"Unexpected IPython magic {node.value.func.attr!r} found. " + "Please report a bug on https://github.com/psf/black/issues." + ) from None + self.magics[node.value.lineno].append( + OffsetAndMagic(node.value.col_offset, src) + ) + self.generic_visit(node) + + def visit_Expr(self, node: ast.Expr) -> None: + """Look for magics in body of cell. + + For examples, + + !ls + !!ls + ?ls + ??ls + + would (respectively) get transformed to + + get_ipython().system('ls') + get_ipython().getoutput('ls') + get_ipython().run_line_magic('pinfo', 'ls') + get_ipython().run_line_magic('pinfo2', 'ls') + + and we look for instances of any of the latter. + """ + if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func): + args = _get_str_args(node.value.args) + if node.value.func.attr == "run_line_magic": + if args[0] == "pinfo": + src = f"?{args[1]}" + elif args[0] == "pinfo2": + src = f"??{args[1]}" + else: + src = f"%{args[0]}" + if args[1]: + src += f" {args[1]}" + elif node.value.func.attr == "system": + src = f"!{args[0]}" + elif node.value.func.attr == "getoutput": + src = f"!!{args[0]}" + else: + raise NothingChanged # unsupported magic. + self.magics[node.value.lineno].append( + OffsetAndMagic(node.value.col_offset, src) + ) + self.generic_visit(node) diff --git a/src/black/linegen.py b/src/black/linegen.py new file mode 100644 index 0000000..a2e41bf --- /dev/null +++ b/src/black/linegen.py @@ -0,0 +1,1294 @@ +""" +Generating lines of code. +""" +import sys +from functools import partial, wraps +from typing import Collection, Iterator, List, Optional, Set, Union, cast + +from black.brackets import COMMA_PRIORITY, DOT_PRIORITY, max_delimiter_priority_in_atom +from black.comments import FMT_OFF, generate_comments, list_comments +from black.lines import ( + Line, + append_leaves, + can_be_split, + can_omit_invisible_parens, + is_line_short_enough, + line_to_string, +) +from black.mode import Feature, Mode, Preview +from black.nodes import ( + ASSIGNMENTS, + CLOSING_BRACKETS, + OPENING_BRACKETS, + RARROW, + STANDALONE_COMMENT, + STATEMENT, + WHITESPACE, + Visitor, + ensure_visible, + is_arith_like, + is_atom_with_invisible_parens, + is_docstring, + is_empty_tuple, + is_lpar_token, + is_multiline_string, + is_name_token, + is_one_sequence_between, + is_one_tuple, + is_rpar_token, + is_stub_body, + is_stub_suite, + is_vararg, + is_walrus_assignment, + is_yield, + syms, + wrap_in_parentheses, +) +from black.numerics import normalize_numeric_literal +from black.strings import ( + fix_docstring, + get_string_prefix, + normalize_string_prefix, + normalize_string_quotes, +) +from black.trans import ( + CannotTransform, + StringMerger, + StringParenStripper, + StringParenWrapper, + StringSplitter, + Transformer, + hug_power_op, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +LeafID = int +LN = Union[Leaf, Node] + + +class CannotSplit(CannotTransform): + """A readable split that fits the allotted line length is impossible.""" + + +# This isn't a dataclass because @dataclass + Generic breaks mypyc. +# See also https://github.com/mypyc/mypyc/issues/827. 
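+# Rough usage sketch (heavily simplified; the real driver also runs each
+# yielded line through transform_line() and tracks empty lines):
+#
+#     line_generator = LineGenerator(mode=mode)
+#     for line in line_generator.visit(root):
+#         dst_contents.append(str(line))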
+class LineGenerator(Visitor[Line]):
+    """Generates reformatted Line objects. Empty lines are not emitted.
+
+    Note: destroys the tree it's visiting by mutating prefixes of its leaves
+    in ways that will no longer stringify to valid Python code on the tree.
+    """
+
+    def __init__(self, mode: Mode) -> None:
+        self.mode = mode
+        self.current_line: Line
+        self.__post_init__()
+
+    def line(self, indent: int = 0) -> Iterator[Line]:
+        """Generate a line.
+
+        If the line is empty, only emit if it makes sense.
+        If the line is too long, split it first and then generate.
+
+        If any lines were generated, set up a new current_line.
+        """
+        if not self.current_line:
+            self.current_line.depth += indent
+            return  # Line is empty, don't emit. Creating a new one is unnecessary.
+
+        complete_line = self.current_line
+        self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent)
+        yield complete_line
+
+    def visit_default(self, node: LN) -> Iterator[Line]:
+        """Default `visit_*()` implementation. Recurses to children of `node`."""
+        if isinstance(node, Leaf):
+            any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
+            for comment in generate_comments(node, preview=self.mode.preview):
+                if any_open_brackets:
+                    # any comment within brackets is subject to splitting
+                    self.current_line.append(comment)
+                elif comment.type == token.COMMENT:
+                    # regular trailing comment
+                    self.current_line.append(comment)
+                    yield from self.line()
+
+                else:
+                    # regular standalone comment
+                    yield from self.line()
+
+                    self.current_line.append(comment)
+                    yield from self.line()
+
+            normalize_prefix(node, inside_brackets=any_open_brackets)
+            if self.mode.string_normalization and node.type == token.STRING:
+                node.value = normalize_string_prefix(node.value)
+                node.value = normalize_string_quotes(node.value)
+            if node.type == token.NUMBER:
+                normalize_numeric_literal(node)
+            if node.type not in WHITESPACE:
+                self.current_line.append(node)
+        yield from super().visit_default(node)
+
+    def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
+        """Increase indentation level, maybe yield a line."""
+        # In blib2to3 INDENT never holds comments.
+        yield from self.line(+1)
+        yield from self.visit_default(node)
+
+    def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
+        """Decrease indentation level, maybe yield a line."""
+        # The current line might still wait for trailing comments. At DEDENT time
+        # there won't be any (they would be prefixes on the preceding NEWLINE).
+        # Emit the line then.
+        yield from self.line()
+
+        # While DEDENT has no value, its prefix may contain standalone comments
+        # that belong to the current indentation level. Get 'em.
+        yield from self.visit_default(node)
+
+        # Finally, emit the dedent.
+        yield from self.line(-1)
+
+    def visit_stmt(
+        self, node: Node, keywords: Set[str], parens: Set[str]
+    ) -> Iterator[Line]:
+        """Visit a statement.
+
+        This implementation is shared for `if`, `while`, `for`, `try`, `except`,
+        `def`, `with`, `class`, `assert`, and assignments.
+
+        The relevant Python language `keywords` for a given statement will be
+        NAME leaves within it. This method puts each of those on a separate line.
+
+        `parens` holds a set of string leaf values immediately after which
+        invisible parens should be put.
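+
+        For example, `if` statements are visited with
+        keywords={"if", "else", "elif"} and parens={"if", "elif"} (see
+        `__post_init__` below), so `if`/`elif`/`else` each start a fresh line
+        and the conditions following `if` and `elif` get invisible parentheses.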
+ """ + normalize_invisible_parens(node, parens_after=parens, preview=self.mode.preview) + for child in node.children: + if is_name_token(child) and child.value in keywords: + yield from self.line() + + yield from self.visit(child) + + def visit_funcdef(self, node: Node) -> Iterator[Line]: + """Visit function definition.""" + if Preview.annotation_parens not in self.mode: + yield from self.visit_stmt(node, keywords={"def"}, parens=set()) + else: + yield from self.line() + + # Remove redundant brackets around return type annotation. + is_return_annotation = False + for child in node.children: + if child.type == token.RARROW: + is_return_annotation = True + elif is_return_annotation: + if child.type == syms.atom and child.children[0].type == token.LPAR: + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + remove_brackets_around_comma=False, + ): + wrap_in_parentheses(node, child, visible=False) + else: + wrap_in_parentheses(node, child, visible=False) + is_return_annotation = False + + for child in node.children: + yield from self.visit(child) + + def visit_match_case(self, node: Node) -> Iterator[Line]: + """Visit either a match or case statement.""" + normalize_invisible_parens(node, parens_after=set(), preview=self.mode.preview) + + yield from self.line() + for child in node.children: + yield from self.visit(child) + + def visit_suite(self, node: Node) -> Iterator[Line]: + """Visit a suite.""" + if self.mode.is_pyi and is_stub_suite(node): + yield from self.visit(node.children[2]) + else: + yield from self.visit_default(node) + + def visit_simple_stmt(self, node: Node) -> Iterator[Line]: + """Visit a statement without nested statements.""" + prev_type: Optional[int] = None + for child in node.children: + if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child): + wrap_in_parentheses(node, child, visible=False) + prev_type = child.type + + is_suite_like = node.parent and node.parent.type in STATEMENT + if is_suite_like: + if self.mode.is_pyi and is_stub_body(node): + yield from self.visit_default(node) + else: + yield from self.line(+1) + yield from self.visit_default(node) + yield from self.line(-1) + + else: + if ( + not self.mode.is_pyi + or not node.parent + or not is_stub_suite(node.parent) + ): + yield from self.line() + yield from self.visit_default(node) + + def visit_async_stmt(self, node: Node) -> Iterator[Line]: + """Visit `async def`, `async for`, `async with`.""" + yield from self.line() + + children = iter(node.children) + for child in children: + yield from self.visit(child) + + if child.type == token.ASYNC or child.type == STANDALONE_COMMENT: + # STANDALONE_COMMENT happens when `# fmt: skip` is applied on the async + # line. 
+ break + + internal_stmt = next(children) + for child in internal_stmt.children: + yield from self.visit(child) + + def visit_decorators(self, node: Node) -> Iterator[Line]: + """Visit decorators.""" + for child in node.children: + yield from self.line() + yield from self.visit(child) + + def visit_power(self, node: Node) -> Iterator[Line]: + for idx, leaf in enumerate(node.children[:-1]): + next_leaf = node.children[idx + 1] + + if not isinstance(leaf, Leaf): + continue + + value = leaf.value.lower() + if ( + leaf.type == token.NUMBER + and next_leaf.type == syms.trailer + # Ensure that we are in an attribute trailer + and next_leaf.children[0].type == token.DOT + # It shouldn't wrap hexadecimal, binary and octal literals + and not value.startswith(("0x", "0b", "0o")) + # It shouldn't wrap complex literals + and "j" not in value + ): + wrap_in_parentheses(node, leaf) + + if Preview.remove_redundant_parens in self.mode: + remove_await_parens(node) + + yield from self.visit_default(node) + + def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]: + """Remove a semicolon and put the other statement on a separate line.""" + yield from self.line() + + def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]: + """End of file. Process outstanding comments and end with a newline.""" + yield from self.visit_default(leaf) + yield from self.line() + + def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]: + if not self.current_line.bracket_tracker.any_open_brackets(): + yield from self.line() + yield from self.visit_default(leaf) + + def visit_factor(self, node: Node) -> Iterator[Line]: + """Force parentheses between a unary op and a binary power: + + -2 ** 8 -> -(2 ** 8) + """ + _operator, operand = node.children + if ( + operand.type == syms.power + and len(operand.children) == 3 + and operand.children[1].type == token.DOUBLESTAR + ): + lpar = Leaf(token.LPAR, "(") + rpar = Leaf(token.RPAR, ")") + index = operand.remove() or 0 + node.insert_child(index, Node(syms.atom, [lpar, operand, rpar])) + yield from self.visit_default(node) + + def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: + if is_docstring(leaf) and "\\\n" not in leaf.value: + # We're ignoring docstrings with backslash newline escapes because changing + # indentation of those changes the AST representation of the code. + if Preview.normalize_docstring_quotes_and_prefixes_properly in self.mode: + # There was a bug where --skip-string-normalization wouldn't stop us + # from normalizing docstring prefixes. To maintain stability, we can + # only address this buggy behaviour while the preview style is enabled. + if self.mode.string_normalization: + docstring = normalize_string_prefix(leaf.value) + # visit_default() does handle string normalization for us, but + # since this method acts differently depending on quote style (ex. + # see padding logic below), there's a possibility for unstable + # formatting as visit_default() is called *after*. To avoid a + # situation where this function formats a docstring differently on + # the second pass, normalize it early. + docstring = normalize_string_quotes(docstring) + else: + docstring = leaf.value + else: + # ... otherwise, we'll keep the buggy behaviour >.< + docstring = normalize_string_prefix(leaf.value) + prefix = get_string_prefix(docstring) + docstring = docstring[len(prefix) :] # Remove the prefix + quote_char = docstring[0] + # A natural way to remove the outer quotes is to do: + # docstring = docstring.strip(quote_char) + # but that breaks on """""x""" (which is '""x'). 
+            # So we actually need to remove the first character and the next two
+            # characters but only if they are the same as the first.
+            quote_len = 1 if docstring[1] != quote_char else 3
+            docstring = docstring[quote_len:-quote_len]
+            docstring_started_empty = not docstring
+            indent = " " * 4 * self.current_line.depth
+
+            if is_multiline_string(leaf):
+                docstring = fix_docstring(docstring, indent)
+            else:
+                docstring = docstring.strip()
+
+            if docstring:
+                # Add some padding if the docstring starts / ends with a quote mark.
+                if docstring[0] == quote_char:
+                    docstring = " " + docstring
+                if docstring[-1] == quote_char:
+                    docstring += " "
+                if docstring[-1] == "\\":
+                    backslash_count = len(docstring) - len(docstring.rstrip("\\"))
+                    if backslash_count % 2:
+                        # Odd number of trailing backslashes, add some padding to
+                        # avoid escaping the closing string quote.
+                        docstring += " "
+            elif not docstring_started_empty:
+                docstring = " "
+
+            # We could enforce triple quotes at this point.
+            quote = quote_char * quote_len
+
+            # It's invalid to put closing single-character quotes on a new line.
+            if Preview.long_docstring_quotes_on_newline in self.mode and quote_len == 3:
+                # We need to find the length of the last line of the docstring
+                # to find if we can add the closing quotes to the line without
+                # exceeding the maximum line length.
+                # If docstring is one line, then we need to add the length
+                # of the indent, prefix, and starting quotes. Ending quotes are
+                # handled later.
+                lines = docstring.splitlines()
+                last_line_length = len(lines[-1]) if docstring else 0
+
+                if len(lines) == 1:
+                    last_line_length += len(indent) + len(prefix) + quote_len
+
+                # If adding closing quotes would cause the last line to exceed
+                # the maximum line length then put a line break before the
+                # closing quotes
+                if last_line_length + quote_len > self.mode.line_length:
+                    leaf.value = prefix + quote + docstring + "\n" + indent + quote
+                else:
+                    leaf.value = prefix + quote + docstring + quote
+            else:
+                leaf.value = prefix + quote + docstring + quote
+
+        yield from self.visit_default(leaf)
+
+    def __post_init__(self) -> None:
+        """You are in a twisty little maze of passages."""
+        self.current_line = Line(mode=self.mode)
+
+        v = self.visit_stmt
+        Ø: Set[str] = set()
+        self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
+        self.visit_if_stmt = partial(
+            v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
+        )
+        self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
+        self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
+        self.visit_try_stmt = partial(
+            v, keywords={"try", "except", "else", "finally"}, parens=Ø
+        )
+        if self.mode.preview:
+            self.visit_except_clause = partial(
+                v, keywords={"except"}, parens={"except"}
+            )
+            self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"})
+        else:
+            self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
+            self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
+        self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
+        self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
+        self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
+        self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
+        self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
+        self.visit_async_funcdef = self.visit_async_stmt
+        self.visit_decorated = self.visit_decorators
+
+        # PEP 634
+        self.visit_match_stmt = self.visit_match_case
+        
self.visit_case_block = self.visit_match_case + + +def transform_line( + line: Line, mode: Mode, features: Collection[Feature] = () +) -> Iterator[Line]: + """Transform a `line`, potentially splitting it into many lines. + + They should fit in the allotted `line_length` but might not be able to. + + `features` are syntactical features that may be used in the output. + """ + if line.is_comment: + yield line + return + + line_str = line_to_string(line) + + ll = mode.line_length + sn = mode.string_normalization + string_merge = StringMerger(ll, sn) + string_paren_strip = StringParenStripper(ll, sn) + string_split = StringSplitter(ll, sn) + string_paren_wrap = StringParenWrapper(ll, sn) + + transformers: List[Transformer] + if ( + not line.contains_uncollapsable_type_comments() + and not line.should_split_rhs + and not line.magic_trailing_comma + and ( + is_line_short_enough(line, line_length=mode.line_length, line_str=line_str) + or line.contains_unsplittable_type_ignore() + ) + and not (line.inside_brackets and line.contains_standalone_comments()) + ): + # Only apply basic string preprocessing, since lines shouldn't be split here. + if Preview.string_processing in mode: + transformers = [string_merge, string_paren_strip] + else: + transformers = [] + elif line.is_def: + transformers = [left_hand_split] + else: + + def _rhs( + self: object, line: Line, features: Collection[Feature] + ) -> Iterator[Line]: + """Wraps calls to `right_hand_split`. + + The calls increasingly `omit` right-hand trailers (bracket pairs with + content), meaning the trailers get glued together to split on another + bracket pair instead. + """ + for omit in generate_trailers_to_omit(line, mode.line_length): + lines = list( + right_hand_split(line, mode.line_length, features, omit=omit) + ) + # Note: this check is only able to figure out if the first line of the + # *current* transformation fits in the line length. This is true only + # for simple cases. All others require running more transforms via + # `transform_line()`. This check doesn't know if those would succeed. + if is_line_short_enough(lines[0], line_length=mode.line_length): + yield from lines + return + + # All splits failed, best effort split with no omits. + # This mostly happens to multiline strings that are by definition + # reported as not fitting a single line, as well as lines that contain + # trailing commas (those have to be exploded). + yield from right_hand_split( + line, line_length=mode.line_length, features=features + ) + + # HACK: nested functions (like _rhs) compiled by mypyc don't retain their + # __name__ attribute which is needed in `run_transformer` further down. + # Unfortunately a nested class breaks mypyc too. So a class must be created + # via type ... https://github.com/mypyc/mypyc/issues/884 + rhs = type("rhs", (), {"__call__": _rhs})() + + if Preview.string_processing in mode: + if line.inside_brackets: + transformers = [ + string_merge, + string_paren_strip, + string_split, + delimiter_split, + standalone_comment_split, + string_paren_wrap, + rhs, + ] + else: + transformers = [ + string_merge, + string_paren_strip, + string_split, + string_paren_wrap, + rhs, + ] + else: + if line.inside_brackets: + transformers = [delimiter_split, standalone_comment_split, rhs] + else: + transformers = [rhs] + # It's always safe to attempt hugging of power operations and pretty much every line + # could match. 
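+    # (for instance, "i ** 2" typically becomes "i**2" when both operands are simple)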
+ transformers.append(hug_power_op) + + for transform in transformers: + # We are accumulating lines in `result` because we might want to abort + # mission and return the original line in the end, or attempt a different + # split altogether. + try: + result = run_transformer(line, transform, mode, features, line_str=line_str) + except CannotTransform: + continue + else: + yield from result + break + + else: + yield line + + +def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]: + """Split line into many lines, starting with the first matching bracket pair. + + Note: this usually looks weird, only use this for function definitions. + Prefer RHS otherwise. This is why this function is not symmetrical with + :func:`right_hand_split` which also handles optional parentheses. + """ + tail_leaves: List[Leaf] = [] + body_leaves: List[Leaf] = [] + head_leaves: List[Leaf] = [] + current_leaves = head_leaves + matching_bracket: Optional[Leaf] = None + for leaf in line.leaves: + if ( + current_leaves is body_leaves + and leaf.type in CLOSING_BRACKETS + and leaf.opening_bracket is matching_bracket + and isinstance(matching_bracket, Leaf) + ): + ensure_visible(leaf) + ensure_visible(matching_bracket) + current_leaves = tail_leaves if body_leaves else head_leaves + current_leaves.append(leaf) + if current_leaves is head_leaves: + if leaf.type in OPENING_BRACKETS: + matching_bracket = leaf + current_leaves = body_leaves + if not matching_bracket: + raise CannotSplit("No brackets found") + + head = bracket_split_build_line(head_leaves, line, matching_bracket) + body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True) + tail = bracket_split_build_line(tail_leaves, line, matching_bracket) + bracket_split_succeeded_or_raise(head, body, tail) + for result in (head, body, tail): + if result: + yield result + + +def right_hand_split( + line: Line, + line_length: int, + features: Collection[Feature] = (), + omit: Collection[LeafID] = (), +) -> Iterator[Line]: + """Split line into many lines, starting with the last matching bracket pair. + + If the split was by optional parentheses, attempt splitting without them, too. + `omit` is a collection of closing bracket IDs that shouldn't be considered for + this split. + + Note: running this function modifies `bracket_depth` on the leaves of `line`. + """ + tail_leaves: List[Leaf] = [] + body_leaves: List[Leaf] = [] + head_leaves: List[Leaf] = [] + current_leaves = tail_leaves + opening_bracket: Optional[Leaf] = None + closing_bracket: Optional[Leaf] = None + for leaf in reversed(line.leaves): + if current_leaves is body_leaves: + if leaf is opening_bracket: + current_leaves = head_leaves if body_leaves else tail_leaves + current_leaves.append(leaf) + if current_leaves is tail_leaves: + if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + current_leaves = body_leaves + if not (opening_bracket and closing_bracket and head_leaves): + # If there is no opening or closing_bracket that means the split failed and + # all content is in the tail. Otherwise, if `head_leaves` are empty, it means + # the matching `opening_bracket` wasn't available on `line` anymore. 
+ raise CannotSplit("No brackets found") + + tail_leaves.reverse() + body_leaves.reverse() + head_leaves.reverse() + head = bracket_split_build_line(head_leaves, line, opening_bracket) + body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True) + tail = bracket_split_build_line(tail_leaves, line, opening_bracket) + bracket_split_succeeded_or_raise(head, body, tail) + if ( + Feature.FORCE_OPTIONAL_PARENTHESES not in features + # the opening bracket is an optional paren + and opening_bracket.type == token.LPAR + and not opening_bracket.value + # the closing bracket is an optional paren + and closing_bracket.type == token.RPAR + and not closing_bracket.value + # it's not an import (optional parens are the only thing we can split on + # in this case; attempting a split without them is a waste of time) + and not line.is_import + # there are no standalone comments in the body + and not body.contains_standalone_comments(0) + # and we can actually remove the parens + and can_omit_invisible_parens(body, line_length) + ): + omit = {id(closing_bracket), *omit} + try: + yield from right_hand_split(line, line_length, features=features, omit=omit) + return + + except CannotSplit as e: + if not ( + can_be_split(body) + or is_line_short_enough(body, line_length=line_length) + ): + raise CannotSplit( + "Splitting failed, body is still too long and can't be split." + ) from e + + elif head.contains_multiline_strings() or tail.contains_multiline_strings(): + raise CannotSplit( + "The current optional pair of parentheses is bound to fail to" + " satisfy the splitting algorithm because the head or the tail" + " contains multiline strings which by definition never fit one" + " line." + ) from e + + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + for result in (head, body, tail): + if result: + yield result + + +def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None: + """Raise :exc:`CannotSplit` if the last left- or right-hand split failed. + + Do nothing otherwise. + + A left- or right-hand split is based on a pair of brackets. Content before + (and including) the opening bracket is left on one line, content inside the + brackets is put on a separate line, and finally content starting with and + following the closing bracket is put on a separate line. + + Those are called `head`, `body`, and `tail`, respectively. If the split + produced the same line (all content in `head`) or ended up with an empty `body` + and the `tail` is just the closing bracket, then it's considered failed. + """ + tail_len = len(str(tail).strip()) + if not body: + if tail_len == 0: + raise CannotSplit("Splitting brackets produced the same line") + + elif tail_len < 3: + raise CannotSplit( + f"Splitting brackets on an empty body to save {tail_len} characters is" + " not worth it" + ) + + +def bracket_split_build_line( + leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False +) -> Line: + """Return a new line with given `leaves` and respective comments from `original`. + + If `is_body` is True, the result line is one-indented inside brackets and as such + has its first leaf's prefix normalized and a trailing comma added when expected. + """ + result = Line(mode=original.mode, depth=original.depth) + if is_body: + result.inside_brackets = True + result.depth += 1 + if leaves: + # Since body is a new indent level, remove spurious leading whitespace. 
+            normalize_prefix(leaves[0], inside_brackets=True)
+            # Ensure a trailing comma for imports and standalone function arguments, but
+            # be careful not to add one after any comments or within type annotations.
+            no_commas = (
+                original.is_def
+                and opening_bracket.value == "("
+                and not any(leaf.type == token.COMMA for leaf in leaves)
+                # In particular, don't add one within a parenthesized return annotation.
+                # Unfortunately the indicator we're in a return annotation (RARROW) may
+                # be defined directly in the parent node, the parent of the parent ...
+                # and so on depending on how complex the return annotation is.
+                # This isn't perfect and there are some false negatives, but they are in
+                # contexts where a comma is actually fine.
+                and not any(
+                    node.prev_sibling.type == RARROW
+                    for node in (
+                        leaves[0].parent,
+                        getattr(leaves[0].parent, "parent", None),
+                    )
+                    if isinstance(node, Node) and isinstance(node.prev_sibling, Leaf)
+                )
+            )
+
+            if original.is_import or no_commas:
+                for i in range(len(leaves) - 1, -1, -1):
+                    if leaves[i].type == STANDALONE_COMMENT:
+                        continue
+
+                    if leaves[i].type != token.COMMA:
+                        new_comma = Leaf(token.COMMA, ",")
+                        leaves.insert(i + 1, new_comma)
+                        break
+
+    # Populate the line
+    for leaf in leaves:
+        result.append(leaf, preformatted=True)
+        for comment_after in original.comments_after(leaf):
+            result.append(comment_after, preformatted=True)
+    if is_body and should_split_line(result, opening_bracket):
+        result.should_split_rhs = True
+    return result
+
+
+def dont_increase_indentation(split_func: Transformer) -> Transformer:
+    """Normalize prefix of the first leaf in every line returned by `split_func`.
+
+    This is a decorator over relevant split functions.
+    """
+
+    @wraps(split_func)
+    def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
+        for split_line in split_func(line, features):
+            normalize_prefix(split_line.leaves[0], inside_brackets=True)
+            yield split_line
+
+    return split_wrapper
+
+
+@dont_increase_indentation
+def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
+    """Split according to delimiters of the highest priority.
+
+    If the appropriate Features are given, the split will add trailing commas
+    also in function signatures and calls that contain `*` and `**`.
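+
+    Roughly, a bracketed body such as
+
+        arg1, arg2, arg3
+
+    comes back as one line per element (`arg1,` / `arg2,` / `arg3,`), with a
+    trailing comma appended when that is safe.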
+ """ + try: + last_leaf = line.leaves[-1] + except IndexError: + raise CannotSplit("Line empty") from None + + bt = line.bracket_tracker + try: + delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) + except ValueError: + raise CannotSplit("No delimiters found") from None + + if delimiter_priority == DOT_PRIORITY: + if bt.delimiter_count_with_priority(delimiter_priority) == 1: + raise CannotSplit("Splitting a single attribute from its owner looks wrong") + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + lowest_depth = sys.maxsize + trailing_comma_safe = True + + def append_to_line(leaf: Leaf) -> Iterator[Line]: + """Append `leaf` to current line or to new line if appending impossible.""" + nonlocal current_line + try: + current_line.append_safe(leaf, preformatted=True) + except ValueError: + yield current_line + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + current_line.append(leaf) + + for leaf in line.leaves: + yield from append_to_line(leaf) + + for comment_after in line.comments_after(leaf): + yield from append_to_line(comment_after) + + lowest_depth = min(lowest_depth, leaf.bracket_depth) + if leaf.bracket_depth == lowest_depth: + if is_vararg(leaf, within={syms.typedargslist}): + trailing_comma_safe = ( + trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features + ) + elif is_vararg(leaf, within={syms.arglist, syms.argument}): + trailing_comma_safe = ( + trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features + ) + + leaf_priority = bt.delimiters.get(id(leaf)) + if leaf_priority == delimiter_priority: + yield current_line + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + if current_line: + if ( + trailing_comma_safe + and delimiter_priority == COMMA_PRIORITY + and current_line.leaves[-1].type != token.COMMA + and current_line.leaves[-1].type != STANDALONE_COMMENT + ): + new_comma = Leaf(token.COMMA, ",") + current_line.append(new_comma) + yield current_line + + +@dont_increase_indentation +def standalone_comment_split( + line: Line, features: Collection[Feature] = () +) -> Iterator[Line]: + """Split standalone comments from the rest of the line.""" + if not line.contains_standalone_comments(0): + raise CannotSplit("Line does not have any standalone comments") + + current_line = Line( + mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + + def append_to_line(leaf: Leaf) -> Iterator[Line]: + """Append `leaf` to current line or to new line if appending impossible.""" + nonlocal current_line + try: + current_line.append_safe(leaf, preformatted=True) + except ValueError: + yield current_line + + current_line = Line( + line.mode, depth=line.depth, inside_brackets=line.inside_brackets + ) + current_line.append(leaf) + + for leaf in line.leaves: + yield from append_to_line(leaf) + + for comment_after in line.comments_after(leaf): + yield from append_to_line(comment_after) + + if current_line: + yield current_line + + +def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: + """Leave existing extra newlines if not `inside_brackets`. Remove everything + else. + + Note: don't use backslashes for formatting or you'll lose your voting rights. 
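+
+    For example, outside brackets a prefix of "    \n\n\n" becomes "\n\n\n"
+    (the blank lines survive), while inside brackets the prefix is emptied.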
+    """
+    if not inside_brackets:
+        spl = leaf.prefix.split("#")
+        if "\\" not in spl[0]:
+            nl_count = spl[-1].count("\n")
+            if len(spl) > 1:
+                nl_count -= 1
+            leaf.prefix = "\n" * nl_count
+            return
+
+    leaf.prefix = ""
+
+
+def normalize_invisible_parens(
+    node: Node, parens_after: Set[str], *, preview: bool
+) -> None:
+    """Make existing optional parentheses invisible or create new ones.
+
+    `parens_after` is a set of string leaf values immediately after which parens
+    should be put.
+
+    Standardizes on visible parentheses for single-element tuples, and keeps
+    existing visible parentheses for other tuples and generator expressions.
+    """
+    for pc in list_comments(node.prefix, is_endmarker=False, preview=preview):
+        if pc.value in FMT_OFF:
+            # This `node` has a prefix with `# fmt: off`, don't mess with parens.
+            return
+    check_lpar = False
+    for index, child in enumerate(list(node.children)):
+        # Fixes a bug where invisible parens are not properly stripped from
+        # assignment statements that contain type annotations.
+        if isinstance(child, Node) and child.type == syms.annassign:
+            normalize_invisible_parens(
+                child, parens_after=parens_after, preview=preview
+            )
+
+        # Add parentheses around long tuple unpacking in assignments.
+        if (
+            index == 0
+            and isinstance(child, Node)
+            and child.type == syms.testlist_star_expr
+        ):
+            check_lpar = True
+
+        if check_lpar:
+            if (
+                preview
+                and child.type == syms.atom
+                and node.type == syms.for_stmt
+                and isinstance(child.prev_sibling, Leaf)
+                and child.prev_sibling.type == token.NAME
+                and child.prev_sibling.value == "for"
+            ):
+                if maybe_make_parens_invisible_in_atom(
+                    child,
+                    parent=node,
+                    remove_brackets_around_comma=True,
+                ):
+                    wrap_in_parentheses(node, child, visible=False)
+            elif preview and isinstance(child, Node) and node.type == syms.with_stmt:
+                remove_with_parens(child, node)
+            elif child.type == syms.atom:
+                if maybe_make_parens_invisible_in_atom(
+                    child,
+                    parent=node,
+                ):
+                    wrap_in_parentheses(node, child, visible=False)
+            elif is_one_tuple(child):
+                wrap_in_parentheses(node, child, visible=True)
+            elif node.type == syms.import_from:
+                # "import from" nodes store parentheses directly as part of
+                # the statement
+                if is_lpar_token(child):
+                    assert is_rpar_token(node.children[-1])
+                    # make parentheses invisible
+                    child.value = ""
+                    node.children[-1].value = ""
+                elif child.type != token.STAR:
+                    # insert invisible parentheses
+                    node.insert_child(index, Leaf(token.LPAR, ""))
+                    node.append_child(Leaf(token.RPAR, ""))
+                break
+            elif (
+                index == 1
+                and child.type == token.STAR
+                and node.type == syms.except_clause
+            ):
+                # In except* (PEP 654), the star is actually part of
+                # the keyword. So we need to skip the insertion of
+                # invisible parentheses to work more precisely.
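+                # (e.g. in `except* TypeError:` the star is skipped, not parenthesized)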
+ continue + + elif not (isinstance(child, Leaf) and is_multiline_string(child)): + wrap_in_parentheses(node, child, visible=False) + + comma_check = child.type == token.COMMA if preview else False + + check_lpar = isinstance(child, Leaf) and ( + child.value in parens_after or comma_check + ) + + +def remove_await_parens(node: Node) -> None: + if node.children[0].type == token.AWAIT and len(node.children) > 1: + if ( + node.children[1].type == syms.atom + and node.children[1].children[0].type == token.LPAR + ): + if maybe_make_parens_invisible_in_atom( + node.children[1], + parent=node, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(node, node.children[1], visible=False) + + # Since await is an expression we shouldn't remove + # brackets in cases where this would change + # the AST due to operator precedence. + # Therefore we only aim to remove brackets around + # power nodes that aren't also await expressions themselves. + # https://peps.python.org/pep-0492/#updated-operator-precedence-table + # N.B. We've still removed any redundant nested brackets though :) + opening_bracket = cast(Leaf, node.children[1].children[0]) + closing_bracket = cast(Leaf, node.children[1].children[-1]) + bracket_contents = cast(Node, node.children[1].children[1]) + if bracket_contents.type != syms.power: + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + elif ( + bracket_contents.type == syms.power + and bracket_contents.children[0].type == token.AWAIT + ): + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + # If we are in a nested await then recurse down. + remove_await_parens(bracket_contents) + + +def remove_with_parens(node: Node, parent: Node) -> None: + """Recursively hide optional parens in `with` statements.""" + # Removing all unnecessary parentheses in with statements in one pass is a tad + # complex as different variations of bracketed statements result in pretty + # different parse trees: + # + # with (open("file")) as f: # this is an asexpr_test + # ... + # + # with (open("file") as f): # this is an atom containing an + # ... # asexpr_test + # + # with (open("file")) as f, (open("file")) as f: # this is asexpr_test, COMMA, + # ... # asexpr_test + # + # with (open("file") as f, open("file") as f): # an atom containing a + # ... # testlist_gexp which then + # # contains multiple asexpr_test(s) + if node.type == syms.atom: + if maybe_make_parens_invisible_in_atom( + node, + parent=parent, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(parent, node, visible=False) + if isinstance(node.children[1], Node): + remove_with_parens(node.children[1], node) + elif node.type == syms.testlist_gexp: + for child in node.children: + if isinstance(child, Node): + remove_with_parens(child, node) + elif node.type == syms.asexpr_test and not any( + leaf.type == token.COLONEQUAL for leaf in node.leaves() + ): + if maybe_make_parens_invisible_in_atom( + node.children[0], + parent=node, + remove_brackets_around_comma=True, + ): + wrap_in_parentheses(node, node.children[0], visible=False) + + +def maybe_make_parens_invisible_in_atom( + node: LN, + parent: LN, + remove_brackets_around_comma: bool = False, +) -> bool: + """If it's safe, make the parens in the atom `node` invisible, recursively. + Additionally, remove repeated, adjacent invisible parens from the atom `node` + as they are redundant. + + Returns whether the node should itself be wrapped in invisible parentheses. 
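+
+    For example, the parentheses in `return (1 + 2)` can be made invisible,
+    whereas in `return (1, 2)` the comma gives the atom COMMA_PRIORITY, so the
+    parentheses are kept (unless `remove_brackets_around_comma` is set).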
+    """
+    if (
+        node.type != syms.atom
+        or is_empty_tuple(node)
+        or is_one_tuple(node)
+        or (is_yield(node) and parent.type != syms.expr_stmt)
+        or (
+            # This condition tries to prevent removing non-optional brackets
+            # around a tuple; however, it can be a bit overzealous, so we provide
+            # an option to skip this check for `for` and `with` statements.
+            not remove_brackets_around_comma
+            and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
+        )
+    ):
+        return False
+
+    if is_walrus_assignment(node):
+        if parent.type in [
+            syms.annassign,
+            syms.expr_stmt,
+            syms.assert_stmt,
+            syms.return_stmt,
+            # these ones aren't useful to end users, but they do please fuzzers
+            syms.for_stmt,
+            syms.del_stmt,
+        ]:
+            return False
+
+    first = node.children[0]
+    last = node.children[-1]
+    if is_lpar_token(first) and is_rpar_token(last):
+        middle = node.children[1]
+        # make parentheses invisible
+        first.value = ""
+        last.value = ""
+        maybe_make_parens_invisible_in_atom(
+            middle,
+            parent=parent,
+            remove_brackets_around_comma=remove_brackets_around_comma,
+        )
+
+        if is_atom_with_invisible_parens(middle):
+            # Strip the invisible parens from `middle` by replacing
+            # it with the child in-between the invisible parens
+            middle.replace(middle.children[1])
+
+        return False
+
+    return True
+
+
+def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
+    """Should `line` be immediately split with `delimiter_split()` after RHS?"""
+
+    if not (opening_bracket.parent and opening_bracket.value in "[{("):
+        return False
+
+    # We're essentially checking if the body is delimited by commas and there's more
+    # than one of them (we're excluding the trailing comma and if the delimiter priority
+    # is still commas, that means there's more).
+    exclude = set()
+    trailing_comma = False
+    try:
+        last_leaf = line.leaves[-1]
+        if last_leaf.type == token.COMMA:
+            trailing_comma = True
+            exclude.add(id(last_leaf))
+        max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
+    except (IndexError, ValueError):
+        return False
+
+    return max_priority == COMMA_PRIORITY and (
+        (line.mode.magic_trailing_comma and trailing_comma)
+        # always explode imports
+        or opening_bracket.parent.type in {syms.atom, syms.import_from}
+    )
+
+
+def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
+    """Generate sets of closing bracket IDs that should be omitted in a RHS.
+
+    Brackets can be omitted if the entire trailer up to and including
+    a preceding closing bracket fits in one line.
+
+    Yielded sets are cumulative (contain results of previous yields, too). First
+    set is empty, unless the line should explode, in which case bracket pairs until
+    the one that needs to explode are omitted.
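+
+    Roughly, for a chained call like `a(x).b(y).c(z)`, the yields progress from
+    {} to {id of c's ")"}, then additionally b's, and so on, letting
+    `right_hand_split` try splitting at progressively earlier bracket pairs.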
+ """ + + omit: Set[LeafID] = set() + if not line.magic_trailing_comma: + yield omit + + length = 4 * line.depth + opening_bracket: Optional[Leaf] = None + closing_bracket: Optional[Leaf] = None + inner_brackets: Set[LeafID] = set() + for index, leaf, leaf_length in line.enumerate_with_length(reversed=True): + length += leaf_length + if length > line_length: + break + + has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix) + if leaf.type == STANDALONE_COMMENT or has_inline_comment: + break + + if opening_bracket: + if leaf is opening_bracket: + opening_bracket = None + elif leaf.type in CLOSING_BRACKETS: + prev = line.leaves[index - 1] if index > 0 else None + if ( + prev + and prev.type == token.COMMA + and leaf.opening_bracket is not None + and not is_one_sequence_between( + leaf.opening_bracket, leaf, line.leaves + ) + ): + # Never omit bracket pairs with trailing commas. + # We need to explode on those. + break + + inner_brackets.add(id(leaf)) + elif leaf.type in CLOSING_BRACKETS: + prev = line.leaves[index - 1] if index > 0 else None + if prev and prev.type in OPENING_BRACKETS: + # Empty brackets would fail a split so treat them as "inner" + # brackets (e.g. only add them to the `omit` set if another + # pair of brackets was good enough. + inner_brackets.add(id(leaf)) + continue + + if closing_bracket: + omit.add(id(closing_bracket)) + omit.update(inner_brackets) + inner_brackets.clear() + yield omit + + if ( + prev + and prev.type == token.COMMA + and leaf.opening_bracket is not None + and not is_one_sequence_between(leaf.opening_bracket, leaf, line.leaves) + ): + # Never omit bracket pairs with trailing commas. + # We need to explode on those. + break + + if leaf.value: + opening_bracket = leaf.opening_bracket + closing_bracket = leaf + + +def run_transformer( + line: Line, + transform: Transformer, + mode: Mode, + features: Collection[Feature], + *, + line_str: str = "", +) -> List[Line]: + if not line_str: + line_str = line_to_string(line) + result: List[Line] = [] + for transformed_line in transform(line, features): + if str(transformed_line).strip("\n") == line_str: + raise CannotTransform("Line transformer returned an unchanged result") + + result.extend(transform_line(transformed_line, mode=mode, features=features)) + + if ( + transform.__class__.__name__ != "rhs" + or not line.bracket_tracker.invisible + or any(bracket.value for bracket in line.bracket_tracker.invisible) + or line.contains_multiline_strings() + or result[0].contains_uncollapsable_type_comments() + or result[0].contains_unsplittable_type_ignore() + or is_line_short_enough(result[0], line_length=mode.line_length) + # If any leaves have no parents (which _can_ occur since + # `transform(line)` potentially destroys the line's underlying node + # structure), then we can't proceed. Doing so would cause the below + # call to `append_leaves()` to fail. 
+ or any(leaf.parent is None for leaf in line.leaves) + ): + return result + + line_copy = line.clone() + append_leaves(line_copy, line, line.leaves) + features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES} + second_opinion = run_transformer( + line_copy, transform, mode, features_fop, line_str=line_str + ) + if all( + is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion + ): + result = second_opinion + return result diff --git a/src/black/lines.py b/src/black/lines.py new file mode 100644 index 0000000..3062265 --- /dev/null +++ b/src/black/lines.py @@ -0,0 +1,811 @@ +import itertools +import sys +from dataclasses import dataclass, field +from typing import ( + Callable, + Dict, + Iterator, + List, + Optional, + Sequence, + Tuple, + TypeVar, + cast, +) + +from black.brackets import DOT_PRIORITY, BracketTracker +from black.mode import Mode, Preview +from black.nodes import ( + BRACKETS, + CLOSING_BRACKETS, + OPENING_BRACKETS, + STANDALONE_COMMENT, + TEST_DESCENDANTS, + child_towards, + is_import, + is_multiline_string, + is_one_sequence_between, + is_type_comment, + replace_child, + syms, + whitespace, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + +# types +T = TypeVar("T") +Index = int +LeafID = int + + +@dataclass +class Line: + """Holds leaves and comments. Can be printed with `str(line)`.""" + + mode: Mode + depth: int = 0 + leaves: List[Leaf] = field(default_factory=list) + # keys ordered like `leaves` + comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict) + bracket_tracker: BracketTracker = field(default_factory=BracketTracker) + inside_brackets: bool = False + should_split_rhs: bool = False + magic_trailing_comma: Optional[Leaf] = None + + def append(self, leaf: Leaf, preformatted: bool = False) -> None: + """Add a new `leaf` to the end of the line. + + Unless `preformatted` is True, the `leaf` will receive a new consistent + whitespace prefix and metadata applied by :class:`BracketTracker`. + Trailing commas are maybe removed, unpacked for loop variables are + demoted from being delimiters. + + Inline comments are put aside. + """ + has_value = leaf.type in BRACKETS or bool(leaf.value.strip()) + if not has_value: + return + + if token.COLON == leaf.type and self.is_class_paren_empty: + del self.leaves[-2:] + if self.leaves and not preformatted: + # Note: at this point leaf.prefix should be empty except for + # imports, for which we only preserve newlines. + leaf.prefix += whitespace( + leaf, complex_subscript=self.is_complex_subscript(leaf) + ) + if self.inside_brackets or not preformatted: + self.bracket_tracker.mark(leaf) + if self.mode.magic_trailing_comma: + if self.has_magic_trailing_comma(leaf): + self.magic_trailing_comma = leaf + elif self.has_magic_trailing_comma(leaf, ensure_removable=True): + self.remove_trailing_comma() + if not self.append_comment(leaf): + self.leaves.append(leaf) + + def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None: + """Like :func:`append()` but disallow invalid standalone comment structure. + + Raises ValueError when any `leaf` is appended after a standalone comment + or when a standalone comment is not the first leaf on the line. 
+ """ + if self.bracket_tracker.depth == 0: + if self.is_comment: + raise ValueError("cannot append to standalone comments") + + if self.leaves and leaf.type == STANDALONE_COMMENT: + raise ValueError( + "cannot append standalone comments to a populated line" + ) + + self.append(leaf, preformatted=preformatted) + + @property + def is_comment(self) -> bool: + """Is this line a standalone comment?""" + return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT + + @property + def is_decorator(self) -> bool: + """Is this line a decorator?""" + return bool(self) and self.leaves[0].type == token.AT + + @property + def is_import(self) -> bool: + """Is this an import line?""" + return bool(self) and is_import(self.leaves[0]) + + @property + def is_class(self) -> bool: + """Is this line a class definition?""" + return ( + bool(self) + and self.leaves[0].type == token.NAME + and self.leaves[0].value == "class" + ) + + @property + def is_stub_class(self) -> bool: + """Is this line a class definition with a body consisting only of "..."?""" + return self.is_class and self.leaves[-3:] == [ + Leaf(token.DOT, ".") for _ in range(3) + ] + + @property + def is_def(self) -> bool: + """Is this a function definition? (Also returns True for async defs.)""" + try: + first_leaf = self.leaves[0] + except IndexError: + return False + + try: + second_leaf: Optional[Leaf] = self.leaves[1] + except IndexError: + second_leaf = None + return (first_leaf.type == token.NAME and first_leaf.value == "def") or ( + first_leaf.type == token.ASYNC + and second_leaf is not None + and second_leaf.type == token.NAME + and second_leaf.value == "def" + ) + + @property + def is_class_paren_empty(self) -> bool: + """Is this a class with no base classes but using parentheses? + + Those are unnecessary and should be removed. + """ + return ( + bool(self) + and len(self.leaves) == 4 + and self.is_class + and self.leaves[2].type == token.LPAR + and self.leaves[2].value == "(" + and self.leaves[3].type == token.RPAR + and self.leaves[3].value == ")" + ) + + @property + def is_triple_quoted_string(self) -> bool: + """Is the line a triple quoted string?""" + return ( + bool(self) + and self.leaves[0].type == token.STRING + and self.leaves[0].value.startswith(('"""', "'''")) + ) + + @property + def opens_block(self) -> bool: + """Does this line open a new level of indentation.""" + if len(self.leaves) == 0: + return False + return self.leaves[-1].type == token.COLON + + def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool: + """If so, needs to be split before emitting.""" + for leaf in self.leaves: + if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit: + return True + + return False + + def contains_uncollapsable_type_comments(self) -> bool: + ignored_ids = set() + try: + last_leaf = self.leaves[-1] + ignored_ids.add(id(last_leaf)) + if last_leaf.type == token.COMMA or ( + last_leaf.type == token.RPAR and not last_leaf.value + ): + # When trailing commas or optional parens are inserted by Black for + # consistency, comments after the previous last element are not moved + # (they don't have to, rendering will still be correct). So we ignore + # trailing commas and invisible. 
+ last_leaf = self.leaves[-2] + ignored_ids.add(id(last_leaf)) + except IndexError: + return False + + # A type comment is uncollapsable if it is attached to a leaf + # that isn't at the end of the line (since that could cause it + # to get associated to a different argument) or if there are + # comments before it (since that could cause it to get hidden + # behind a comment. + comment_seen = False + for leaf_id, comments in self.comments.items(): + for comment in comments: + if is_type_comment(comment): + if comment_seen or ( + not is_type_comment(comment, " ignore") + and leaf_id not in ignored_ids + ): + return True + + comment_seen = True + + return False + + def contains_unsplittable_type_ignore(self) -> bool: + if not self.leaves: + return False + + # If a 'type: ignore' is attached to the end of a line, we + # can't split the line, because we can't know which of the + # subexpressions the ignore was meant to apply to. + # + # We only want this to apply to actual physical lines from the + # original source, though: we don't want the presence of a + # 'type: ignore' at the end of a multiline expression to + # justify pushing it all onto one line. Thus we + # (unfortunately) need to check the actual source lines and + # only report an unsplittable 'type: ignore' if this line was + # one line in the original code. + + # Grab the first and last line numbers, skipping generated leaves + first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0) + last_line = next( + (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0 + ) + + if first_line == last_line: + # We look at the last two leaves since a comma or an + # invisible paren could have been added at the end of the + # line. + for node in self.leaves[-2:]: + for comment in self.comments.get(id(node), []): + if is_type_comment(comment, " ignore"): + return True + + return False + + def contains_multiline_strings(self) -> bool: + return any(is_multiline_string(leaf) for leaf in self.leaves) + + def has_magic_trailing_comma( + self, closing: Leaf, ensure_removable: bool = False + ) -> bool: + """Return True if we have a magic trailing comma, that is when: + - there's a trailing comma here + - it's not a one-tuple + - it's not a single-element subscript + Additionally, if ensure_removable: + - it's not from square bracket indexing + (specifically, single-element square bracket indexing with + Preview.skip_magic_trailing_comma_in_subscript) + """ + if not ( + closing.type in CLOSING_BRACKETS + and self.leaves + and self.leaves[-1].type == token.COMMA + ): + return False + + if closing.type == token.RBRACE: + return True + + if closing.type == token.RSQB: + if ( + Preview.one_element_subscript in self.mode + and closing.parent + and closing.parent.type == syms.trailer + and closing.opening_bracket + and is_one_sequence_between( + closing.opening_bracket, + closing, + self.leaves, + brackets=(token.LSQB, token.RSQB), + ) + ): + return False + + if not ensure_removable: + return True + + comma = self.leaves[-1] + if comma.parent is None: + return False + if Preview.skip_magic_trailing_comma_in_subscript in self.mode: + return ( + comma.parent.type != syms.subscriptlist + or closing.opening_bracket is None + or not is_one_sequence_between( + closing.opening_bracket, + closing, + self.leaves, + brackets=(token.LSQB, token.RSQB), + ) + ) + return comma.parent.type == syms.listmaker + + if self.is_import: + return True + + if closing.opening_bracket is not None and not is_one_sequence_between( + 
+            closing.opening_bracket, closing, self.leaves
+        ):
+            return True
+
+        return False
+
+    def append_comment(self, comment: Leaf) -> bool:
+        """Add an inline or standalone comment to the line."""
+        if (
+            comment.type == STANDALONE_COMMENT
+            and self.bracket_tracker.any_open_brackets()
+        ):
+            comment.prefix = ""
+            return False
+
+        if comment.type != token.COMMENT:
+            return False
+
+        if not self.leaves:
+            comment.type = STANDALONE_COMMENT
+            comment.prefix = ""
+            return False
+
+        last_leaf = self.leaves[-1]
+        if (
+            last_leaf.type == token.RPAR
+            and not last_leaf.value
+            and last_leaf.parent
+            and len(list(last_leaf.parent.leaves())) <= 3
+            and not is_type_comment(comment)
+        ):
+            # Comments on an optional parens wrapping a single leaf should belong to
+            # the wrapped node except if it's a type comment. Pinning the comment like
+            # this avoids unstable formatting caused by comment migration.
+            if len(self.leaves) < 2:
+                comment.type = STANDALONE_COMMENT
+                comment.prefix = ""
+                return False
+
+            last_leaf = self.leaves[-2]
+        self.comments.setdefault(id(last_leaf), []).append(comment)
+        return True
+
+    def comments_after(self, leaf: Leaf) -> List[Leaf]:
+        """Generate comments that should appear directly after `leaf`."""
+        return self.comments.get(id(leaf), [])
+
+    def remove_trailing_comma(self) -> None:
+        """Remove the trailing comma and move the comments attached to it."""
+        trailing_comma = self.leaves.pop()
+        trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
+        self.comments.setdefault(id(self.leaves[-1]), []).extend(
+            trailing_comma_comments
+        )
+
+    def is_complex_subscript(self, leaf: Leaf) -> bool:
+        """Return True iff `leaf` is part of a slice with non-trivial exprs."""
+        open_lsqb = self.bracket_tracker.get_open_lsqb()
+        if open_lsqb is None:
+            return False
+
+        subscript_start = open_lsqb.next_sibling
+
+        if isinstance(subscript_start, Node):
+            if subscript_start.type == syms.listmaker:
+                return False
+
+            if subscript_start.type == syms.subscriptlist:
+                subscript_start = child_towards(subscript_start, leaf)
+        return subscript_start is not None and any(
+            n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
+        )
+
+    def enumerate_with_length(
+        self, reversed: bool = False
+    ) -> Iterator[Tuple[Index, Leaf, int]]:
+        """Return an enumeration of leaves with their length.
+
+        Stops prematurely on multiline strings and standalone comments.
+        """
+        op = cast(
+            Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
+            enumerate_reversed if reversed else enumerate,
+        )
+        for index, leaf in op(self.leaves):
+            length = len(leaf.prefix) + len(leaf.value)
+            if "\n" in leaf.value:
+                return  # Multiline strings, we can't continue.
+ + for comment in self.comments_after(leaf): + length += len(comment.value) + + yield index, leaf, length + + def clone(self) -> "Line": + return Line( + mode=self.mode, + depth=self.depth, + inside_brackets=self.inside_brackets, + should_split_rhs=self.should_split_rhs, + magic_trailing_comma=self.magic_trailing_comma, + ) + + def __str__(self) -> str: + """Render the line.""" + if not self: + return "\n" + + indent = " " * self.depth + leaves = iter(self.leaves) + first = next(leaves) + res = f"{first.prefix}{indent}{first.value}" + for leaf in leaves: + res += str(leaf) + for comment in itertools.chain.from_iterable(self.comments.values()): + res += str(comment) + + return res + "\n" + + def __bool__(self) -> bool: + """Return True if the line has leaves or comments.""" + return bool(self.leaves or self.comments) + + +@dataclass +class EmptyLineTracker: + """Provides a stateful method that returns the number of potential extra + empty lines needed before and after the currently processed line. + + Note: this tracker works on lines that haven't been split yet. It assumes + the prefix of the first leaf consists of optional newlines. Those newlines + are consumed by `maybe_empty_lines()` and included in the computation. + """ + + is_pyi: bool = False + previous_line: Optional[Line] = None + previous_after: int = 0 + previous_defs: List[int] = field(default_factory=list) + + def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: + """Return the number of extra empty lines before and after the `current_line`. + + This is for separating `def`, `async def` and `class` with extra empty + lines (two on module-level). + """ + before, after = self._maybe_empty_lines(current_line) + before = ( + # Black should not insert empty lines at the beginning + # of the file + 0 + if self.previous_line is None + else before - self.previous_after + ) + self.previous_after = after + self.previous_line = current_line + return before, after + + def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: + max_allowed = 1 + if current_line.depth == 0: + max_allowed = 1 if self.is_pyi else 2 + if current_line.leaves: + # Consume the first leaf's extra newlines. + first_leaf = current_line.leaves[0] + before = first_leaf.prefix.count("\n") + before = min(before, max_allowed) + first_leaf.prefix = "" + else: + before = 0 + depth = current_line.depth + while self.previous_defs and self.previous_defs[-1] >= depth: + if self.is_pyi: + assert self.previous_line is not None + if depth and not current_line.is_def and self.previous_line.is_def: + # Empty lines between attributes and methods should be preserved. + before = min(1, before) + elif depth: + before = 0 + else: + before = 1 + else: + if depth: + before = 1 + elif ( + not depth + and self.previous_defs[-1] + and current_line.leaves[-1].type == token.COLON + and ( + current_line.leaves[0].value + not in ("with", "try", "for", "while", "if", "match") + ) + ): + # We shouldn't add two newlines between an indented function and + # a dependent non-indented clause. This is to avoid issues with + # conditional function definitions that are technically top-level + # and therefore get two trailing newlines, but look weird and + # inconsistent when they're followed by elif, else, etc. This is + # worse because these functions only get *one* preceding newline + # already. 
+ before = 1 + else: + before = 2 + self.previous_defs.pop() + if current_line.is_decorator or current_line.is_def or current_line.is_class: + return self._maybe_empty_lines_for_class_or_def(current_line, before) + + if ( + self.previous_line + and self.previous_line.is_import + and not current_line.is_import + and depth == self.previous_line.depth + ): + return (before or 1), 0 + + if ( + self.previous_line + and self.previous_line.is_class + and current_line.is_triple_quoted_string + ): + return before, 1 + + if ( + Preview.remove_block_trailing_newline in current_line.mode + and self.previous_line + and self.previous_line.opens_block + ): + return 0, 0 + return before, 0 + + def _maybe_empty_lines_for_class_or_def( + self, current_line: Line, before: int + ) -> Tuple[int, int]: + if not current_line.is_decorator: + self.previous_defs.append(current_line.depth) + if self.previous_line is None: + # Don't insert empty lines before the first line in the file. + return 0, 0 + + if self.previous_line.is_decorator: + if self.is_pyi and current_line.is_stub_class: + # Insert an empty line after a decorated stub class + return 0, 1 + + return 0, 0 + + if self.previous_line.depth < current_line.depth and ( + self.previous_line.is_class or self.previous_line.is_def + ): + return 0, 0 + + if ( + self.previous_line.is_comment + and self.previous_line.depth == current_line.depth + and before == 0 + ): + return 0, 0 + + if self.is_pyi: + if current_line.is_class or self.previous_line.is_class: + if self.previous_line.depth < current_line.depth: + newlines = 0 + elif self.previous_line.depth > current_line.depth: + newlines = 1 + elif current_line.is_stub_class and self.previous_line.is_stub_class: + # No blank line between classes with an empty body + newlines = 0 + else: + newlines = 1 + elif ( + current_line.is_def or current_line.is_decorator + ) and not self.previous_line.is_def: + if current_line.depth: + # In classes empty lines between attributes and methods should + # be preserved. + newlines = min(1, before) + else: + # Blank line between a block of functions (maybe with preceding + # decorators) and a block of non-functions + newlines = 1 + elif self.previous_line.depth > current_line.depth: + newlines = 1 + else: + newlines = 0 + else: + newlines = 1 if current_line.depth else 2 + return newlines, 0 + + +def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: + """Like `reversed(enumerate(sequence))` if that were possible.""" + index = len(sequence) - 1 + for element in reversed(sequence): + yield (index, element) + index -= 1 + + +def append_leaves( + new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False +) -> None: + """ + Append leaves (taken from @old_line) to @new_line, making sure to fix the + underlying Node structure where appropriate. + + All of the leaves in @leaves are duplicated. The duplicates are then + appended to @new_line and used to replace their originals in the underlying + Node structure. Any comments attached to the old leaves are reattached to + the new leaves. + + Pre-conditions: + set(@leaves) is a subset of set(@old_line.leaves). 
+ """ + for old_leaf in leaves: + new_leaf = Leaf(old_leaf.type, old_leaf.value) + replace_child(old_leaf, new_leaf) + new_line.append(new_leaf, preformatted=preformatted) + + for comment_leaf in old_line.comments_after(old_leaf): + new_line.append(comment_leaf, preformatted=True) + + +def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool: + """Return True if `line` is no longer than `line_length`. + + Uses the provided `line_str` rendering, if any, otherwise computes a new one. + """ + if not line_str: + line_str = line_to_string(line) + return ( + len(line_str) <= line_length + and "\n" not in line_str # multiline strings + and not line.contains_standalone_comments() + ) + + +def can_be_split(line: Line) -> bool: + """Return False if the line cannot be split *for sure*. + + This is not an exhaustive search but a cheap heuristic that we can use to + avoid some unfortunate formattings (mostly around wrapping unsplittable code + in unnecessary parentheses). + """ + leaves = line.leaves + if len(leaves) < 2: + return False + + if leaves[0].type == token.STRING and leaves[1].type == token.DOT: + call_count = 0 + dot_count = 0 + next = leaves[-1] + for leaf in leaves[-2::-1]: + if leaf.type in OPENING_BRACKETS: + if next.type not in CLOSING_BRACKETS: + return False + + call_count += 1 + elif leaf.type == token.DOT: + dot_count += 1 + elif leaf.type == token.NAME: + if not (next.type == token.DOT or next.type in OPENING_BRACKETS): + return False + + elif leaf.type not in CLOSING_BRACKETS: + return False + + if dot_count > 1 and call_count > 1: + return False + + return True + + +def can_omit_invisible_parens( + line: Line, + line_length: int, +) -> bool: + """Does `line` have a shape safe to reformat without optional parens around it? + + Returns True for only a subset of potentially nice looking formattings but + the point is to not return false positives that end up producing lines that + are too long. + """ + bt = line.bracket_tracker + if not bt.delimiters: + # Without delimiters the optional parentheses are useless. + return True + + max_priority = bt.max_delimiter_priority() + if bt.delimiter_count_with_priority(max_priority) > 1: + # With more than one delimiter of a kind the optional parentheses read better. + return False + + if max_priority == DOT_PRIORITY: + # A single stranded method call doesn't require optional parentheses. + return True + + assert len(line.leaves) >= 2, "Stranded delimiter" + + # With a single delimiter, omit if the expression starts or ends with + # a bracket. + first = line.leaves[0] + second = line.leaves[1] + if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: + if _can_omit_opening_paren(line, first=first, line_length=line_length): + return True + + # Note: we are not returning False here because a line might have *both* + # a leading opening bracket and a trailing closing bracket. If the + # opening bracket doesn't match our rule, maybe the closing will. + + penultimate = line.leaves[-2] + last = line.leaves[-1] + + if ( + last.type == token.RPAR + or last.type == token.RBRACE + or ( + # don't use indexing for omitting optional parentheses; + # it looks weird + last.type == token.RSQB + and last.parent + and last.parent.type != syms.trailer + ) + ): + if penultimate.type in OPENING_BRACKETS: + # Empty brackets don't help. + return False + + if is_multiline_string(first): + # Additional wrapping of a multiline string in this situation is + # unnecessary. 
+ return True + + if _can_omit_closing_paren(line, last=last, line_length=line_length): + return True + + return False + + +def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + remainder = False + length = 4 * line.depth + _index = -1 + for _index, leaf, leaf_length in line.enumerate_with_length(): + if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: + remainder = True + if remainder: + length += leaf_length + if length > line_length: + break + + if leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. + remainder = False + + else: + # checked the entire string and line length wasn't exceeded + if len(line.leaves) == _index + 1: + return True + + return False + + +def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool: + """See `can_omit_invisible_parens`.""" + length = 4 * line.depth + seen_other_brackets = False + for _index, leaf, leaf_length in line.enumerate_with_length(): + length += leaf_length + if leaf is last.opening_bracket: + if seen_other_brackets or length <= line_length: + return True + + elif leaf.type in OPENING_BRACKETS: + # There are brackets we can further split on. + seen_other_brackets = True + + return False + + +def line_to_string(line: Line) -> str: + """Returns the string representation of @line. + + WARNING: This is known to be computationally expensive. + """ + return str(line).strip("\n") diff --git a/src/black/mode.py b/src/black/mode.py new file mode 100644 index 0000000..e3c3645 --- /dev/null +++ b/src/black/mode.py @@ -0,0 +1,218 @@ +"""Data structures configuring Black behavior. + +Mostly around Python language feature support per version and Black configuration +chosen by the user. +""" + +import sys +from dataclasses import dataclass, field +from enum import Enum, auto +from hashlib import sha256 +from operator import attrgetter +from typing import Dict, Set +from warnings import warn + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from black.const import DEFAULT_LINE_LENGTH + + +class TargetVersion(Enum): + PY33 = 3 + PY34 = 4 + PY35 = 5 + PY36 = 6 + PY37 = 7 + PY38 = 8 + PY39 = 9 + PY310 = 10 + PY311 = 11 + + +class Feature(Enum): + F_STRINGS = 2 + NUMERIC_UNDERSCORES = 3 + TRAILING_COMMA_IN_CALL = 4 + TRAILING_COMMA_IN_DEF = 5 + # The following two feature-flags are mutually exclusive, and exactly one should be + # set for every version of python. 
+ ASYNC_IDENTIFIERS = 6 + ASYNC_KEYWORDS = 7 + ASSIGNMENT_EXPRESSIONS = 8 + POS_ONLY_ARGUMENTS = 9 + RELAXED_DECORATORS = 10 + PATTERN_MATCHING = 11 + UNPACKING_ON_FLOW = 12 + ANN_ASSIGN_EXTENDED_RHS = 13 + EXCEPT_STAR = 14 + VARIADIC_GENERICS = 15 + DEBUG_F_STRINGS = 16 + FORCE_OPTIONAL_PARENTHESES = 50 + + # __future__ flags + FUTURE_ANNOTATIONS = 51 + + +FUTURE_FLAG_TO_FEATURE: Final = { + "annotations": Feature.FUTURE_ANNOTATIONS, +} + + +VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = { + TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS}, + TargetVersion.PY36: { + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_IDENTIFIERS, + }, + TargetVersion.PY37: { + Feature.F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + }, + TargetVersion.PY38: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + }, + TargetVersion.PY39: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + }, + TargetVersion.PY310: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PATTERN_MATCHING, + }, + TargetVersion.PY311: { + Feature.F_STRINGS, + Feature.DEBUG_F_STRINGS, + Feature.NUMERIC_UNDERSCORES, + Feature.TRAILING_COMMA_IN_CALL, + Feature.TRAILING_COMMA_IN_DEF, + Feature.ASYNC_KEYWORDS, + Feature.FUTURE_ANNOTATIONS, + Feature.ASSIGNMENT_EXPRESSIONS, + Feature.RELAXED_DECORATORS, + Feature.POS_ONLY_ARGUMENTS, + Feature.UNPACKING_ON_FLOW, + Feature.ANN_ASSIGN_EXTENDED_RHS, + Feature.PATTERN_MATCHING, + Feature.EXCEPT_STAR, + Feature.VARIADIC_GENERICS, + }, +} + + +def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool: + return all(feature in VERSION_TO_FEATURES[version] for version in target_versions) + + +class Preview(Enum): + """Individual preview style features.""" + + annotation_parens = auto() + long_docstring_quotes_on_newline = auto() + normalize_docstring_quotes_and_prefixes_properly = auto() + one_element_subscript = auto() + remove_block_trailing_newline = auto() + remove_redundant_parens = auto() + string_processing = auto() + skip_magic_trailing_comma_in_subscript = auto() + + +class Deprecated(UserWarning): + """Visible deprecation warning.""" + + +@dataclass +class Mode: + target_versions: Set[TargetVersion] = field(default_factory=set) + line_length: int = DEFAULT_LINE_LENGTH + string_normalization: bool = True + is_pyi: bool = False + 
is_ipynb: bool = False + skip_source_first_line: bool = False + magic_trailing_comma: bool = True + experimental_string_processing: bool = False + python_cell_magics: Set[str] = field(default_factory=set) + preview: bool = False + + def __post_init__(self) -> None: + if self.experimental_string_processing: + warn( + "`experimental string processing` has been included in `preview`" + " and deprecated. Use `preview` instead.", + Deprecated, + ) + + def __contains__(self, feature: Preview) -> bool: + """ + Provide `Preview.FEATURE in Mode` syntax that mirrors the ``preview`` flag. + + The argument is not checked and features are not differentiated. + They only exist to make development easier by clarifying intent. + """ + if feature is Preview.string_processing: + return self.preview or self.experimental_string_processing + return self.preview + + def get_cache_key(self) -> str: + if self.target_versions: + version_str = ",".join( + str(version.value) + for version in sorted(self.target_versions, key=attrgetter("value")) + ) + else: + version_str = "-" + parts = [ + version_str, + str(self.line_length), + str(int(self.string_normalization)), + str(int(self.is_pyi)), + str(int(self.is_ipynb)), + str(int(self.skip_source_first_line)), + str(int(self.magic_trailing_comma)), + str(int(self.experimental_string_processing)), + str(int(self.preview)), + sha256((",".join(sorted(self.python_cell_magics))).encode()).hexdigest(), + ] + return ".".join(parts) diff --git a/src/black/nodes.py b/src/black/nodes.py new file mode 100644 index 0000000..aeb2be3 --- /dev/null +++ b/src/black/nodes.py @@ -0,0 +1,850 @@ +""" +blib2to3 Node/Leaf transformation-related utility functions. +""" + +import sys +from typing import Generic, Iterator, List, Optional, Set, Tuple, TypeVar, Union + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +from mypy_extensions import mypyc_attr + +from black.cache import CACHE_DIR +from black.strings import has_triple_quotes +from blib2to3 import pygram +from blib2to3.pgen2 import token +from blib2to3.pytree import NL, Leaf, Node, type_repr + +pygram.initialize(CACHE_DIR) +syms: Final = pygram.python_symbols + + +# types +T = TypeVar("T") +LN = Union[Leaf, Node] +LeafID = int +NodeType = int + + +WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE} +STATEMENT: Final = { + syms.if_stmt, + syms.while_stmt, + syms.for_stmt, + syms.try_stmt, + syms.except_clause, + syms.with_stmt, + syms.funcdef, + syms.classdef, + syms.match_stmt, + syms.case_block, +} +STANDALONE_COMMENT: Final = 153 +token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT" +LOGIC_OPERATORS: Final = {"and", "or"} +COMPARATORS: Final = { + token.LESS, + token.GREATER, + token.EQEQUAL, + token.NOTEQUAL, + token.LESSEQUAL, + token.GREATEREQUAL, +} +MATH_OPERATORS: Final = { + token.VBAR, + token.CIRCUMFLEX, + token.AMPER, + token.LEFTSHIFT, + token.RIGHTSHIFT, + token.PLUS, + token.MINUS, + token.STAR, + token.SLASH, + token.DOUBLESLASH, + token.PERCENT, + token.AT, + token.TILDE, + token.DOUBLESTAR, +} +STARS: Final = {token.STAR, token.DOUBLESTAR} +VARARGS_SPECIALS: Final = STARS | {token.SLASH} +VARARGS_PARENTS: Final = { + syms.arglist, + syms.argument, # double star in arglist + syms.trailer, # single argument to call + syms.typedargslist, + syms.varargslist, # lambdas +} +UNPACKING_PARENTS: Final = { + syms.atom, # single element of a 
list or set literal + syms.dictsetmaker, + syms.listmaker, + syms.testlist_gexp, + syms.testlist_star_expr, + syms.subject_expr, + syms.pattern, +} +TEST_DESCENDANTS: Final = { + syms.test, + syms.lambdef, + syms.or_test, + syms.and_test, + syms.not_test, + syms.comparison, + syms.star_expr, + syms.expr, + syms.xor_expr, + syms.and_expr, + syms.shift_expr, + syms.arith_expr, + syms.trailer, + syms.term, + syms.power, +} +TYPED_NAMES: Final = {syms.tname, syms.tname_star} +ASSIGNMENTS: Final = { + "=", + "+=", + "-=", + "*=", + "@=", + "/=", + "%=", + "&=", + "|=", + "^=", + "<<=", + ">>=", + "**=", + "//=", +} + +IMPLICIT_TUPLE: Final = {syms.testlist, syms.testlist_star_expr, syms.exprlist} +BRACKET: Final = { + token.LPAR: token.RPAR, + token.LSQB: token.RSQB, + token.LBRACE: token.RBRACE, +} +OPENING_BRACKETS: Final = set(BRACKET.keys()) +CLOSING_BRACKETS: Final = set(BRACKET.values()) +BRACKETS: Final = OPENING_BRACKETS | CLOSING_BRACKETS +ALWAYS_NO_SPACE: Final = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT} + +RARROW = 55 + + +@mypyc_attr(allow_interpreted_subclasses=True) +class Visitor(Generic[T]): + """Basic lib2to3 visitor that yields things of type `T` on `visit()`.""" + + def visit(self, node: LN) -> Iterator[T]: + """Main method to visit `node` and its children. + + It tries to find a `visit_*()` method for the given `node.type`, like + `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects. + If no dedicated `visit_*()` method is found, chooses `visit_default()` + instead. + + Then yields objects of type `T` from the selected visitor. + """ + if node.type < 256: + name = token.tok_name[node.type] + else: + name = str(type_repr(node.type)) + # We explicitly branch on whether a visitor exists (instead of + # using self.visit_default as the default arg to getattr) in order + # to save needing to create a bound method object and so mypyc can + # generate a native call to visit_default. + visitf = getattr(self, f"visit_{name}", None) + if visitf: + yield from visitf(node) + else: + yield from self.visit_default(node) + + def visit_default(self, node: LN) -> Iterator[T]: + """Default `visit_*()` implementation. Recurses to children of `node`.""" + if isinstance(node, Node): + for child in node.children: + yield from self.visit(child) + + +def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901 + """Return whitespace prefix if needed for the given `leaf`. + + `complex_subscript` signals whether the given leaf is part of a subscription + which has non-trivial arguments, like arithmetic expressions or function calls. 
+ """ + NO: Final = "" + SPACE: Final = " " + DOUBLESPACE: Final = " " + t = leaf.type + p = leaf.parent + v = leaf.value + if t in ALWAYS_NO_SPACE: + return NO + + if t == token.COMMENT: + return DOUBLESPACE + + assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}" + if t == token.COLON and p.type not in { + syms.subscript, + syms.subscriptlist, + syms.sliceop, + }: + return NO + + prev = leaf.prev_sibling + if not prev: + prevp = preceding_leaf(p) + if not prevp or prevp.type in OPENING_BRACKETS: + return NO + + if t == token.COLON: + if prevp.type == token.COLON: + return NO + + elif prevp.type != token.COMMA and not complex_subscript: + return NO + + return SPACE + + if prevp.type == token.EQUAL: + if prevp.parent: + if prevp.parent.type in { + syms.arglist, + syms.argument, + syms.parameters, + syms.varargslist, + }: + return NO + + elif prevp.parent.type == syms.typedargslist: + # A bit hacky: if the equal sign has whitespace, it means we + # previously found it's a typed argument. So, we're using + # that, too. + return prevp.prefix + + elif ( + prevp.type == token.STAR + and parent_type(prevp) == syms.star_expr + and parent_type(prevp.parent) == syms.subscriptlist + ): + # No space between typevar tuples. + return NO + + elif prevp.type in VARARGS_SPECIALS: + if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS): + return NO + + elif prevp.type == token.COLON: + if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}: + return SPACE if complex_subscript else NO + + elif ( + prevp.parent + and prevp.parent.type == syms.factor + and prevp.type in MATH_OPERATORS + ): + return NO + + elif prevp.type == token.AT and p.parent and p.parent.type == syms.decorator: + # no space in decorators + return NO + + elif prev.type in OPENING_BRACKETS: + return NO + + if p.type in {syms.parameters, syms.arglist}: + # untyped function signatures or calls + if not prev or prev.type != token.COMMA: + return NO + + elif p.type == syms.varargslist: + # lambdas + if prev and prev.type != token.COMMA: + return NO + + elif p.type == syms.typedargslist: + # typed function signatures + if not prev: + return NO + + if t == token.EQUAL: + if prev.type not in TYPED_NAMES: + return NO + + elif prev.type == token.EQUAL: + # A bit hacky: if the equal sign has whitespace, it means we + # previously found it's a typed argument. So, we're using that, too. 
+            return prev.prefix
+
+        elif prev.type != token.COMMA:
+            return NO
+
+    elif p.type in TYPED_NAMES:
+        # type names
+        if not prev:
+            prevp = preceding_leaf(p)
+            if not prevp or prevp.type != token.COMMA:
+                return NO
+
+    elif p.type == syms.trailer:
+        # attributes and calls
+        if t == token.LPAR or t == token.RPAR:
+            return NO
+
+        if not prev:
+            if t == token.DOT or t == token.LSQB:
+                return NO
+
+        elif prev.type != token.COMMA:
+            return NO
+
+    elif p.type == syms.argument:
+        # single argument
+        if t == token.EQUAL:
+            return NO
+
+        if not prev:
+            prevp = preceding_leaf(p)
+            if not prevp or prevp.type == token.LPAR:
+                return NO
+
+        elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
+            return NO
+
+    elif p.type == syms.decorator:
+        # decorators
+        return NO
+
+    elif p.type == syms.dotted_name:
+        if prev:
+            return NO
+
+        prevp = preceding_leaf(p)
+        if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
+            return NO
+
+    elif p.type == syms.classdef:
+        if t == token.LPAR:
+            return NO
+
+        if prev and prev.type == token.LPAR:
+            return NO
+
+    elif p.type in {syms.subscript, syms.sliceop}:
+        # indexing
+        if not prev:
+            assert p.parent is not None, "subscripts are always parented"
+            if p.parent.type == syms.subscriptlist:
+                return SPACE
+
+            return NO
+
+        elif not complex_subscript:
+            return NO
+
+    elif p.type == syms.atom:
+        if prev and t == token.DOT:
+            # dots, but not the first one.
+            return NO
+
+    elif p.type == syms.dictsetmaker:
+        # dict unpacking
+        if prev and prev.type == token.DOUBLESTAR:
+            return NO
+
+    elif p.type in {syms.factor, syms.star_expr}:
+        # unary ops
+        if not prev:
+            prevp = preceding_leaf(p)
+            if not prevp or prevp.type in OPENING_BRACKETS:
+                return NO
+
+            prevp_parent = prevp.parent
+            assert prevp_parent is not None
+            if prevp.type == token.COLON and prevp_parent.type in {
+                syms.subscript,
+                syms.sliceop,
+            }:
+                return NO
+
+            elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
+                return NO
+
+        elif t in {token.NAME, token.NUMBER, token.STRING}:
+            return NO
+
+    elif p.type == syms.import_from:
+        if t == token.DOT:
+            if prev and prev.type == token.DOT:
+                return NO
+
+        elif t == token.NAME:
+            if v == "import":
+                return SPACE
+
+            if prev and prev.type == token.DOT:
+                return NO
+
+    elif p.type == syms.sliceop:
+        return NO
+
+    elif p.type == syms.except_clause:
+        if t == token.STAR:
+            return NO
+
+    return SPACE
+
+
+def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
+    """Return the first leaf that precedes `node`, if any."""
+    while node:
+        res = node.prev_sibling
+        if res:
+            if isinstance(res, Leaf):
+                return res
+
+            try:
+                return list(res.leaves())[-1]
+
+            except IndexError:
+                return None
+
+        node = node.parent
+    return None
+
+
+def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool:
+    """Return whether the `node` and its previous siblings match types against the
+    provided list of tokens; the provided `node` has its type matched against the last
+    element in the list. `None` can be used as the first element to declare that the
+    start of the list is anchored at the start of its parent's children."""
+    if not tokens:
+        return True
+    if tokens[-1] is None:
+        return node is None
+    if not node:
+        return False
+    if node.type != tokens[-1]:
+        return False
+    return prev_siblings_are(node.prev_sibling, tokens[:-1])
+
+
+def parent_type(node: Optional[LN]) -> Optional[NodeType]:
+    """
+    Returns:
+        @node.parent.type, if @node is not None and has a parent.
+            OR
+        None, otherwise.
+ """ + if node is None or node.parent is None: + return None + + return node.parent.type + + +def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]: + """Return the child of `ancestor` that contains `descendant`.""" + node: Optional[LN] = descendant + while node and node.parent != ancestor: + node = node.parent + return node + + +def replace_child(old_child: LN, new_child: LN) -> None: + """ + Side Effects: + * If @old_child.parent is set, replace @old_child with @new_child in + @old_child's underlying Node structure. + OR + * Otherwise, this function does nothing. + """ + parent = old_child.parent + if not parent: + return + + child_idx = old_child.remove() + if child_idx is not None: + parent.insert_child(child_idx, new_child) + + +def container_of(leaf: Leaf) -> LN: + """Return `leaf` or one of its ancestors that is the topmost container of it. + + By "container" we mean a node where `leaf` is the very first child. + """ + same_prefix = leaf.prefix + container: LN = leaf + while container: + parent = container.parent + if parent is None: + break + + if parent.children[0].prefix != same_prefix: + break + + if parent.type == syms.file_input: + break + + if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS: + break + + container = parent + return container + + +def first_leaf_of(node: LN) -> Optional[Leaf]: + """Returns the first leaf of the node tree.""" + if isinstance(node, Leaf): + return node + if node.children: + return first_leaf_of(node.children[0]) + else: + return None + + +def is_arith_like(node: LN) -> bool: + """Whether node is an arithmetic or a binary arithmetic expression""" + return node.type in { + syms.arith_expr, + syms.shift_expr, + syms.xor_expr, + syms.and_expr, + } + + +def is_docstring(leaf: Leaf) -> bool: + if prev_siblings_are( + leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt] + ): + return True + + # Multiline docstring on the same line as the `def`. + if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]): + # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python + # grammar. We're safe to return True without further checks. 
+        return True
+
+    return False
+
+
+def is_empty_tuple(node: LN) -> bool:
+    """Return True if `node` holds an empty tuple."""
+    return (
+        node.type == syms.atom
+        and len(node.children) == 2
+        and node.children[0].type == token.LPAR
+        and node.children[1].type == token.RPAR
+    )
+
+
+def is_one_tuple(node: LN) -> bool:
+    """Return True if `node` holds a tuple with one element, with or without parens."""
+    if node.type == syms.atom:
+        gexp = unwrap_singleton_parenthesis(node)
+        if gexp is None or gexp.type != syms.testlist_gexp:
+            return False
+
+        return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
+
+    return (
+        node.type in IMPLICIT_TUPLE
+        and len(node.children) == 2
+        and node.children[1].type == token.COMMA
+    )
+
+
+def is_one_sequence_between(
+    opening: Leaf,
+    closing: Leaf,
+    leaves: List[Leaf],
+    brackets: Tuple[int, int] = (token.LPAR, token.RPAR),
+) -> bool:
+    """Return True if content between `opening` and `closing` is a one-sequence."""
+    if (opening.type, closing.type) != brackets:
+        return False
+
+    depth = closing.bracket_depth + 1
+    for _opening_index, leaf in enumerate(leaves):
+        if leaf is opening:
+            break
+
+    else:
+        raise LookupError("Opening paren not found in `leaves`")
+
+    commas = 0
+    _opening_index += 1
+    for leaf in leaves[_opening_index:]:
+        if leaf is closing:
+            break
+
+        bracket_depth = leaf.bracket_depth
+        if bracket_depth == depth and leaf.type == token.COMMA:
+            commas += 1
+            if leaf.parent and leaf.parent.type in {
+                syms.arglist,
+                syms.typedargslist,
+            }:
+                commas += 1
+                break
+
+    return commas < 2
+
+
+def is_walrus_assignment(node: LN) -> bool:
+    """Return True iff `node` is of the shape ( test := test )"""
+    inner = unwrap_singleton_parenthesis(node)
+    return inner is not None and inner.type == syms.namedexpr_test
+
+
+def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool:
+    """Return True iff `node` is a trailer valid in a simple decorator"""
+    return node.type == syms.trailer and (
+        (
+            len(node.children) == 2
+            and node.children[0].type == token.DOT
+            and node.children[1].type == token.NAME
+        )
+        # last trailer can be an argument-less parentheses pair
+        or (
+            last
+            and len(node.children) == 2
+            and node.children[0].type == token.LPAR
+            and node.children[1].type == token.RPAR
+        )
+        # last trailer can be arguments
+        or (
+            last
+            and len(node.children) == 3
+            and node.children[0].type == token.LPAR
+            # and node.children[1].type == syms.argument
+            and node.children[2].type == token.RPAR
+        )
+    )
+
+
+def is_simple_decorator_expression(node: LN) -> bool:
+    """Return True iff `node` could be a 'dotted name' decorator
+
+    This function takes the node of the 'namedexpr_test' of the new decorator
+    grammar and tests if it would be valid under the old decorator grammar.
+ + The old grammar was: decorator: @ dotted_name [arguments] NEWLINE + The new grammar is : decorator: @ namedexpr_test NEWLINE + """ + if node.type == token.NAME: + return True + if node.type == syms.power: + if node.children: + return ( + node.children[0].type == token.NAME + and all(map(is_simple_decorator_trailer, node.children[1:-1])) + and ( + len(node.children) < 2 + or is_simple_decorator_trailer(node.children[-1], last=True) + ) + ) + return False + + +def is_yield(node: LN) -> bool: + """Return True if `node` holds a `yield` or `yield from` expression.""" + if node.type == syms.yield_expr: + return True + + if is_name_token(node) and node.value == "yield": + return True + + if node.type != syms.atom: + return False + + if len(node.children) != 3: + return False + + lpar, expr, rpar = node.children + if lpar.type == token.LPAR and rpar.type == token.RPAR: + return is_yield(expr) + + return False + + +def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool: + """Return True if `leaf` is a star or double star in a vararg or kwarg. + + If `within` includes VARARGS_PARENTS, this applies to function signatures. + If `within` includes UNPACKING_PARENTS, it applies to right hand-side + extended iterable unpacking (PEP 3132) and additional unpacking + generalizations (PEP 448). + """ + if leaf.type not in VARARGS_SPECIALS or not leaf.parent: + return False + + p = leaf.parent + if p.type == syms.star_expr: + # Star expressions are also used as assignment targets in extended + # iterable unpacking (PEP 3132). See what its parent is instead. + if not p.parent: + return False + + p = p.parent + + return p.type in within + + +def is_multiline_string(leaf: Leaf) -> bool: + """Return True if `leaf` is a multiline string that actually spans many lines.""" + return has_triple_quotes(leaf.value) and "\n" in leaf.value + + +def is_stub_suite(node: Node) -> bool: + """Return True if `node` is a suite with a stub body.""" + if ( + len(node.children) != 4 + or node.children[0].type != token.NEWLINE + or node.children[1].type != token.INDENT + or node.children[3].type != token.DEDENT + ): + return False + + return is_stub_body(node.children[2]) + + +def is_stub_body(node: LN) -> bool: + """Return True if `node` is a simple statement containing an ellipsis.""" + if not isinstance(node, Node) or node.type != syms.simple_stmt: + return False + + if len(node.children) != 2: + return False + + child = node.children[0] + return ( + child.type == syms.atom + and len(child.children) == 3 + and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) + ) + + +def is_atom_with_invisible_parens(node: LN) -> bool: + """Given a `LN`, determines whether it's an atom `node` with invisible + parens. Useful in dedupe-ing and normalizing parens. 
+ """ + if isinstance(node, Leaf) or node.type != syms.atom: + return False + + first, last = node.children[0], node.children[-1] + return ( + isinstance(first, Leaf) + and first.type == token.LPAR + and first.value == "" + and isinstance(last, Leaf) + and last.type == token.RPAR + and last.value == "" + ) + + +def is_empty_par(leaf: Leaf) -> bool: + return is_empty_lpar(leaf) or is_empty_rpar(leaf) + + +def is_empty_lpar(leaf: Leaf) -> bool: + return leaf.type == token.LPAR and leaf.value == "" + + +def is_empty_rpar(leaf: Leaf) -> bool: + return leaf.type == token.RPAR and leaf.value == "" + + +def is_import(leaf: Leaf) -> bool: + """Return True if the given leaf starts an import statement.""" + p = leaf.parent + t = leaf.type + v = leaf.value + return bool( + t == token.NAME + and ( + (v == "import" and p and p.type == syms.import_name) + or (v == "from" and p and p.type == syms.import_from) + ) + ) + + +def is_type_comment(leaf: Leaf, suffix: str = "") -> bool: + """Return True if the given leaf is a special comment. + Only returns true for type comments for now.""" + t = leaf.type + v = leaf.value + return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix) + + +def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: + """Wrap `child` in parentheses. + + This replaces `child` with an atom holding the parentheses and the old + child. That requires moving the prefix. + + If `visible` is False, the leaves will be valueless (and thus invisible). + """ + lpar = Leaf(token.LPAR, "(" if visible else "") + rpar = Leaf(token.RPAR, ")" if visible else "") + prefix = child.prefix + child.prefix = "" + index = child.remove() or 0 + new_child = Node(syms.atom, [lpar, child, rpar]) + new_child.prefix = prefix + parent.insert_child(index, new_child) + + +def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]: + """Returns `wrapped` if `node` is of the shape ( wrapped ). + + Parenthesis can be optional. Returns None otherwise""" + if len(node.children) != 3: + return None + + lpar, wrapped, rpar = node.children + if not (lpar.type == token.LPAR and rpar.type == token.RPAR): + return None + + return wrapped + + +def ensure_visible(leaf: Leaf) -> None: + """Make sure parentheses are visible. + + They could be invisible as part of some statements (see + :func:`normalize_invisible_parens` and :func:`visit_import_from`). + """ + if leaf.type == token.LPAR: + leaf.value = "(" + elif leaf.type == token.RPAR: + leaf.value = ")" + + +def is_name_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.NAME + + +def is_lpar_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.LPAR + + +def is_rpar_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.RPAR + + +def is_string_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.STRING + + +def is_number_token(nl: NL) -> TypeGuard[Leaf]: + return nl.type == token.NUMBER diff --git a/src/black/numerics.py b/src/black/numerics.py new file mode 100644 index 0000000..879e5b2 --- /dev/null +++ b/src/black/numerics.py @@ -0,0 +1,60 @@ +""" +Formatting numeric literals. 
+""" +from blib2to3.pytree import Leaf + + +def format_hex(text: str) -> str: + """ + Formats a hexadecimal string like "0x12B3" + """ + before, after = text[:2], text[2:] + return f"{before}{after.upper()}" + + +def format_scientific_notation(text: str) -> str: + """Formats a numeric string utilizing scentific notation""" + before, after = text.split("e") + sign = "" + if after.startswith("-"): + after = after[1:] + sign = "-" + elif after.startswith("+"): + after = after[1:] + before = format_float_or_int_string(before) + return f"{before}e{sign}{after}" + + +def format_complex_number(text: str) -> str: + """Formats a complex string like `10j`""" + number = text[:-1] + suffix = text[-1] + return f"{format_float_or_int_string(number)}{suffix}" + + +def format_float_or_int_string(text: str) -> str: + """Formats a float string like "1.0".""" + if "." not in text: + return text + + before, after = text.split(".") + return f"{before or 0}.{after or 0}" + + +def normalize_numeric_literal(leaf: Leaf) -> None: + """Normalizes numeric (float, int, and complex) literals. + + All letters used in the representation are normalized to lowercase.""" + text = leaf.value.lower() + if text.startswith(("0o", "0b")): + # Leave octal and binary literals alone. + pass + elif text.startswith("0x"): + text = format_hex(text) + elif "e" in text: + text = format_scientific_notation(text) + elif text.endswith("j"): + text = format_complex_number(text) + else: + text = format_float_or_int_string(text) + leaf.value = text diff --git a/src/black/output.py b/src/black/output.py new file mode 100644 index 0000000..f4c17f2 --- /dev/null +++ b/src/black/output.py @@ -0,0 +1,105 @@ +"""Nice output for Black. + +The double calls are for patching purposes in tests. +""" + +import json +import tempfile +from typing import Any, Optional + +from click import echo, style +from mypy_extensions import mypyc_attr + + +@mypyc_attr(patchable=True) +def _out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + if message is not None: + if "bold" not in styles: + styles["bold"] = True + message = style(message, **styles) + echo(message, nl=nl, err=True) + + +@mypyc_attr(patchable=True) +def _err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + if message is not None: + if "fg" not in styles: + styles["fg"] = "red" + message = style(message, **styles) + echo(message, nl=nl, err=True) + + +@mypyc_attr(patchable=True) +def out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + _out(message, nl=nl, **styles) + + +def err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None: + _err(message, nl=nl, **styles) + + +def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str: + """Return a unified diff string between each cell in notebooks `a` and `b`.""" + a_nb = json.loads(a) + b_nb = json.loads(b) + diff_lines = [ + diff( + "".join(a_nb["cells"][cell_number]["source"]) + "\n", + "".join(b_nb["cells"][cell_number]["source"]) + "\n", + f"{a_name}:cell_{cell_number}", + f"{b_name}:cell_{cell_number}", + ) + for cell_number, cell in enumerate(a_nb["cells"]) + if cell["cell_type"] == "code" + ] + return "".join(diff_lines) + + +def diff(a: str, b: str, a_name: str, b_name: str) -> str: + """Return a unified diff string between strings `a` and `b`.""" + import difflib + + a_lines = a.splitlines(keepends=True) + b_lines = b.splitlines(keepends=True) + diff_lines = [] + for line in difflib.unified_diff( + a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5 
+ ): + # Work around https://bugs.python.org/issue2142 + # See: + # https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html + if line[-1] == "\n": + diff_lines.append(line) + else: + diff_lines.append(line + "\n") + diff_lines.append("\\ No newline at end of file\n") + return "".join(diff_lines) + + +def color_diff(contents: str) -> str: + """Inject the ANSI color codes to the diff.""" + lines = contents.split("\n") + for i, line in enumerate(lines): + if line.startswith("+++") or line.startswith("---"): + line = "\033[1m" + line + "\033[0m" # bold, reset + elif line.startswith("@@"): + line = "\033[36m" + line + "\033[0m" # cyan, reset + elif line.startswith("+"): + line = "\033[32m" + line + "\033[0m" # green, reset + elif line.startswith("-"): + line = "\033[31m" + line + "\033[0m" # red, reset + lines[i] = line + return "\n".join(lines) + + +@mypyc_attr(patchable=True) +def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str: + """Dump `output` to a temporary file. Return path to the file.""" + with tempfile.NamedTemporaryFile( + mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8" + ) as f: + for lines in output: + f.write(lines) + if ensure_final_newline and lines and lines[-1] != "\n": + f.write("\n") + return f.name diff --git a/src/black/parsing.py b/src/black/parsing.py new file mode 100644 index 0000000..64c0b1e --- /dev/null +++ b/src/black/parsing.py @@ -0,0 +1,278 @@ +""" +Parse Python code and perform AST validation. +""" +import ast +import platform +import sys +from typing import Any, Iterable, Iterator, List, Set, Tuple, Type, Union + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + +from black.mode import Feature, TargetVersion, supports_feature +from black.nodes import syms +from blib2to3 import pygram +from blib2to3.pgen2 import driver +from blib2to3.pgen2.grammar import Grammar +from blib2to3.pgen2.parse import ParseError +from blib2to3.pgen2.tokenize import TokenError +from blib2to3.pytree import Leaf, Node + +ast3: Any + +_IS_PYPY = platform.python_implementation() == "PyPy" + +try: + from typed_ast import ast3 +except ImportError: + # Either our python version is too low, or we're on pypy + if sys.version_info < (3, 7) or (sys.version_info < (3, 8) and not _IS_PYPY): + print( + "The typed_ast package is required but not installed.\n" + "You can upgrade to Python 3.8+ or install typed_ast with\n" + "`python3 -m pip install typed-ast`.", + file=sys.stderr, + ) + sys.exit(1) + else: + ast3 = ast + + +PY2_HINT: Final = "Python 2 support was removed in version 22.0." + + +class InvalidInput(ValueError): + """Raised when input source code fails all parse attempts.""" + + +def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: + if not target_versions: + # No target_version specified, so try all grammars. 
+        return [
+            # Python 3.7+
+            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
+            # Python 3.0-3.6
+            pygram.python_grammar_no_print_statement_no_exec_statement,
+            # Python 3.10+
+            pygram.python_grammar_soft_keywords,
+        ]
+
+    grammars = []
+    # If we have to parse both, try to parse async as a keyword first
+    if not supports_feature(
+        target_versions, Feature.ASYNC_IDENTIFIERS
+    ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):
+        # Python 3.7-3.9
+        grammars.append(
+            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords
+        )
+    if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
+        # Python 3.0-3.6
+        grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
+    if supports_feature(target_versions, Feature.PATTERN_MATCHING):
+        # Python 3.10+
+        grammars.append(pygram.python_grammar_soft_keywords)
+
+    # At least one of the above branches must have been taken, because every Python
+    # version has exactly one of the two 'ASYNC_*' flags
+    return grammars
+
+
+def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
+    """Given a string with source, return the lib2to3 Node."""
+    if not src_txt.endswith("\n"):
+        src_txt += "\n"
+
+    grammars = get_grammars(set(target_versions))
+    errors = {}
+    for grammar in grammars:
+        drv = driver.Driver(grammar)
+        try:
+            result = drv.parse_string(src_txt, True)
+            break
+
+        except ParseError as pe:
+            lineno, column = pe.context[1]
+            lines = src_txt.splitlines()
+            try:
+                faulty_line = lines[lineno - 1]
+            except IndexError:
+                faulty_line = "<line number missing in source>"
+            errors[grammar.version] = InvalidInput(
+                f"Cannot parse: {lineno}:{column}: {faulty_line}"
+            )
+
+        except TokenError as te:
+            # In edge cases these are raised, and typically don't have a "faulty_line".
+            lineno, column = te.args[1]
+            errors[grammar.version] = InvalidInput(
+                f"Cannot parse: {lineno}:{column}: {te.args[0]}"
+            )
+
+    else:
+        # Choose the latest version when raising the actual parsing error.
+        assert len(errors) >= 1
+        exc = errors[max(errors)]
+
+        if matches_grammar(src_txt, pygram.python_grammar) or matches_grammar(
+            src_txt, pygram.python_grammar_no_print_statement
+        ):
+            original_msg = exc.args[0]
+            msg = f"{original_msg}\n{PY2_HINT}"
+            raise InvalidInput(msg) from None
+
+        raise exc from None
+
+    if isinstance(result, Leaf):
+        result = Node(syms.file_input, [result])
+    return result
+
+
+def matches_grammar(src_txt: str, grammar: Grammar) -> bool:
+    drv = driver.Driver(grammar)
+    try:
+        drv.parse_string(src_txt, True)
+    except (ParseError, TokenError, IndentationError):
+        return False
+    else:
+        return True
+
+
+def lib2to3_unparse(node: Node) -> str:
+    """Given a lib2to3 node, return its string representation."""
+    code = str(node)
+    return code
+
+
+def parse_single_version(
+    src: str, version: Tuple[int, int]
+) -> Union[ast.AST, ast3.AST]:
+    filename = "<unknown>"
+    # typed-ast is needed because of feature version limitations in the builtin ast
+    # before 3.8
+    if sys.version_info >= (3, 8) and version >= (3,):
+        return ast.parse(src, filename, feature_version=version, type_comments=True)
+
+    if _IS_PYPY:
+        # PyPy 3.7 doesn't support type comment tracking which is not ideal, but there's
+        # not much we can do as typed-ast won't work either.
+        if sys.version_info >= (3, 8):
+            return ast3.parse(src, filename, type_comments=True)
+        else:
+            return ast3.parse(src, filename)
+    else:
+        # Typed-ast is guaranteed to be used here and automatically tracks type
+        # comments separately.
+        return ast3.parse(src, filename, feature_version=version[1])
+
+    raise AssertionError("INTERNAL ERROR: Tried parsing unsupported Python version!")
+
+
+def parse_ast(src: str) -> Union[ast.AST, ast3.AST]:
+    # TODO: support Python 4+ ;)
+    versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)]
+
+    first_error = ""
+    for version in sorted(versions, reverse=True):
+        try:
+            return parse_single_version(src, version)
+        except SyntaxError as e:
+            if not first_error:
+                first_error = str(e)
+
+    raise SyntaxError(first_error)
+
+
+ast3_AST: Final[Type[ast3.AST]] = ast3.AST
+
+
+def _normalize(lineend: str, value: str) -> str:
+    # To normalize, we strip any leading and trailing space from
+    # each line...
+    stripped: List[str] = [i.strip() for i in value.splitlines()]
+    normalized = lineend.join(stripped)
+    # ...and remove any blank lines at the beginning and end of
+    # the whole string
+    return normalized.strip()
+
+
+def stringify_ast(node: Union[ast.AST, ast3.AST], depth: int = 0) -> Iterator[str]:
+    """Simple visitor generating strings to compare ASTs by content."""
+
+    node = fixup_ast_constants(node)
+
+    yield f"{' ' * depth}{node.__class__.__name__}("
+
+    type_ignore_classes: Tuple[Type[Any], ...]
+    for field in sorted(node._fields):  # noqa: F402
+        # TypeIgnore will not be present using pypy < 3.8, so no need for this
+        if not (_IS_PYPY and sys.version_info < (3, 8)):
+            # TypeIgnore has only one field 'lineno' which breaks this comparison
+            type_ignore_classes = (ast3.TypeIgnore,)
+            if sys.version_info >= (3, 8):
+                type_ignore_classes += (ast.TypeIgnore,)
+            if isinstance(node, type_ignore_classes):
+                break
+
+        try:
+            value: object = getattr(node, field)
+        except AttributeError:
+            continue
+
+        yield f"{' ' * (depth+1)}{field}="
+
+        if isinstance(value, list):
+            for item in value:
+                # Ignore nested tuples within del statements, because we may insert
+                # parentheses and they change the AST.
+                if (
+                    field == "targets"
+                    and isinstance(node, (ast.Delete, ast3.Delete))
+                    and isinstance(item, (ast.Tuple, ast3.Tuple))
+                ):
+                    for elt in item.elts:
+                        yield from stringify_ast(elt, depth + 2)
+
+                elif isinstance(item, (ast.AST, ast3.AST)):
+                    yield from stringify_ast(item, depth + 2)
+
+        # Note that we are referencing the typed-ast ASTs via global variables and not
+        # direct module attribute accesses because that breaks mypyc. It's probably
+        # something to do with the ast3 variables being marked as Any leading
+        # mypy to think this branch is always taken, leaving the rest of the code
+        # unanalyzed. Tightening up the types for the typed-ast AST types avoids the
+        # mypyc crash.
+        elif isinstance(value, (ast.AST, ast3_AST)):
+            yield from stringify_ast(value, depth + 2)
+
+        else:
+            normalized: object
+            # Constant strings may be indented across newlines, if they are
+            # docstrings; fold spaces after newlines when comparing. Similarly,
+            # trailing and leading space may be removed.
+ if ( + isinstance(node, ast.Constant) + and field == "value" + and isinstance(value, str) + ): + normalized = _normalize("\n", value) + else: + normalized = value + yield f"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}" + + yield f"{' ' * depth}) # /{node.__class__.__name__}" + + +def fixup_ast_constants(node: Union[ast.AST, ast3.AST]) -> Union[ast.AST, ast3.AST]: + """Map ast nodes deprecated in 3.8 to Constant.""" + if isinstance(node, (ast.Str, ast3.Str, ast.Bytes, ast3.Bytes)): + return ast.Constant(value=node.s) + + if isinstance(node, (ast.Num, ast3.Num)): + return ast.Constant(value=node.n) + + if isinstance(node, (ast.NameConstant, ast3.NameConstant)): + return ast.Constant(value=node.value) + + return node diff --git a/src/black/py.typed b/src/black/py.typed index 8b13789..e69de29 100644 --- a/src/black/py.typed +++ b/src/black/py.typed @@ -1 +0,0 @@ - diff --git a/src/black/report.py b/src/black/report.py new file mode 100644 index 0000000..a507671 --- /dev/null +++ b/src/black/report.py @@ -0,0 +1,106 @@ +""" +Summarize Black runs to users. +""" +from dataclasses import dataclass +from enum import Enum +from pathlib import Path + +from click import style + +from black.output import err, out + + +class Changed(Enum): + NO = 0 + CACHED = 1 + YES = 2 + + +class NothingChanged(UserWarning): + """Raised when reformatted code is the same as source.""" + + +@dataclass +class Report: + """Provides a reformatting counter. Can be rendered with `str(report)`.""" + + check: bool = False + diff: bool = False + quiet: bool = False + verbose: bool = False + change_count: int = 0 + same_count: int = 0 + failure_count: int = 0 + + def done(self, src: Path, changed: Changed) -> None: + """Increment the counter for successful reformatting. Write out a message.""" + if changed is Changed.YES: + reformatted = "would reformat" if self.check or self.diff else "reformatted" + if self.verbose or not self.quiet: + out(f"{reformatted} {src}") + self.change_count += 1 + else: + if self.verbose: + if changed is Changed.NO: + msg = f"{src} already well formatted, good job." + else: + msg = f"{src} wasn't modified on disk since last run." + out(msg, bold=False) + self.same_count += 1 + + def failed(self, src: Path, message: str) -> None: + """Increment the counter for failed reformatting. Write out a message.""" + err(f"error: cannot format {src}: {message}") + self.failure_count += 1 + + def path_ignored(self, path: Path, message: str) -> None: + if self.verbose: + out(f"{path} ignored: {message}", bold=False) + + @property + def return_code(self) -> int: + """Return the exit code that the app should use. + + This considers the current state of changed files and failures: + - if there were any failures, return 123; + - if any files were changed and --check is being used, return 1; + - otherwise return 0. + """ + # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with + # 126 we have special return codes reserved by the shell. + if self.failure_count: + return 123 + + elif self.change_count and self.check: + return 1 + + return 0 + + def __str__(self) -> str: + """Render a color report of the current state. + + Use `click.unstyle` to remove colors. 
+ """ + if self.check or self.diff: + reformatted = "would be reformatted" + unchanged = "would be left unchanged" + failed = "would fail to reformat" + else: + reformatted = "reformatted" + unchanged = "left unchanged" + failed = "failed to reformat" + report = [] + if self.change_count: + s = "s" if self.change_count > 1 else "" + report.append( + style(f"{self.change_count} file{s} ", bold=True, fg="blue") + + style(f"{reformatted}", bold=True) + ) + + if self.same_count: + s = "s" if self.same_count > 1 else "" + report.append(style(f"{self.same_count} file{s} ", fg="blue") + unchanged) + if self.failure_count: + s = "s" if self.failure_count > 1 else "" + report.append(style(f"{self.failure_count} file{s} {failed}", fg="red")) + return ", ".join(report) + "." diff --git a/src/black/rusty.py b/src/black/rusty.py new file mode 100644 index 0000000..84a80b5 --- /dev/null +++ b/src/black/rusty.py @@ -0,0 +1,27 @@ +"""An error-handling model influenced by that used by the Rust programming language + +See https://doc.rust-lang.org/book/ch09-00-error-handling.html. +""" +from typing import Generic, TypeVar, Union + +T = TypeVar("T") +E = TypeVar("E", bound=Exception) + + +class Ok(Generic[T]): + def __init__(self, value: T) -> None: + self._value = value + + def ok(self) -> T: + return self._value + + +class Err(Generic[E]): + def __init__(self, e: E) -> None: + self._e = e + + def err(self) -> E: + return self._e + + +Result = Union[Ok[T], Err[E]] diff --git a/src/black/strings.py b/src/black/strings.py new file mode 100644 index 0000000..9d0e2eb --- /dev/null +++ b/src/black/strings.py @@ -0,0 +1,238 @@ +""" +Simple formatting on strings. Further string formatting code is in trans.py. +""" + +import re +import sys +from functools import lru_cache +from typing import List, Pattern + +if sys.version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final + + +STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters. +STRING_PREFIX_RE: Final = re.compile( + r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL +) +FIRST_NON_WHITESPACE_RE: Final = re.compile(r"\s*\t+\s*(\S)") + + +def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str: + """Replace `regex` with `replacement` twice on `original`. + + This is used by string normalization to perform replaces on + overlapping matches. + """ + return regex.sub(replacement, regex.sub(replacement, original)) + + +def has_triple_quotes(string: str) -> bool: + """ + Returns: + True iff @string starts with three quotation characters. 
+    """
+    raw_string = string.lstrip(STRING_PREFIX_CHARS)
+    return raw_string[:3] in {'"""', "'''"}
+
+
+def lines_with_leading_tabs_expanded(s: str) -> List[str]:
+    """
+    Splits string into lines and expands only leading tabs (following the normal
+    Python rules).
+    """
+    lines = []
+    for line in s.splitlines():
+        # Find the index of the first non-whitespace character after a string of
+        # whitespace that includes at least one tab
+        match = FIRST_NON_WHITESPACE_RE.match(line)
+        if match:
+            first_non_whitespace_idx = match.start(1)
+
+            lines.append(
+                line[:first_non_whitespace_idx].expandtabs()
+                + line[first_non_whitespace_idx:]
+            )
+        else:
+            lines.append(line)
+    return lines
+
+
+def fix_docstring(docstring: str, prefix: str) -> str:
+    # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
+    if not docstring:
+        return ""
+    lines = lines_with_leading_tabs_expanded(docstring)
+    # Determine minimum indentation (first line doesn't count):
+    indent = sys.maxsize
+    for line in lines[1:]:
+        stripped = line.lstrip()
+        if stripped:
+            indent = min(indent, len(line) - len(stripped))
+    # Remove indentation (first line is special):
+    trimmed = [lines[0].strip()]
+    if indent < sys.maxsize:
+        last_line_idx = len(lines) - 2
+        for i, line in enumerate(lines[1:]):
+            stripped_line = line[indent:].rstrip()
+            if stripped_line or i == last_line_idx:
+                trimmed.append(prefix + stripped_line)
+            else:
+                trimmed.append("")
+    return "\n".join(trimmed)
+
+
+def get_string_prefix(string: str) -> str:
+    """
+    Pre-conditions:
+        * assert_is_leaf_string(@string)
+
+    Returns:
+        @string's prefix (e.g. '', 'r', 'f', or 'rf').
+    """
+    assert_is_leaf_string(string)
+
+    prefix = ""
+    prefix_idx = 0
+    while string[prefix_idx] in STRING_PREFIX_CHARS:
+        prefix += string[prefix_idx]
+        prefix_idx += 1
+
+    return prefix
+
+
+def assert_is_leaf_string(string: str) -> None:
+    """
+    Checks the pre-condition that @string has the format that you would expect
+    of `leaf.value` where `leaf` is some Leaf such that `leaf.type ==
+    token.STRING`. A more precise description of the pre-conditions that are
+    checked is given below.
+
+    Pre-conditions:
+        * @string starts with either ', ", <prefix>', or <prefix>" where
+          `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`.
+        * @string ends with a quote character (' or ").
+
+    Raises:
+        AssertionError(...) if the pre-conditions listed above are not
+        satisfied.
+    """
+    dquote_idx = string.find('"')
+    squote_idx = string.find("'")
+    if -1 in [dquote_idx, squote_idx]:
+        quote_idx = max(dquote_idx, squote_idx)
+    else:
+        quote_idx = min(squote_idx, dquote_idx)
+
+    assert (
+        0 <= quote_idx < len(string) - 1
+    ), f"{string!r} is missing a starting quote character (' or \")."
+    assert string[-1] in (
+        "'",
+        '"',
+    ), f"{string!r} is missing an ending quote character (' or \")."
+    assert set(string[:quote_idx]).issubset(
+        set(STRING_PREFIX_CHARS)
+    ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}."
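For orientation, a minimal usage sketch of the helpers above (illustration only, not part of the diff; it assumes `black.strings` is importable exactly as defined in this file):

    from black.strings import fix_docstring, get_string_prefix, has_triple_quotes

    print(get_string_prefix('rb"hello"'))  # 'rb' -- only the leading prefix characters
    print(has_triple_quotes('"""docstring"""'))  # True
    # fix_docstring() re-indents docstring continuation lines to the given prefix:
    print(fix_docstring("line one\n        line two", "    "))  # "line one\n    line two"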
+
+
+def normalize_string_prefix(s: str) -> str:
+    """Make all string prefixes lowercase."""
+    match = STRING_PREFIX_RE.match(s)
+    assert match is not None, f"failed to match string {s!r}"
+    orig_prefix = match.group(1)
+    new_prefix = (
+        orig_prefix.replace("F", "f")
+        .replace("B", "b")
+        .replace("U", "")
+        .replace("u", "")
+    )
+
+    # Python syntax guarantees max 2 prefixes and that one of them is "r"
+    if len(new_prefix) == 2 and "r" != new_prefix[0].lower():
+        new_prefix = new_prefix[::-1]
+    return f"{new_prefix}{match.group(2)}"
+
+
+# Re(gex) does actually cache patterns internally but this still improves
+# performance on a long list literal of strings by 5-9% since lru_cache's
+# caching overhead is much lower.
+@lru_cache(maxsize=64)
+def _cached_compile(pattern: str) -> Pattern[str]:
+    return re.compile(pattern)
+
+
+def normalize_string_quotes(s: str) -> str:
+    """Prefer double quotes but only if it doesn't cause more escaping.
+
+    Adds or removes backslashes as appropriate. Doesn't parse and fix
+    strings nested in f-strings.
+    """
+    value = s.lstrip(STRING_PREFIX_CHARS)
+    if value[:3] == '"""':
+        return s
+
+    elif value[:3] == "'''":
+        orig_quote = "'''"
+        new_quote = '"""'
+    elif value[0] == '"':
+        orig_quote = '"'
+        new_quote = "'"
+    else:
+        orig_quote = "'"
+        new_quote = '"'
+    first_quote_pos = s.find(orig_quote)
+    if first_quote_pos == -1:
+        return s  # There's an internal error
+
+    prefix = s[:first_quote_pos]
+    unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
+    escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
+    escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
+    body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)]
+    if "r" in prefix.casefold():
+        if unescaped_new_quote.search(body):
+            # There's at least one unescaped new_quote in this raw string
+            # so converting is impossible
+            return s
+
+        # Do not introduce or remove backslashes in raw strings
+        new_body = body
+    else:
+        # remove unnecessary escapes
+        new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
+        if body != new_body:
+            # Consider the string without unnecessary escapes as the original
+            body = new_body
+            s = f"{prefix}{orig_quote}{body}{orig_quote}"
+        new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
+        new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
+    if "f" in prefix.casefold():
+        matches = re.findall(
+            r"""
+            (?:(?<!\{)|^)  # start of string or non-{ followed by
+            \{  # and introducing a single {
+            [^{].*?  # any number of characters other than {
+            \}  # and closing with }
+            """,
+            new_body,
+            re.VERBOSE,
+        )
+        for m in matches:
+            if "\\" in str(m):
+                # Do not introduce backslashes in interpolated expressions. That will
+                # change the perceived behavior of the string value returned by the
+                # underlying function.
+                return s
+
+    if new_quote == '"""' and new_body[-1:] == '"':
+        # edge case:
+        new_body = new_body[:-1] + '\\"'
+
+    orig_escape_count = body.count("\\")
+    new_escape_count = new_body.count("\\")
+    if new_escape_count > orig_escape_count:
+        return s  # Do not introduce more escaping
+
+    if new_escape_count == orig_escape_count and orig_quote == '"':
+        return s  # Prefer double quotes
+
+    return f"{prefix}{new_quote}{new_body}{new_quote}"
diff --git a/src/black/trans.py b/src/black/trans.py
new file mode 100644
index 0000000..7e2d8e6
--- /dev/null
+++ b/src/black/trans.py
@@ -0,0 +1,2222 @@
+"""
+String transformers that can split and merge strings.
+""" +import re +import sys +from abc import ABC, abstractmethod +from collections import defaultdict +from dataclasses import dataclass +from typing import ( + Any, + Callable, + ClassVar, + Collection, + Dict, + Iterable, + Iterator, + List, + Optional, + Sequence, + Set, + Tuple, + TypeVar, + Union, +) + +if sys.version_info < (3, 8): + from typing_extensions import Final, Literal +else: + from typing import Literal, Final + +from mypy_extensions import trait + +from black.brackets import BracketMatchError +from black.comments import contains_pragma_comment +from black.lines import Line, append_leaves +from black.mode import Feature +from black.nodes import ( + CLOSING_BRACKETS, + OPENING_BRACKETS, + STANDALONE_COMMENT, + is_empty_lpar, + is_empty_par, + is_empty_rpar, + parent_type, + replace_child, + syms, +) +from black.rusty import Err, Ok, Result +from black.strings import ( + assert_is_leaf_string, + get_string_prefix, + has_triple_quotes, + normalize_string_quotes, +) +from blib2to3.pgen2 import token +from blib2to3.pytree import Leaf, Node + + +class CannotTransform(Exception): + """Base class for errors raised by Transformers.""" + + +# types +T = TypeVar("T") +LN = Union[Leaf, Node] +Transformer = Callable[[Line, Collection[Feature]], Iterator[Line]] +Index = int +NodeType = int +ParserState = int +StringID = int +TResult = Result[T, CannotTransform] # (T)ransform Result +TMatchResult = TResult[Index] + + +def TErr(err_msg: str) -> Err[CannotTransform]: + """(T)ransform Err + + Convenience function used when working with the TResult type. + """ + cant_transform = CannotTransform(err_msg) + return Err(cant_transform) + + +def hug_power_op(line: Line, features: Collection[Feature]) -> Iterator[Line]: + """A transformer which normalizes spacing around power operators.""" + + # Performance optimization to avoid unnecessary Leaf clones and other ops. + for leaf in line.leaves: + if leaf.type == token.DOUBLESTAR: + break + else: + raise CannotTransform("No doublestar token was found in the line.") + + def is_simple_lookup(index: int, step: Literal[1, -1]) -> bool: + # Brackets and parentheses indicate calls, subscripts, etc. ... + # basically stuff that doesn't count as "simple". Only a NAME lookup + # or dotted lookup (eg. NAME.NAME) is OK. + if step == -1: + disallowed = {token.RPAR, token.RSQB} + else: + disallowed = {token.LPAR, token.LSQB} + + while 0 <= index < len(line.leaves): + current = line.leaves[index] + if current.type in disallowed: + return False + if current.type not in {token.NAME, token.DOT} or current.value == "for": + # If the current token isn't disallowed, we'll assume this is simple as + # only the disallowed tokens are semantically attached to this lookup + # expression we're checking. Also, stop early if we hit the 'for' bit + # of a comprehension. + return True + + index += step + + return True + + def is_simple_operand(index: int, kind: Literal["base", "exponent"]) -> bool: + # An operand is considered "simple" if's a NAME, a numeric CONSTANT, a simple + # lookup (see above), with or without a preceding unary operator. + start = line.leaves[index] + if start.type in {token.NAME, token.NUMBER}: + return is_simple_lookup(index, step=(1 if kind == "exponent" else -1)) + + if start.type in {token.PLUS, token.MINUS, token.TILDE}: + if line.leaves[index + 1].type in {token.NAME, token.NUMBER}: + # step is always one as bases with a preceding unary op will be checked + # for simplicity starting from the next token (so it'll hit the check + # above). 
+                return is_simple_lookup(index + 1, step=1)
+
+        return False
+
+    new_line = line.clone()
+    should_hug = False
+    for idx, leaf in enumerate(line.leaves):
+        new_leaf = leaf.clone()
+        if should_hug:
+            new_leaf.prefix = ""
+            should_hug = False
+
+        should_hug = (
+            (0 < idx < len(line.leaves) - 1)
+            and leaf.type == token.DOUBLESTAR
+            and is_simple_operand(idx - 1, kind="base")
+            and line.leaves[idx - 1].value != "lambda"
+            and is_simple_operand(idx + 1, kind="exponent")
+        )
+        if should_hug:
+            new_leaf.prefix = ""
+
+        # We have to be careful to make a new line properly:
+        # - bracket related metadata must be maintained (handled by Line.append)
+        # - comments need to be copied over, updating the leaf IDs they're attached to
+        new_line.append(new_leaf, preformatted=True)
+        for comment_leaf in line.comments_after(leaf):
+            new_line.append(comment_leaf, preformatted=True)
+
+    yield new_line
+
+
+class StringTransformer(ABC):
+    """
+    An implementation of the Transformer protocol that relies on its
+    subclasses overriding the template methods `do_match(...)` and
+    `do_transform(...)`.
+
+    This Transformer works exclusively on strings (for example, by merging
+    or splitting them).
+
+    The following sections can be found among the docstrings of each concrete
+    StringTransformer subclass.
+
+    Requirements:
+        Which requirements must be met by the given Line for this
+        StringTransformer to be applied?
+
+    Transformations:
+        If the given Line meets all of the above requirements, which string
+        transformations can you expect to be applied to it by this
+        StringTransformer?
+
+    Collaborations:
+        What contractual agreements does this StringTransformer have with other
+        StringTransformers? Such collaborations should be eliminated/minimized
+        as much as possible.
+    """
+
+    __name__: Final = "StringTransformer"
+
+    # Ideally this would be a dataclass, but unfortunately mypyc breaks when used with
+    # `abc.ABC`.
+    def __init__(self, line_length: int, normalize_strings: bool) -> None:
+        self.line_length = line_length
+        self.normalize_strings = normalize_strings
+
+    @abstractmethod
+    def do_match(self, line: Line) -> TMatchResult:
+        """
+        Returns:
+            * Ok(string_idx) such that `line.leaves[string_idx]` is our target
+              string, if a match was able to be made.
+              OR
+            * Err(CannotTransform), if a match was not able to be made.
+        """
+
+    @abstractmethod
+    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
+        """
+        Yields:
+            * Ok(new_line) where new_line is the new transformed line.
+              OR
+            * Err(CannotTransform) if the transformation failed for some reason. The
+              `do_match(...)` template method should usually be used to reject
+              the form of the given Line, but in some cases it is difficult to
+              know whether or not a Line meets the StringTransformer's
+              requirements until the transformation is already midway.
+
+        Side Effects:
+            This method should NOT mutate @line directly, but it MAY mutate the
+            Line's underlying Node structure. (WARNING: If the underlying Node
+            structure IS altered, then this method should NOT be allowed to
+            yield a CannotTransform after that point.)
+        """
+
+    def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]:
+        """
+        StringTransformer instances have a call signature that mirrors that of
+        the Transformer type.
+
+        Raises:
+            CannotTransform(...) if the concrete StringTransformer class is unable
+            to transform @line.
+        """
+        # Optimization to avoid calling `self.do_match(...)` when the line does
+        # not contain any string.
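+        # (Raising CannotTransform is not a hard failure; it simply tells the
+        # caller that this transformer does not apply to @line.)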
+        if not any(leaf.type == token.STRING for leaf in line.leaves):
+            raise CannotTransform("There are no strings in this line.")
+
+        match_result = self.do_match(line)
+
+        if isinstance(match_result, Err):
+            cant_transform = match_result.err()
+            raise CannotTransform(
+                f"The string transformer {self.__class__.__name__} does not recognize"
+                " this line as one that it can transform."
+            ) from cant_transform
+
+        string_idx = match_result.ok()
+
+        for line_result in self.do_transform(line, string_idx):
+            if isinstance(line_result, Err):
+                cant_transform = line_result.err()
+                raise CannotTransform(
+                    "StringTransformer failed while attempting to transform string."
+                ) from cant_transform
+            line = line_result.ok()
+            yield line
+
+
+@dataclass
+class CustomSplit:
+    """A custom (i.e. manual) string split.
+
+    A single CustomSplit instance represents a single substring.
+
+    Examples:
+        Consider the following string:
+        ```
+        "Hi there friend."
+        " This is a custom"
+        f" string {split}."
+        ```
+
+        This string will correspond to the following three CustomSplit instances:
+        ```
+        CustomSplit(False, 16)
+        CustomSplit(False, 17)
+        CustomSplit(True, 16)
+        ```
+    """
+
+    has_prefix: bool
+    break_idx: int
+
+
+@trait
+class CustomSplitMapMixin:
+    """
+    This mixin class is used to map merged strings to a sequence of
+    CustomSplits, which will then be used to re-split the strings iff none of
+    the resultant substrings go over the configured max line length.
+    """
+
+    _Key: ClassVar = Tuple[StringID, str]
+    _CUSTOM_SPLIT_MAP: ClassVar[Dict[_Key, Tuple[CustomSplit, ...]]] = defaultdict(
+        tuple
+    )
+
+    @staticmethod
+    def _get_key(string: str) -> "CustomSplitMapMixin._Key":
+        """
+        Returns:
+            A unique identifier that is used internally to map @string to a
+            group of custom splits.
+        """
+        return (id(string), string)
+
+    def add_custom_splits(
+        self, string: str, custom_splits: Iterable[CustomSplit]
+    ) -> None:
+        """Custom Split Map Setter Method
+
+        Side Effects:
+            Adds a mapping from @string to the custom splits @custom_splits.
+        """
+        key = self._get_key(string)
+        self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)
+
+    def pop_custom_splits(self, string: str) -> List[CustomSplit]:
+        """Custom Split Map Getter Method
+
+        Returns:
+            * A list of the custom splits that are mapped to @string, if any
+              exist.
+              OR
+            * [], otherwise.
+
+        Side Effects:
+            Deletes the mapping between @string and its associated custom
+            splits (which are returned to the caller).
+        """
+        key = self._get_key(string)
+
+        custom_splits = self._CUSTOM_SPLIT_MAP[key]
+        del self._CUSTOM_SPLIT_MAP[key]
+
+        return list(custom_splits)
+
+    def has_custom_splits(self, string: str) -> bool:
+        """
+        Returns:
+            True iff @string is associated with a set of custom splits.
+        """
+        key = self._get_key(string)
+        return key in self._CUSTOM_SPLIT_MAP
+
+
+class StringMerger(StringTransformer, CustomSplitMapMixin):
+    """StringTransformer that merges strings together.
+
+    Requirements:
+        (A) The line contains adjacent strings such that ALL of the validation checks
+        listed in StringMerger.__validate_msg(...)'s docstring pass.
+        OR
+        (B) The line contains a string which uses line continuation backslashes.
+
+    Transformations:
+        Depending on which of the two requirements above were met, either:
+
+        (A) The string group associated with the target string is merged.
+        OR
+        (B) All line-continuation backslashes are removed from the target string.
+
+    Collaborations:
+        StringMerger provides custom split information to StringSplitter.
+ """ + + def do_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + for i, leaf in enumerate(LL): + if ( + leaf.type == token.STRING + and is_valid_index(i + 1) + and LL[i + 1].type == token.STRING + ): + return Ok(i) + + if leaf.type == token.STRING and "\\\n" in leaf.value: + return Ok(i) + + return TErr("This line has no strings that need merging.") + + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + new_line = line + rblc_result = self._remove_backslash_line_continuation_chars( + new_line, string_idx + ) + if isinstance(rblc_result, Ok): + new_line = rblc_result.ok() + + msg_result = self._merge_string_group(new_line, string_idx) + if isinstance(msg_result, Ok): + new_line = msg_result.ok() + + if isinstance(rblc_result, Err) and isinstance(msg_result, Err): + msg_cant_transform = msg_result.err() + rblc_cant_transform = rblc_result.err() + cant_transform = CannotTransform( + "StringMerger failed to merge any strings in this line." + ) + + # Chain the errors together using `__cause__`. + msg_cant_transform.__cause__ = rblc_cant_transform + cant_transform.__cause__ = msg_cant_transform + + yield Err(cant_transform) + else: + yield Ok(new_line) + + @staticmethod + def _remove_backslash_line_continuation_chars( + line: Line, string_idx: int + ) -> TResult[Line]: + """ + Merge strings that were split across multiple lines using + line-continuation backslashes. + + Returns: + Ok(new_line), if @line contains backslash line-continuation + characters. + OR + Err(CannotTransform), otherwise. + """ + LL = line.leaves + + string_leaf = LL[string_idx] + if not ( + string_leaf.type == token.STRING + and "\\\n" in string_leaf.value + and not has_triple_quotes(string_leaf.value) + ): + return TErr( + f"String leaf {string_leaf} does not contain any backslash line" + " continuation characters." + ) + + new_line = line.clone() + new_line.comments = line.comments.copy() + append_leaves(new_line, line, LL) + + new_string_leaf = new_line.leaves[string_idx] + new_string_leaf.value = new_string_leaf.value.replace("\\\n", "") + + return Ok(new_line) + + def _merge_string_group(self, line: Line, string_idx: int) -> TResult[Line]: + """ + Merges string group (i.e. set of adjacent strings) where the first + string in the group is `line.leaves[string_idx]`. + + Returns: + Ok(new_line), if ALL of the validation checks found in + __validate_msg(...) pass. + OR + Err(CannotTransform), otherwise. + """ + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + vresult = self._validate_msg(line, string_idx) + if isinstance(vresult, Err): + return vresult + + # If the string group is wrapped inside an Atom node, we must make sure + # to later replace that Atom with our new (merged) string leaf. + atom_node = LL[string_idx].parent + + # We will place BREAK_MARK in between every two substrings that we + # merge. We will then later go through our final result and use the + # various instances of BREAK_MARK we find to add the right values to + # the custom split map. + BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@" + + QUOTE = LL[string_idx].value[-1] + + def make_naked(string: str, string_prefix: str) -> str: + """Strip @string (i.e. 
make it a "naked" string) + + Pre-conditions: + * assert_is_leaf_string(@string) + + Returns: + A string that is identical to @string except that + @string_prefix has been stripped, the surrounding QUOTE + characters have been removed, and any remaining QUOTE + characters have been escaped. + """ + assert_is_leaf_string(string) + + RE_EVEN_BACKSLASHES = r"(?:(?= 0 + ), "Logic error while filling the custom string breakpoint cache." + + temp_string = temp_string[mark_idx + len(BREAK_MARK) :] + breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1 + custom_splits.append(CustomSplit(has_prefix, breakpoint_idx)) + + string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, "")) + + if atom_node is not None: + # If not all children of the atom node are merged (this can happen + # when there is a standalone comment in the middle) ... + if non_string_idx - string_idx < len(atom_node.children): + # We need to replace the old STRING leaves with the new string leaf. + first_child_idx = LL[string_idx].remove() + for idx in range(string_idx + 1, non_string_idx): + LL[idx].remove() + if first_child_idx is not None: + atom_node.insert_child(first_child_idx, string_leaf) + else: + # Else replace the atom node with the new string leaf. + replace_child(atom_node, string_leaf) + + # Build the final line ('new_line') that this method will later return. + new_line = line.clone() + for i, leaf in enumerate(LL): + if i == string_idx: + new_line.append(string_leaf) + + if string_idx <= i < string_idx + num_of_strings: + for comment_leaf in line.comments_after(LL[i]): + new_line.append(comment_leaf, preformatted=True) + continue + + append_leaves(new_line, line, [leaf]) + + self.add_custom_splits(string_leaf.value, custom_splits) + return Ok(new_line) + + @staticmethod + def _validate_msg(line: Line, string_idx: int) -> TResult[None]: + """Validate (M)erge (S)tring (G)roup + + Transform-time string validation logic for __merge_string_group(...). + + Returns: + * Ok(None), if ALL validation checks (listed below) pass. + OR + * Err(CannotTransform), if any of the following are true: + - The target string group does not contain ANY stand-alone comments. + - The target string is not in a string group (i.e. it has no + adjacent strings). + - The string group has more than one inline comment. + - The string group has an inline comment that appears to be a pragma. + - The set of all string prefixes in the string group is of + length greater than one and is not equal to {"", "f"}. + - The string group consists of raw strings. + """ + # We first check for "inner" stand-alone comments (i.e. stand-alone + # comments that have a string leaf before them AND after them). + for inc in [1, -1]: + i = string_idx + found_sa_comment = False + is_valid_index = is_valid_index_factory(line.leaves) + while is_valid_index(i) and line.leaves[i].type in [ + token.STRING, + STANDALONE_COMMENT, + ]: + if line.leaves[i].type == STANDALONE_COMMENT: + found_sa_comment = True + elif found_sa_comment: + return TErr( + "StringMerger does NOT merge string groups which contain " + "stand-alone comments." + ) + + i += inc + + num_of_inline_string_comments = 0 + set_of_prefixes = set() + num_of_strings = 0 + for leaf in line.leaves[string_idx:]: + if leaf.type != token.STRING: + # If the string group is trailed by a comma, we count the + # comments trailing the comma to be one of the string group's + # comments. 
+ if leaf.type == token.COMMA and id(leaf) in line.comments: + num_of_inline_string_comments += 1 + break + + if has_triple_quotes(leaf.value): + return TErr("StringMerger does NOT merge multiline strings.") + + num_of_strings += 1 + prefix = get_string_prefix(leaf.value).lower() + if "r" in prefix: + return TErr("StringMerger does NOT merge raw strings.") + + set_of_prefixes.add(prefix) + + if id(leaf) in line.comments: + num_of_inline_string_comments += 1 + if contains_pragma_comment(line.comments[id(leaf)]): + return TErr("Cannot merge strings which have pragma comments.") + + if num_of_strings < 2: + return TErr( + f"Not enough strings to merge (num_of_strings={num_of_strings})." + ) + + if num_of_inline_string_comments > 1: + return TErr( + f"Too many inline string comments ({num_of_inline_string_comments})." + ) + + if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}: + return TErr(f"Too many different prefixes ({set_of_prefixes}).") + + return Ok(None) + + +class StringParenStripper(StringTransformer): + """StringTransformer that strips surrounding parentheses from strings. + + Requirements: + The line contains a string which is surrounded by parentheses and: + - The target string is NOT the only argument to a function call. + - The target string is NOT a "pointless" string. + - If the target string contains a PERCENT, the brackets are not + preceded or followed by an operator with higher precedence than + PERCENT. + + Transformations: + The parentheses mentioned in the 'Requirements' section are stripped. + + Collaborations: + StringParenStripper has its own inherent usefulness, but it is also + relied on to clean up the parentheses created by StringParenWrapper (in + the event that they are no longer needed). + """ + + def do_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + for idx, leaf in enumerate(LL): + # Should be a string... + if leaf.type != token.STRING: + continue + + # If this is a "pointless" string... + if ( + leaf.parent + and leaf.parent.parent + and leaf.parent.parent.type == syms.simple_stmt + ): + continue + + # Should be preceded by a non-empty LPAR... + if ( + not is_valid_index(idx - 1) + or LL[idx - 1].type != token.LPAR + or is_empty_lpar(LL[idx - 1]) + ): + continue + + # That LPAR should NOT be preceded by a function name or a closing + # bracket (which could be a function which returns a function or a + # list/dictionary that contains a function)... + if is_valid_index(idx - 2) and ( + LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS + ): + continue + + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + next_idx = string_parser.parse(LL, string_idx) + + # if the leaves in the parsed string include a PERCENT, we need to + # make sure the initial LPAR is NOT preceded by an operator with + # higher or equal precedence to PERCENT + if is_valid_index(idx - 2): + # mypy can't quite follow unless we name this + before_lpar = LL[idx - 2] + if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and ( + ( + before_lpar.type + in { + token.STAR, + token.AT, + token.SLASH, + token.DOUBLESLASH, + token.PERCENT, + token.TILDE, + token.DOUBLESTAR, + token.AWAIT, + token.LSQB, + token.LPAR, + } + ) + or ( + # only unary PLUS/MINUS + before_lpar.parent + and before_lpar.parent.type == syms.factor + and (before_lpar.type in {token.PLUS, token.MINUS}) + ) + ): + continue + + # Should be followed by a non-empty RPAR... 
+            if (
+                is_valid_index(next_idx)
+                and LL[next_idx].type == token.RPAR
+                and not is_empty_rpar(LL[next_idx])
+            ):
+                # That RPAR should NOT be followed by anything with higher
+                # precedence than PERCENT
+                if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in {
+                    token.DOUBLESTAR,
+                    token.LSQB,
+                    token.LPAR,
+                    token.DOT,
+                }:
+                    continue
+
+                return Ok(string_idx)
+
+        return TErr("This line has no strings wrapped in parens.")
+
+    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
+        LL = line.leaves
+
+        string_parser = StringParser()
+        rpar_idx = string_parser.parse(LL, string_idx)
+
+        for leaf in (LL[string_idx - 1], LL[rpar_idx]):
+            if line.comments_after(leaf):
+                yield TErr(
+                    "Will not strip parentheses which have comments attached to them."
+                )
+                return
+
+        new_line = line.clone()
+        new_line.comments = line.comments.copy()
+        try:
+            append_leaves(new_line, line, LL[: string_idx - 1])
+        except BracketMatchError:
+            # HACK: I believe there is currently a bug somewhere in
+            # right_hand_split() that is causing brackets to not be tracked
+            # properly by a shared BracketTracker.
+            append_leaves(new_line, line, LL[: string_idx - 1], preformatted=True)
+
+        string_leaf = Leaf(token.STRING, LL[string_idx].value)
+        LL[string_idx - 1].remove()
+        replace_child(LL[string_idx], string_leaf)
+        new_line.append(string_leaf)
+
+        append_leaves(
+            new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :]
+        )
+
+        LL[rpar_idx].remove()
+
+        yield Ok(new_line)
+
+
+class BaseStringSplitter(StringTransformer):
+    """
+    Abstract class for StringTransformers which transform a Line's strings by splitting
+    them or placing them on their own lines where necessary to avoid going over
+    the configured line length.
+
+    Requirements:
+        * The target string value is responsible for the line going over the
+          line length limit. It follows that after all of black's other line
+          split methods have been exhausted, this line (or one of the resulting
+          lines after all line splits are performed) would still be over the
+          line_length limit unless we split this string.
+          AND
+        * The target string is NOT a "pointless" string (i.e. a string that has
+          no parent or siblings).
+          AND
+        * The target string is not followed by an inline comment that appears
+          to be a pragma.
+          AND
+        * The target string is not a multiline (i.e. triple-quote) string.
+    """
+
+    STRING_OPERATORS: Final = [
+        token.EQEQUAL,
+        token.GREATER,
+        token.GREATEREQUAL,
+        token.LESS,
+        token.LESSEQUAL,
+        token.NOTEQUAL,
+        token.PERCENT,
+        token.PLUS,
+        token.STAR,
+    ]
+
+    @abstractmethod
+    def do_splitter_match(self, line: Line) -> TMatchResult:
+        """
+        BaseStringSplitter asks its clients to override this method instead of
+        `StringTransformer.do_match(...)`.
+
+        Follows the same protocol as `StringTransformer.do_match(...)`.
+
+        Refer to `help(StringTransformer.do_match)` for more information.
+        """
+
+    def do_match(self, line: Line) -> TMatchResult:
+        match_result = self.do_splitter_match(line)
+        if isinstance(match_result, Err):
+            return match_result
+
+        string_idx = match_result.ok()
+        vresult = self._validate(line, string_idx)
+        if isinstance(vresult, Err):
+            return vresult
+
+        return match_result
+
+    def _validate(self, line: Line, string_idx: int) -> TResult[None]:
+        """
+        Checks that @line meets all of the requirements listed in this class's
+        docstring. Refer to `help(BaseStringSplitter)` for a detailed
+        description of those requirements.
+
+        Returns:
+            * Ok(None), if ALL of the requirements are met.
+ OR + * Err(CannotTransform), if ANY of the requirements are NOT met. + """ + LL = line.leaves + + string_leaf = LL[string_idx] + + max_string_length = self._get_max_string_length(line, string_idx) + if len(string_leaf.value) <= max_string_length: + return TErr( + "The string itself is not what is causing this line to be too long." + ) + + if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [ + token.STRING, + token.NEWLINE, + ]: + return TErr( + f"This string ({string_leaf.value}) appears to be pointless (i.e. has" + " no parent)." + ) + + if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment( + line.comments[id(line.leaves[string_idx])] + ): + return TErr( + "Line appears to end with an inline pragma comment. Splitting the line" + " could modify the pragma's behavior." + ) + + if has_triple_quotes(string_leaf.value): + return TErr("We cannot split multiline strings.") + + return Ok(None) + + def _get_max_string_length(self, line: Line, string_idx: int) -> int: + """ + Calculates the max string length used when attempting to determine + whether or not the target string is responsible for causing the line to + go over the line length limit. + + WARNING: This method is tightly coupled to both StringSplitter and + (especially) StringParenWrapper. There is probably a better way to + accomplish what is being done here. + + Returns: + max_string_length: such that `line.leaves[string_idx].value > + max_string_length` implies that the target string IS responsible + for causing this line to exceed the line length limit. + """ + LL = line.leaves + + is_valid_index = is_valid_index_factory(LL) + + # We use the shorthand "WMA4" in comments to abbreviate "We must + # account for". When giving examples, we use STRING to mean some/any + # valid string. + # + # Finally, we use the following convenience variables: + # + # P: The leaf that is before the target string leaf. + # N: The leaf that is after the target string leaf. + # NN: The leaf that is after N. + + # WMA4 the whitespace at the beginning of the line. + offset = line.depth * 4 + + if is_valid_index(string_idx - 1): + p_idx = string_idx - 1 + if ( + LL[string_idx - 1].type == token.LPAR + and LL[string_idx - 1].value == "" + and string_idx >= 2 + ): + # If the previous leaf is an empty LPAR placeholder, we should skip it. + p_idx -= 1 + + P = LL[p_idx] + if P.type in self.STRING_OPERATORS: + # WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`). + offset += len(str(P)) + 1 + + if P.type == token.COMMA: + # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`]. + offset += 3 + + if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]: + # This conditional branch is meant to handle dictionary keys, + # variable assignments, 'return STRING' statement lines, and + # 'else STRING' ternary expression lines. + + # WMA4 a single space. + offset += 1 + + # WMA4 the lengths of any leaves that came before that space, + # but after any closing bracket before that space. + for leaf in reversed(LL[: p_idx + 1]): + offset += len(str(leaf)) + if leaf.type in CLOSING_BRACKETS: + break + + if is_valid_index(string_idx + 1): + N = LL[string_idx + 1] + if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2: + # If the next leaf is an empty RPAR placeholder, we should skip it. + N = LL[string_idx + 2] + + if N.type == token.COMMA: + # WMA4 a single comma at the end of the string (e.g `STRING,`). 
+ offset += 1 + + if is_valid_index(string_idx + 2): + NN = LL[string_idx + 2] + + if N.type == token.DOT and NN.type == token.NAME: + # This conditional branch is meant to handle method calls invoked + # off of a string literal up to and including the LPAR character. + + # WMA4 the '.' character. + offset += 1 + + if ( + is_valid_index(string_idx + 3) + and LL[string_idx + 3].type == token.LPAR + ): + # WMA4 the left parenthesis character. + offset += 1 + + # WMA4 the length of the method's name. + offset += len(NN.value) + + has_comments = False + for comment_leaf in line.comments_after(LL[string_idx]): + if not has_comments: + has_comments = True + # WMA4 two spaces before the '#' character. + offset += 2 + + # WMA4 the length of the inline comment. + offset += len(comment_leaf.value) + + max_string_length = self.line_length - offset + return max_string_length + + @staticmethod + def _prefer_paren_wrap_match(LL: List[Leaf]) -> Optional[int]: + """ + Returns: + string_idx such that @LL[string_idx] is equal to our target (i.e. + matched) string, if this line matches the "prefer paren wrap" statement + requirements listed in the 'Requirements' section of the StringParenWrapper + class's docstring. + OR + None, otherwise. + """ + # The line must start with a string. + if LL[0].type != token.STRING: + return None + + matching_nodes = [ + syms.listmaker, + syms.dictsetmaker, + syms.testlist_gexp, + ] + # If the string is an immediate child of a list/set/tuple literal... + if ( + parent_type(LL[0]) in matching_nodes + or parent_type(LL[0].parent) in matching_nodes + ): + # And the string is surrounded by commas (or is the first/last child)... + prev_sibling = LL[0].prev_sibling + next_sibling = LL[0].next_sibling + if ( + not prev_sibling + and not next_sibling + and parent_type(LL[0]) == syms.atom + ): + # If it's an atom string, we need to check the parent atom's siblings. + parent = LL[0].parent + assert parent is not None # For type checkers. + prev_sibling = parent.prev_sibling + next_sibling = parent.next_sibling + if (not prev_sibling or prev_sibling.type == token.COMMA) and ( + not next_sibling or next_sibling.type == token.COMMA + ): + return 0 + + return None + + +def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]: + """ + Yields spans corresponding to expressions in a given f-string. + Spans are half-open ranges (left inclusive, right exclusive). + Assumes the input string is a valid f-string, but will not crash if the input + string is invalid. + """ + stack: List[int] = [] # our curly paren stack + i = 0 + while i < len(s): + if s[i] == "{": + # if we're in a string part of the f-string, ignore escaped curly braces + if not stack and i + 1 < len(s) and s[i + 1] == "{": + i += 2 + continue + stack.append(i) + i += 1 + continue + + if s[i] == "}": + if not stack: + i += 1 + continue + j = stack.pop() + # we've made it back out of the expression! 
yield the span + if not stack: + yield (j, i + 1) + i += 1 + continue + + # if we're in an expression part of the f-string, fast forward through strings + # note that backslashes are not legal in the expression portion of f-strings + if stack: + delim = None + if s[i : i + 3] in ("'''", '"""'): + delim = s[i : i + 3] + elif s[i] in ("'", '"'): + delim = s[i] + if delim: + i += len(delim) + while i < len(s) and s[i : i + len(delim)] != delim: + i += 1 + i += len(delim) + continue + i += 1 + + +def fstring_contains_expr(s: str) -> bool: + return any(iter_fexpr_spans(s)) + + +class StringSplitter(BaseStringSplitter, CustomSplitMapMixin): + """ + StringTransformer that splits "atom" strings (i.e. strings which exist on + lines by themselves). + + Requirements: + * The line consists ONLY of a single string (possibly prefixed by a + string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE + a trailing comma. + AND + * All of the requirements listed in BaseStringSplitter's docstring. + + Transformations: + The string mentioned in the 'Requirements' section is split into as + many substrings as necessary to adhere to the configured line length. + + In the final set of substrings, no substring should be smaller than + MIN_SUBSTR_SIZE characters. + + The string will ONLY be split on spaces (i.e. each new substring should + start with a space). Note that the string will NOT be split on a space + which is escaped with a backslash. + + If the string is an f-string, it will NOT be split in the middle of an + f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x + else bar()} is an f-expression). + + If the string that is being split has an associated set of custom split + records and those custom splits will NOT result in any line going over + the configured line length, those custom splits are used. Otherwise the + string is split as late as possible (from left-to-right) while still + adhering to the transformation rules listed above. + + Collaborations: + StringSplitter relies on StringMerger to construct the appropriate + CustomSplit objects and add them to the custom split map. + """ + + MIN_SUBSTR_SIZE: Final = 6 + + def do_splitter_match(self, line: Line) -> TMatchResult: + LL = line.leaves + + if self._prefer_paren_wrap_match(LL) is not None: + return TErr("Line needs to be wrapped in parens first.") + + is_valid_index = is_valid_index_factory(LL) + + idx = 0 + + # The first two leaves MAY be the 'not in' keywords... + if ( + is_valid_index(idx) + and is_valid_index(idx + 1) + and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME] + and str(LL[idx]) + str(LL[idx + 1]) == "not in" + ): + idx += 2 + # Else the first leaf MAY be a string operator symbol or the 'in' keyword... + elif is_valid_index(idx) and ( + LL[idx].type in self.STRING_OPERATORS + or LL[idx].type == token.NAME + and str(LL[idx]) == "in" + ): + idx += 1 + + # The next/first leaf MAY be an empty LPAR... + if is_valid_index(idx) and is_empty_lpar(LL[idx]): + idx += 1 + + # The next/first leaf MUST be a string... + if not is_valid_index(idx) or LL[idx].type != token.STRING: + return TErr("Line does not start with a string.") + + string_idx = idx + + # Skip the string trailer, if one exists. + string_parser = StringParser() + idx = string_parser.parse(LL, string_idx) + + # That string MAY be followed by an empty RPAR... + if is_valid_index(idx) and is_empty_rpar(LL[idx]): + idx += 1 + + # That string / empty RPAR leaf MAY be followed by a comma... 
+ if is_valid_index(idx) and LL[idx].type == token.COMMA: + idx += 1 + + # But no more leaves are allowed... + if is_valid_index(idx): + return TErr("This line does not end with a string.") + + return Ok(string_idx) + + def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]: + LL = line.leaves + + QUOTE = LL[string_idx].value[-1] + + is_valid_index = is_valid_index_factory(LL) + insert_str_child = insert_str_child_factory(LL[string_idx]) + + prefix = get_string_prefix(LL[string_idx].value).lower() + + # We MAY choose to drop the 'f' prefix from substrings that don't + # contain any f-expressions, but ONLY if the original f-string + # contains at least one f-expression. Otherwise, we will alter the AST + # of the program. + drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr( + LL[string_idx].value + ) + + first_string_line = True + + string_op_leaves = self._get_string_operator_leaves(LL) + string_op_leaves_length = ( + sum(len(str(prefix_leaf)) for prefix_leaf in string_op_leaves) + 1 + if string_op_leaves + else 0 + ) + + def maybe_append_string_operators(new_line: Line) -> None: + """ + Side Effects: + If @line starts with a string operator and this is the first + line we are constructing, this function appends the string + operator to @new_line and replaces the old string operator leaf + in the node structure. Otherwise this function does nothing. + """ + maybe_prefix_leaves = string_op_leaves if first_string_line else [] + for i, prefix_leaf in enumerate(maybe_prefix_leaves): + replace_child(LL[i], prefix_leaf) + new_line.append(prefix_leaf) + + ends_with_comma = ( + is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA + ) + + def max_last_string() -> int: + """ + Returns: + The max allowed length of the string value used for the last + line we will construct. + """ + result = self.line_length + result -= line.depth * 4 + result -= 1 if ends_with_comma else 0 + result -= string_op_leaves_length + return result + + # --- Calculate Max Break Index (for string value) + # We start with the line length limit + max_break_idx = self.line_length + # The last index of a string of length N is N-1. + max_break_idx -= 1 + # Leading whitespace is not present in the string value (e.g. Leaf.value). + max_break_idx -= line.depth * 4 + if max_break_idx < 0: + yield TErr( + f"Unable to split {LL[string_idx].value} at such high of a line depth:" + f" {line.depth}" + ) + return + + # Check if StringMerger registered any custom splits. + custom_splits = self.pop_custom_splits(LL[string_idx].value) + # We use them ONLY if none of them would produce lines that exceed the + # line limit. + use_custom_breakpoints = bool( + custom_splits + and all(csplit.break_idx <= max_break_idx for csplit in custom_splits) + ) + + # Temporary storage for the remaining chunk of the string line that + # can't fit onto the line currently being constructed. + rest_value = LL[string_idx].value + + def more_splits_should_be_made() -> bool: + """ + Returns: + True iff `rest_value` (the remaining string value from the last + split), should be split again. 
+ """ + if use_custom_breakpoints: + return len(custom_splits) > 1 + else: + return len(rest_value) > max_last_string() + + string_line_results: List[Ok[Line]] = [] + while more_splits_should_be_made(): + if use_custom_breakpoints: + # Custom User Split (manual) + csplit = custom_splits.pop(0) + break_idx = csplit.break_idx + else: + # Algorithmic Split (automatic) + max_bidx = max_break_idx - string_op_leaves_length + maybe_break_idx = self._get_break_idx(rest_value, max_bidx) + if maybe_break_idx is None: + # If we are unable to algorithmically determine a good split + # and this string has custom splits registered to it, we + # fall back to using them--which means we have to start + # over from the beginning. + if custom_splits: + rest_value = LL[string_idx].value + string_line_results = [] + first_string_line = True + use_custom_breakpoints = True + continue + + # Otherwise, we stop splitting here. + break + + break_idx = maybe_break_idx + + # --- Construct `next_value` + next_value = rest_value[:break_idx] + QUOTE + + # HACK: The following 'if' statement is a hack to fix the custom + # breakpoint index in the case of either: (a) substrings that were + # f-strings but will have the 'f' prefix removed OR (b) substrings + # that were not f-strings but will now become f-strings because of + # redundant use of the 'f' prefix (i.e. none of the substrings + # contain f-expressions but one or more of them had the 'f' prefix + # anyway; in which case, we will prepend 'f' to _all_ substrings). + # + # There is probably a better way to accomplish what is being done + # here... + # + # If this substring is an f-string, we _could_ remove the 'f' + # prefix, and the current custom split did NOT originally use a + # prefix... + if ( + next_value != self._normalize_f_string(next_value, prefix) + and use_custom_breakpoints + and not csplit.has_prefix + ): + # Then `csplit.break_idx` will be off by one after removing + # the 'f' prefix. + break_idx += 1 + next_value = rest_value[:break_idx] + QUOTE + + if drop_pointless_f_prefix: + next_value = self._normalize_f_string(next_value, prefix) + + # --- Construct `next_leaf` + next_leaf = Leaf(token.STRING, next_value) + insert_str_child(next_leaf) + self._maybe_normalize_string_quotes(next_leaf) + + # --- Construct `next_line` + next_line = line.clone() + maybe_append_string_operators(next_line) + next_line.append(next_leaf) + string_line_results.append(Ok(next_line)) + + rest_value = prefix + QUOTE + rest_value[break_idx:] + first_string_line = False + + yield from string_line_results + + if drop_pointless_f_prefix: + rest_value = self._normalize_f_string(rest_value, prefix) + + rest_leaf = Leaf(token.STRING, rest_value) + insert_str_child(rest_leaf) + + # NOTE: I could not find a test case that verifies that the following + # line is actually necessary, but it seems to be. Otherwise we risk + # not normalizing the last substring, right? + self._maybe_normalize_string_quotes(rest_leaf) + + last_line = line.clone() + maybe_append_string_operators(last_line) + + # If there are any leaves to the right of the target string... + if is_valid_index(string_idx + 1): + # We use `temp_value` here to determine how long the last line + # would be if we were to append all the leaves to the right of the + # target string to the last string line. + temp_value = rest_value + for leaf in LL[string_idx + 1 :]: + temp_value += str(leaf) + if leaf.type == token.LPAR: + break + + # Try to fit them all on the same line with the last substring... 
+            if (
+                len(temp_value) <= max_last_string()
+                or LL[string_idx + 1].type == token.COMMA
+            ):
+                last_line.append(rest_leaf)
+                append_leaves(last_line, line, LL[string_idx + 1 :])
+                yield Ok(last_line)
+            # Otherwise, place the last substring on one line and everything
+            # else on a line below that...
+            else:
+                last_line.append(rest_leaf)
+                yield Ok(last_line)
+
+                non_string_line = line.clone()
+                append_leaves(non_string_line, line, LL[string_idx + 1 :])
+                yield Ok(non_string_line)
+        # Else the target string was the last leaf...
+        else:
+            last_line.append(rest_leaf)
+            last_line.comments = line.comments.copy()
+            yield Ok(last_line)
+
+    def _iter_nameescape_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
+        """
+        Yields:
+            All ranges of @string which, if @string were to be split there,
+            would result in the splitting of an \\N{...} expression (which is NOT
+            allowed).
+        """
+        # True - the previous backslash was unescaped
+        # False - the previous backslash was escaped *or* there was no backslash
+        previous_was_unescaped_backslash = False
+        it = iter(enumerate(string))
+        for idx, c in it:
+            if c == "\\":
+                previous_was_unescaped_backslash = not previous_was_unescaped_backslash
+                continue
+            if not previous_was_unescaped_backslash or c != "N":
+                previous_was_unescaped_backslash = False
+                continue
+            previous_was_unescaped_backslash = False
+
+            begin = idx - 1  # the position of backslash before \N{...}
+            for idx, c in it:
+                if c == "}":
+                    end = idx
+                    break
+            else:
+                # malformed nameescape expression?
+                # should have been detected by AST parsing earlier...
+                raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
+            yield begin, end
+
+    def _iter_fexpr_slices(self, string: str) -> Iterator[Tuple[Index, Index]]:
+        """
+        Yields:
+            All ranges of @string which, if @string were to be split there,
+            would result in the splitting of an f-expression (which is NOT
+            allowed).
+        """
+        if "f" not in get_string_prefix(string).lower():
+            return
+        yield from iter_fexpr_spans(string)
+
+    def _get_illegal_split_indices(self, string: str) -> Set[Index]:
+        illegal_indices: Set[Index] = set()
+        iterators = [
+            self._iter_fexpr_slices(string),
+            self._iter_nameescape_slices(string),
+        ]
+        for it in iterators:
+            for begin, end in it:
+                illegal_indices.update(range(begin, end + 1))
+        return illegal_indices
+
+    def _get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]:
+        """
+        This method contains the algorithm that StringSplitter uses to
+        determine which character to split each string at.
+
+        Args:
+            @string: The substring that we are attempting to split.
+            @max_break_idx: The ideal break index. We will return this value if it
+                meets all the necessary conditions. In the likely event that it
+                doesn't, we will try to find the closest index BELOW @max_break_idx
+                that does. If that fails, we will expand our search by also
+                considering all valid indices ABOVE @max_break_idx.
+
+        Pre-Conditions:
+            * assert_is_leaf_string(@string)
+            * 0 <= @max_break_idx < len(@string)
+
+        Returns:
+            break_idx, if an index is able to be found that meets all of the
+            conditions listed in the 'Transformations' section of this class's
+            docstring.
+            OR
+            None, otherwise.
+        """
+        is_valid_index = is_valid_index_factory(string)
+
+        assert is_valid_index(max_break_idx)
+        assert_is_leaf_string(string)
+
+        _illegal_split_indices = self._get_illegal_split_indices(string)
+
+        def breaks_unsplittable_expression(i: Index) -> bool:
+            """
+            Returns:
+                True iff returning @i would result in the splitting of an
+                unsplittable expression (which is NOT allowed).
+            """
+            return i in _illegal_split_indices
+
+        def passes_all_checks(i: Index) -> bool:
+            """
+            Returns:
+                True iff ALL of the conditions listed in the 'Transformations'
+                section of this class's docstring would be met by returning @i.
+            """
+            is_space = string[i] == " "
+
+            is_not_escaped = True
+            j = i - 1
+            while is_valid_index(j) and string[j] == "\\":
+                is_not_escaped = not is_not_escaped
+                j -= 1
+
+            is_big_enough = (
+                len(string[i:]) >= self.MIN_SUBSTR_SIZE
+                and len(string[:i]) >= self.MIN_SUBSTR_SIZE
+            )
+            return (
+                is_space
+                and is_not_escaped
+                and is_big_enough
+                and not breaks_unsplittable_expression(i)
+            )
+
+        # First, we check all indices BELOW @max_break_idx.
+        break_idx = max_break_idx
+        while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx):
+            break_idx -= 1
+
+        if not passes_all_checks(break_idx):
+            # If that fails, we check all indices ABOVE @max_break_idx.
+            #
+            # If we are able to find a valid index here, the next line is going
+            # to be longer than the specified line length, but it's probably
+            # better than doing nothing at all.
+            break_idx = max_break_idx + 1
+            while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
+                break_idx += 1
+
+            if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
+                return None
+
+        return break_idx
+
+    def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
+        if self.normalize_strings:
+            leaf.value = normalize_string_quotes(leaf.value)
+
+    def _normalize_f_string(self, string: str, prefix: str) -> str:
+        """
+        Pre-Conditions:
+            * assert_is_leaf_string(@string)
+
+        Returns:
+            * If @string is an f-string that contains no f-expressions, we
+              return a string identical to @string except that the 'f' prefix
+              has been stripped and all double braces (i.e. '{{' or '}}') have
+              been normalized (i.e. turned into '{' or '}').
+              OR
+            * Otherwise, we return @string.
+        """
+        assert_is_leaf_string(string)
+
+        if "f" in prefix and not fstring_contains_expr(string):
+            new_prefix = prefix.replace("f", "")
+
+            temp = string[len(prefix) :]
+            temp = re.sub(r"\{\{", "{", temp)
+            temp = re.sub(r"\}\}", "}", temp)
+            new_string = temp
+
+            return f"{new_prefix}{new_string}"
+        else:
+            return string
+
+    def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> List[Leaf]:
+        LL = list(leaves)
+
+        string_op_leaves = []
+        i = 0
+        while LL[i].type in self.STRING_OPERATORS + [token.NAME]:
+            prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip())
+            string_op_leaves.append(prefix_leaf)
+            i += 1
+        return string_op_leaves
+
+
+class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin):
+    """
+    StringTransformer that wraps strings in parens and then splits at the LPAR.
+
+    Requirements:
+        All of the requirements listed in BaseStringSplitter's docstring in
+        addition to the requirements listed below:
+
+        * The line is a return/yield statement, which returns/yields a string.
+          OR
+        * The line is part of a ternary expression (e.g. `x = y if cond else
+          z`) such that the line starts with `else <string>`, where <string> is
+          some string.
+          OR
+        * The line is an assert statement, which ends with a string.
+          OR
+        * The line is an assignment statement (e.g. `x = <string>` or `x +=
+          <string>`) such that the variable is being assigned the value of some
+          string.
+          OR
+        * The line is a dictionary key assignment where some valid key is being
+          assigned the value of some string.
+          OR
+        * The line starts with an "atom" string that prefers to be wrapped in
+          parens. It's preferred to be wrapped when it is an immediate child of
+          a list/set/tuple literal, AND the string is surrounded by commas (or is
+          the first/last child).
+
+    Transformations:
+        The chosen string is wrapped in parentheses and then split at the LPAR.
+
+        We then have one line which ends with an LPAR and another line that
+        starts with the chosen string. The latter line is then split again at
+        the RPAR. This results in the RPAR (and possibly a trailing comma)
+        being placed on its own line.
+
+        NOTE: If any leaves exist to the right of the chosen string (except
+        for a trailing comma, which would be placed after the RPAR), those
+        leaves are placed inside the parentheses. In effect, the chosen
+        string is not necessarily being "wrapped" by parentheses. We can,
+        however, count on the LPAR being placed directly before the chosen
+        string.
+
+        In other words, StringParenWrapper creates "atom" strings. These
+        can then be split again by StringSplitter, if necessary.
+
+    Collaborations:
+        In the event that a string line split by StringParenWrapper is
+        changed such that it no longer needs to be given its own line,
+        StringParenWrapper relies on StringParenStripper to clean up the
+        parentheses it created.
+
+        For "atom" strings that prefer to be wrapped in parens, it requires
+        StringSplitter to hold the split until the string is wrapped in parens.
+    """
+
+    def do_splitter_match(self, line: Line) -> TMatchResult:
+        LL = line.leaves
+
+        if line.leaves[-1].type in OPENING_BRACKETS:
+            return TErr(
+                "Cannot wrap parens around a line that ends in an opening bracket."
+            )
+
+        string_idx = (
+            self._return_match(LL)
+            or self._else_match(LL)
+            or self._assert_match(LL)
+            or self._assign_match(LL)
+            or self._dict_match(LL)
+            or self._prefer_paren_wrap_match(LL)
+        )
+
+        if string_idx is not None:
+            string_value = line.leaves[string_idx].value
+            # If the string has no spaces...
+            if " " not in string_value:
+                # And will still violate the line length limit when split...
+                max_string_length = self.line_length - ((line.depth + 1) * 4)
+                if len(string_value) > max_string_length:
+                    # And has no associated custom splits...
+                    if not self.has_custom_splits(string_value):
+                        # Then we should NOT put this string on its own line.
+                        return TErr(
+                            "We do not wrap long strings in parentheses when the"
+                            " resultant line would still be over the specified line"
+                            " length and can't be split further by StringSplitter."
+                        )
+            return Ok(string_idx)
+
+        return TErr("This line does not contain any non-atomic strings.")
+
+    @staticmethod
+    def _return_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the return/yield statement
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+            OR
+            None, otherwise.
+        """
+        # If this line is part of a return/yield statement and the first leaf
+        # contains either the "return" or "yield" keywords...
+        if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
+            0
+        ].value in ["return", "yield"]:
+            is_valid_index = is_valid_index_factory(LL)
+
+            idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
+            # The next visible leaf MUST contain a string...
+            if is_valid_index(idx) and LL[idx].type == token.STRING:
+                return idx
+
+        return None
+
+    @staticmethod
+    def _else_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the ternary expression
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+                OR
+            None, otherwise.
+        """
+        # If this line is part of a ternary expression and the first leaf
+        # contains the "else" keyword...
+        if (
+            parent_type(LL[0]) == syms.test
+            and LL[0].type == token.NAME
+            and LL[0].value == "else"
+        ):
+            is_valid_index = is_valid_index_factory(LL)
+
+            idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
+            # The next visible leaf MUST contain a string...
+            if is_valid_index(idx) and LL[idx].type == token.STRING:
+                return idx
+
+        return None
+
+    @staticmethod
+    def _assert_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the assert statement
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+                OR
+            None, otherwise.
+        """
+        # If this line is part of an assert statement and the first leaf
+        # contains the "assert" keyword...
+        if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
+            is_valid_index = is_valid_index_factory(LL)
+
+            for i, leaf in enumerate(LL):
+                # We MUST find a comma...
+                if leaf.type == token.COMMA:
+                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+                    # That comma MUST be followed by a string...
+                    if is_valid_index(idx) and LL[idx].type == token.STRING:
+                        string_idx = idx
+
+                        # Skip the string trailer, if one exists.
+                        string_parser = StringParser()
+                        idx = string_parser.parse(LL, string_idx)
+
+                        # But no more leaves are allowed...
+                        if not is_valid_index(idx):
+                            return string_idx
+
+        return None
+
+    @staticmethod
+    def _assign_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the assignment statement
+            requirements listed in the 'Requirements' section of this class's
+            docstring.
+                OR
+            None, otherwise.
+        """
+        # If this line is part of an expression statement or is a function
+        # argument AND the first leaf contains a variable name...
+        if (
+            parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
+            and LL[0].type == token.NAME
+        ):
+            is_valid_index = is_valid_index_factory(LL)
+
+            for i, leaf in enumerate(LL):
+                # We MUST find either an '=' or '+=' symbol...
+                if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
+                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+                    # That symbol MUST be followed by a string...
+                    if is_valid_index(idx) and LL[idx].type == token.STRING:
+                        string_idx = idx
+
+                        # Skip the string trailer, if one exists.
+                        string_parser = StringParser()
+                        idx = string_parser.parse(LL, string_idx)
+
+                        # The next leaf MAY be a comma iff this line is part
+                        # of a function argument...
+                        if (
+                            parent_type(LL[0]) == syms.argument
+                            and is_valid_index(idx)
+                            and LL[idx].type == token.COMMA
+                        ):
+                            idx += 1
+
+                        # But no more leaves are allowed...
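+                        # (e.g. `x = "..."` ends right after the string,
+                        # whereas `x = "..." + y` still has trailing leaves
+                        # and is therefore rejected.)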
+                        if not is_valid_index(idx):
+                            return string_idx
+
+        return None
+
+    @staticmethod
+    def _dict_match(LL: List[Leaf]) -> Optional[int]:
+        """
+        Returns:
+            string_idx such that @LL[string_idx] is equal to our target (i.e.
+            matched) string, if this line matches the dictionary key assignment
+            statement requirements listed in the 'Requirements' section of this
+            class's docstring.
+                OR
+            None, otherwise.
+        """
+        # If this line is part of a dictionary key assignment...
+        if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]:
+            is_valid_index = is_valid_index_factory(LL)
+
+            for i, leaf in enumerate(LL):
+                # We MUST find a colon...
+                if leaf.type == token.COLON:
+                    idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
+
+                    # That colon MUST be followed by a string...
+                    if is_valid_index(idx) and LL[idx].type == token.STRING:
+                        string_idx = idx
+
+                        # Skip the string trailer, if one exists.
+                        string_parser = StringParser()
+                        idx = string_parser.parse(LL, string_idx)
+
+                        # That string MAY be followed by a comma...
+                        if is_valid_index(idx) and LL[idx].type == token.COMMA:
+                            idx += 1
+
+                        # But no more leaves are allowed...
+                        if not is_valid_index(idx):
+                            return string_idx
+
+        return None
+
+    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
+        LL = line.leaves
+
+        is_valid_index = is_valid_index_factory(LL)
+        insert_str_child = insert_str_child_factory(LL[string_idx])
+
+        comma_idx = -1
+        ends_with_comma = False
+        if LL[comma_idx].type == token.COMMA:
+            ends_with_comma = True
+
+        leaves_to_steal_comments_from = [LL[string_idx]]
+        if ends_with_comma:
+            leaves_to_steal_comments_from.append(LL[comma_idx])
+
+        # --- First Line
+        first_line = line.clone()
+        left_leaves = LL[:string_idx]
+
+        # We have to remember to account for (possibly invisible) LPAR and RPAR
+        # leaves that already wrapped the target string. If these leaves do
+        # exist, we will replace them with our own LPAR and RPAR leaves.
+        old_parens_exist = False
+        if left_leaves and left_leaves[-1].type == token.LPAR:
+            old_parens_exist = True
+            leaves_to_steal_comments_from.append(left_leaves[-1])
+            left_leaves.pop()
+
+        append_leaves(first_line, line, left_leaves)
+
+        lpar_leaf = Leaf(token.LPAR, "(")
+        if old_parens_exist:
+            replace_child(LL[string_idx - 1], lpar_leaf)
+        else:
+            insert_str_child(lpar_leaf)
+        first_line.append(lpar_leaf)
+
+        # We throw inline comments that were originally to the right of the
+        # target string to the top line. They will now be shown to the right of
+        # the LPAR.
+        for leaf in leaves_to_steal_comments_from:
+            for comment_leaf in line.comments_after(leaf):
+                first_line.append(comment_leaf, preformatted=True)
+
+        yield Ok(first_line)
+
+        # --- Middle (String) Line
+        # We only need to yield one (possibly too long) string line, since the
+        # `StringSplitter` will break it down further if necessary.
+        string_value = LL[string_idx].value
+        string_line = Line(
+            mode=line.mode,
+            depth=line.depth + 1,
+            inside_brackets=True,
+            should_split_rhs=line.should_split_rhs,
+            magic_trailing_comma=line.magic_trailing_comma,
+        )
+        string_leaf = Leaf(token.STRING, string_value)
+        insert_str_child(string_leaf)
+        string_line.append(string_leaf)
+
+        old_rpar_leaf = None
+        if is_valid_index(string_idx + 1):
+            right_leaves = LL[string_idx + 1 :]
+            if ends_with_comma:
+                right_leaves.pop()
+
+            if old_parens_exist:
+                assert right_leaves and right_leaves[-1].type == token.RPAR, (
+                    "Apparently, old parentheses do NOT exist?!"
+ f" (left_leaves={left_leaves}, right_leaves={right_leaves})" + ) + old_rpar_leaf = right_leaves.pop() + + append_leaves(string_line, line, right_leaves) + + yield Ok(string_line) + + # --- Last Line + last_line = line.clone() + last_line.bracket_tracker = first_line.bracket_tracker + + new_rpar_leaf = Leaf(token.RPAR, ")") + if old_rpar_leaf is not None: + replace_child(old_rpar_leaf, new_rpar_leaf) + else: + insert_str_child(new_rpar_leaf) + last_line.append(new_rpar_leaf) + + # If the target string ended with a comma, we place this comma to the + # right of the RPAR on the last line. + if ends_with_comma: + comma_leaf = Leaf(token.COMMA, ",") + replace_child(LL[comma_idx], comma_leaf) + last_line.append(comma_leaf) + + yield Ok(last_line) + + +class StringParser: + """ + A state machine that aids in parsing a string's "trailer", which can be + either non-existent, an old-style formatting sequence (e.g. `% varX` or `% + (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX, + varY)`). + + NOTE: A new StringParser object MUST be instantiated for each string + trailer we need to parse. + + Examples: + We shall assume that `line` equals the `Line` object that corresponds + to the following line of python code: + ``` + x = "Some {}.".format("String") + some_other_string + ``` + + Furthermore, we will assume that `string_idx` is some index such that: + ``` + assert line.leaves[string_idx].value == "Some {}." + ``` + + The following code snippet then holds: + ``` + string_parser = StringParser() + idx = string_parser.parse(line.leaves, string_idx) + assert line.leaves[idx].type == token.PLUS + ``` + """ + + DEFAULT_TOKEN: Final = 20210605 + + # String Parser States + START: Final = 1 + DOT: Final = 2 + NAME: Final = 3 + PERCENT: Final = 4 + SINGLE_FMT_ARG: Final = 5 + LPAR: Final = 6 + RPAR: Final = 7 + DONE: Final = 8 + + # Lookup Table for Next State + _goto: Final[Dict[Tuple[ParserState, NodeType], ParserState]] = { + # A string trailer may start with '.' OR '%'. + (START, token.DOT): DOT, + (START, token.PERCENT): PERCENT, + (START, DEFAULT_TOKEN): DONE, + # A '.' MUST be followed by an attribute or method name. + (DOT, token.NAME): NAME, + # A method name MUST be followed by an '(', whereas an attribute name + # is the last symbol in the string trailer. + (NAME, token.LPAR): LPAR, + (NAME, DEFAULT_TOKEN): DONE, + # A '%' symbol can be followed by an '(' or a single argument (e.g. a + # string or variable name). + (PERCENT, token.LPAR): LPAR, + (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG, + # If a '%' symbol is followed by a single argument, that argument is + # the last leaf in the string trailer. + (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE, + # If present, a ')' symbol is the last symbol in a string trailer. + # (NOTE: LPARS and nested RPARS are not included in this lookup table, + # since they are treated as a special case by the parsing logic in this + # classes' implementation.) + (RPAR, DEFAULT_TOKEN): DONE, + } + + def __init__(self) -> None: + self._state = self.START + self._unmatched_lpars = 0 + + def parse(self, leaves: List[Leaf], string_idx: int) -> int: + """ + Pre-conditions: + * @leaves[@string_idx].type == token.STRING + + Returns: + The index directly after the last leaf which is apart of the string + trailer, if a "trailer" exists. + OR + @string_idx + 1, if no string "trailer" exists. 
+ """ + assert leaves[string_idx].type == token.STRING + + idx = string_idx + 1 + while idx < len(leaves) and self._next_state(leaves[idx]): + idx += 1 + return idx + + def _next_state(self, leaf: Leaf) -> bool: + """ + Pre-conditions: + * On the first call to this function, @leaf MUST be the leaf that + was directly after the string leaf in question (e.g. if our target + string is `line.leaves[i]` then the first call to this method must + be `line.leaves[i + 1]`). + * On the next call to this function, the leaf parameter passed in + MUST be the leaf directly following @leaf. + + Returns: + True iff @leaf is apart of the string's trailer. + """ + # We ignore empty LPAR or RPAR leaves. + if is_empty_par(leaf): + return True + + next_token = leaf.type + if next_token == token.LPAR: + self._unmatched_lpars += 1 + + current_state = self._state + + # The LPAR parser state is a special case. We will return True until we + # find the matching RPAR token. + if current_state == self.LPAR: + if next_token == token.RPAR: + self._unmatched_lpars -= 1 + if self._unmatched_lpars == 0: + self._state = self.RPAR + # Otherwise, we use a lookup table to determine the next state. + else: + # If the lookup table matches the current state to the next + # token, we use the lookup table. + if (current_state, next_token) in self._goto: + self._state = self._goto[current_state, next_token] + else: + # Otherwise, we check if a the current state was assigned a + # default. + if (current_state, self.DEFAULT_TOKEN) in self._goto: + self._state = self._goto[current_state, self.DEFAULT_TOKEN] + # If no default has been assigned, then this parser has a logic + # error. + else: + raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") + + if self._state == self.DONE: + return False + + return True + + +def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]: + """ + Factory for a convenience function that is used to orphan @string_leaf + and then insert multiple new leaves into the same part of the node + structure that @string_leaf had originally occupied. + + Examples: + Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N = + string_leaf.parent`. Assume the node `N` has the following + original structure: + + Node( + expr_stmt, [ + Leaf(NAME, 'x'), + Leaf(EQUAL, '='), + Leaf(STRING, '"foo"'), + ] + ) + + We then run the code snippet shown below. 
+        ```
+        insert_str_child = insert_str_child_factory(string_leaf)
+
+        lpar = Leaf(token.LPAR, '(')
+        insert_str_child(lpar)
+
+        bar = Leaf(token.STRING, '"bar"')
+        insert_str_child(bar)
+
+        rpar = Leaf(token.RPAR, ')')
+        insert_str_child(rpar)
+        ```
+
+        After which point, it follows that `string_leaf.parent is None` and
+        the node `N` now has the following structure:
+
+        Node(
+            expr_stmt, [
+                Leaf(NAME, 'x'),
+                Leaf(EQUAL, '='),
+                Leaf(LPAR, '('),
+                Leaf(STRING, '"bar"'),
+                Leaf(RPAR, ')'),
+            ]
+        )
+    """
+    string_parent = string_leaf.parent
+    string_child_idx = string_leaf.remove()
+
+    def insert_str_child(child: LN) -> None:
+        nonlocal string_child_idx
+
+        assert string_parent is not None
+        assert string_child_idx is not None
+
+        string_parent.insert_child(string_child_idx, child)
+        string_child_idx += 1
+
+    return insert_str_child
+
+
+def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]:
+    """
+    Examples:
+        ```
+        my_list = [1, 2, 3]
+
+        is_valid_index = is_valid_index_factory(my_list)
+
+        assert is_valid_index(0)
+        assert is_valid_index(2)
+
+        assert not is_valid_index(3)
+        assert not is_valid_index(-1)
+        ```
+    """
+
+    def is_valid_index(idx: int) -> bool:
+        """
+        Returns:
+            True iff @idx is non-negative AND seq[@idx] does NOT raise an
+            IndexError.
+        """
+        return 0 <= idx < len(seq)
+
+    return is_valid_index
diff --git a/src/black_primer/cli.py b/src/black_primer/cli.py
deleted file mode 100644
index b2d4159..0000000
--- a/src/black_primer/cli.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# coding=utf8
-
-import asyncio
-import logging
-import sys
-from datetime import datetime
-from pathlib import Path
-from shutil import rmtree, which
-from tempfile import gettempdir
-from typing import Any, Union
-
-import click
-
-from black_primer import lib
-
-
-DEFAULT_CONFIG = Path(__file__).parent / "primer.json"
-_timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
-DEFAULT_WORKDIR = Path(gettempdir()) / f"primer.{_timestamp}"
-LOG = logging.getLogger(__name__)
-
-
-def _handle_debug(
-    ctx: click.core.Context,
-    param: Union[click.core.Option, click.core.Parameter],
-    debug: Union[bool, int, str],
-) -> Union[bool, int, str]:
-    """Turn on debugging if asked otherwise INFO default"""
-    log_level = logging.DEBUG if debug else logging.INFO
-    logging.basicConfig(
-        format="[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)",
-        level=log_level,
-    )
-    return debug
-
-
-async def async_main(
-    config: str,
-    debug: bool,
-    keep: bool,
-    long_checkouts: bool,
-    rebase: bool,
-    workdir: str,
-    workers: int,
-) -> int:
-    work_path = Path(workdir)
-    if not work_path.exists():
-        LOG.debug(f"Creating {work_path}")
-        work_path.mkdir()
-
-    if not which("black"):
-        LOG.error("Can not find 'black' executable in PATH. No point in running")
-        return -1
-
-    try:
-        ret_val = await lib.process_queue(
-            config, work_path, workers, keep, long_checkouts, rebase
-        )
-        return int(ret_val)
-    finally:
-        if not keep and work_path.exists():
-            LOG.debug(f"Removing {work_path}")
-            rmtree(work_path, onerror=lib.handle_PermissionError)
-
-    return -2
-
-
-@click.command(context_settings={"help_option_names": ["-h", "--help"]})
-@click.option(
-    "-c",
-    "--config",
-    default=str(DEFAULT_CONFIG),
-    type=click.Path(exists=True),
-    show_default=True,
-    help="JSON config file path",
-)
-@click.option(
-    "--debug",
-    is_flag=True,
-    callback=_handle_debug,
-    show_default=True,
-    help="Turn on debug logging",
-)
-@click.option(
-    "-k",
-    "--keep",
-    is_flag=True,
-    show_default=True,
-    help="Keep workdir + repos post run",
-)
-@click.option(
-    "-L",
-    "--long-checkouts",
-    is_flag=True,
-    show_default=True,
-    help="Pull big projects to test",
-)
-@click.option(
-    "-R",
-    "--rebase",
-    is_flag=True,
-    show_default=True,
-    help="Rebase project if already checked out",
-)
-@click.option(
-    "-w",
-    "--workdir",
-    default=str(DEFAULT_WORKDIR),
-    type=click.Path(exists=False),
-    show_default=True,
-    help="Directory path for repo checkouts",
-)
-@click.option(
-    "-W",
-    "--workers",
-    default=2,
-    type=int,
-    show_default=True,
-    help="Number of parallel worker coroutines",
-)
-@click.pass_context
-def main(ctx: click.core.Context, **kwargs: Any) -> None:
-    """primer - prime projects for blackening... 🏴"""
-    LOG.debug(f"Starting {sys.argv[0]}")
-    # TODO: Change to asyncio.run when Black >= 3.7 only
-    loop = asyncio.get_event_loop()
-    try:
-        ctx.exit(loop.run_until_complete(async_main(**kwargs)))
-    finally:
-        loop.close()
-
-
-if __name__ == "__main__":  # pragma: nocover
-    main()
diff --git a/src/black_primer/lib.py b/src/black_primer/lib.py
deleted file mode 100644
index ecc704f..0000000
--- a/src/black_primer/lib.py
+++ /dev/null
@@ -1,344 +0,0 @@
-import asyncio
-import errno
-import json
-import logging
-import os
-import stat
-import sys
-from functools import partial
-from pathlib import Path
-from platform import system
-from shutil import rmtree, which
-from subprocess import CalledProcessError
-from sys import version_info
-from tempfile import TemporaryDirectory
-from typing import Any, Callable, Dict, NamedTuple, Optional, Sequence, Tuple
-from urllib.parse import urlparse
-
-import click
-
-
-WINDOWS = system() == "Windows"
-BLACK_BINARY = "black.exe" if WINDOWS else "black"
-GIT_BINARY = "git.exe" if WINDOWS else "git"
-LOG = logging.getLogger(__name__)
-
-
-# Windows needs a ProactorEventLoop if you want to exec subprocesses
-# Starting with 3.8 this is the default - can remove when Black >= 3.8
-# mypy only respects sys.platform if directly in the evaluation
-# https://mypy.readthedocs.io/en/latest/common_issues.html#python-version-and-system-platform-checks  # noqa: B950
-if sys.platform == "win32":
-    asyncio.set_event_loop(asyncio.ProactorEventLoop())
-
-
-class Results(NamedTuple):
-    stats: Dict[str, int] = {}
-    failed_projects: Dict[str, CalledProcessError] = {}
-
-
-async def _gen_check_output(
-    cmd: Sequence[str],
-    timeout: float = 300,
-    env: Optional[Dict[str, str]] = None,
-    cwd: Optional[Path] = None,
-) -> Tuple[bytes, bytes]:
-    process = await asyncio.create_subprocess_exec(
-        *cmd,
-        stdout=asyncio.subprocess.PIPE,
-        stderr=asyncio.subprocess.STDOUT,
-        env=env,
-        cwd=cwd,
-    )
-    try:
-        (stdout, stderr) = await asyncio.wait_for(process.communicate(), timeout)
-    except asyncio.TimeoutError:
-        process.kill()
-        await process.wait()
-        raise
-
-    # A non-optional timeout was supplied to asyncio.wait_for, guaranteeing
-    # a timeout or completed process. A terminated Python process will have a
-    # non-empty returncode value.
-    assert process.returncode is not None
-
-    if process.returncode != 0:
-        cmd_str = " ".join(cmd)
-        raise CalledProcessError(
-            process.returncode, cmd_str, output=stdout, stderr=stderr
-        )
-
-    return (stdout, stderr)
-
-
-def analyze_results(project_count: int, results: Results) -> int:
-    failed_pct = round(((results.stats["failed"] / project_count) * 100), 2)
-    success_pct = round(((results.stats["success"] / project_count) * 100), 2)
-
-    click.secho("-- primer results 📊 --\n", bold=True)
-    click.secho(
-        f"{results.stats['success']} / {project_count} succeeded ({success_pct}%) ✅",
-        bold=True,
-        fg="green",
-    )
-    click.secho(
-        f"{results.stats['failed']} / {project_count} FAILED ({failed_pct}%) 💩",
-        bold=bool(results.stats["failed"]),
-        fg="red",
-    )
-    s = "" if results.stats["disabled"] == 1 else "s"
-    click.echo(f" - {results.stats['disabled']} project{s} disabled by config")
-    s = "" if results.stats["wrong_py_ver"] == 1 else "s"
-    click.echo(
-        f" - {results.stats['wrong_py_ver']} project{s} skipped due to Python version"
-    )
-    click.echo(
-        f" - {results.stats['skipped_long_checkout']} skipped due to long checkout"
-    )
-
-    if results.failed_projects:
-        click.secho("\nFailed projects:\n", bold=True)
-
-    for project_name, project_cpe in results.failed_projects.items():
-        print(f"## {project_name}:")
-        print(f" - Returned {project_cpe.returncode}")
-        if project_cpe.stderr:
-            print(f" - stderr:\n{project_cpe.stderr.decode('utf8')}")
-        if project_cpe.stdout:
-            print(f" - stdout:\n{project_cpe.stdout.decode('utf8')}")
-        print("")
-
-    return results.stats["failed"]
-
-
-async def black_run(
-    repo_path: Path, project_config: Dict[str, Any], results: Results
-) -> None:
-    """Run Black and record failures"""
-    cmd = [str(which(BLACK_BINARY))]
-    if "cli_arguments" in project_config and project_config["cli_arguments"]:
-        cmd.extend(*project_config["cli_arguments"])
-    cmd.extend(["--check", "--diff", "."])
-
-    with TemporaryDirectory() as tmp_path:
-        # Prevent reading top-level user configs by manipulating envionment variables
-        env = {
-            **os.environ,
-            "XDG_CONFIG_HOME": tmp_path,  # Unix-like
-            "USERPROFILE": tmp_path,  # Windows (changes `Path.home()` output)
-        }
-
-        try:
-            _stdout, _stderr = await _gen_check_output(cmd, cwd=repo_path, env=env)
-        except asyncio.TimeoutError:
-            results.stats["failed"] += 1
-            LOG.error(f"Running black for {repo_path} timed out ({cmd})")
-        except CalledProcessError as cpe:
-            # TODO: Tune for smarter for higher signal
-            # If any other return value than 1 we raise - can disable project in config
-            if cpe.returncode == 1:
-                if not project_config["expect_formatting_changes"]:
-                    results.stats["failed"] += 1
-                    results.failed_projects[repo_path.name] = cpe
-                else:
-                    results.stats["success"] += 1
-                return
-            elif cpe.returncode > 1:
-                results.stats["failed"] += 1
-                results.failed_projects[repo_path.name] = cpe
-                return
-
-            LOG.error(f"Unknown error with {repo_path}")
-            raise
-
-        # If we get here and expect formatting changes something is up
-        if project_config["expect_formatting_changes"]:
-            results.stats["failed"] += 1
-            results.failed_projects[repo_path.name] = CalledProcessError(
-                0, cmd, b"Expected formatting changes but didn't get any!", b""
-            )
-            return
-
-        results.stats["success"] += 1
-
-
-async def git_checkout_or_rebase(
-    work_path: Path,
-    project_config: Dict[str, Any],
-    rebase: bool = False,
-    *,
-    depth: int = 1,
-) -> Optional[Path]:
-    """git Clone project or rebase"""
-    git_bin = str(which(GIT_BINARY))
-    if not git_bin:
-        LOG.error("No git binary found")
-        return None
-
-    repo_url_parts = urlparse(project_config["git_clone_url"])
-    path_parts = repo_url_parts.path[1:].split("/", maxsplit=1)
-
-    repo_path: Path = work_path / path_parts[1].replace(".git", "")
-    cmd = [git_bin, "clone", "--depth", str(depth), project_config["git_clone_url"]]
-    cwd = work_path
-    if repo_path.exists() and rebase:
-        cmd = [git_bin, "pull", "--rebase"]
-        cwd = repo_path
-    elif repo_path.exists():
-        return repo_path
-
-    try:
-        _stdout, _stderr = await _gen_check_output(cmd, cwd=cwd)
-    except (asyncio.TimeoutError, CalledProcessError) as e:
-        LOG.error(f"Unable to git clone / pull {project_config['git_clone_url']}: {e}")
-        return None
-
-    return repo_path
-
-
-def handle_PermissionError(
-    func: Callable, path: Path, exc: Tuple[Any, Any, Any]
-) -> None:
-    """
-    Handle PermissionError during shutil.rmtree.
-
-    This checks if the erroring function is either 'os.rmdir' or 'os.unlink', and that
-    the error was EACCES (i.e. Permission denied). If true, the path is set writable,
-    readable, and executable by everyone. Finally, it tries the error causing delete
-    operation again.
-
-    If the check is false, then the original error will be reraised as this function
-    can't handle it.
-    """
-    excvalue = exc[1]
-    LOG.debug(f"Handling {excvalue} from {func.__name__}... ")
-    if func in (os.rmdir, os.unlink) and excvalue.errno == errno.EACCES:
-        LOG.debug(f"Setting {path} writable, readable, and executable by everyone... ")
-        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # chmod 0777
-        func(path)  # Try the error causing delete operation again
-    else:
-        raise
-
-
-async def load_projects_queue(
-    config_path: Path,
-) -> Tuple[Dict[str, Any], asyncio.Queue]:
-    """Load project config and fill queue with all the project names"""
-    with config_path.open("r") as cfp:
-        config = json.load(cfp)
-
-    # TODO: Offer more options here
-    # e.g. Run on X random packages or specific sub list etc.
-    project_names = sorted(config["projects"].keys())
-    queue: asyncio.Queue = asyncio.Queue(maxsize=len(project_names))
-    for project in project_names:
-        await queue.put(project)
-
-    return config, queue
-
-
-async def project_runner(
-    idx: int,
-    config: Dict[str, Any],
-    queue: asyncio.Queue,
-    work_path: Path,
-    results: Results,
-    long_checkouts: bool = False,
-    rebase: bool = False,
-    keep: bool = False,
-) -> None:
-    """Check out project and run Black on it + record result"""
-    loop = asyncio.get_event_loop()
-    py_version = f"{version_info[0]}.{version_info[1]}"
-    while True:
-        try:
-            project_name = queue.get_nowait()
-        except asyncio.QueueEmpty:
-            LOG.debug(f"project_runner {idx} exiting")
-            return
-        LOG.debug(f"worker {idx} working on {project_name}")
-
-        project_config = config["projects"][project_name]
-
-        # Check if disabled by config
-        if "disabled" in project_config and project_config["disabled"]:
-            results.stats["disabled"] += 1
-            LOG.info(f"Skipping {project_name} as it's disabled via config")
-            continue
-
-        # Check if we should run on this version of Python
-        if (
-            "all" not in project_config["py_versions"]
-            and py_version not in project_config["py_versions"]
-        ):
-            results.stats["wrong_py_ver"] += 1
-            LOG.debug(f"Skipping {project_name} as it's not enabled for {py_version}")
-            continue
-
-        # Check if we're doing big projects / long checkouts
-        if not long_checkouts and project_config["long_checkout"]:
-            results.stats["skipped_long_checkout"] += 1
-            LOG.debug(f"Skipping {project_name} as it's configured as a long checkout")
-            continue
-
-        repo_path = await git_checkout_or_rebase(work_path, project_config, rebase)
-        if not repo_path:
-            continue
-        await black_run(repo_path, project_config, results)
-
-        if not keep:
-            LOG.debug(f"Removing {repo_path}")
-            rmtree_partial = partial(
-                rmtree, path=repo_path, onerror=handle_PermissionError
-            )
-            await loop.run_in_executor(None, rmtree_partial)
-
-        LOG.info(f"Finished {project_name}")
-
-
-async def process_queue(
-    config_file: str,
-    work_path: Path,
-    workers: int,
-    keep: bool = False,
-    long_checkouts: bool = False,
-    rebase: bool = False,
-) -> int:
-    """
-    Process the queue with X workers and evaluate results
-
-    Success is guaged via the config "expect_formatting_changes"
-
-    Integer return equals the number of failed projects
-    """
-    results = Results()
-    results.stats["disabled"] = 0
-    results.stats["failed"] = 0
-    results.stats["skipped_long_checkout"] = 0
-    results.stats["success"] = 0
-    results.stats["wrong_py_ver"] = 0
-
-    config, queue = await load_projects_queue(Path(config_file))
-    project_count = queue.qsize()
-    s = "" if project_count == 1 else "s"
-    LOG.info(f"{project_count} project{s} to run Black over")
-    if project_count < 1:
-        return -1
-
-    s = "" if workers == 1 else "s"
-    LOG.debug(f"Using {workers} parallel worker{s} to run Black")
-    # Wait until we finish running all the projects before analyzing
-    await asyncio.gather(
-        *[
-            project_runner(
-                i, config, queue, work_path, results, long_checkouts, rebase, keep
-            )
-            for i in range(workers)
-        ]
-    )
-
-    LOG.info("Analyzing results")
-    return analyze_results(project_count, results)
-
-
-if __name__ == "__main__":  # pragma: nocover
-    raise NotImplementedError("lib is a library, funnily enough.")
diff --git a/src/black_primer/primer.json b/src/black_primer/primer.json
deleted file mode 100644
index 75469ad..0000000
--- a/src/black_primer/primer.json
+++ /dev/null
@@ -1,142 +0,0 @@
-{
-  "configuration_format_version": 20200509,
-  "projects": {
"aioexabgp": { - "cli_arguments": [], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/cooperlees/aioexabgp.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "attrs": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/python-attrs/attrs.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "bandersnatch": { - "cli_arguments": [], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/pypa/bandersnatch.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "channels": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/django/channels.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "django": { - "disabled_reason": "black --check --diff returned 123 on two files", - "disabled": true, - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/django/django.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "flake8-bugbear": { - "cli_arguments": [], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/PyCQA/flake8-bugbear.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "hypothesis": { - "cli_arguments": [], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/HypothesisWorks/hypothesis.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "pandas": { - "disabled_reason": "black --check --diff returned 123 on one file", - "disabled": true, - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/pandas-dev/pandas.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "pillow": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/python-pillow/Pillow.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "poetry": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/python-poetry/poetry.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "pyanalyze": { - "cli_arguments": [], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/quora/pyanalyze.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "pyramid": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/Pylons/pyramid.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "ptr": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/facebookincubator/ptr.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "pytest": { - "cli_arguments": [], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/pytest-dev/pytest.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "sqlalchemy": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/sqlalchemy/sqlalchemy.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "tox": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/tox-dev/tox.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "typeshed": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": 
"https://github.com/python/typeshed.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "virtualenv": { - "cli_arguments": [], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/pypa/virtualenv.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "warehouse": { - "cli_arguments": [], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/pypa/warehouse.git", - "long_checkout": false, - "py_versions": ["all"] - } - } -} diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py index fc68473..ba4750b 100644 --- a/src/blackd/__init__.py +++ b/src/blackd/__init__.py @@ -1,6 +1,5 @@ import asyncio import logging -import sys from concurrent.futures import Executor, ProcessPoolExecutor from datetime import datetime from functools import partial @@ -9,20 +8,20 @@ try: from aiohttp import web - import aiohttp_cors + + from .middlewares import cors except ImportError as ie: - print( + raise ImportError( f"aiohttp dependency is not installed: {ie}. " + "Please re-install black with the '[d]' extra install " - + "to obtain aiohttp_cors: `pip install black[d]`", - file=sys.stderr, - ) - sys.exit(-1) + + "to obtain aiohttp_cors: `pip install black[d]`" + ) from None -import black import click +import black from _black_version import version as __version__ +from black.concurrency import maybe_install_uvloop # This is used internally by tests to shut down the server prematurely _stop_signal = asyncio.Event() @@ -31,8 +30,10 @@ PROTOCOL_VERSION_HEADER = "X-Protocol-Version" LINE_LENGTH_HEADER = "X-Line-Length" PYTHON_VARIANT_HEADER = "X-Python-Variant" +SKIP_SOURCE_FIRST_LINE = "X-Skip-Source-First-Line" SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization" SKIP_MAGIC_TRAILING_COMMA = "X-Skip-Magic-Trailing-Comma" +PREVIEW = "X-Preview" FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe" DIFF_HEADER = "X-Diff" @@ -40,8 +41,10 @@ PROTOCOL_VERSION_HEADER, LINE_LENGTH_HEADER, PYTHON_VARIANT_HEADER, + SKIP_SOURCE_FIRST_LINE, SKIP_STRING_NORMALIZATION_HEADER, SKIP_MAGIC_TRAILING_COMMA, + PREVIEW, FAST_OR_SAFE_HEADER, DIFF_HEADER, ] @@ -69,20 +72,11 @@ def main(bind_host: str, bind_port: int) -> None: def make_app() -> web.Application: - app = web.Application() - executor = ProcessPoolExecutor() - - cors = aiohttp_cors.setup(app) - resource = cors.add(app.router.add_resource("/")) - cors.add( - resource.add_route("POST", partial(handle, executor=executor)), - { - "*": aiohttp_cors.ResourceOptions( - allow_headers=(*BLACK_HEADERS, "Content-Type"), expose_headers="*" - ) - }, + app = web.Application( + middlewares=[cors(allow_headers=(*BLACK_HEADERS, "Content-Type"))] ) - + executor = ProcessPoolExecutor() + app.add_routes([web.post("/", partial(handle, executor=executor))]) return app @@ -119,6 +113,10 @@ async def handle(request: web.Request, executor: Executor) -> web.Response: skip_magic_trailing_comma = bool( request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False) ) + skip_source_first_line = bool( + request.headers.get(SKIP_SOURCE_FIRST_LINE, False) + ) + preview = bool(request.headers.get(PREVIEW, False)) fast = False if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast": fast = True @@ -126,19 +124,38 @@ async def handle(request: web.Request, executor: Executor) -> web.Response: target_versions=versions, is_pyi=pyi, line_length=line_length, + skip_source_first_line=skip_source_first_line, string_normalization=not skip_string_normalization, magic_trailing_comma=not skip_magic_trailing_comma, + 
+            preview=preview,
         )
         req_bytes = await request.content.read()
         charset = request.charset if request.charset is not None else "utf8"
         req_str = req_bytes.decode(charset)
         then = datetime.utcnow()
 
+        header = ""
+        if skip_source_first_line:
+            first_newline_position: int = req_str.find("\n") + 1
+            header = req_str[:first_newline_position]
+            req_str = req_str[first_newline_position:]
+
         loop = asyncio.get_event_loop()
         formatted_str = await loop.run_in_executor(
             executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)
         )
+
+        # Preserve CRLF line endings
+        if req_str[req_str.find("\n") - 1] == "\r":
+            formatted_str = formatted_str.replace("\n", "\r\n")
+            # If, after swapping line endings, nothing changed, then say so
+            if formatted_str == req_str:
+                raise black.NothingChanged
+
+        # Put the source first line back
+        req_str = header + req_str
+        formatted_str = header + formatted_str
+
         # Only output the diff in the HTTP response
         only_diff = bool(request.headers.get(DIFF_HEADER, False))
         if only_diff:
@@ -185,10 +202,8 @@ def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersi
                 raise InvalidVariantHeader("major version must be 2 or 3")
             if len(rest) > 0:
                 minor = int(rest[0])
-                if major == 2 and minor != 7:
-                    raise InvalidVariantHeader(
-                        "minor version must be 7 for Python 2"
-                    )
+                if major == 2:
+                    raise InvalidVariantHeader("Python 2 is not supported")
             else:
                 # Default to lowest supported minor version.
                 minor = 7 if major == 2 else 3
@@ -197,11 +212,12 @@ def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersi
                 raise InvalidVariantHeader(f"3.{minor} is not supported")
             versions.add(black.TargetVersion[version_str])
         except (KeyError, ValueError):
-            raise InvalidVariantHeader("expected e.g. '3.7', 'py3.5'")
+            raise InvalidVariantHeader("expected e.g. '3.7', 'py3.5'") from None
     return False, versions
 
 
 def patched_main() -> None:
+    maybe_install_uvloop()
     freeze_support()
     black.patch_click()
     main()
diff --git a/src/blackd/__main__.py b/src/blackd/__main__.py
new file mode 100644
index 0000000..b5a4b13
--- /dev/null
+++ b/src/blackd/__main__.py
@@ -0,0 +1,3 @@
+import blackd
+
+blackd.patched_main()
diff --git a/src/blackd/middlewares.py b/src/blackd/middlewares.py
new file mode 100644
index 0000000..370e0ae
--- /dev/null
+++ b/src/blackd/middlewares.py
@@ -0,0 +1,45 @@
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Iterable, TypeVar
+
+from aiohttp.web_request import Request
+from aiohttp.web_response import StreamResponse
+
+if TYPE_CHECKING:
+    F = TypeVar("F", bound=Callable[..., Any])
+    middleware: Callable[[F], F]
+else:
+    try:
+        from aiohttp.web_middlewares import middleware
+    except ImportError:
+        # @middleware is deprecated and its behaviour is the default since aiohttp 4.0
+        # so if it doesn't exist anymore, define a no-op for forward compatibility.
+        middleware = lambda x: x  # noqa: E731
+
+Handler = Callable[[Request], Awaitable[StreamResponse]]
+Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]]
+
+
+def cors(allow_headers: Iterable[str]) -> Middleware:
+    @middleware
+    async def impl(request: Request, handler: Handler) -> StreamResponse:
+        is_options = request.method == "OPTIONS"
+        is_preflight = is_options and "Access-Control-Request-Method" in request.headers
+        if is_preflight:
+            resp = StreamResponse()
+        else:
+            resp = await handler(request)
+
+        origin = request.headers.get("Origin")
+        if not origin:
+            return resp
+
+        resp.headers["Access-Control-Allow-Origin"] = "*"
+        resp.headers["Access-Control-Expose-Headers"] = "*"
+        if is_options:
+            resp.headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers)
+            resp.headers["Access-Control-Allow-Methods"] = ", ".join(
+                ("OPTIONS", "POST")
+            )
+
+        return resp
+
+    return impl
diff --git a/src/blib2to3/Grammar.txt b/src/blib2to3/Grammar.txt
index 69b9af9..ac7ad76 100644
--- a/src/blib2to3/Grammar.txt
+++ b/src/blib2to3/Grammar.txt
@@ -24,7 +24,7 @@ parameters: '(' [typedargslist] ')'
 # arguments = argument (',' argument)*
 # argument = tfpdef ['=' test]
 # kwargs = '**' tname [',']
-# args = '*' [tname]
+# args = '*' [tname_star]
 # kwonly_kwargs = (',' argument)* [',' [kwargs]]
 # args_kwonly_kwargs = args kwonly_kwargs | kwargs
 # poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]]
@@ -34,14 +34,15 @@ parameters: '(' [typedargslist] ')'
 # It needs to be fully expanded to allow our LL(1) parser to work on it.
 
 typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [
-                   ',' [((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])*
+                   ',' [((tfpdef ['=' test] ',')* ('*' [tname_star] (',' tname ['=' test])*
                             [',' ['**' tname [',']]] | '**' tname [','])
                          | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])]
-               ] | ((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])*
+               ] | ((tfpdef ['=' test] ',')* ('*' [tname_star] (',' tname ['=' test])*
                        [',' ['**' tname [',']]] | '**' tname [','])
                     | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
 
 tname: NAME [':' test]
+tname_star: NAME [':' (test|star_expr)]
 tfpdef: tname | '(' tfplist ')'
 tfplist: tfpdef (',' tfpdef)* [',']
 
@@ -105,21 +106,20 @@ global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
 exec_stmt: 'exec' expr ['in' test [',' test]]
 
 assert_stmt: 'assert' test [',' test]
 
-compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt | match_stmt
 async_stmt: ASYNC (funcdef | with_stmt | for_stmt)
 if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite]
 while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite]
-for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist_star_expr ':' suite ['else' ':' suite]
 try_stmt: ('try' ':' suite
            ((except_clause ':' suite)+ ['else' ':' suite]
             ['finally' ':' suite] | 'finally' ':' suite))
-with_stmt: 'with' with_item (',' with_item)* ':' suite
-with_item: test ['as' expr]
-with_var: 'as' expr
+with_stmt: 'with' asexpr_test (',' asexpr_test)* ':' suite
+
 # NB compile.c makes sure that the default except clause is last
-except_clause: 'except' [test [(',' | 'as') test]]
+except_clause: 'except' ['*'] [test [(',' | 'as') test]]
 suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
 
 # Backward compatibility cruft to support:
@@ -131,7 +131,15 @@ testlist_safe: old_test [(',' old_test)+ [',']]
 old_test: or_test | old_lambdef
 old_lambdef: 'lambda' [varargslist] ':' old_test
 
-namedexpr_test: test [':=' test]
+namedexpr_test: asexpr_test [':=' asexpr_test]
+
+# This is actually not a real rule. Since the parser is very limited in
+# terms of its strategy around match/case rules, we are inserting a
+# virtual case (<expr> as <pattern>) as a valid expression. Unless a
+# better approach is found, the only side effect of this seems to be
+# allowing more code to be parsed (code which would then fail in
+# ast.parse()).
+asexpr_test: test ['as' test]
+
 test: or_test ['if' or_test 'else' test] | lambdef
 or_test: and_test ('or' and_test)*
 and_test: not_test ('and' not_test)*
@@ -156,15 +164,15 @@ listmaker: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star
 testlist_gexp: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] )
 lambdef: 'lambda' [varargslist] ':' test
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
-subscriptlist: subscript (',' subscript)* [',']
-subscript: test | [test] ':' [test] [sliceop]
+subscriptlist: (subscript|star_expr) (',' (subscript|star_expr))* [',']
+subscript: test [':=' test] | [test] ':' [test] [sliceop]
 sliceop: ':' [test]
 exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
 testlist: test (',' test)* [',']
-dictsetmaker: ( ((test ':' test | '**' expr)
-                 (comp_for | (',' (test ':' test | '**' expr))* [','])) |
-                ((test | star_expr)
-                 (comp_for | (',' (test | star_expr))* [','])) )
+dictsetmaker: ( ((test ':' asexpr_test | '**' expr)
+                 (comp_for | (',' (test ':' asexpr_test | '**' expr))* [','])) |
+                ((test [':=' test] | star_expr)
+                 (comp_for | (',' (test [':=' test] | star_expr))* [','])) )
 
 classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
 
@@ -179,7 +187,8 @@ arglist: argument (',' argument)* [',']
 # that precede iterable unpackings are blocked; etc.
 argument: ( test [comp_for] |
             test ':=' test |
-            test '=' test |
+            test 'as' test |
+            test '=' asexpr_test |
            '**' test |
            '*' test )
@@ -213,3 +222,31 @@ encoding_decl: NAME
 
 yield_expr: 'yield' [yield_arg]
 yield_arg: 'from' test | testlist_star_expr
+
+
+# 3.10 match statement definition
+
+# PS: normally the grammar is much, much more restricted, but for now,
+# rather than bothering to encode the exact same DSL in an LL(1) parser,
+# we will just accept an expression and let the ast.parse() step of safe
+# mode reject invalid grammar.
+
+# The reason it is more restricted is that patterns are a sort of DSL
+# (more advanced than our LHS on assignments, but still a very limited
+# Python subset). They are not really expressions, but who cares. If we
+# can parse them, that is enough to reformat them.
+
+match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
+
+# This is more permissive than the actual version. For example, it
+# accepts `match *something:`, even though single-item starred expressions
+# are forbidden.
+subject_expr: (namedexpr_test|star_expr) (',' (namedexpr_test|star_expr))* [',']
+
+# cases
+case_block: "case" patterns [guard] ':' suite
+guard: 'if' namedexpr_test
+patterns: pattern (',' pattern)* [',']
+pattern: (expr|star_expr) ['as' expr]
diff --git a/src/blib2to3/README b/src/blib2to3/README
index a43f15c..0d3c607 100644
--- a/src/blib2to3/README
+++ b/src/blib2to3/README
@@ -13,4 +13,11 @@ Reasons for forking:
 - ability to Cythonize
 
 Change Log:
-- Changes default logger used by Driver
\ No newline at end of file
+- Changes default logger used by Driver
+- Backported the following upstream parser changes:
+  - "bpo-42381: Allow walrus in set literals and set comprehensions (GH-23332)"
+    https://github.com/python/cpython/commit/cae60187cf7a7b26281d012e1952fafe4e2e97e9
+  - "bpo-42316: Allow unparenthesized walrus operator in indexes (GH-23317)"
+    https://github.com/python/cpython/commit/b0aba1fcdc3da952698d99aec2334faa79a8b68c
+- Tweaks to help mypyc compile faster code (including inlining type information,
+  "Final-ing", etc.)
diff --git a/src/blib2to3/pgen2/conv.py b/src/blib2to3/pgen2/conv.py
index 7816521..fa9825e 100644
--- a/src/blib2to3/pgen2/conv.py
+++ b/src/blib2to3/pgen2/conv.py
@@ -29,7 +29,7 @@
 """
 
 # Python imports
-import regex as re
+import re
 
 # Local imports
 from pgen2 import grammar, token
diff --git a/src/blib2to3/pgen2/driver.py b/src/blib2to3/pgen2/driver.py
index af1dc6b..daf271d 100644
--- a/src/blib2to3/pgen2/driver.py
+++ b/src/blib2to3/pgen2/driver.py
@@ -23,48 +23,121 @@
 import sys
 from typing import (
     Any,
+    cast,
     IO,
     Iterable,
     List,
     Optional,
     Text,
+    Iterator,
     Tuple,
+    TypeVar,
+    Generic,
     Union,
 )
+from contextlib import contextmanager
+from dataclasses import dataclass, field
 
 # Pgen imports
 from . import grammar, parse, token, tokenize, pgen
 from logging import Logger
-from blib2to3.pytree import _Convert, NL
+from blib2to3.pytree import NL
 from blib2to3.pgen2.grammar import Grammar
+from blib2to3.pgen2.tokenize import GoodTokenInfo
 
 Path = Union[str, "os.PathLike[str]"]
 
 
+@dataclass
+class ReleaseRange:
+    start: int
+    end: Optional[int] = None
+    tokens: List[Any] = field(default_factory=list)
+
+    def lock(self) -> None:
+        total_eaten = len(self.tokens)
+        self.end = self.start + total_eaten
+
+
+class TokenProxy:
+    def __init__(self, generator: Any) -> None:
+        self._tokens = generator
+        self._counter = 0
+        self._release_ranges: List[ReleaseRange] = []
+
+    @contextmanager
+    def release(self) -> Iterator["TokenProxy"]:
+        release_range = ReleaseRange(self._counter)
+        self._release_ranges.append(release_range)
+        try:
+            yield self
+        finally:
+            # Lock the last release range to the final position that
+            # has been eaten.
+            release_range.lock()
+
+    def eat(self, point: int) -> Any:
+        eaten_tokens = self._release_ranges[-1].tokens
+        if point < len(eaten_tokens):
+            return eaten_tokens[point]
+        else:
+            while point >= len(eaten_tokens):
+                token = next(self._tokens)
+                eaten_tokens.append(token)
+            return token
+
+    def __iter__(self) -> "TokenProxy":
+        return self
+
+    def __next__(self) -> Any:
+        # If the current position has already been looked up, replay the
+        # cached token; otherwise just pull the next token from the
+        # underlying producer.
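+        # (A position can fall inside a locked range when the parser
+        # re-reads tokens that were eaten earlier during speculative
+        # parsing.)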
+        for release_range in self._release_ranges:
+            assert release_range.end is not None
+
+            start, end = release_range.start, release_range.end
+            if start <= self._counter < end:
+                token = release_range.tokens[self._counter - start]
+                break
+        else:
+            token = next(self._tokens)
+        self._counter += 1
+        return token
+
+    def can_advance(self, to: int) -> bool:
+        # Try to eat, fail if it can't. The eat operation is cached, so there
+        # won't be any additional cost of eating here.
+        try:
+            self.eat(to)
+        except StopIteration:
+            return False
+        else:
+            return True
+
+
 class Driver(object):
-    def __init__(
-        self,
-        grammar: Grammar,
-        convert: Optional[_Convert] = None,
-        logger: Optional[Logger] = None,
-    ) -> None:
+    def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
         self.grammar = grammar
         if logger is None:
             logger = logging.getLogger(__name__)
         self.logger = logger
-        self.convert = convert
 
-    def parse_tokens(self, tokens: Iterable[Any], debug: bool = False) -> NL:
+    def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) -> NL:
         """Parse a series of tokens and return the syntax tree."""
         # XXX Move the prefix computation into a wrapper around tokenize.
-        p = parse.Parser(self.grammar, self.convert)
-        p.setup()
+        proxy = TokenProxy(tokens)
+
+        p = parse.Parser(self.grammar)
+        p.setup(proxy=proxy)
+
         lineno = 1
         column = 0
-        indent_columns = []
+        indent_columns: List[int] = []
         type = value = start = end = line_text = None
         prefix = ""
-        for quintuple in tokens:
+
+        for quintuple in proxy:
             type, value, start, end, line_text = quintuple
             if start != (lineno, column):
                 assert (lineno, column) <= start, ((lineno, column), start)
@@ -86,6 +159,7 @@ def parse_tokens(self, tokens: Iterable[Any], debug: bool = False) -> NL:
             if type == token.OP:
                 type = grammar.opmap[value]
             if debug:
+                assert type is not None
                 self.logger.debug(
                     "%s %r (prefix=%r)", token.tok_name[type], value, prefix
                 )
@@ -97,7 +171,7 @@ def parse_tokens(self, tokens: Iterable[Any], debug: bool = False) -> NL:
             elif type == token.DEDENT:
                 _indent_col = indent_columns.pop()
                 prefix, _prefix = self._partially_consume_prefix(prefix, _indent_col)
-            if p.addtoken(type, value, (prefix, start)):
+            if p.addtoken(cast(int, type), value, (prefix, start)):
                 if debug:
                     self.logger.debug("Stop.")
                 break
@@ -189,14 +263,13 @@ def load_grammar(
         logger = logging.getLogger(__name__)
     gp = _generate_pickle_name(gt) if gp is None else gp
     if force or not _newer(gp, gt):
-        logger.info("Generating grammar tables from %s", gt)
         g: grammar.Grammar = pgen.generate_grammar(gt)
         if save:
-            logger.info("Writing grammar tables to %s", gp)
             try:
                 g.dump(gp)
-            except OSError as e:
-                logger.info("Writing failed: %s", e)
+            except OSError:
+                # Ignore error, caching is not vital.
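+                # (If the dump fails, the pickle is simply regenerated
+                # from the grammar text file on the next run.)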
+                pass
     else:
         g = grammar.Grammar()
         g.load(gp)
diff --git a/src/blib2to3/pgen2/grammar.py b/src/blib2to3/pgen2/grammar.py
index 2882cda..337a64f 100644
--- a/src/blib2to3/pgen2/grammar.py
+++ b/src/blib2to3/pgen2/grammar.py
@@ -89,8 +89,10 @@ def __init__(self) -> None:
         self.dfas: Dict[int, DFAS] = {}
         self.labels: List[Label] = [(0, "EMPTY")]
         self.keywords: Dict[str, int] = {}
+        self.soft_keywords: Dict[str, int] = {}
         self.tokens: Dict[int, int] = {}
         self.symbol2label: Dict[str, int] = {}
+        self.version: Tuple[int, int] = (0, 0)
         self.start = 256
         # Python 3.7+ parses async as a keyword, not an identifier
         self.async_keywords = False
@@ -136,6 +138,7 @@ def copy(self: _P) -> _P:
             "number2symbol",
             "dfas",
             "keywords",
+            "soft_keywords",
             "tokens",
             "symbol2label",
         ):
@@ -143,6 +146,7 @@ def copy(self: _P) -> _P:
         new.labels = self.labels[:]
         new.states = self.states[:]
         new.start = self.start
+        new.version = self.version
         new.async_keywords = self.async_keywords
         return new
diff --git a/src/blib2to3/pgen2/parse.py b/src/blib2to3/pgen2/parse.py
index 47c8f02..d6deaac 100644
--- a/src/blib2to3/pgen2/parse.py
+++ b/src/blib2to3/pgen2/parse.py
@@ -9,21 +9,30 @@
 how this parsing engine works.
 
 """
+import copy
+from contextlib import contextmanager
 
 # Local imports
-from . import token
+from . import grammar, token, tokenize
 from typing import (
+    cast,
+    Any,
     Optional,
     Text,
     Union,
     Tuple,
     Dict,
     List,
+    Iterator,
     Callable,
     Set,
+    TYPE_CHECKING,
 )
 from blib2to3.pgen2.grammar import Grammar
-from blib2to3.pytree import NL, Context, RawNode, Leaf, Node
+from blib2to3.pytree import convert, NL, Context, RawNode, Leaf, Node
+
+if TYPE_CHECKING:
+    from blib2to3.driver import TokenProxy
 
 Results = Dict[Text, NL]
@@ -37,6 +46,87 @@ def lam_sub(grammar: Grammar, node: RawNode) -> NL:
     return Node(type=node[0], children=node[3], context=node[2])
 
 
+# A placeholder node, used when parser is backtracking.
+DUMMY_NODE = (-1, None, None, None)
+
+
+def stack_copy(
+    stack: List[Tuple[DFAS, int, RawNode]]
+) -> List[Tuple[DFAS, int, RawNode]]:
+    """Nodeless stack copy."""
+    return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]
+
+
+class Recorder:
+    def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None:
+        self.parser = parser
+        self._ilabels = ilabels
+        self.context = context  # does not really matter
+
+        self._dead_ilabels: Set[int] = set()
+        self._start_point = self.parser.stack
+        self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}
+
+    @property
+    def ilabels(self) -> Set[int]:
+        return self._dead_ilabels.symmetric_difference(self._ilabels)
+
+    @contextmanager
+    def switch_to(self, ilabel: int) -> Iterator[None]:
+        with self.backtrack():
+            self.parser.stack = self._points[ilabel]
+            try:
+                yield
+            except ParseError:
+                self._dead_ilabels.add(ilabel)
+            finally:
+                self.parser.stack = self._start_point
+
+    @contextmanager
+    def backtrack(self) -> Iterator[None]:
+        """
+        Use the node-level invariant operations for basic parsing
+        (push/pop/shift). These still operate on the stack, but they won't
+        create any new nodes or modify the contents of any existing nodes.
+
+        This saves us a ton of time when we are backtracking, since we
+        want to restore the initial state as quickly as possible, which
+        can only be done with as few mutations as possible.
+ """ + is_backtracking = self.parser.is_backtracking + try: + self.parser.is_backtracking = True + yield + finally: + self.parser.is_backtracking = is_backtracking + + def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None: + func: Callable[..., Any] + if raw: + func = self.parser._addtoken + else: + func = self.parser.addtoken + + for ilabel in self.ilabels: + with self.switch_to(ilabel): + args = [tok_type, tok_val, self.context] + if raw: + args.insert(0, ilabel) + func(*args) + + def determine_route(self, value: Optional[Text] = None, force: bool = False) -> Optional[int]: + alive_ilabels = self.ilabels + if len(alive_ilabels) == 0: + *_, most_successful_ilabel = self._dead_ilabels + raise ParseError("bad input", most_successful_ilabel, value, self.context) + + ilabel, *rest = alive_ilabels + if force or not rest: + return ilabel + else: + return None + + class ParseError(Exception): """Exception to signal the parser is stuck.""" @@ -100,6 +190,11 @@ def __init__(self, grammar: Grammar, convert: Optional[Convert] = None) -> None: to be converted. The syntax tree is converted from the bottom up. + **post-note: the convert argument is ignored since for Black's + usage, convert will always be blib2to3.pytree.convert. Allowing + this to be dynamic hurts mypyc's ability to use early binding. + These docs are left for historical and informational value. + A concrete syntax tree node is a (type, value, context, nodes) tuple, where type is the node type (a token or symbol number), value is None for symbols and a string for tokens, context is @@ -112,9 +207,11 @@ def __init__(self, grammar: Grammar, convert: Optional[Convert] = None) -> None: """ self.grammar = grammar + # See note in docstring above. TL;DR this is ignored. self.convert = convert or lam_sub + self.is_backtracking = False - def setup(self, start: Optional[int] = None) -> None: + def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None: """Prepare for parsing. This *must* be called before starting to parse. @@ -137,11 +234,57 @@ def setup(self, start: Optional[int] = None) -> None: self.stack: List[Tuple[DFAS, int, RawNode]] = [stackentry] self.rootnode: Optional[NL] = None self.used_names: Set[str] = set() + self.proxy = proxy - def addtoken(self, type: int, value: Optional[Text], context: Context) -> bool: + def addtoken(self, type: int, value: Text, context: Context) -> bool: """Add a token; return True iff this is the end of the program.""" # Map from token to label - ilabel = self.classify(type, value, context) + ilabels = self.classify(type, value, context) + assert len(ilabels) >= 1 + + # If we have only one state to advance, we'll directly + # take it as is. + if len(ilabels) == 1: + [ilabel] = ilabels + return self._addtoken(ilabel, type, value, context) + + # If there are multiple states which we can advance (only + # happen under soft-keywords), then we will try all of them + # in parallel and as soon as one state can reach further than + # the rest, we'll choose that one. This is a pretty hacky + # and hopefully temporary algorithm. 
+ # + # For a more detailed explanation, check out this post: + # https://tree.science/what-the-backtracking.html + + with self.proxy.release() as proxy: + counter, force = 0, False + recorder = Recorder(self, ilabels, context) + recorder.add_token(type, value, raw=True) + + next_token_value = value + while recorder.determine_route(next_token_value) is None: + if not proxy.can_advance(counter): + force = True + break + + next_token_type, next_token_value, *_ = proxy.eat(counter) + if next_token_type in (tokenize.COMMENT, tokenize.NL): + counter += 1 + continue + + if next_token_type == tokenize.OP: + next_token_type = grammar.opmap[next_token_value] + + recorder.add_token(next_token_type, next_token_value) + counter += 1 + + ilabel = cast(int, recorder.determine_route(next_token_value, force=force)) + assert ilabel is not None + + return self._addtoken(ilabel, type, value, context) + + def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool: # Loop until the token is shifted; may raise exceptions while True: dfa, state, node = self.stack[-1] @@ -149,10 +292,18 @@ def addtoken(self, type: int, value: Optional[Text], context: Context) -> bool: arcs = states[state] # Look for a state with this label for i, newstate in arcs: - t, v = self.grammar.labels[i] - if ilabel == i: + t = self.grammar.labels[i][0] + if t >= 256: + # See if it's a symbol and if we're in its first set + itsdfa = self.grammar.dfas[t] + itsstates, itsfirst = itsdfa + if ilabel in itsfirst: + # Push a symbol + self.push(t, itsdfa, newstate, context) + break # To continue the outer while loop + + elif ilabel == i: # Look it up in the list of labels - assert t < 256 # Shift a token; we're done with it self.shift(type, value, newstate, context) # Pop while we are in an accept-only state @@ -166,14 +317,7 @@ def addtoken(self, type: int, value: Optional[Text], context: Context) -> bool: states, first = dfa # Done with this token return False - elif t >= 256: - # See if it's a symbol and if we're in its first set - itsdfa = self.grammar.dfas[t] - itsstates, itsfirst = itsdfa - if ilabel in itsfirst: - # Push a symbol - self.push(t, self.grammar.dfas[t], newstate, context) - break # To continue the outer while loop + else: if (0, state) in arcs: # An accepting state, pop it and try something else @@ -185,47 +329,61 @@ def addtoken(self, type: int, value: Optional[Text], context: Context) -> bool: # No success finding a transition raise ParseError("bad input", type, value, context) - def classify(self, type: int, value: Optional[Text], context: Context) -> int: - """Turn a token into a label. (Internal)""" + def classify(self, type: int, value: Text, context: Context) -> List[int]: + """Turn a token into a label. 
(Internal) + + Depending on whether the value is a soft-keyword or not, + this function may return multiple labels to choose from.""" if type == token.NAME: # Keep a listing of all used names - assert value is not None self.used_names.add(value) # Check for reserved words - ilabel = self.grammar.keywords.get(value) - if ilabel is not None: - return ilabel + if value in self.grammar.keywords: + return [self.grammar.keywords[value]] + elif value in self.grammar.soft_keywords: + assert type in self.grammar.tokens + return [ + self.grammar.soft_keywords[value], + self.grammar.tokens[type], + ] + ilabel = self.grammar.tokens.get(type) if ilabel is None: raise ParseError("bad token", type, value, context) - return ilabel + return [ilabel] - def shift( - self, type: int, value: Optional[Text], newstate: int, context: Context - ) -> None: + def shift(self, type: int, value: Text, newstate: int, context: Context) -> None: """Shift a token. (Internal)""" - dfa, state, node = self.stack[-1] - assert value is not None - assert context is not None - rawnode: RawNode = (type, value, context, None) - newnode = self.convert(self.grammar, rawnode) - if newnode is not None: + if self.is_backtracking: + dfa, state, _ = self.stack[-1] + self.stack[-1] = (dfa, newstate, DUMMY_NODE) + else: + dfa, state, node = self.stack[-1] + rawnode: RawNode = (type, value, context, None) + newnode = convert(self.grammar, rawnode) assert node[-1] is not None node[-1].append(newnode) - self.stack[-1] = (dfa, newstate, node) + self.stack[-1] = (dfa, newstate, node) def push(self, type: int, newdfa: DFAS, newstate: int, context: Context) -> None: """Push a nonterminal. (Internal)""" - dfa, state, node = self.stack[-1] - newnode: RawNode = (type, None, context, []) - self.stack[-1] = (dfa, newstate, node) - self.stack.append((newdfa, 0, newnode)) + if self.is_backtracking: + dfa, state, _ = self.stack[-1] + self.stack[-1] = (dfa, newstate, DUMMY_NODE) + self.stack.append((newdfa, 0, DUMMY_NODE)) + else: + dfa, state, node = self.stack[-1] + newnode: RawNode = (type, None, context, []) + self.stack[-1] = (dfa, newstate, node) + self.stack.append((newdfa, 0, newnode)) def pop(self) -> None: """Pop a nonterminal. 
(Internal)""" - popdfa, popstate, popnode = self.stack.pop() - newnode = self.convert(self.grammar, popnode) - if newnode is not None: + if self.is_backtracking: + self.stack.pop() + else: + popdfa, popstate, popnode = self.stack.pop() + newnode = convert(self.grammar, popnode) if self.stack: dfa, state, node = self.stack[-1] assert node[-1] is not None diff --git a/src/blib2to3/pgen2/pgen.py b/src/blib2to3/pgen2/pgen.py index 564ebbd..631682a 100644 --- a/src/blib2to3/pgen2/pgen.py +++ b/src/blib2to3/pgen2/pgen.py @@ -115,12 +115,17 @@ def make_label(self, c: PgenGrammar, label: Text) -> int: assert label[0] in ('"', "'"), label value = eval(label) if value[0].isalpha(): + if label[0] == '"': + keywords = c.soft_keywords + else: + keywords = c.keywords + # A keyword - if value in c.keywords: - return c.keywords[value] + if value in keywords: + return keywords[value] else: c.labels.append((token.NAME, value)) - c.keywords[value] = ilabel + keywords[value] = ilabel return ilabel else: # An operator (any non-numeric token) diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py index bad79b2..257dbef 100644 --- a/src/blib2to3/pgen2/tokenize.py +++ b/src/blib2to3/pgen2/tokenize.py @@ -27,6 +27,7 @@ function to which the 5 fields described above are passed as 5 arguments, each time a new token is found.""" +import sys from typing import ( Callable, Iterable, @@ -39,13 +40,19 @@ Union, cast, ) + +if sys.version_info >= (3, 8): + from typing import Final +else: + from typing_extensions import Final + from blib2to3.pgen2.token import * from blib2to3.pgen2.grammar import Grammar __author__ = "Ka-Ping Yee " __credits__ = "GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro" -import regex as re +import re from codecs import BOM_UTF8, lookup from blib2to3.pgen2.token import * @@ -79,7 +86,7 @@ def _combinations(*l): Comment = r"#[^\r\n]*" Ignore = Whitespace + any(r"\\\r?\n" + Whitespace) + maybe(Comment) Name = ( # this is invalid but it's fine because Name comes after Number in all groups - r"\w+" + r"[^\s#\(\)\[\]\{\}+\-*/!@$%^&=|;:'\",\.<>/?`~\\]+" ) Binnumber = r"0[bB]_?[01]+(?:_[01]+)*" @@ -139,7 +146,7 @@ def _combinations(*l): PseudoExtras = group(r"\\\r?\n", Comment, Triple) PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) -pseudoprog = re.compile(PseudoToken, re.UNICODE) +pseudoprog: Final = re.compile(PseudoToken, re.UNICODE) single3prog = re.compile(Single3) double3prog = re.compile(Double3) @@ -149,7 +156,7 @@ def _combinations(*l): | {"u", "U", "ur", "uR", "Ur", "UR"} ) -endprogs = { +endprogs: Final = { "'": re.compile(Single), '"': re.compile(Double), "'''": single3prog, @@ -159,12 +166,12 @@ def _combinations(*l): **{prefix: None for prefix in _strprefixes}, } -triple_quoted = ( +triple_quoted: Final = ( {"'''", '"""'} | {f"{prefix}'''" for prefix in _strprefixes} | {f'{prefix}"""' for prefix in _strprefixes} ) -single_quoted = ( +single_quoted: Final = ( {"'", '"'} | {f"{prefix}'" for prefix in _strprefixes} | {f'{prefix}"' for prefix in _strprefixes} @@ -286,7 +293,7 @@ def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None cookie_re = re.compile(r"^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)", re.ASCII) -blank_re = re.compile(br"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII) +blank_re = re.compile(rb"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII) def _get_normal_name(orig_enc: str) -> str: @@ -418,7 +425,7 @@ def generate_tokens( logical line; continuation lines are included. 
""" lnum = parenlev = continued = 0 - numchars = "0123456789" + numchars: Final = "0123456789" contstr, needcont = "", 0 contline: Optional[str] = None indents = [0] @@ -427,7 +434,7 @@ def generate_tokens( # `await` as keywords. async_keywords = False if grammar is None else grammar.async_keywords # 'stashed' and 'async_*' are used for async/await parsing - stashed = None + stashed: Optional[GoodTokenInfo] = None async_def = False async_def_indent = 0 async_def_nl = False @@ -440,7 +447,7 @@ def generate_tokens( line = readline() except StopIteration: line = "" - lnum = lnum + 1 + lnum += 1 pos, max = 0, len(line) if contstr: # continued string @@ -481,14 +488,14 @@ def generate_tokens( column = 0 while pos < max: # measure leading whitespace if line[pos] == " ": - column = column + 1 + column += 1 elif line[pos] == "\t": column = (column // tabsize + 1) * tabsize elif line[pos] == "\f": column = 0 else: break - pos = pos + 1 + pos += 1 if pos == max: break @@ -507,7 +514,7 @@ def generate_tokens( COMMENT, comment_token, (lnum, pos), - (lnum, pos + len(comment_token)), + (lnum, nl_pos), line, ) yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) @@ -652,16 +659,16 @@ def generate_tokens( continued = 1 else: if initial in "([{": - parenlev = parenlev + 1 + parenlev += 1 elif initial in ")]}": - parenlev = parenlev - 1 + parenlev -= 1 if stashed: yield stashed stashed = None yield (OP, token, spos, epos, line) else: yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos + 1), line) - pos = pos + 1 + pos += 1 if stashed: yield stashed diff --git a/src/blib2to3/pygram.py b/src/blib2to3/pygram.py index b8362b8..99012cd 100644 --- a/src/blib2to3/pygram.py +++ b/src/blib2to3/pygram.py @@ -39,12 +39,14 @@ class _python_symbols(Symbols): arglist: int argument: int arith_expr: int + asexpr_test: int assert_stmt: int async_funcdef: int async_stmt: int atom: int augassign: int break_stmt: int + case_block: int classdef: int comp_for: int comp_if: int @@ -74,6 +76,7 @@ class _python_symbols(Symbols): for_stmt: int funcdef: int global_stmt: int + guard: int if_stmt: int import_as_name: int import_as_names: int @@ -82,6 +85,7 @@ class _python_symbols(Symbols): import_stmt: int lambdef: int listmaker: int + match_stmt: int namedexpr_test: int not_test: int old_comp_for: int @@ -92,6 +96,8 @@ class _python_symbols(Symbols): or_test: int parameters: int pass_stmt: int + pattern: int + patterns: int power: int print_stmt: int raise_stmt: int @@ -101,6 +107,7 @@ class _python_symbols(Symbols): single_input: int sliceop: int small_stmt: int + subject_expr: int star_expr: int stmt: int subscript: int @@ -116,6 +123,7 @@ class _python_symbols(Symbols): tfpdef: int tfplist: int tname: int + tname_star: int trailer: int try_stmt: int typedargslist: int @@ -124,9 +132,7 @@ class _python_symbols(Symbols): vfplist: int vname: int while_stmt: int - with_item: int with_stmt: int - with_var: int xor_expr: int yield_arg: int yield_expr: int @@ -149,6 +155,7 @@ class _pattern_symbols(Symbols): python_grammar_no_print_statement_no_exec_statement_async_keywords: Grammar python_grammar_no_exec_statement: Grammar pattern_grammar: Grammar +python_grammar_soft_keywords: Grammar python_symbols: _python_symbols pattern_symbols: _pattern_symbols @@ -159,6 +166,7 @@ def initialize(cache_dir: Union[str, "os.PathLike[str]", None] = None) -> None: global python_grammar_no_print_statement global python_grammar_no_print_statement_no_exec_statement global python_grammar_no_print_statement_no_exec_statement_async_keywords + 
global python_grammar_soft_keywords global python_symbols global pattern_grammar global pattern_symbols @@ -171,6 +179,10 @@ def initialize(cache_dir: Union[str, "os.PathLike[str]", None] = None) -> None: # Python 2 python_grammar = driver.load_packaged_grammar("blib2to3", _GRAMMAR_FILE, cache_dir) + python_grammar.version = (2, 0) + + soft_keywords = python_grammar.soft_keywords.copy() + python_grammar.soft_keywords.clear() python_symbols = _python_symbols(python_grammar) @@ -182,6 +194,7 @@ def initialize(cache_dir: Union[str, "os.PathLike[str]", None] = None) -> None: python_grammar_no_print_statement_no_exec_statement = python_grammar.copy() del python_grammar_no_print_statement_no_exec_statement.keywords["print"] del python_grammar_no_print_statement_no_exec_statement.keywords["exec"] + python_grammar_no_print_statement_no_exec_statement.version = (3, 0) # Python 3.7+ python_grammar_no_print_statement_no_exec_statement_async_keywords = ( @@ -190,6 +203,14 @@ def initialize(cache_dir: Union[str, "os.PathLike[str]", None] = None) -> None: python_grammar_no_print_statement_no_exec_statement_async_keywords.async_keywords = ( True ) + python_grammar_no_print_statement_no_exec_statement_async_keywords.version = (3, 7) + + # Python 3.10+ + python_grammar_soft_keywords = ( + python_grammar_no_print_statement_no_exec_statement_async_keywords.copy() + ) + python_grammar_soft_keywords.soft_keywords = soft_keywords + python_grammar_soft_keywords.version = (3, 10) pattern_grammar = driver.load_packaged_grammar( "blib2to3", _PATTERN_GRAMMAR_FILE, cache_dir diff --git a/src/blib2to3/pytree.py b/src/blib2to3/pytree.py index 7843467..15a1420 100644 --- a/src/blib2to3/pytree.py +++ b/src/blib2to3/pytree.py @@ -10,11 +10,10 @@ There's also a pattern matching implementation here. """ -# mypy: allow-untyped-defs +# mypy: allow-untyped-defs, allow-incomplete-defs from typing import ( Any, - Callable, Dict, Iterator, List, @@ -52,7 +51,7 @@ def type_repr(type_num: int) -> Union[Text, int]: return _type_reprs.setdefault(type_num, type_num) -_P = TypeVar("_P") +_P = TypeVar("_P", bound="Base") NL = Union["Node", "Leaf"] Context = Tuple[Text, Tuple[int, int]] @@ -92,8 +91,6 @@ def __eq__(self, other: Any) -> bool: return NotImplemented return self._eq(other) - __hash__ = None # type: Any # For Py3 compatibility. - @property def prefix(self) -> Text: raise NotImplementedError @@ -109,6 +106,9 @@ def _eq(self: _P, other: _P) -> bool: """ raise NotImplementedError + def __deepcopy__(self: _P, memo: Any) -> _P: + return self.clone() + def clone(self: _P) -> _P: """ Return a cloned (deep) copy of self. 
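The `__deepcopy__` hunk above routes `copy.deepcopy` through the tree's own `clone()`. A minimal sketch of why that is convenient for lib2to3-style trees: `clone()` already copies exactly the fields that define a node and leaves the parent back-reference unset, so `deepcopy` inherits those semantics for free. `MiniNode` below is illustrative only, not Black's actual `Base`/`Node` API.

```python
import copy
from typing import Any, List, Optional


class MiniNode:
    """Toy stand-in for a pytree node."""

    def __init__(self, value: str, children: Optional[List["MiniNode"]] = None) -> None:
        self.value = value
        self.children = children or []
        # Back-reference into the enclosing tree; must NOT survive a copy.
        self.parent: Optional["MiniNode"] = None
        for child in self.children:
            child.parent = self

    def clone(self) -> "MiniNode":
        # Recursively copy value and children; the clone starts detached.
        return MiniNode(self.value, [child.clone() for child in self.children])

    def __deepcopy__(self, memo: Any) -> "MiniNode":
        # Delegate to clone() so copy.deepcopy() cannot drag parent
        # links (and thereby the rest of the tree) into the copy.
        return self.clone()


root = MiniNode("root", [MiniNode("leaf")])
dup = copy.deepcopy(root.children[0])
assert dup.value == "leaf" and dup.parent is None
```

Delegating also sidesteps `deepcopy`'s generic memo-driven recursion, which is presumably slower than the hand-written `clone()` chain.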
@@ -291,7 +291,7 @@ def __str__(self) -> Text: """ return "".join(map(str, self.children)) - def _eq(self, other) -> bool: + def _eq(self, other: Base) -> bool: """Compare two nodes for equality.""" return (self.type, self.children) == (other.type, other.children) @@ -326,7 +326,7 @@ def prefix(self) -> Text: return self.children[0].prefix @prefix.setter - def prefix(self, prefix) -> None: + def prefix(self, prefix: Text) -> None: if self.children: self.children[0].prefix = prefix @@ -386,7 +386,8 @@ class Leaf(Base): value: Text fixers_applied: List[Any] bracket_depth: int - opening_bracket: "Leaf" + # Changed later in brackets.py + opening_bracket: Optional["Leaf"] = None used_names: Optional[Set[Text]] _prefix = "" # Whitespace and comments preceding this token in the input lineno: int = 0 # Line where this token starts in the input @@ -399,6 +400,7 @@ def __init__( context: Optional[Context] = None, prefix: Optional[Text] = None, fixers_applied: List[Any] = [], + opening_bracket: Optional["Leaf"] = None, ) -> None: """ Initializer. @@ -416,6 +418,7 @@ def __init__( self._prefix = prefix self.fixers_applied: Optional[List[Any]] = fixers_applied[:] self.children = [] + self.opening_bracket = opening_bracket def __repr__(self) -> str: """Return a canonical string representation.""" @@ -434,9 +437,9 @@ def __str__(self) -> Text: This reproduces the input source exactly. """ - return self.prefix + str(self.value) + return self._prefix + str(self.value) - def _eq(self, other) -> bool: + def _eq(self, other: "Leaf") -> bool: """Compare two nodes for equality.""" return (self.type, self.value) == (other.type, other.value) @@ -469,7 +472,7 @@ def prefix(self) -> Text: return self._prefix @prefix.setter - def prefix(self, prefix) -> None: + def prefix(self, prefix: Text) -> None: self.changed() self._prefix = prefix @@ -615,7 +618,7 @@ def __init__( self.content = content self.name = name - def match(self, node: NL, results=None): + def match(self, node: NL, results=None) -> bool: """Override match() to insist on a leaf node.""" if not isinstance(node, Leaf): return False @@ -669,10 +672,13 @@ def __init__( newcontent = list(content) for i, item in enumerate(newcontent): assert isinstance(item, BasePattern), (i, item) - if isinstance(item, WildcardPattern): - self.wildcards = True + # I don't even think this code is used anywhere, but it does cause + # unreachable errors from mypy. This function's signature does look + # odd though *shrug*. + if isinstance(item, WildcardPattern): # type: ignore[unreachable] + self.wildcards = True # type: ignore[unreachable] self.type = type - self.content = newcontent + self.content = newcontent # TODO: this is unbound when content is None self.name = name def _submatch(self, node, results=None) -> bool: @@ -914,7 +920,7 @@ def _recursive_matches(self, nodes, count) -> Iterator[Tuple[int, _Results]]: class NegatedPattern(BasePattern): - def __init__(self, content: Optional[Any] = None) -> None: + def __init__(self, content: Optional[BasePattern] = None) -> None: """ Initializer. 
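The `prefix` hunks above tighten typing around the central lib2to3 invariant: every leaf owns the whitespace and comments that precede it, so concatenating `prefix + value` over all leaves reproduces the input byte-for-byte, which is exactly what `Leaf.__str__` and `Node.__str__` rely on. A toy model of that invariant, not the real `Leaf` API:

```python
from typing import List, NamedTuple


class ToyLeaf(NamedTuple):
    prefix: str  # whitespace and comments preceding the token
    value: str  # the token text itself


def to_source(leaves: List[ToyLeaf]) -> str:
    # Mirrors Leaf.__str__ above: prefix + value, concatenated in order.
    return "".join(leaf.prefix + leaf.value for leaf in leaves)


leaves = [
    ToyLeaf("", "x"),
    ToyLeaf(" ", "="),
    ToyLeaf(" ", "1"),
    ToyLeaf("  # comment\n", ""),  # trailing trivia rides on an end marker
]
assert to_source(leaves) == "x = 1  # comment\n"
```

This is why a formatter built on such a tree can move a statement around and its comments follow automatically: they live in some leaf's prefix.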
@@ -935,7 +941,7 @@ def match_seq(self, nodes, results=None) -> bool: # We only match an empty sequence of nodes in its entirety return len(nodes) == 0 - def generate_matches(self, nodes) -> Iterator[Tuple[int, _Results]]: + def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]: if self.content is None: # Return a match if there is an empty sequence if len(nodes) == 0: @@ -975,6 +981,3 @@ def generate_matches( r.update(r0) r.update(r1) yield c0 + c1, r - - -_Convert = Callable[[Grammar, RawNode], Any] diff --git a/test_requirements.txt b/test_requirements.txt index a1464e9..5bc494d 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -1,7 +1,6 @@ coverage >= 5.3 pre-commit pytest >= 6.1.1 -pytest-mock >= 3.3.1 -pytest-cases >= 2.3.0 -parameterized >= 0.7.4 +pytest-xdist >= 2.2.1 +pytest-cov >= 2.11.1 tox diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..6751726 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1 @@ +pytest_plugins = ["tests.optional"] diff --git a/tests/data/pep_572_do_not_remove_parens.py b/tests/data/fast/pep_572_do_not_remove_parens.py similarity index 100% rename from tests/data/pep_572_do_not_remove_parens.py rename to tests/data/fast/pep_572_do_not_remove_parens.py diff --git a/tests/data/function_trailing_comma.py b/tests/data/function_trailing_comma.py deleted file mode 100644 index d15459c..0000000 --- a/tests/data/function_trailing_comma.py +++ /dev/null @@ -1,88 +0,0 @@ -def f(a,): - d = {'key': 'value',} - tup = (1,) - -def f2(a,b,): - d = {'key': 'value', 'key2': 'value2',} - tup = (1,2,) - -def f(a:int=1,): - call(arg={'explode': 'this',}) - call2(arg=[1,2,3],) - x = { - "a": 1, - "b": 2, - }["a"] - if a == {"a": 1,"b": 2,"c": 3,"d": 4,"e": 5,"f": 6,"g": 7,"h": 8,}["a"]: - pass - -def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[ - "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" -]: - json = {"k": {"k2": {"k3": [1,]}}} - -# output - -def f( - a, -): - d = { - "key": "value", - } - tup = (1,) - - -def f2( - a, - b, -): - d = { - "key": "value", - "key2": "value2", - } - tup = ( - 1, - 2, - ) - - -def f( - a: int = 1, -): - call( - arg={ - "explode": "this", - } - ) - call2( - arg=[1, 2, 3], - ) - x = { - "a": 1, - "b": 2, - }["a"] - if a == { - "a": 1, - "b": 2, - "c": 3, - "d": 4, - "e": 5, - "f": 6, - "g": 7, - "h": 8, - }["a"]: - pass - - -def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[ - "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" -]: - json = { - "k": { - "k2": { - "k3": [ - 1, - ] - } - } - } \ No newline at end of file diff --git a/tests/data/include_exclude_tests/.gitignore b/tests/data/include_exclude_tests/.gitignore new file mode 100644 index 0000000..91f3456 --- /dev/null +++ b/tests/data/include_exclude_tests/.gitignore @@ -0,0 +1 @@ +dont_exclude/ diff --git a/tests/data/include_exclude_tests/pyproject.toml b/tests/data/include_exclude_tests/pyproject.toml new file mode 100644 index 0000000..9ba7ec2 --- /dev/null +++ b/tests/data/include_exclude_tests/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools>=41.0", "setuptools-scm", "wheel"] +build-backend = "setuptools.build_meta" diff --git a/tests/data/invalid_gitignore_tests/.gitignore b/tests/data/invalid_gitignore_tests/.gitignore new file mode 100644 index 0000000..cdf4cb4 --- /dev/null +++ b/tests/data/invalid_gitignore_tests/.gitignore @@ -0,0 +1 @@ +! 
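The `NegatedPattern` hunks above only tighten type annotations, but the class's behaviour is easy to misread: it matches *absence*. A hedged re-implementation of the semantics visible in the diff (`ToyLeafPattern` and `ToyNegatedPattern` are illustrative names, not Black's classes):

```python
from typing import Dict, Iterator, List, Optional, Tuple

Match = Tuple[int, Dict[str, object]]


class ToyLeafPattern:
    """Matches one node equal to `value`."""

    def __init__(self, value: str) -> None:
        self.value = value

    def generate_matches(self, nodes: List[str]) -> Iterator[Match]:
        if nodes and nodes[0] == self.value:
            yield 1, {}


class ToyNegatedPattern:
    """Mirrors NegatedPattern: we only match an empty sequence of nodes."""

    def __init__(self, content: Optional[ToyLeafPattern] = None) -> None:
        self.content = content

    def match_seq(self, nodes: List[str]) -> bool:
        return len(nodes) == 0

    def generate_matches(self, nodes: List[str]) -> Iterator[Match]:
        if self.content is None:
            if len(nodes) == 0:
                yield 0, {}  # bare negation: match only emptiness
        else:
            # Zero-width success only if the wrapped pattern does NOT
            # match at the start of `nodes`.
            for _ in self.content.generate_matches(nodes):
                return
            yield 0, {}


not_pass = ToyNegatedPattern(ToyLeafPattern("pass"))
assert list(not_pass.generate_matches(["return"])) == [(0, {})]
assert list(not_pass.generate_matches(["pass"])) == []
```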
diff --git a/tests/data/invalid_gitignore_tests/a.py b/tests/data/invalid_gitignore_tests/a.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/invalid_gitignore_tests/pyproject.toml b/tests/data/invalid_gitignore_tests/pyproject.toml new file mode 100644 index 0000000..3908e45 --- /dev/null +++ b/tests/data/invalid_gitignore_tests/pyproject.toml @@ -0,0 +1 @@ +# Empty configuration file; used in tests to avoid interference from Black's own config. diff --git a/tests/data/invalid_nested_gitignore_tests/a.py b/tests/data/invalid_nested_gitignore_tests/a.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/invalid_nested_gitignore_tests/a/.gitignore b/tests/data/invalid_nested_gitignore_tests/a/.gitignore new file mode 100644 index 0000000..cdf4cb4 --- /dev/null +++ b/tests/data/invalid_nested_gitignore_tests/a/.gitignore @@ -0,0 +1 @@ +! diff --git a/tests/data/invalid_nested_gitignore_tests/a/a.py b/tests/data/invalid_nested_gitignore_tests/a/a.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/invalid_nested_gitignore_tests/pyproject.toml b/tests/data/invalid_nested_gitignore_tests/pyproject.toml new file mode 100644 index 0000000..3908e45 --- /dev/null +++ b/tests/data/invalid_nested_gitignore_tests/pyproject.toml @@ -0,0 +1 @@ +# Empty configuration file; used in tests to avoid interference from Black's own config. diff --git a/tests/data/jupyter/non_python_notebook.ipynb b/tests/data/jupyter/non_python_notebook.ipynb new file mode 100644 index 0000000..da5cdd8 --- /dev/null +++ b/tests/data/jupyter/non_python_notebook.ipynb @@ -0,0 +1 @@ +{"metadata":{"kernelspec":{"name":"ir","display_name":"R","language":"R"},"language_info":{"name":"R","codemirror_mode":"r","pygments_lexer":"r","mimetype":"text/x-r-source","file_extension":".r","version":"4.0.5"}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"library(tidyverse) ","metadata":{"_uuid":"051d70d956493feee0c6d64651c6a088724dca2a","_execution_state":"idle"},"execution_count":null,"outputs":[]}]} \ No newline at end of file diff --git a/tests/data/jupyter/notebook_empty_metadata.ipynb b/tests/data/jupyter/notebook_empty_metadata.ipynb new file mode 100644 index 0000000..7dc1f80 --- /dev/null +++ b/tests/data/jupyter/notebook_empty_metadata.ipynb @@ -0,0 +1,27 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "print('foo')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/tests/data/jupyter/notebook_no_trailing_newline.ipynb b/tests/data/jupyter/notebook_no_trailing_newline.ipynb new file mode 100644 index 0000000..79f95be --- /dev/null +++ b/tests/data/jupyter/notebook_no_trailing_newline.ipynb @@ -0,0 +1,39 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "print('foo')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "e758f3098b5b55f4d87fe30bbdc1367f20f246b483f96267ee70e6c40cb185d8" + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit ('black': venv)", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "" + } + }, + "nbformat": 4, + 
"nbformat_minor": 4 +} \ No newline at end of file diff --git a/tests/data/jupyter/notebook_trailing_newline.ipynb b/tests/data/jupyter/notebook_trailing_newline.ipynb new file mode 100644 index 0000000..4f82869 --- /dev/null +++ b/tests/data/jupyter/notebook_trailing_newline.ipynb @@ -0,0 +1,39 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "print('foo')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "e758f3098b5b55f4d87fe30bbdc1367f20f246b483f96267ee70e6c40cb185d8" + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit ('black': venv)", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/tests/data/jupyter/notebook_which_cant_be_parsed.ipynb b/tests/data/jupyter/notebook_which_cant_be_parsed.ipynb new file mode 100644 index 0000000..257cc56 --- /dev/null +++ b/tests/data/jupyter/notebook_which_cant_be_parsed.ipynb @@ -0,0 +1 @@ +foo diff --git a/tests/data/jupyter/notebook_without_changes.ipynb b/tests/data/jupyter/notebook_without_changes.ipynb new file mode 100644 index 0000000..ac6c7e6 --- /dev/null +++ b/tests/data/jupyter/notebook_without_changes.ipynb @@ -0,0 +1,46 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "print(\"foo\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook should not be reformatted" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "e758f3098b5b55f4d87fe30bbdc1367f20f246b483f96267ee70e6c40cb185d8" + }, + "kernelspec": { + "display_name": "Python 3.8.10 64-bit ('black': venv)", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/tests/data/async_as_identifier.py b/tests/data/miscellaneous/async_as_identifier.py similarity index 100% rename from tests/data/async_as_identifier.py rename to tests/data/miscellaneous/async_as_identifier.py diff --git a/tests/data/blackd_diff.diff b/tests/data/miscellaneous/blackd_diff.diff similarity index 100% rename from tests/data/blackd_diff.diff rename to tests/data/miscellaneous/blackd_diff.diff diff --git a/tests/data/blackd_diff.py b/tests/data/miscellaneous/blackd_diff.py similarity index 100% rename from tests/data/blackd_diff.py rename to tests/data/miscellaneous/blackd_diff.py diff --git a/tests/data/debug_visitor.out b/tests/data/miscellaneous/debug_visitor.out similarity index 100% rename from tests/data/debug_visitor.out rename to tests/data/miscellaneous/debug_visitor.out diff --git a/tests/data/debug_visitor.py b/tests/data/miscellaneous/debug_visitor.py similarity index 100% rename from tests/data/debug_visitor.py rename to tests/data/miscellaneous/debug_visitor.py diff --git a/tests/data/decorators.py b/tests/data/miscellaneous/decorators.py similarity index 98% rename from tests/data/decorators.py rename to tests/data/miscellaneous/decorators.py index acfad51..a0f38ca 100644 --- a/tests/data/decorators.py +++ b/tests/data/miscellaneous/decorators.py @@ -13,6 +13,12 @@ def f(): ## +@decorator() 
+def f(): + ... + +## + @decorator(arg) def f(): ... diff --git a/tests/data/docstring_no_string_normalization.py b/tests/data/miscellaneous/docstring_no_string_normalization.py similarity index 100% rename from tests/data/docstring_no_string_normalization.py rename to tests/data/miscellaneous/docstring_no_string_normalization.py diff --git a/tests/data/miscellaneous/docstring_preview_no_string_normalization.py b/tests/data/miscellaneous/docstring_preview_no_string_normalization.py new file mode 100644 index 0000000..338cc01 --- /dev/null +++ b/tests/data/miscellaneous/docstring_preview_no_string_normalization.py @@ -0,0 +1,10 @@ +def do_not_touch_this_prefix(): + R"""There was a bug where docstring prefixes would be normalized even with -S.""" + + +def do_not_touch_this_prefix2(): + FR'There was a bug where docstring prefixes would be normalized even with -S.' + + +def do_not_touch_this_prefix3(): + u'''There was a bug where docstring prefixes would be normalized even with -S.''' diff --git a/tests/data/expression_skip_magic_trailing_comma.diff b/tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff similarity index 91% rename from tests/data/expression_skip_magic_trailing_comma.diff rename to tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff index 4a8a95c..eba3fd2 100644 --- a/tests/data/expression_skip_magic_trailing_comma.diff +++ b/tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff @@ -11,7 +11,17 @@ True False 1 -@@ -29,63 +29,84 @@ +@@ -21,99 +21,118 @@ + Name1 or (Name2 and Name3) or Name4 + Name1 or Name2 and Name3 or Name4 + v1 << 2 + 1 >> v2 + 1 % finished +-1 + v2 - v3 * 4 ^ 5 ** v6 / 7 // 8 +-((1 + v2) - (v3 * 4)) ^ (((5 ** v6) / 7) // 8) ++1 + v2 - v3 * 4 ^ 5**v6 / 7 // 8 ++((1 + v2) - (v3 * 4)) ^ (((5**v6) / 7) // 8) + not great ~great +value -1 @@ -19,7 +29,7 @@ (~int) and (not ((v1 ^ (123 + v2)) | True)) -+really ** -confusing ** ~operator ** -precedence -flags & ~ select.EPOLLIN and waiters.write_task is not None -++(really ** -(confusing ** ~(operator ** -precedence))) +++(really ** -(confusing ** ~(operator**-precedence))) +flags & ~select.EPOLLIN and waiters.write_task is not None lambda arg: None lambda a=True: a @@ -76,15 +86,19 @@ + *more, +] {i for i in (1, 2, 3)} - {(i ** 2) for i in (1, 2, 3)} +-{(i ** 2) for i in (1, 2, 3)} -{(i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))} -+{(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} - {((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} +-{((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} ++{(i**2) for i in (1, 2, 3)} ++{(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} ++{((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} [i for i in (1, 2, 3)] - [(i ** 2) for i in (1, 2, 3)] +-[(i ** 2) for i in (1, 2, 3)] -[(i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))] -+[(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] - [((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] +-[((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] ++[(i**2) for i in (1, 2, 3)] ++[(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] ++[((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] {i: 0 for i in (1, 2, 3)} -{i: j for i, j in ((1, 'a'), (2, 'b'), (3, 'c'))} +{i: j for i, j in ((1, "a"), (2, "b"), (3, "c"))} @@ -118,8 +132,11 @@ call(**self.screen_kwargs) call(b, **self.screen_kwargs) lukasz.langa.pl -@@ -94,26 +115,24 @@ - 1.0 .real + call.me(maybe) +-1 .real +-1.0 .real ++(1).real ++(1.0).real ....__class__ list[str] dict[str, int] @@ -164,10 +181,12 @@ SomeName (Good, Bad, 
Ugly) (i for i in (1, 2, 3)) - ((i ** 2) for i in (1, 2, 3)) +-((i ** 2) for i in (1, 2, 3)) -((i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))) -+((i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) - (((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) +-(((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) ++((i**2) for i in (1, 2, 3)) ++((i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) ++(((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) (*starred,) -{"id": "1","type": "type","started_at": now(),"ended_at": now() + timedelta(days=10),"priority": 1,"import_session_id": 1,**kwargs} +{ @@ -384,13 +403,13 @@ + return True +if ( + ~aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e -+ | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n ++ | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l**aaaa.m // aaaa.n +): + return True +if ( + ~aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e + | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h -+ ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n ++ ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l**aaaaaaaa.m // aaaaaaaa.n +): + return True +if ( @@ -400,7 +419,7 @@ + | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h + ^ aaaaaaaaaaaaaaaa.i + << aaaaaaaaaaaaaaaa.k -+ >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n ++ >> aaaaaaaaaaaaaaaa.l**aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n +): + return True +( diff --git a/tests/data/force_py36.py b/tests/data/miscellaneous/force_py36.py similarity index 100% rename from tests/data/force_py36.py rename to tests/data/miscellaneous/force_py36.py diff --git a/tests/data/force_pyi.py b/tests/data/miscellaneous/force_pyi.py similarity index 100% rename from tests/data/force_pyi.py rename to tests/data/miscellaneous/force_pyi.py diff --git a/tests/data/miscellaneous/invalid_header.py b/tests/data/miscellaneous/invalid_header.py new file mode 100644 index 0000000..fb49e2f --- /dev/null +++ b/tests/data/miscellaneous/invalid_header.py @@ -0,0 +1,2 @@ +This is not valid Python syntax +y = "This is valid syntax" diff --git a/tests/data/long_strings_flag_disabled.py b/tests/data/miscellaneous/long_strings_flag_disabled.py similarity index 97% rename from tests/data/long_strings_flag_disabled.py rename to tests/data/miscellaneous/long_strings_flag_disabled.py index ef3094f..db3954e 100644 --- a/tests/data/long_strings_flag_disabled.py +++ b/tests/data/miscellaneous/long_strings_flag_disabled.py @@ -133,11 +133,14 @@ "Use f-strings instead!", ) -old_fmt_string3 = "Whereas only the strings after the percent sign were long in the last example, this example uses a long initial string as well. This is another %s %s %s %s" % ( - "really really really really really", - "old", - "way to format strings!", - "Use f-strings instead!", +old_fmt_string3 = ( + "Whereas only the strings after the percent sign were long in the last example, this example uses a long initial string as well. This is another %s %s %s %s" + % ( + "really really really really really", + "old", + "way to format strings!", + "Use f-strings instead!", + ) ) fstring = f"f-strings definitely make things more {difficult} than they need to be for {{black}}. But boy they sure are handy. The problem is that some lines will need to have the 'f' whereas others do not. This {line}, for example, needs one." 
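The rewritten expected diff above records a Black style change: the power operator is "hugged" when both operands are simple (`5**v6`, `aaaa.l**aaaa.m`, `operator**-precedence`) and stays spaced otherwise (`confusing ** ~operator`). Below is a rough approximation of the operand test built on the stdlib `ast` module rather than Black's own CST, so a sketch of the rule as this diff exhibits it, not the actual implementation:

```python
import ast  # ast.unparse requires Python >= 3.9


def is_simple_operand(node: ast.expr) -> bool:
    """Names, numeric literals, dotted names, and unary minus of those."""
    if isinstance(node, ast.UnaryOp) and isinstance(node.op, ast.USub):
        return is_simple_operand(node.operand)
    if isinstance(node, ast.Attribute):
        return is_simple_operand(node.value)
    if isinstance(node, ast.Name):
        return True
    # Numeric constants only; bools are ints but are not NUMBER tokens.
    return (
        isinstance(node, ast.Constant)
        and isinstance(node.value, (int, float, complex))
        and not isinstance(node.value, bool)
    )


def format_power(expr: str) -> str:
    """Render a top-level `a ** b`, hugging `**` when both sides are simple."""
    tree = ast.parse(expr, mode="eval").body
    assert isinstance(tree, ast.BinOp) and isinstance(tree.op, ast.Pow)
    hug = is_simple_operand(tree.left) and is_simple_operand(tree.right)
    op = "**" if hug else " ** "
    return ast.unparse(tree.left) + op + ast.unparse(tree.right)


assert format_power("5 ** v6") == "5**v6"
assert format_power("aaaa.l ** aaaa.m") == "aaaa.l**aaaa.m"
assert format_power("operator ** -precedence") == "operator**-precedence"
assert format_power("confusing ** ~operator") == "confusing ** ~operator"
```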
diff --git a/tests/data/missing_final_newline.diff b/tests/data/miscellaneous/missing_final_newline.diff similarity index 100% rename from tests/data/missing_final_newline.diff rename to tests/data/miscellaneous/missing_final_newline.diff diff --git a/tests/data/missing_final_newline.py b/tests/data/miscellaneous/missing_final_newline.py similarity index 100% rename from tests/data/missing_final_newline.py rename to tests/data/miscellaneous/missing_final_newline.py diff --git a/tests/data/miscellaneous/pattern_matching_invalid.py b/tests/data/miscellaneous/pattern_matching_invalid.py new file mode 100644 index 0000000..22b5b94 --- /dev/null +++ b/tests/data/miscellaneous/pattern_matching_invalid.py @@ -0,0 +1,18 @@ +# First match, no errors +match something: + case bla(): + pass + +# Problem on line 10 +match invalid_case: + case valid_case: + pass + case a := b: + pass + case valid_case: + pass + +# No problems either +match something: + case bla(): + pass diff --git a/tests/data/miscellaneous/power_op_newline.py b/tests/data/miscellaneous/power_op_newline.py new file mode 100644 index 0000000..85d434d --- /dev/null +++ b/tests/data/miscellaneous/power_op_newline.py @@ -0,0 +1,10 @@ +importA;()<<0**0# + +# output + +importA +( + () + << 0 + ** 0 +) # diff --git a/tests/data/miscellaneous/python2_detection.py b/tests/data/miscellaneous/python2_detection.py new file mode 100644 index 0000000..8de2bb5 --- /dev/null +++ b/tests/data/miscellaneous/python2_detection.py @@ -0,0 +1,90 @@ +# This uses a similar construction to the decorators.py test data file FYI. + +print "hello, world!" + +### + +exec "print('hello, world!')" + +### + +def set_position((x, y), value): + pass + +### + +try: + pass +except Exception, err: + pass + +### + +raise RuntimeError, "I feel like crashing today :p" + +### + +`wow_these_really_did_exist` + +### + +10L + +### + +10l + +### + +0123 + +# output + +print("hello python three!") + +### + +exec("I'm not sure if you can use exec like this but that's not important here!") + +### + +try: + pass +except make_exception(1, 2): + pass + +### + +try: + pass +except Exception as err: + pass + +### + +raise RuntimeError(make_msg(1, 2)) + +### + +raise RuntimeError("boom!",) + +### + +def set_position(x, y, value): + pass + +### + +10 + +### + +0 + +### + +000 + +### + +0o12 \ No newline at end of file diff --git a/tests/data/string_quotes.py b/tests/data/miscellaneous/string_quotes.py similarity index 81% rename from tests/data/string_quotes.py rename to tests/data/miscellaneous/string_quotes.py index 5a4bc5d..3384241 100644 --- a/tests/data/string_quotes.py +++ b/tests/data/miscellaneous/string_quotes.py @@ -51,6 +51,11 @@ '\'{z}\' {y * " "}' '{y * x} \'{z}\'' +# We must bail out if changing the quotes would introduce backslashes in f-string +# expressions. xref: https://github.com/psf/black/issues/2348 +f"\"{b}\"{' ' * (long-len(b)+1)}: \"{sts}\",\n" +f"\"{a}\"{'hello' * b}\"{c}\"" + # output """""" @@ -100,3 +105,8 @@ f"{y * x} '{z}'" "'{z}' {y * \" \"}" "{y * x} '{z}'" + +# We must bail out if changing the quotes would introduce backslashes in f-string +# expressions. xref: https://github.com/psf/black/issues/2348 +f"\"{b}\"{' ' * (long-len(b)+1)}: \"{sts}\",\n" +f"\"{a}\"{'hello' * b}\"{c}\"" diff --git a/tests/data/miscellaneous/stub.pyi b/tests/data/miscellaneous/stub.pyi new file mode 100644 index 0000000..af2cd2c --- /dev/null +++ b/tests/data/miscellaneous/stub.pyi @@ -0,0 +1,151 @@ +X: int + +def f(): ... + + +class D: + ... + + +class C: + ... 
+ +class B: + this_lack_of_newline_should_be_kept: int + def b(self) -> None: ... + + but_this_newline_should_also_be_kept: int + +class A: + attr: int + attr2: str + + def f(self) -> int: + ... + + def g(self) -> str: ... + + + +def g(): + ... + +def h(): ... + +if sys.version_info >= (3, 8): + class E: + def f(self): ... + class F: + + def f(self): ... + class G: ... + class H: ... +else: + class I: ... + class J: ... + def f(): ... + + class K: + def f(self): ... + def f(): ... + +class Nested: + class dirty: ... + class little: ... + class secret: + def who_has_to_know(self): ... + def verse(self): ... + +class Conditional: + def f(self): ... + if sys.version_info >= (3, 8): + def g(self): ... + else: + def g(self): ... + def h(self): ... + def i(self): ... + if sys.version_info >= (3, 8): + def j(self): ... + def k(self): ... + if sys.version_info >= (3, 8): + class A: ... + class B: ... + class C: + def l(self): ... + def m(self): ... + + +# output +X: int + +def f(): ... + +class D: ... +class C: ... + +class B: + this_lack_of_newline_should_be_kept: int + def b(self) -> None: ... + + but_this_newline_should_also_be_kept: int + +class A: + attr: int + attr2: str + + def f(self) -> int: ... + def g(self) -> str: ... + +def g(): ... +def h(): ... + +if sys.version_info >= (3, 8): + class E: + def f(self): ... + + class F: + def f(self): ... + + class G: ... + class H: ... + +else: + class I: ... + class J: ... + + def f(): ... + + class K: + def f(self): ... + + def f(): ... + +class Nested: + class dirty: ... + class little: ... + + class secret: + def who_has_to_know(self): ... + + def verse(self): ... + +class Conditional: + def f(self): ... + if sys.version_info >= (3, 8): + def g(self): ... + else: + def g(self): ... + + def h(self): ... + def i(self): ... + if sys.version_info >= (3, 8): + def j(self): ... + + def k(self): ... + if sys.version_info >= (3, 8): + class A: ... + class B: ... + + class C: + def l(self): ... + def m(self): ... 
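The stub.pyi fixture above (input followed by expected output) pins down how Black condenses `.pyi` stubs: a suite whose body is just `...` collapses onto the `def`/`class` line, and blank lines between stubs are normalized. The key predicate, "is this body only an ellipsis?", can be sketched with the stdlib `ast` module; `is_stub_body` is an illustrative helper, not the CST-based check Black itself performs:

```python
import ast


def is_stub_body(node: ast.AST) -> bool:
    """True if a def/class body consists of a single `...` expression."""
    body = getattr(node, "body", None)
    if not isinstance(body, list) or len(body) != 1:
        return False
    stmt = body[0]
    return (
        isinstance(stmt, ast.Expr)
        and isinstance(stmt.value, ast.Constant)
        and stmt.value.value is Ellipsis
    )


src = "def f(): ...\n\ndef g():\n    return 1\n"
funcs = [n for n in ast.parse(src).body if isinstance(n, ast.FunctionDef)]
assert [is_stub_body(f) for f in funcs] == [True, False]
```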
diff --git a/tests/data/nested_gitignore_tests/pyproject.toml b/tests/data/nested_gitignore_tests/pyproject.toml new file mode 100644 index 0000000..9ba7ec2 --- /dev/null +++ b/tests/data/nested_gitignore_tests/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools>=41.0", "setuptools-scm", "wheel"] +build-backend = "setuptools.build_meta" diff --git a/tests/data/nested_gitignore_tests/root/.gitignore b/tests/data/nested_gitignore_tests/root/.gitignore new file mode 100644 index 0000000..2987e7b --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/.gitignore @@ -0,0 +1 @@ +a.py diff --git a/tests/data/nested_gitignore_tests/root/b.py b/tests/data/nested_gitignore_tests/root/b.py new file mode 100644 index 0000000..bdeeca3 --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/b.py @@ -0,0 +1 @@ +# should be included diff --git a/tests/data/nested_gitignore_tests/root/c.py b/tests/data/nested_gitignore_tests/root/c.py new file mode 100644 index 0000000..bdeeca3 --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/c.py @@ -0,0 +1 @@ +# should be included diff --git a/tests/data/nested_gitignore_tests/root/child/.gitignore b/tests/data/nested_gitignore_tests/root/child/.gitignore new file mode 100644 index 0000000..6df81dd --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/child/.gitignore @@ -0,0 +1 @@ +b.py diff --git a/tests/data/nested_gitignore_tests/root/child/c.py b/tests/data/nested_gitignore_tests/root/child/c.py new file mode 100644 index 0000000..bdeeca3 --- /dev/null +++ b/tests/data/nested_gitignore_tests/root/child/c.py @@ -0,0 +1 @@ +# should be included diff --git a/tests/data/nested_gitignore_tests/x.py b/tests/data/nested_gitignore_tests/x.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/numeric_literals_py2.py b/tests/data/numeric_literals_py2.py deleted file mode 100644 index 8f85c43..0000000 --- a/tests/data/numeric_literals_py2.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python2.7 - -x = 123456789L -x = 123456789l -x = 123456789 -x = 0xb1acc - -# output - - -#!/usr/bin/env python2.7 - -x = 123456789L -x = 123456789L -x = 123456789 -x = 0xB1ACC diff --git a/tests/data/cantfit.py b/tests/data/preview/cantfit.py similarity index 100% rename from tests/data/cantfit.py rename to tests/data/preview/cantfit.py diff --git a/tests/data/comments7.py b/tests/data/preview/comments7.py similarity index 81% rename from tests/data/comments7.py rename to tests/data/preview/comments7.py index ca9d7c6..ec2dc50 100644 --- a/tests/data/comments7.py +++ b/tests/data/preview/comments7.py @@ -226,39 +226,53 @@ class C: # metadata_version errors. ( {}, - "None is an invalid value for Metadata-Version. Error: This field is" - " required. see" - " https://packaging.python.org/specifications/core-metadata", + ( + "None is an invalid value for Metadata-Version. Error: This field" + " is required. see" + " https://packaging.python.org/specifications/core-metadata" + ), ), ( {"metadata_version": "-1"}, - "'-1' is an invalid value for Metadata-Version. Error: Unknown Metadata" - " Version see" - " https://packaging.python.org/specifications/core-metadata", + ( + "'-1' is an invalid value for Metadata-Version. Error: Unknown" + " Metadata Version see" + " https://packaging.python.org/specifications/core-metadata" + ), ), # name errors. ( {"metadata_version": "1.2"}, - "'' is an invalid value for Name. Error: This field is required. 
see" - " https://packaging.python.org/specifications/core-metadata", + ( + "'' is an invalid value for Name. Error: This field is required." + " see https://packaging.python.org/specifications/core-metadata" + ), ), ( {"metadata_version": "1.2", "name": "foo-"}, - "'foo-' is an invalid value for Name. Error: Must start and end with a" - " letter or numeral and contain only ascii numeric and '.', '_' and" - " '-'. see https://packaging.python.org/specifications/core-metadata", + ( + "'foo-' is an invalid value for Name. Error: Must start and end" + " with a letter or numeral and contain only ascii numeric and '.'," + " '_' and '-'. see" + " https://packaging.python.org/specifications/core-metadata" + ), ), # version errors. ( {"metadata_version": "1.2", "name": "example"}, - "'' is an invalid value for Version. Error: This field is required. see" - " https://packaging.python.org/specifications/core-metadata", + ( + "'' is an invalid value for Version. Error: This field is required." + " see https://packaging.python.org/specifications/core-metadata" + ), ), ( {"metadata_version": "1.2", "name": "example", "version": "dog"}, - "'dog' is an invalid value for Version. Error: Must start and end with" - " a letter or numeral and contain only ascii numeric and '.', '_' and" - " '-'. see https://packaging.python.org/specifications/core-metadata", + ( + "'dog' is an invalid value for Version. Error: Must start and end" + " with a letter or numeral and contain only ascii numeric and '.'," + " '_' and '-'. see" + " https://packaging.python.org/specifications/core-metadata" + ), ), ], ) diff --git a/tests/data/preview/comments8.py b/tests/data/preview/comments8.py new file mode 100644 index 0000000..a2030c2 --- /dev/null +++ b/tests/data/preview/comments8.py @@ -0,0 +1,15 @@ +# The percent-percent comments are Spyder IDE cells. +# Both `#%%`` and `# %%` are accepted, so `black` standardises +# to the latter. + +#%% +# %% + +# output + +# The percent-percent comments are Spyder IDE cells. +# Both `#%%`` and `# %%` are accepted, so `black` standardises +# to the latter. + +# %% +# %% diff --git a/tests/data/preview/docstring_preview.py b/tests/data/preview/docstring_preview.py new file mode 100644 index 0000000..292352c --- /dev/null +++ b/tests/data/preview/docstring_preview.py @@ -0,0 +1,105 @@ +def docstring_almost_at_line_limit(): + """long docstring................................................................. + """ + + +def docstring_almost_at_line_limit_with_prefix(): + f"""long docstring................................................................ + """ + + +def mulitline_docstring_almost_at_line_limit(): + """long docstring................................................................. + + .................................................................................. + """ + + +def mulitline_docstring_almost_at_line_limit_with_prefix(): + f"""long docstring................................................................ + + .................................................................................. 
+ """ + + +def docstring_at_line_limit(): + """long docstring................................................................""" + + +def docstring_at_line_limit_with_prefix(): + f"""long docstring...............................................................""" + + +def multiline_docstring_at_line_limit(): + """first line----------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def multiline_docstring_at_line_limit_with_prefix(): + f"""first line---------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def single_quote_docstring_over_line_limit(): + "We do not want to put the closing quote on a new line as that is invalid (see GH-3141)." + + +def single_quote_docstring_over_line_limit2(): + 'We do not want to put the closing quote on a new line as that is invalid (see GH-3141).' + + +# output + + +def docstring_almost_at_line_limit(): + """long docstring................................................................. + """ + + +def docstring_almost_at_line_limit_with_prefix(): + f"""long docstring................................................................ + """ + + +def mulitline_docstring_almost_at_line_limit(): + """long docstring................................................................. + + .................................................................................. + """ + + +def mulitline_docstring_almost_at_line_limit_with_prefix(): + f"""long docstring................................................................ + + .................................................................................. + """ + + +def docstring_at_line_limit(): + """long docstring................................................................""" + + +def docstring_at_line_limit_with_prefix(): + f"""long docstring...............................................................""" + + +def multiline_docstring_at_line_limit(): + """first line----------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def multiline_docstring_at_line_limit_with_prefix(): + f"""first line---------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def single_quote_docstring_over_line_limit(): + "We do not want to put the closing quote on a new line as that is invalid (see GH-3141)." + + +def single_quote_docstring_over_line_limit2(): + "We do not want to put the closing quote on a new line as that is invalid (see GH-3141)." diff --git a/tests/data/long_strings.py b/tests/data/preview/long_strings.py similarity index 73% rename from tests/data/long_strings.py rename to tests/data/preview/long_strings.py index 151396b..6db3cfe 100644 --- a/tests/data/long_strings.py +++ b/tests/data/preview/long_strings.py @@ -18,6 +18,26 @@ D4 = {"A long and ridiculous {}".format(string_key): "This is a really really really long string that has to go i,side of a dictionary. It is soooo bad.", some_func("calling", "some", "stuff"): "This is a really really really long string that has to go inside of a dictionary. It is {soooo} bad (#{x}).".format(sooo="soooo", x=2), "A %s %s" % ("formatted", "string"): "This is a really really really long string that has to go inside of a dictionary. It is %s bad (#%d)." 
% ("soooo", 2)} +D5 = { # Test for https://github.com/psf/black/issues/3261 + ("This is a really long string that can't be expected to fit in one line and is used as a nested dict's key"): {"inner": "value"}, +} + +D6 = { # Test for https://github.com/psf/black/issues/3261 + ("This is a really long string that can't be expected to fit in one line and is used as a dict's key"): ["value1", "value2"], +} + +L1 = ["The is a short string", "This is a really long string that can't possibly be expected to fit all together on one line. Also it is inside a list literal, so it's expected to be wrapped in parens when spliting to avoid implicit str concatenation.", short_call("arg", {"key": "value"}), "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a list literal.", ("parens should be stripped for short string in list")] + +L2 = ["This is a really long string that can't be expected to fit in one line and is the only child of a list literal."] + +S1 = {"The is a short string", "This is a really long string that can't possibly be expected to fit all together on one line. Also it is inside a set literal, so it's expected to be wrapped in parens when spliting to avoid implicit str concatenation.", short_call("arg", {"key": "value"}), "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a set literal.", ("parens should be stripped for short string in set")} + +S2 = {"This is a really long string that can't be expected to fit in one line and is the only child of a set literal."} + +T1 = ("The is a short string", "This is a really long string that can't possibly be expected to fit all together on one line. Also it is inside a tuple literal, so it's expected to be wrapped in parens when spliting to avoid implicit str concatenation.", short_call("arg", {"key": "value"}), "This is another really really (not really) long string that also can't be expected to fit on one line and is, like the other string, inside a tuple literal.", ("parens should be stripped for short string in list")) + +T2 = ("This is a really long string that can't be expected to fit in one line and is the only child of a tuple literal.",) + func_with_keywords(my_arg, my_kwarg="Long keyword strings also need to be wrapped, but they will probably need to be handled a little bit differently.") bad_split1 = ( @@ -72,6 +92,25 @@ zzz, ) +inline_comments_func1( + "if there are inline " + "comments in the middle " + # Here is the standard alone comment. + "of the implicitly concatenated " + "string, we should handle " + "them correctly", + xxx, +) + +inline_comments_func2( + "what if the string is very very very very very very very very very very long and this part does " + "not fit into a single line? " + # Here is the standard alone comment. + "then the string should still be properly handled by merging and splitting " + "it into parts that fit in line length.", + xxx, +) + raw_string = r"This is a long raw string. When re-formatting this string, black needs to make sure it prepends the 'r' onto the new string." fmt_string1 = "We also need to be sure to preserve any and all {} which may or may not be attached to the string in question.".format("method calls") @@ -90,7 +129,7 @@ comment_string = "Long lines with inline comments should have their comments appended to the reformatted string's enclosing right parentheses." 
# This comment gets thrown to the top. -arg_comment_string = print("Long lines with inline comments which are apart of (and not the only member of) an argument list should have their comments appended to the reformatted string's enclosing left parentheses.", # This comment stays on the bottom. +arg_comment_string = print("Long lines with inline comments which are apart of (and not the only member of) an argument list should have their comments appended to the reformatted string's enclosing left parentheses.", # This comment gets thrown to the top. "Arg #2", "Arg #3", "Arg #4", "Arg #5") pragma_comment_string1 = "Lines which end with an inline pragma comment of the form `# : <...>` should be left alone." # noqa: E501 @@ -207,6 +246,38 @@ def foo(): " of it." ) +string_with_nameescape = ( + "........................................................................ \N{LAO KO LA}" +) + +string_with_nameescape = ( + "........................................................................... \N{LAO KO LA}" +) + +string_with_nameescape = ( + "............................................................................ \N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + "...................................................................... \\\N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + "......................................................................... \\\N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + ".......................................................................... \\\N{LAO KO LA}" +) + +string_with_escaped_nameescape = ( + "........................................................................ \\N{LAO KO LA}" +) + +string_with_escaped_nameescape = ( + "........................................................................... \\N{LAO KO LA}" +) + # output @@ -294,6 +365,84 @@ def foo(): % ("soooo", 2), } +D5 = { # Test for https://github.com/psf/black/issues/3261 + "This is a really long string that can't be expected to fit in one line and is used as a nested dict's key": { + "inner": "value" + }, +} + +D6 = { # Test for https://github.com/psf/black/issues/3261 + "This is a really long string that can't be expected to fit in one line and is used as a dict's key": [ + "value1", + "value2", + ], +} + +L1 = [ + "The is a short string", + ( + "This is a really long string that can't possibly be expected to fit all" + " together on one line. Also it is inside a list literal, so it's expected to" + " be wrapped in parens when spliting to avoid implicit str concatenation." + ), + short_call("arg", {"key": "value"}), + ( + "This is another really really (not really) long string that also can't be" + " expected to fit on one line and is, like the other string, inside a list" + " literal." + ), + "parens should be stripped for short string in list", +] + +L2 = [ + "This is a really long string that can't be expected to fit in one line and is the" + " only child of a list literal." +] + +S1 = { + "The is a short string", + ( + "This is a really long string that can't possibly be expected to fit all" + " together on one line. Also it is inside a set literal, so it's expected to be" + " wrapped in parens when spliting to avoid implicit str concatenation." + ), + short_call("arg", {"key": "value"}), + ( + "This is another really really (not really) long string that also can't be" + " expected to fit on one line and is, like the other string, inside a set" + " literal." 
+ ), + "parens should be stripped for short string in set", +} + +S2 = { + "This is a really long string that can't be expected to fit in one line and is the" + " only child of a set literal." +} + +T1 = ( + "The is a short string", + ( + "This is a really long string that can't possibly be expected to fit all" + " together on one line. Also it is inside a tuple literal, so it's expected to" + " be wrapped in parens when spliting to avoid implicit str concatenation." + ), + short_call("arg", {"key": "value"}), + ( + "This is another really really (not really) long string that also can't be" + " expected to fit on one line and is, like the other string, inside a tuple" + " literal." + ), + "parens should be stripped for short string in list", +) + +T2 = ( + ( + "This is a really long string that can't be expected to fit in one line and is" + " the only child of a tuple literal." + ), +) + func_with_keywords( my_arg, my_kwarg=( @@ -363,6 +512,22 @@ def foo(): zzz, ) +inline_comments_func1( + "if there are inline comments in the middle " + # Here is the standard alone comment. + "of the implicitly concatenated string, we should handle them correctly", + xxx, +) + +inline_comments_func2( + "what if the string is very very very very very very very very very very long and" + " this part does not fit into a single line? " + # Here is the standard alone comment. + "then the string should still be properly handled by merging and splitting " + "it into parts that fit in line length.", + xxx, +) + raw_string = ( r"This is a long raw string. When re-formatting this string, black needs to make" r" sure it prepends the 'r' onto the new string." @@ -420,7 +585,7 @@ def foo(): arg_comment_string = print( "Long lines with inline comments which are apart of (and not the only member of) an" " argument list should have their comments appended to the reformatted string's" - " enclosing left parentheses.", # This comment stays on the bottom. + " enclosing left parentheses.", # This comment gets thrown to the top. "Arg #2", "Arg #3", "Arg #4", @@ -587,3 +752,43 @@ def foo(): "This is a really long string that can't be merged because it has a likely pragma at the end" # pylint: disable=some-pylint-check " of it." ) + +string_with_nameescape = ( + "........................................................................" + " \N{LAO KO LA}" +) + +string_with_nameescape = ( + "..........................................................................." + " \N{LAO KO LA}" +) + +string_with_nameescape = ( + "............................................................................" + " \N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + "......................................................................" + " \\\N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + "........................................................................." + " \\\N{LAO KO LA}" +) + +string_with_nameescape_and_escaped_backslash = ( + ".........................................................................." + " \\\N{LAO KO LA}" +) + +string_with_escaped_nameescape = ( + "........................................................................ \\N{LAO" + " KO LA}" +) + +string_with_escaped_nameescape = ( + "..........................................................................." 
+ " \\N{LAO KO LA}" +) diff --git a/tests/data/long_strings__edge_case.py b/tests/data/preview/long_strings__edge_case.py similarity index 80% rename from tests/data/long_strings__edge_case.py rename to tests/data/preview/long_strings__edge_case.py index 6919db5..2bc0b6e 100644 --- a/tests/data/long_strings__edge_case.py +++ b/tests/data/preview/long_strings__edge_case.py @@ -29,6 +29,12 @@ ) return f'{x}/b/c/d/d/d/dadfjsadjsaidoaisjdsfjaofjdfijaidfjaodfjaoifjodjafojdoajaaaaaaaaaaa' return f'{x}/b/c/d/d/d/dadfjsadjsaidoaisjdsfjaofjdfijaidfjaodfjaoifjodjafojdoajaaaaaaaaaaaa' +assert str(result) == "This long string should be split at some point right close to or around hereeeeeee" +assert str(result) < "This long string should be split at some point right close to or around hereeeeee" +assert "A format string: %s" % "This long string should be split at some point right close to or around hereeeeeee" != result +msg += "This long string should be wrapped in parens at some point right around hereeeee" +msg += "This long string should be split at some point right close to or around hereeeeeeee" +msg += "This long string should not be split at any point ever since it is just righttt" # output @@ -108,3 +114,27 @@ f"{x}/b/c/d/d/d/dadfjsadjsaidoaisjdsfjaofjdfijaidfjaodfjaoifjodjafojdoajaaaaaaaaaaa" ) return f"{x}/b/c/d/d/d/dadfjsadjsaidoaisjdsfjaofjdfijaidfjaodfjaoifjodjafojdoajaaaaaaaaaaaa" +assert ( + str(result) + == "This long string should be split at some point right close to or around" + " hereeeeeee" +) +assert ( + str(result) + < "This long string should be split at some point right close to or around" + " hereeeeee" +) +assert ( + "A format string: %s" + % "This long string should be split at some point right close to or around" + " hereeeeeee" + != result +) +msg += ( + "This long string should be wrapped in parens at some point right around hereeeee" +) +msg += ( + "This long string should be split at some point right close to or around" + " hereeeeeeee" +) +msg += "This long string should not be split at any point ever since it is just righttt" diff --git a/tests/data/long_strings__regression.py b/tests/data/preview/long_strings__regression.py similarity index 70% rename from tests/data/long_strings__regression.py rename to tests/data/preview/long_strings__regression.py index 2e7f248..634db46 100644 --- a/tests/data/long_strings__regression.py +++ b/tests/data/preview/long_strings__regression.py @@ -396,6 +396,135 @@ def xxxxxxx_xxxxxx(xxxx): " it has now" ) + +def _legacy_listen_examples(): + text += ( + " \"listen for the '%(event_name)s' event\"\n" + "\n # ... 
(event logic logic logic) ...\n" + % { + "since": since, + } + ) + + +class X: + async def foo(self): + msg = "" + for candidate in CANDIDATES: + msg += ( + "**{candidate.object_type} {candidate.rev}**" + " - {candidate.description}\n" + ) + + +temp_msg = ( + f"{f'{humanize_number(pos)}.': <{pound_len+2}} " + f"{balance: <{bal_len + 5}} " + f"<<{author.display_name}>>\n" +) + +assert str(suffix_arr) == ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) != ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) <= ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) >= ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) < ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) > ( + "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert str(suffix_arr) in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +assert str(suffix_arr) not in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +message = ( + f"1. Go to Google Developers Console and log in with your Google account." + "(https://console.developers.google.com/)" + "2. You should be prompted to create a new project (name does not matter)." + "3. Click on Enable APIs and Services at the top." + "4. In the list of APIs choose or search for YouTube Data API v3 and " + "click on it. Choose Enable." + "5. Click on Credentials on the left navigation bar." + "6. Click on Create Credential at the top." + '7. At the top click the link for "API key".' + "8. No application restrictions are needed. Click Create at the bottom." + "9. You now have a key to add to `{prefix}set api youtube api_key`" +) +message = ( + f"1. Go to Google Developers Console and log in with your Google account." + "(https://console.developers.google.com/)" + "2. You should be prompted to create a new project (name does not matter)." + f"3. Click on Enable APIs and Services at the top." + "4. In the list of APIs choose or search for YouTube Data API v3 and " + "click on it. Choose Enable." + f"5. Click on Credentials on the left navigation bar." + "6. Click on Create Credential at the top." + '7. At the top click the link for "API key".' + "8. No application restrictions are needed. Click Create at the bottom." + "9. You now have a key to add to `{prefix}set api youtube api_key`" +) +message = ( + f"1. Go to Google Developers Console and log in with your Google account." + "(https://console.developers.google.com/)" + "2. 
You should be prompted to create a new project (name does not matter)." + f"3. Click on Enable APIs and Services at the top." + "4. In the list of APIs choose or search for YouTube Data API v3 and " + "click on it. Choose Enable." + f"5. Click on Credentials on the left navigation bar." + "6. Click on Create Credential at the top." + '7. At the top click the link for "API key".' + "8. No application restrictions are needed. Click Create at the bottom." + f"9. You now have a key to add to `{prefix}set api youtube api_key`" +) + +# It shouldn't matter if the string prefixes are capitalized. +temp_msg = ( + F"{F'{humanize_number(pos)}.': <{pound_len+2}} " + F"{balance: <{bal_len + 5}} " + F"<<{author.display_name}>>\n" +) + +fstring = ( + F"We have to remember to escape {braces}." + " Like {these}." + F" But not {this}." +) + +welcome_to_programming = R"hello," R" world!" + +fstring = F"f-strings definitely make things more {difficult} than they need to be for {{black}}. But boy they sure are handy. The problem is that some lines will need to have the 'f' whereas others do not. This {line}, for example, needs one." + +x = F"This is a long string which contains an f-expr that should not split {{{[i for i in range(5)]}}}." + +x = ( + "\N{BLACK RIGHT-POINTING TRIANGLE WITH DOUBLE VERTICAL BAR}\N{VARIATION SELECTOR-16}" +) + +xxxxxx_xxx_xxxx_xx_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxx_xxxx_xxxxx = xxxx.xxxxxx.xxxxxxxxx.xxxxxxxxxxxxxxxxxxxx( + xx_xxxxxx={ + "x3_xxxxxxxx": "xxx3_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxxxxxx_xxxxxx_xxxxxxx", + }, +) + + # output @@ -470,7 +599,7 @@ def foo(): def foo(xxxx): - for (xxx_xxxx, _xxx_xxx, _xxx_xxxxx, xxx_xxxx) in xxxx: + for xxx_xxxx, _xxx_xxx, _xxx_xxxxx, xxx_xxxx in xxxx: for xxx in xxx_xxxx: assert ("x" in xxx) or (xxx in xxx_xxx_xxxxx), ( "{0} xxxxxxx xx {1}, xxx {1} xx xxx xx xxxx xx xxx xxxx: xxx xxxx {2}" @@ -634,20 +763,28 @@ def xxxx_xxx_xx_xxxxxxxxxx_xxxx_xxxxxxxxx(xxxx): some_dictionary = { "xxxxx006": [ - "xxx-xxx" - " xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==" - " xxxxx000 xxxxxxxxxx\n", - "xxx-xxx" - " xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==" - " xxxxx010 xxxxxxxxxx\n", + ( + "xxx-xxx" + " xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==" + " xxxxx000 xxxxxxxxxx\n" + ), + ( + "xxx-xxx" + " 
xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==" + " xxxxx010 xxxxxxxxxx\n" + ), ], "xxxxx016": [ - "xxx-xxx" - " xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==" - " xxxxx000 xxxxxxxxxx\n", - "xxx-xxx" - " xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==" - " xxxxx010 xxxxxxxxxx\n", + ( + "xxx-xxx" + " xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx==" + " xxxxx000 xxxxxxxxxx\n" + ), + ( + "xxx-xxx" + " xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx==" + " xxxxx010 xxxxxxxxxx\n" + ), ], } @@ -886,3 +1023,152 @@ def xxxxxxx_xxxxxx(xxxx): " it goes over 88 characters which" " it has now" ) + + +def _legacy_listen_examples(): + text += ( + " \"listen for the '%(event_name)s' event\"\n" + "\n # ... 
(event logic logic logic) ...\n" + % { + "since": since, + } + ) + + +class X: + async def foo(self): + msg = "" + for candidate in CANDIDATES: + msg += ( + "**{candidate.object_type} {candidate.rev}**" + " - {candidate.description}\n" + ) + + +temp_msg = ( + f"{f'{humanize_number(pos)}.': <{pound_len+2}} " + f"{balance: <{bal_len + 5}} " + f"<<{author.display_name}>>\n" +) + +assert ( + str(suffix_arr) + == "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + != "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + <= "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + >= "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + < "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + > "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', " + "'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', " + "'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']" +) +assert ( + str(suffix_arr) + in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$'," + " 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$', 'rykangaroo$'," + " 'ykangaroo$']" +) +assert ( + str(suffix_arr) + not in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$'," + " 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$'," + " 'rykangaroo$', 'ykangaroo$']" +) +message = ( + f"1. Go to Google Developers Console and log in with your Google account." + f"(https://console.developers.google.com/)" + f"2. You should be prompted to create a new project (name does not matter)." + f"3. Click on Enable APIs and Services at the top." + f"4. In the list of APIs choose or search for YouTube Data API v3 and " + f"click on it. Choose Enable." + f"5. Click on Credentials on the left navigation bar." + f"6. Click on Create Credential at the top." + f'7. At the top click the link for "API key".' + f"8. No application restrictions are needed. Click Create at the bottom." + f"9. You now have a key to add to `{{prefix}}set api youtube api_key`" +) +message = ( + f"1. Go to Google Developers Console and log in with your Google account." + f"(https://console.developers.google.com/)" + f"2. You should be prompted to create a new project (name does not matter)." + f"3. Click on Enable APIs and Services at the top." + f"4. In the list of APIs choose or search for YouTube Data API v3 and " + f"click on it. Choose Enable." + f"5. Click on Credentials on the left navigation bar." + f"6. Click on Create Credential at the top." + f'7. At the top click the link for "API key".' + f"8. No application restrictions are needed. Click Create at the bottom." + f"9. You now have a key to add to `{{prefix}}set api youtube api_key`" +) +message = ( + "1. Go to Google Developers Console and log in with your Google account." 
+ "(https://console.developers.google.com/)" + "2. You should be prompted to create a new project (name does not matter)." + "3. Click on Enable APIs and Services at the top." + "4. In the list of APIs choose or search for YouTube Data API v3 and " + "click on it. Choose Enable." + "5. Click on Credentials on the left navigation bar." + "6. Click on Create Credential at the top." + '7. At the top click the link for "API key".' + "8. No application restrictions are needed. Click Create at the bottom." + f"9. You now have a key to add to `{prefix}set api youtube api_key`" +) + +# It shouldn't matter if the string prefixes are capitalized. +temp_msg = ( + f"{F'{humanize_number(pos)}.': <{pound_len+2}} " + f"{balance: <{bal_len + 5}} " + f"<<{author.display_name}>>\n" +) + +fstring = f"We have to remember to escape {braces}. Like {{these}}. But not {this}." + +welcome_to_programming = R"hello," R" world!" + +fstring = ( + f"f-strings definitely make things more {difficult} than they need to be for" + " {black}. But boy they sure are handy. The problem is that some lines will need" + f" to have the 'f' whereas others do not. This {line}, for example, needs one." +) + +x = ( + "This is a long string which contains an f-expr that should not split" + f" {{{[i for i in range(5)]}}}." +) + +x = ( + "\N{BLACK RIGHT-POINTING TRIANGLE WITH DOUBLE VERTICAL BAR}\N{VARIATION SELECTOR-16}" +) + +xxxxxx_xxx_xxxx_xx_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxx_xxxx_xxxxx = xxxx.xxxxxx.xxxxxxxxx.xxxxxxxxxxxxxxxxxxxx( + xx_xxxxxx={ + "x3_xxxxxxxx": ( + "xxx3_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxxxxxx_xxxxxx_xxxxxxx" + ), + }, +) diff --git a/tests/data/preview/one_element_subscript.py b/tests/data/preview/one_element_subscript.py new file mode 100644 index 0000000..39205ba --- /dev/null +++ b/tests/data/preview/one_element_subscript.py @@ -0,0 +1,36 @@ +# We should not treat the trailing comma +# in a single-element subscript. +a: tuple[int,] +b = tuple[int,] + +# The magic comma still applies to multi-element subscripts. +c: tuple[int, int,] +d = tuple[int, int,] + +# Magic commas still work as expected for non-subscripts. +small_list = [1,] +list_of_types = [tuple[int,],] + +# output +# We should not treat the trailing comma +# in a single-element subscript. +a: tuple[int,] +b = tuple[int,] + +# The magic comma still applies to multi-element subscripts. +c: tuple[ + int, + int, +] +d = tuple[ + int, + int, +] + +# Magic commas still work as expected for non-subscripts. 
+small_list = [
+    1,
+]
+list_of_types = [
+    tuple[int,],
+]
diff --git a/tests/data/percent_precedence.py b/tests/data/preview/percent_precedence.py
similarity index 100%
rename from tests/data/percent_precedence.py
rename to tests/data/preview/percent_precedence.py
diff --git a/tests/data/preview/remove_await_parens.py b/tests/data/preview/remove_await_parens.py
new file mode 100644
index 0000000..eb7dad3
--- /dev/null
+++ b/tests/data/preview/remove_await_parens.py
@@ -0,0 +1,168 @@
+import asyncio
+
+# Control example
+async def main():
+    await asyncio.sleep(1)
+
+# Remove brackets for short coroutine/task
+async def main():
+    await (asyncio.sleep(1))
+
+async def main():
+    await (
+        asyncio.sleep(1)
+    )
+
+async def main():
+    await (asyncio.sleep(1)
+    )
+
+# Check comments
+async def main():
+    await ( # Hello
+        asyncio.sleep(1)
+    )
+
+async def main():
+    await (
+        asyncio.sleep(1) # Hello
+    )
+
+async def main():
+    await (
+        asyncio.sleep(1)
+    ) # Hello
+
+# Long lines
+async def main():
+    await asyncio.gather(asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1))
+
+# Same as above but with magic trailing comma in function
+async def main():
+    await asyncio.gather(asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1), asyncio.sleep(1),)
+
+# Cr@zY Br@ck3Tz
+async def main():
+    await (
+        (((((((((((((
+        ((( (((
+        ((( (((
+        ((( (((
+        ((( (((
+        ((black(1)))
+        ))) )))
+        ))) )))
+        ))) )))
+        ))) )))
+        )))))))))))))
+    )
+
+# Keep brackets around non power operations and nested awaits
+async def main():
+    await (set_of_tasks | other_set)
+
+async def main():
+    await (await asyncio.sleep(1))
+
+# It's awaits all the way down...
+async def main():
+    await (await x)
+
+async def main():
+    await (yield x)
+
+async def main():
+    await (await (asyncio.sleep(1)))
+
+async def main():
+    await (await (await (await (await (asyncio.sleep(1))))))
+
+# output
+import asyncio
+
+# Control example
+async def main():
+    await asyncio.sleep(1)
+
+
+# Remove brackets for short coroutine/task
+async def main():
+    await asyncio.sleep(1)
+
+
+async def main():
+    await asyncio.sleep(1)
+
+
+async def main():
+    await asyncio.sleep(1)
+
+
+# Check comments
+async def main():
+    await asyncio.sleep(1)  # Hello
+
+
+async def main():
+    await asyncio.sleep(1)  # Hello
+
+
+async def main():
+    await asyncio.sleep(1)  # Hello
+
+
+# Long lines
+async def main():
+    await asyncio.gather(
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+    )
+
+
+# Same as above but with magic trailing comma in function
+async def main():
+    await asyncio.gather(
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+        asyncio.sleep(1),
+    )
+
+
+# Cr@zY Br@ck3Tz
+async def main():
+    await black(1)
+
+
+# Keep brackets around non power operations and nested awaits
+async def main():
+    await (set_of_tasks | other_set)
+
+
+async def main():
+    await (await asyncio.sleep(1))
+
+
+# It's awaits all the way down...
+async def main():
+    await (await x)
+
+
+async def main():
+    await (yield x)
+
+
+async def main():
+    await (await asyncio.sleep(1))
+
+
+async def main():
+    await (await (await (await (await asyncio.sleep(1)))))
diff --git a/tests/data/preview/remove_except_parens.py b/tests/data/preview/remove_except_parens.py
new file mode 100644
index 0000000..322c5b7
--- /dev/null
+++ b/tests/data/preview/remove_except_parens.py
@@ -0,0 +1,79 @@
+# These brackets are redundant, therefore remove.
+try:
+    a.something
+except (AttributeError) as err:
+    raise err
+
+# This is a tuple of exceptions.
+# Although this could be replaced with just the exception,
+# we do not remove brackets to preserve AST.
+try:
+    a.something
+except (AttributeError,) as err:
+    raise err
+
+# This is a tuple of exceptions. Do not remove brackets.
+try:
+    a.something
+except (AttributeError, ValueError) as err:
+    raise err
+
+# Test long variants.
+try:
+    a.something
+except (some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error) as err:
+    raise err
+
+try:
+    a.something
+except (some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error,) as err:
+    raise err
+
+try:
+    a.something
+except (some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error) as err:
+    raise err
+
+# output
+# These brackets are redundant, therefore remove.
+try:
+    a.something
+except AttributeError as err:
+    raise err
+
+# This is a tuple of exceptions.
+# Although this could be replaced with just the exception,
+# we do not remove brackets to preserve AST.
+try:
+    a.something
+except (AttributeError,) as err:
+    raise err
+
+# This is a tuple of exceptions. Do not remove brackets.
+try:
+    a.something
+except (AttributeError, ValueError) as err:
+    raise err
+
+# Test long variants.
+try: + a.something +except ( + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error +) as err: + raise err + +try: + a.something +except ( + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, +) as err: + raise err + +try: + a.something +except ( + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, +) as err: + raise err diff --git a/tests/data/preview/remove_for_brackets.py b/tests/data/preview/remove_for_brackets.py new file mode 100644 index 0000000..cd53404 --- /dev/null +++ b/tests/data/preview/remove_for_brackets.py @@ -0,0 +1,48 @@ +# Only remove tuple brackets after `for` +for (k, v) in d.items(): + print(k, v) + +# Don't touch tuple brackets after `in` +for module in (core, _unicodefun): + if hasattr(module, "_verify_python3_env"): + module._verify_python3_env = lambda: None + +# Brackets remain for long for loop lines +for (why_would_anyone_choose_to_name_a_loop_variable_with_a_name_this_long, i_dont_know_but_we_should_still_check_the_behaviour_if_they_do) in d.items(): + print(k, v) + +for (k, v) in dfkasdjfldsjflkdsjflkdsjfdslkfjldsjfgkjdshgkljjdsfldgkhsdofudsfudsofajdslkfjdslkfjldisfjdffjsdlkfjdlkjjkdflskadjldkfjsalkfjdasj.items(): + print(k, v) + +# Test deeply nested brackets +for (((((k, v))))) in d.items(): + print(k, v) + +# output +# Only remove tuple brackets after `for` +for k, v in d.items(): + print(k, v) + +# Don't touch tuple brackets after `in` +for module in (core, _unicodefun): + if hasattr(module, "_verify_python3_env"): + module._verify_python3_env = lambda: None + +# Brackets remain for long for loop lines +for ( + why_would_anyone_choose_to_name_a_loop_variable_with_a_name_this_long, + i_dont_know_but_we_should_still_check_the_behaviour_if_they_do, +) in d.items(): + print(k, v) + +for ( + k, + v, +) in ( + dfkasdjfldsjflkdsjflkdsjfdslkfjldsjfgkjdshgkljjdsfldgkhsdofudsfudsofajdslkfjdslkfjldisfjdffjsdlkfjdlkjjkdflskadjldkfjsalkfjdasj.items() +): + print(k, v) + +# Test deeply nested brackets +for k, v in d.items(): + print(k, v) diff --git a/tests/data/preview/remove_newline_after_code_block_open.py b/tests/data/preview/remove_newline_after_code_block_open.py new file mode 100644 index 0000000..ef2e5c2 --- /dev/null +++ b/tests/data/preview/remove_newline_after_code_block_open.py @@ -0,0 +1,189 @@ +import random + + +def foo1(): + + print("The newline above me should be deleted!") + + +def foo2(): + + + + print("All the newlines above me should be deleted!") + + +def foo3(): + + print("No newline above me!") + + print("There is a newline above me, and that's OK!") + + +def foo4(): + + # There is a comment here + + print("The newline above me should not be deleted!") + + +class Foo: + def bar(self): + + print("The newline above me should be deleted!") + + +for i in range(5): + + print(f"{i}) The line above me should be removed!") + + +for i in range(5): + + + + print(f"{i}) The lines above me should be removed!") + + +for i in range(5): + + for j in range(7): + + print(f"{i}) The lines above me should be removed!") + + +if random.randint(0, 3) == 0: + + print("The new line above me is about to be removed!") + + +if random.randint(0, 3) == 0: + + + + + print("The new lines above me is about to be removed!") + + +if random.randint(0, 3) == 0: + if random.uniform(0, 1) > 0.5: + print("Two lines above me are about to be removed!") + + +while True: + + 
print("The newline above me should be deleted!") + + +while True: + + + + print("The newlines above me should be deleted!") + + +while True: + + while False: + + print("The newlines above me should be deleted!") + + +with open("/path/to/file.txt", mode="w") as file: + + file.write("The new line above me is about to be removed!") + + +with open("/path/to/file.txt", mode="w") as file: + + + + file.write("The new lines above me is about to be removed!") + + +with open("/path/to/file.txt", mode="r") as read_file: + + with open("/path/to/output_file.txt", mode="w") as write_file: + + write_file.writelines(read_file.readlines()) + +# output + +import random + + +def foo1(): + print("The newline above me should be deleted!") + + +def foo2(): + print("All the newlines above me should be deleted!") + + +def foo3(): + print("No newline above me!") + + print("There is a newline above me, and that's OK!") + + +def foo4(): + # There is a comment here + + print("The newline above me should not be deleted!") + + +class Foo: + def bar(self): + print("The newline above me should be deleted!") + + +for i in range(5): + print(f"{i}) The line above me should be removed!") + + +for i in range(5): + print(f"{i}) The lines above me should be removed!") + + +for i in range(5): + for j in range(7): + print(f"{i}) The lines above me should be removed!") + + +if random.randint(0, 3) == 0: + print("The new line above me is about to be removed!") + + +if random.randint(0, 3) == 0: + print("The new lines above me is about to be removed!") + + +if random.randint(0, 3) == 0: + if random.uniform(0, 1) > 0.5: + print("Two lines above me are about to be removed!") + + +while True: + print("The newline above me should be deleted!") + + +while True: + print("The newlines above me should be deleted!") + + +while True: + while False: + print("The newlines above me should be deleted!") + + +with open("/path/to/file.txt", mode="w") as file: + file.write("The new line above me is about to be removed!") + + +with open("/path/to/file.txt", mode="w") as file: + file.write("The new lines above me is about to be removed!") + + +with open("/path/to/file.txt", mode="r") as read_file: + with open("/path/to/output_file.txt", mode="w") as write_file: + write_file.writelines(read_file.readlines()) diff --git a/tests/data/preview/return_annotation_brackets.py b/tests/data/preview/return_annotation_brackets.py new file mode 100644 index 0000000..27760bd --- /dev/null +++ b/tests/data/preview/return_annotation_brackets.py @@ -0,0 +1,222 @@ +# Control +def double(a: int) -> int: + return 2*a + +# Remove the brackets +def double(a: int) -> (int): + return 2*a + +# Some newline variations +def double(a: int) -> ( + int): + return 2*a + +def double(a: int) -> (int +): + return 2*a + +def double(a: int) -> ( + int +): + return 2*a + +# Don't lose the comments +def double(a: int) -> ( # Hello + int +): + return 2*a + +def double(a: int) -> ( + int # Hello +): + return 2*a + +# Really long annotations +def foo() -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + +def foo() -> intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds: + return 2 + +def foo() -> intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds | intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds: + return 2 + 
+def foo(a: int, b: int, c: int,) -> intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds: + return 2 + +def foo(a: int, b: int, c: int,) -> intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds | intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds: + return 2 + +# Split args but no need to split return +def foo(a: int, b: int, c: int,) -> int: + return 2 + +# Deeply nested brackets +# with *interesting* spacing +def double(a: int) -> (((((int))))): + return 2*a + +def double(a: int) -> ( + ( ( + ((int) + ) + ) + ) + ): + return 2*a + +def foo() -> ( + ( ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +) +)): + return 2 + +# Return type with commas +def foo() -> ( + tuple[int, int, int] +): + return 2 + +def foo() -> tuple[loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong, loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong, loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong]: + return 2 + +# Magic trailing comma example +def foo() -> tuple[int, int, int,]: + return 2 + +# Long string example +def frobnicate() -> "ThisIsTrulyUnreasonablyExtremelyLongClassName | list[ThisIsTrulyUnreasonablyExtremelyLongClassName]": + pass + +# output +# Control +def double(a: int) -> int: + return 2 * a + + +# Remove the brackets +def double(a: int) -> int: + return 2 * a + + +# Some newline variations +def double(a: int) -> int: + return 2 * a + + +def double(a: int) -> int: + return 2 * a + + +def double(a: int) -> int: + return 2 * a + + +# Don't lose the comments +def double(a: int) -> int: # Hello + return 2 * a + + +def double(a: int) -> int: # Hello + return 2 * a + + +# Really long annotations +def foo() -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + + +def foo() -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + + +def foo() -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds + | intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + + +def foo( + a: int, + b: int, + c: int, +) -> intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds: + return 2 + + +def foo( + a: int, + b: int, + c: int, +) -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds + | intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + + +# Split args but no need to split return +def foo( + a: int, + b: int, + c: int, +) -> int: + return 2 + + +# Deeply nested brackets +# with *interesting* spacing +def double(a: int) -> int: + return 2 * a + + +def double(a: int) -> int: + return 2 * a + + +def foo() -> ( + intsdfsafafafdfdsasdfsfsdfasdfafdsafdfdsfasdskdsdsfdsafdsafsdfdasfffsfdsfdsafafhdskfhdsfjdslkfdlfsdkjhsdfjkdshfkljds +): + return 2 + + +# Return type with 
commas +def foo() -> tuple[int, int, int]: + return 2 + + +def foo() -> ( + tuple[ + loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong, + loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong, + loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong, + ] +): + return 2 + + +# Magic trailing comma example +def foo() -> ( + tuple[ + int, + int, + int, + ] +): + return 2 + + +# Long string example +def frobnicate() -> ( + "ThisIsTrulyUnreasonablyExtremelyLongClassName |" + " list[ThisIsTrulyUnreasonablyExtremelyLongClassName]" +): + pass diff --git a/tests/data/preview/skip_magic_trailing_comma.py b/tests/data/preview/skip_magic_trailing_comma.py new file mode 100644 index 0000000..e98174a --- /dev/null +++ b/tests/data/preview/skip_magic_trailing_comma.py @@ -0,0 +1,34 @@ +# We should not remove the trailing comma in a single-element subscript. +a: tuple[int,] +b = tuple[int,] + +# But commas in multiple element subscripts should be removed. +c: tuple[int, int,] +d = tuple[int, int,] + +# Remove commas for non-subscripts. +small_list = [1,] +list_of_types = [tuple[int,],] +small_set = {1,} +set_of_types = {tuple[int,],} + +# Except single element tuples +small_tuple = (1,) + +# output +# We should not remove the trailing comma in a single-element subscript. +a: tuple[int,] +b = tuple[int,] + +# But commas in multiple element subscripts should be removed. +c: tuple[int, int] +d = tuple[int, int] + +# Remove commas for non-subscripts. +small_list = [1] +list_of_types = [tuple[int,]] +small_set = {1} +set_of_types = {tuple[int,]} + +# Except single element tuples +small_tuple = (1,) diff --git a/tests/data/preview_310/remove_newline_after_match.py b/tests/data/preview_310/remove_newline_after_match.py new file mode 100644 index 0000000..f7bcfbf --- /dev/null +++ b/tests/data/preview_310/remove_newline_after_match.py @@ -0,0 +1,34 @@ +def http_status(status): + + match status: + + case 400: + + return "Bad request" + + case 401: + + return "Unauthorized" + + case 403: + + return "Forbidden" + + case 404: + + return "Not found" + +# output +def http_status(status): + match status: + case 400: + return "Bad request" + + case 401: + return "Unauthorized" + + case 403: + return "Forbidden" + + case 404: + return "Not found" \ No newline at end of file diff --git a/tests/data/preview_39/remove_with_brackets.py b/tests/data/preview_39/remove_with_brackets.py new file mode 100644 index 0000000..ea58ab9 --- /dev/null +++ b/tests/data/preview_39/remove_with_brackets.py @@ -0,0 +1,119 @@ +with (open("bla.txt")): + pass + +with (open("bla.txt")), (open("bla.txt")): + pass + +with (open("bla.txt") as f): + pass + +# Remove brackets within alias expression +with (open("bla.txt")) as f: + pass + +# Remove brackets around one-line context managers +with (open("bla.txt") as f, (open("x"))): + pass + +with ((open("bla.txt")) as f, open("x")): + pass + +with (CtxManager1() as example1, CtxManager2() as example2): + ... + +# Brackets remain when using magic comma +with (CtxManager1() as example1, CtxManager2() as example2,): + ... + +# Brackets remain for multi-line context managers +with (CtxManager1() as example1, CtxManager2() as example2, CtxManager2() as example2, CtxManager2() as example2, CtxManager2() as example2): + ... + +# Don't touch assignment expressions +with (y := open("./test.py")) as f: + pass + +# Deeply nested examples +# N.B. 
Multiple brackets are only possible +# around the context manager itself. +# Only one brackets is allowed around the +# alias expression or comma-delimited context managers. +with (((open("bla.txt")))): + pass + +with (((open("bla.txt")))), (((open("bla.txt")))): + pass + +with (((open("bla.txt")))) as f: + pass + +with ((((open("bla.txt")))) as f): + pass + +with ((((CtxManager1()))) as example1, (((CtxManager2()))) as example2): + ... + +# output +with open("bla.txt"): + pass + +with open("bla.txt"), open("bla.txt"): + pass + +with open("bla.txt") as f: + pass + +# Remove brackets within alias expression +with open("bla.txt") as f: + pass + +# Remove brackets around one-line context managers +with open("bla.txt") as f, open("x"): + pass + +with open("bla.txt") as f, open("x"): + pass + +with CtxManager1() as example1, CtxManager2() as example2: + ... + +# Brackets remain when using magic comma +with ( + CtxManager1() as example1, + CtxManager2() as example2, +): + ... + +# Brackets remain for multi-line context managers +with ( + CtxManager1() as example1, + CtxManager2() as example2, + CtxManager2() as example2, + CtxManager2() as example2, + CtxManager2() as example2, +): + ... + +# Don't touch assignment expressions +with (y := open("./test.py")) as f: + pass + +# Deeply nested examples +# N.B. Multiple brackets are only possible +# around the context manager itself. +# Only one brackets is allowed around the +# alias expression or comma-delimited context managers. +with open("bla.txt"): + pass + +with open("bla.txt"), open("bla.txt"): + pass + +with open("bla.txt") as f: + pass + +with open("bla.txt") as f: + pass + +with CtxManager1() as example1, CtxManager2() as example2: + ... diff --git a/tests/data/py_310/parenthesized_context_managers.py b/tests/data/py_310/parenthesized_context_managers.py new file mode 100644 index 0000000..ccf1f94 --- /dev/null +++ b/tests/data/py_310/parenthesized_context_managers.py @@ -0,0 +1,21 @@ +with (CtxManager() as example): + ... + +with (CtxManager1(), CtxManager2()): + ... + +with (CtxManager1() as example, CtxManager2()): + ... + +with (CtxManager1(), CtxManager2() as example): + ... + +with (CtxManager1() as example1, CtxManager2() as example2): + ... + +with ( + CtxManager1() as example1, + CtxManager2() as example2, + CtxManager3() as example3, +): + ... 
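The two fixtures above exercise the same syntax from opposite directions: remove_with_brackets (a preview_39 case) asserts that Black strips redundant parentheses around context managers, while parenthesized_context_managers (a py_310 case) asserts that genuinely parenthesized multi-manager `with` statements parse and survive formatting. As a minimal sketch of how one might reproduce the preview behaviour with Black's public API (the snippet text is illustrative; `Mode(preview=...)`, `TargetVersion`, and `format_str` are assumed to be available as in the Black version this diff targets):

import black

# Hypothetical input resembling the fixture: redundant brackets around
# a single context manager, which the preview style is expected to drop.
SRC = 'with (open("bla.txt")) as f:\n    pass\n'

mode = black.Mode(preview=True, target_versions={black.TargetVersion.PY39})
# Expected to print: with open("bla.txt") as f: ...
print(black.format_str(SRC, mode=mode))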
diff --git a/tests/data/py_310/pattern_matching_complex.py b/tests/data/py_310/pattern_matching_complex.py
new file mode 100644
index 0000000..97ee194
--- /dev/null
+++ b/tests/data/py_310/pattern_matching_complex.py
@@ -0,0 +1,144 @@
+# Cases sampled from Lib/test/test_patma.py
+
+# case black_test_patma_098
+match x:
+    case -0j:
+        y = 0
+# case black_test_patma_142
+match x:
+    case bytes(z):
+        y = 0
+# case black_test_patma_073
+match x:
+    case 0 if 0:
+        y = 0
+    case 0 if 1:
+        y = 1
+# case black_test_patma_006
+match 3:
+    case 0 | 1 | 2 | 3:
+        x = True
+# case black_test_patma_049
+match x:
+    case [0, 1] | [1, 0]:
+        y = 0
+# case black_check_sequence_then_mapping
+match x:
+    case [*_]:
+        return "seq"
+    case {}:
+        return "map"
+# case black_test_patma_035
+match x:
+    case {0: [1, 2, {}]}:
+        y = 0
+    case {0: [1, 2, {}] | True} | {1: [[]]} | {0: [1, 2, {}]} | [] | "X" | {}:
+        y = 1
+    case []:
+        y = 2
+# case black_test_patma_107
+match x:
+    case 0.25 + 1.75j:
+        y = 0
+# case black_test_patma_097
+match x:
+    case -0j:
+        y = 0
+# case black_test_patma_007
+match 4:
+    case 0 | 1 | 2 | 3:
+        x = True
+# case black_test_patma_154
+match x:
+    case 0 if x:
+        y = 0
+# case black_test_patma_134
+match x:
+    case {1: 0}:
+        y = 0
+    case {0: 0}:
+        y = 1
+    case {**z}:
+        y = 2
+# case black_test_patma_185
+match Seq():
+    case [*_]:
+        y = 0
+# case black_test_patma_063
+match x:
+    case 1:
+        y = 0
+    case 1:
+        y = 1
+# case black_test_patma_248
+match x:
+    case {"foo": bar}:
+        y = bar
+# case black_test_patma_019
+match (0, 1, 2):
+    case [0, 1, *x, 2]:
+        y = 0
+# case black_test_patma_052
+match x:
+    case [0]:
+        y = 0
+    case [1, 0] if (x := x[:0]):
+        y = 1
+    case [1, 0]:
+        y = 2
+# case black_test_patma_191
+match w:
+    case [x, y, *_]:
+        z = 0
+# case black_test_patma_110
+match x:
+    case -0.25 - 1.75j:
+        y = 0
+# case black_test_patma_151
+match (x,):
+    case [y]:
+        z = 0
+# case black_test_patma_114
+match x:
+    case A.B.C.D:
+        y = 0
+# case black_test_patma_232
+match x:
+    case None:
+        y = 0
+# case black_test_patma_058
+match x:
+    case 0:
+        y = 0
+# case black_test_patma_233
+match x:
+    case False:
+        y = 0
+# case black_test_patma_078
+match x:
+    case []:
+        y = 0
+    case [""]:
+        y = 1
+    case "":
+        y = 2
+# case black_test_patma_156
+match x:
+    case z:
+        y = 0
+# case black_test_patma_189
+match w:
+    case [x, y, *rest]:
+        z = 0
+# case black_test_patma_042
+match x:
+    case (0 as z) | (1 as z) | (2 as z) if z == x % 2:
+        y = 0
+# case black_test_patma_034
+match x:
+    case {0: [1, 2, {}]}:
+        y = 0
+    case {0: [1, 2, {}] | False} | {1: [[]]} | {0: [1, 2, {}]} | [] | "X" | {}:
+        y = 1
+    case []:
+        y = 2
diff --git a/tests/data/py_310/pattern_matching_extras.py b/tests/data/py_310/pattern_matching_extras.py
new file mode 100644
index 0000000..9f6907f
--- /dev/null
+++ b/tests/data/py_310/pattern_matching_extras.py
@@ -0,0 +1,119 @@
+import match
+
+match something:
+    case [a as b]:
+        print(b)
+    case [a as b, c, d, e as f]:
+        print(f)
+    case Point(a as b):
+        print(b)
+    case Point(int() as x, int() as y):
+        print(x, y)
+
+
+match = 1
+case: int = re.match(something)
+
+match re.match(case):
+    case type("match", match):
+        pass
+    case match:
+        pass
+
+
+def func(match: case, case: match) -> case:
+    match Something():
+        case func(match, case):
+            ...
+        case another:
+            ...
+
+
+match maybe, multiple:
+    case perhaps, 5:
+        pass
+    case perhaps, 6,:
+        pass
+
+
+match more := (than, one), indeed,:
+    case _, (5, 6):
+        pass
+    case [[5], (6)], [7],:
+        pass
+    case _:
+        pass
+
+
+match a, *b, c:
+    case [*_]:
+        assert "seq" == _
+    case {}:
+        assert "map" == b
+
+
+match match(
+    case,
+    match(
+        match, case, match, looooooooooooooooooooooooooooooooooooong, match, case, match
+    ),
+    case,
+):
+    case case(
+        match=case,
+        case=re.match(
+            loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong
+        ),
+    ):
+        pass
+
+    case [a as match]:
+        pass
+
+    case case:
+        pass
+
+
+match match:
+    case case:
+        pass
+
+
+match a, *b(), c:
+    case d, *f, g:
+        pass
+
+
+match something:
+    case {
+        "key": key as key_1,
+        "password": PASS.ONE | PASS.TWO | PASS.THREE as password,
+    }:
+        pass
+    case {"maybe": something(complicated as this) as that}:
+        pass
+
+
+match something:
+    case 1 as a:
+        pass
+
+    case 2 as b, 3 as c:
+        pass
+
+    case 4 as d, (5 as e), (6 | 7 as g), *h:
+        pass
+
+
+match bar1:
+    case Foo(aa=Callable() as aa, bb=int()):
+        print(bar1.aa, bar1.bb)
+    case _:
+        print("no match", "\n")
+
+
+match bar1:
+    case Foo(
+        normal=x, perhaps=[list, {an: d, dict: 1.0}] as y, otherwise=something, q=t as u
+    ):
+        pass
diff --git a/tests/data/py_310/pattern_matching_generic.py b/tests/data/py_310/pattern_matching_generic.py
new file mode 100644
index 0000000..00a0e4a
--- /dev/null
+++ b/tests/data/py_310/pattern_matching_generic.py
@@ -0,0 +1,107 @@
+re.match()
+match = a
+with match() as match:
+    match = f"{match}"
+
+re.match()
+match = a
+with match() as match:
+    match = f"{match}"
+
+
+def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
+    if not target_versions:
+        # No target_version specified, so try all grammars.
+        return [
+            # Python 3.7+
+            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
+            # Python 3.0-3.6
+            pygram.python_grammar_no_print_statement_no_exec_statement,
+            # Python 2.7 with future print_function import
+            pygram.python_grammar_no_print_statement,
+            # Python 2.7
+            pygram.python_grammar,
+        ]
+
+    match match:
+        case case:
+            match match:
+                case case:
+                    pass
+
+    if all(version.is_python2() for version in target_versions):
+        # Python 2-only code, so try Python 2 grammars.
+        return [
+            # Python 2.7 with future print_function import
+            pygram.python_grammar_no_print_statement,
+            # Python 2.7
+            pygram.python_grammar,
+        ]
+
+    re.match()
+    match = a
+    with match() as match:
+        match = f"{match}"
+
+    def test_patma_139(self):
+        x = False
+        match x:
+            case bool(z):
+                y = 0
+        self.assertIs(x, False)
+        self.assertEqual(y, 0)
+        self.assertIs(z, x)
+
+    # Python 3-compatible code, so only try Python 3 grammar.
+    grammars = []
+    if supports_feature(target_versions, Feature.PATTERN_MATCHING):
+        # Python 3.10+
+        grammars.append(pygram.python_grammar_soft_keywords)
+    # If we have to parse both, try to parse async as a keyword first
+    if not supports_feature(
+        target_versions, Feature.ASYNC_IDENTIFIERS
+    ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):
+        # Python 3.7-3.9
+        grammars.append(
+            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords
+        )
+    if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
+        # Python 3.0-3.6
+        grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
+
+    def test_patma_155(self):
+        x = 0
+        y = None
+        match x:
+            case 1e1000:
+                y = 0
+        self.assertEqual(x, 0)
+        self.assertIs(y, None)
+
+    x = range(3)
+    match x:
+        case [y, case as x, z]:
+            w = 0
+
+    # At least one of the above branches must have been taken, because every Python
+    # version has exactly one of the two 'ASYNC_*' flags
+    return grammars
+
+
+def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
+    """Given a string with source, return the lib2to3 Node."""
+    if not src_txt.endswith("\n"):
+        src_txt += "\n"
+
+    grammars = get_grammars(set(target_versions))
+
+
+re.match()
+match = a
+with match() as match:
+    match = f"{match}"
+
+re.match()
+match = a
+with match() as match:
+    match = f"{match}"
diff --git a/tests/data/py_310/pattern_matching_simple.py b/tests/data/py_310/pattern_matching_simple.py
new file mode 100644
index 0000000..5ed6241
--- /dev/null
+++ b/tests/data/py_310/pattern_matching_simple.py
@@ -0,0 +1,92 @@
+# Cases sampled from PEP 636 examples
+
+match command.split():
+    case [action, obj]:
+        ...  # interpret action, obj
+
+match command.split():
+    case [action]:
+        ...  # interpret single-verb action
+    case [action, obj]:
+        ...  # interpret action, obj
+
+match command.split():
+    case ["quit"]:
+        print("Goodbye!")
+        quit_game()
+    case ["look"]:
+        current_room.describe()
+    case ["get", obj]:
+        character.get(obj, current_room)
+    case ["go", direction]:
+        current_room = current_room.neighbor(direction)
+    # The rest of your commands go here
+
+match command.split():
+    case ["drop", *objects]:
+        for obj in objects:
+            character.drop(obj, current_room)
+    # The rest of your commands go here
+
+match command.split():
+    case ["quit"]:
+        pass
+    case ["go", direction]:
+        print("Going:", direction)
+    case ["drop", *objects]:
+        print("Dropping: ", *objects)
+    case _:
+        print(f"Sorry, I couldn't understand {command!r}")
+
+match command.split():
+    case ["north"] | ["go", "north"]:
+        current_room = current_room.neighbor("north")
+    case ["get", obj] | ["pick", "up", obj] | ["pick", obj, "up"]:
+        ...  # Code for picking up the given object
+
+match command.split():
+    case ["go", ("north" | "south" | "east" | "west")]:
+        current_room = current_room.neighbor(...)
+        # how do I know which direction to go?
+
+match command.split():
+    case ["go", ("north" | "south" | "east" | "west") as direction]:
+        current_room = current_room.neighbor(direction)
+
+match command.split():
+    case ["go", direction] if direction in current_room.exits:
+        current_room = current_room.neighbor(direction)
+    case ["go", _]:
+        print("Sorry, you can't go that way")
+
+match event.get():
+    case Click(position=(x, y)):
+        handle_click_at(x, y)
+    case KeyPress(key_name="Q") | Quit():
+        game.quit()
+    case KeyPress(key_name="up arrow"):
+        game.go_north()
+    case KeyPress():
+        pass  # Ignore other keystrokes
+    case other_event:
+        raise ValueError(f"Unrecognized event: {other_event}")
+
+match event.get():
+    case Click((x, y), button=Button.LEFT):  # This is a left click
+        handle_click_at(x, y)
+    case Click():
+        pass  # ignore other clicks
+
+
+def where_is(point):
+    match point:
+        case Point(x=0, y=0):
+            print("Origin")
+        case Point(x=0, y=y):
+            print(f"Y={y}")
+        case Point(x=x, y=0):
+            print(f"X={x}")
+        case Point():
+            print("Somewhere else")
+        case _:
+            print("Not a point")
diff --git a/tests/data/py_310/pattern_matching_style.py b/tests/data/py_310/pattern_matching_style.py
new file mode 100644
index 0000000..8e18ce2
--- /dev/null
+++ b/tests/data/py_310/pattern_matching_style.py
@@ -0,0 +1,91 @@
+match something:
+    case b(): print(1+1)
+    case c(
+        very_complex=True,
+        perhaps_even_loooooooooooooooooooooooooooooooooooooong=- 1
+    ): print(1)
+    case c(
+        very_complex=True,
+        perhaps_even_loooooooooooooooooooooooooooooooooooooong=-1,
+    ): print(2)
+    case a: pass
+
+match(
+    arg # comment
+)
+
+match(
+)
+
+match(
+
+
+)
+
+case(
+    arg # comment
+)
+
+case(
+)
+
+case(
+
+
+)
+
+
+re.match(
+    something # fast
+)
+re.match(
+
+
+
+)
+match match(
+
+
+):
+    case case(
+        arg, # comment
+    ):
+        pass
+
+# output
+
+match something:
+    case b():
+        print(1 + 1)
+    case c(
+        very_complex=True, perhaps_even_loooooooooooooooooooooooooooooooooooooong=-1
+    ):
+        print(1)
+    case c(
+        very_complex=True,
+        perhaps_even_loooooooooooooooooooooooooooooooooooooong=-1,
+    ):
+        print(2)
+    case a:
+        pass
+
+match(arg)  # comment
+
+match()
+
+match()
+
+case(arg)  # comment
+
+case()
+
+case()
+
+
+re.match(something)  # fast
+re.match()
+match match():
+    case case(
+        arg,  # comment
+    ):
+        pass
diff --git a/tests/data/py_310/pep_572_py310.py b/tests/data/py_310/pep_572_py310.py
new file mode 100644
index 0000000..2aef589
--- /dev/null
+++ b/tests/data/py_310/pep_572_py310.py
@@ -0,0 +1,4 @@
+# Unparenthesized walruses are now allowed in indices since Python 3.10.
+x[a:=0] +x[a:=0, b:=1] +x[5, b:=0] diff --git a/tests/data/py_310/starred_for_target.py b/tests/data/py_310/starred_for_target.py new file mode 100644 index 0000000..8fc8e05 --- /dev/null +++ b/tests/data/py_310/starred_for_target.py @@ -0,0 +1,27 @@ +for x in *a, *b: + print(x) + +for x in a, b, *c: + print(x) + +for x in *a, b, c: + print(x) + +for x in *a, b, *c: + print(x) + +async for x in *a, *b: + print(x) + +async for x in *a, b, *c: + print(x) + +async for x in a, b, *c: + print(x) + +async for x in ( + *loooooooooooooooooooooong, + very, + *loooooooooooooooooooooooooooooooooooooooooooooooong, +): + print(x) diff --git a/tests/data/py_311/pep_646.py b/tests/data/py_311/pep_646.py new file mode 100644 index 0000000..e843ecf --- /dev/null +++ b/tests/data/py_311/pep_646.py @@ -0,0 +1,194 @@ +A[*b] +A[*b] = 1 +A +del A[*b] +A +A[*b, *b] +A[*b, *b] = 1 +A +del A[*b, *b] +A +A[b, *b] +A[b, *b] = 1 +A +del A[b, *b] +A +A[*b, b] +A[*b, b] = 1 +A +del A[*b, b] +A +A[b, b, *b] +A[b, b, *b] = 1 +A +del A[b, b, *b] +A +A[*b, b, b] +A[*b, b, b] = 1 +A +del A[*b, b, b] +A +A[b, *b, b] +A[b, *b, b] = 1 +A +del A[b, *b, b] +A +A[b, b, *b, b] +A[b, b, *b, b] = 1 +A +del A[b, b, *b, b] +A +A[b, *b, b, b] +A[b, *b, b, b] = 1 +A +del A[b, *b, b, b] +A +A[A[b, *b, b]] +A[A[b, *b, b]] = 1 +A +del A[A[b, *b, b]] +A +A[*A[b, *b, b]] +A[*A[b, *b, b]] = 1 +A +del A[*A[b, *b, b]] +A +A[b, ...] +A[b, ...] = 1 +A +del A[b, ...] +A +A[*A[b, ...]] +A[*A[b, ...]] = 1 +A +del A[*A[b, ...]] +A +l = [1, 2, 3] +A[*l] +A[*l] = 1 +A +del A[*l] +A +A[*l, 4] +A[*l, 4] = 1 +A +del A[*l, 4] +A +A[0, *l] +A[0, *l] = 1 +A +del A[0, *l] +A +A[1:2, *l] +A[1:2, *l] = 1 +A +del A[1:2, *l] +A +repr(A[1:2, *l]) == repr(A[1:2, 1, 2, 3]) +t = (1, 2, 3) +A[*t] +A[*t] = 1 +A +del A[*t] +A +A[*t, 4] +A[*t, 4] = 1 +A +del A[*t, 4] +A +A[0, *t] +A[0, *t] = 1 +A +del A[0, *t] +A +A[1:2, *t] +A[1:2, *t] = 1 +A +del A[1:2, *t] +A +repr(A[1:2, *t]) == repr(A[1:2, 1, 2, 3]) + + +def returns_list(): + return [1, 2, 3] + + +A[returns_list()] +A[returns_list()] = 1 +A +del A[returns_list()] +A +A[returns_list(), 4] +A[returns_list(), 4] = 1 +A +del A[returns_list(), 4] +A +A[*returns_list()] +A[*returns_list()] = 1 +A +del A[*returns_list()] +A +A[*returns_list(), 4] +A[*returns_list(), 4] = 1 +A +del A[*returns_list(), 4] +A +A[0, *returns_list()] +A[0, *returns_list()] = 1 +A +del A[0, *returns_list()] +A +A[*returns_list(), *returns_list()] +A[*returns_list(), *returns_list()] = 1 +A +del A[*returns_list(), *returns_list()] +A +A[1:2, *b] +A[*b, 1:2] +A[1:2, *b, 1:2] +A[*b, 1:2, *b] +A[1:, *b] +A[*b, 1:] +A[1:, *b, 1:] +A[*b, 1:, *b] +A[:1, *b] +A[*b, :1] +A[:1, *b, :1] +A[*b, :1, *b] +A[:, *b] +A[*b, :] +A[:, *b, :] +A[*b, :, *b] +A[a * b()] +A[a * b(), *c, *d(), e * f(g * h)] +A[a * b(), :] +A[a * b(), *c, *d(), e * f(g * h) :] +A[[b] * len(c), :] + + +def f1(*args: *b): + pass + + +f1.__annotations__ + + +def f2(*args: *b, arg1): + pass + + +f2.__annotations__ + + +def f3(*args: *b, arg1: int): + pass + + +f3.__annotations__ + + +def f4(*args: *b, arg1: int = 2): + pass + + +f4.__annotations__ diff --git a/tests/data/py_311/pep_654.py b/tests/data/py_311/pep_654.py new file mode 100644 index 0000000..387c081 --- /dev/null +++ b/tests/data/py_311/pep_654.py @@ -0,0 +1,53 @@ +try: + raise OSError("blah") +except* ExceptionGroup as e: + pass + + +try: + async with trio.open_nursery() as nursery: + # Make two concurrent calls to child() + nursery.start_soon(child) + nursery.start_soon(child) +except* ValueError: + pass + +try: + try: + 
raise ValueError(42) + except: + try: + raise TypeError(int) + except* Exception: + pass + 1 / 0 +except Exception as e: + exc = e + +try: + try: + raise FalsyEG("eg", [TypeError(1), ValueError(2)]) + except* TypeError as e: + tes = e + raise + except* ValueError as e: + ves = e + pass +except Exception as e: + exc = e + +try: + try: + raise orig + except* (TypeError, ValueError) as e: + raise SyntaxError(3) from e +except BaseException as e: + exc = e + +try: + try: + raise orig + except* OSError as e: + raise TypeError(3) from e +except ExceptionGroup as e: + exc = e diff --git a/tests/data/py_311/pep_654_style.py b/tests/data/py_311/pep_654_style.py new file mode 100644 index 0000000..568e5e3 --- /dev/null +++ b/tests/data/py_311/pep_654_style.py @@ -0,0 +1,111 @@ +try: + raise OSError("blah") +except * ExceptionGroup as e: + pass + + +try: + async with trio.open_nursery() as nursery: + # Make two concurrent calls to child() + nursery.start_soon(child) + nursery.start_soon(child) +except *ValueError: + pass + +try: + try: + raise ValueError(42) + except: + try: + raise TypeError(int) + except *(Exception): + pass + 1 / 0 +except Exception as e: + exc = e + +try: + try: + raise FalsyEG("eg", [TypeError(1), ValueError(2)]) + except \ + *TypeError as e: + tes = e + raise + except * ValueError as e: + ves = e + pass +except Exception as e: + exc = e + +try: + try: + raise orig + except *(TypeError, ValueError, *OTHER_EXCEPTIONS) as e: + raise SyntaxError(3) from e +except BaseException as e: + exc = e + +try: + try: + raise orig + except\ + * OSError as e: + raise TypeError(3) from e +except ExceptionGroup as e: + exc = e + +# output + +try: + raise OSError("blah") +except* ExceptionGroup as e: + pass + + +try: + async with trio.open_nursery() as nursery: + # Make two concurrent calls to child() + nursery.start_soon(child) + nursery.start_soon(child) +except* ValueError: + pass + +try: + try: + raise ValueError(42) + except: + try: + raise TypeError(int) + except* (Exception): + pass + 1 / 0 +except Exception as e: + exc = e + +try: + try: + raise FalsyEG("eg", [TypeError(1), ValueError(2)]) + except* TypeError as e: + tes = e + raise + except* ValueError as e: + ves = e + pass +except Exception as e: + exc = e + +try: + try: + raise orig + except* (TypeError, ValueError, *OTHER_EXCEPTIONS) as e: + raise SyntaxError(3) from e +except BaseException as e: + exc = e + +try: + try: + raise orig + except* OSError as e: + raise TypeError(3) from e +except ExceptionGroup as e: + exc = e diff --git a/tests/data/numeric_literals.py b/tests/data/py_36/numeric_literals.py similarity index 100% rename from tests/data/numeric_literals.py rename to tests/data/py_36/numeric_literals.py diff --git a/tests/data/numeric_literals_skip_underscores.py b/tests/data/py_36/numeric_literals_skip_underscores.py similarity index 100% rename from tests/data/numeric_literals_skip_underscores.py rename to tests/data/py_36/numeric_literals_skip_underscores.py diff --git a/tests/data/python37.py b/tests/data/py_37/python37.py similarity index 100% rename from tests/data/python37.py rename to tests/data/py_37/python37.py diff --git a/tests/data/pep_570.py b/tests/data/py_38/pep_570.py similarity index 100% rename from tests/data/pep_570.py rename to tests/data/py_38/pep_570.py diff --git a/tests/data/pep_572.py b/tests/data/py_38/pep_572.py similarity index 97% rename from tests/data/pep_572.py rename to tests/data/py_38/pep_572.py index c6867f2..d41805f 100644 --- a/tests/data/pep_572.py +++ b/tests/data/py_38/pep_572.py 
@@ -4,7 +4,7 @@ pass if match := pattern.search(data): pass -[y := f(x), y ** 2, y ** 3] +[y := f(x), y**2, y**3] filtered_data = [y for x in data if (y := f(x)) is None] (y := f(x)) y0 = (y1 := f(x)) diff --git a/tests/data/pep_572_remove_parens.py b/tests/data/py_38/pep_572_remove_parens.py similarity index 100% rename from tests/data/pep_572_remove_parens.py rename to tests/data/py_38/pep_572_remove_parens.py diff --git a/tests/data/python38.py b/tests/data/py_38/python38.py similarity index 100% rename from tests/data/python38.py rename to tests/data/py_38/python38.py diff --git a/tests/data/py_39/pep_572_py39.py b/tests/data/py_39/pep_572_py39.py new file mode 100644 index 0000000..b8b081b --- /dev/null +++ b/tests/data/py_39/pep_572_py39.py @@ -0,0 +1,7 @@ +# Unparenthesized walruses are now allowed in set literals & set comprehensions +# since Python 3.9 +{x := 1, 2, 3} +{x4 := x**5 for x in range(7)} +# We better not remove the parentheses here (since it's a 3.10 feature) +x[(a := 1)] +x[(a := 1), (b := 3)] diff --git a/tests/data/python39.py b/tests/data/py_39/python39.py similarity index 100% rename from tests/data/python39.py rename to tests/data/py_39/python39.py diff --git a/tests/data/python2.py b/tests/data/python2.py deleted file mode 100644 index 4a22f46..0000000 --- a/tests/data/python2.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python2 - -import sys - -print >> sys.stderr , "Warning:" , -print >> sys.stderr , "this is a blast from the past." -print >> sys.stderr , "Look, a repr:", `sys` - - -def function((_globals, _locals)): - exec ur"print 'hi from exec!'" in _globals, _locals - - -function((globals(), locals())) - - -# output - - -#!/usr/bin/env python2 - -import sys - -print >>sys.stderr, "Warning:", -print >>sys.stderr, "this is a blast from the past." -print >>sys.stderr, "Look, a repr:", ` sys ` - - -def function((_globals, _locals)): - exec ur"print 'hi from exec!'" in _globals, _locals - - -function((globals(), locals())) diff --git a/tests/data/python2_print_function.py b/tests/data/python2_print_function.py deleted file mode 100755 index 81b8d8a..0000000 --- a/tests/data/python2_print_function.py +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env python2 -from __future__ import print_function - -print('hello') -print(u'hello') -print(a, file=sys.stderr) - -# output - - -#!/usr/bin/env python2 -from __future__ import print_function - -print("hello") -print(u"hello") -print(a, file=sys.stderr) diff --git a/tests/data/python2_unicode_literals.py b/tests/data/python2_unicode_literals.py deleted file mode 100644 index 2fe7039..0000000 --- a/tests/data/python2_unicode_literals.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python2 -from __future__ import unicode_literals as _unicode_literals -from __future__ import absolute_import -from __future__ import print_function as lol, with_function - -u'hello' -U"hello" -Ur"hello" - -# output - - -#!/usr/bin/env python2 -from __future__ import unicode_literals as _unicode_literals -from __future__ import absolute_import -from __future__ import print_function as lol, with_function - -"hello" -"hello" -r"hello" diff --git a/tests/data/simple_cases/attribute_access_on_number_literals.py b/tests/data/simple_cases/attribute_access_on_number_literals.py new file mode 100644 index 0000000..7c16bdf --- /dev/null +++ b/tests/data/simple_cases/attribute_access_on_number_literals.py @@ -0,0 +1,47 @@ +x = 123456789 .bit_count() +x = (123456).__abs__() +x = .1.is_integer() +x = 1. 
.imag +x = 1E+1.imag +x = 1E-1.real +x = 123456789.123456789.hex() +x = 123456789.123456789E123456789 .real +x = 123456789E123456789 .conjugate() +x = 123456789J.real +x = 123456789.123456789J.__add__(0b1011.bit_length()) +x = 0XB1ACC.conjugate() +x = 0B1011 .conjugate() +x = 0O777 .real +x = 0.000000006 .hex() +x = -100.0000J + +if 10 .real: + ... + +y = 100[no] +y = 100(no) + +# output + +x = (123456789).bit_count() +x = (123456).__abs__() +x = (0.1).is_integer() +x = (1.0).imag +x = (1e1).imag +x = (1e-1).real +x = (123456789.123456789).hex() +x = (123456789.123456789e123456789).real +x = (123456789e123456789).conjugate() +x = 123456789j.real +x = 123456789.123456789j.__add__(0b1011.bit_length()) +x = 0xB1ACC.conjugate() +x = 0b1011.conjugate() +x = 0o777.real +x = (0.000000006).hex() +x = -100.0000j + +if (10).real: + ... + +y = 100[no] +y = 100(no) diff --git a/tests/data/beginning_backslash.py b/tests/data/simple_cases/beginning_backslash.py similarity index 100% rename from tests/data/beginning_backslash.py rename to tests/data/simple_cases/beginning_backslash.py diff --git a/tests/data/bracketmatch.py b/tests/data/simple_cases/bracketmatch.py similarity index 100% rename from tests/data/bracketmatch.py rename to tests/data/simple_cases/bracketmatch.py diff --git a/tests/data/class_blank_parentheses.py b/tests/data/simple_cases/class_blank_parentheses.py similarity index 100% rename from tests/data/class_blank_parentheses.py rename to tests/data/simple_cases/class_blank_parentheses.py diff --git a/tests/data/class_methods_new_line.py b/tests/data/simple_cases/class_methods_new_line.py similarity index 100% rename from tests/data/class_methods_new_line.py rename to tests/data/simple_cases/class_methods_new_line.py diff --git a/tests/data/collections.py b/tests/data/simple_cases/collections.py similarity index 100% rename from tests/data/collections.py rename to tests/data/simple_cases/collections.py diff --git a/tests/data/comment_after_escaped_newline.py b/tests/data/simple_cases/comment_after_escaped_newline.py similarity index 100% rename from tests/data/comment_after_escaped_newline.py rename to tests/data/simple_cases/comment_after_escaped_newline.py diff --git a/tests/data/comments.py b/tests/data/simple_cases/comments.py similarity index 100% rename from tests/data/comments.py rename to tests/data/simple_cases/comments.py diff --git a/tests/data/comments2.py b/tests/data/simple_cases/comments2.py similarity index 98% rename from tests/data/comments2.py rename to tests/data/simple_cases/comments2.py index 221cb3f..4eea013 100644 --- a/tests/data/comments2.py +++ b/tests/data/simple_cases/comments2.py @@ -159,7 +159,7 @@ def _init_host(self, parsed) -> None: ####################### -instruction() +instruction()#comment with bad spacing # END COMMENTS # MORE END COMMENTS @@ -336,7 +336,7 @@ def _init_host(self, parsed) -> None: ####################### -instruction() +instruction() # comment with bad spacing # END COMMENTS # MORE END COMMENTS diff --git a/tests/data/comments3.py b/tests/data/simple_cases/comments3.py similarity index 100% rename from tests/data/comments3.py rename to tests/data/simple_cases/comments3.py diff --git a/tests/data/comments4.py b/tests/data/simple_cases/comments4.py similarity index 100% rename from tests/data/comments4.py rename to tests/data/simple_cases/comments4.py diff --git a/tests/data/comments5.py b/tests/data/simple_cases/comments5.py similarity index 100% rename from tests/data/comments5.py rename to tests/data/simple_cases/comments5.py diff 
--git a/tests/data/comments6.py b/tests/data/simple_cases/comments6.py similarity index 100% rename from tests/data/comments6.py rename to tests/data/simple_cases/comments6.py diff --git a/tests/data/comments_non_breaking_space.py b/tests/data/simple_cases/comments_non_breaking_space.py similarity index 100% rename from tests/data/comments_non_breaking_space.py rename to tests/data/simple_cases/comments_non_breaking_space.py diff --git a/tests/data/composition.py b/tests/data/simple_cases/composition.py similarity index 100% rename from tests/data/composition.py rename to tests/data/simple_cases/composition.py diff --git a/tests/data/composition_no_trailing_comma.py b/tests/data/simple_cases/composition_no_trailing_comma.py similarity index 100% rename from tests/data/composition_no_trailing_comma.py rename to tests/data/simple_cases/composition_no_trailing_comma.py diff --git a/tests/data/docstring.py b/tests/data/simple_cases/docstring.py similarity index 74% rename from tests/data/docstring.py rename to tests/data/simple_cases/docstring.py index e977619..f08bba5 100644 --- a/tests/data/docstring.py +++ b/tests/data/simple_cases/docstring.py @@ -102,7 +102,7 @@ def and_this(): "hey yah"''' -def empty(): +def multiline_whitespace(): ''' @@ -111,11 +111,11 @@ def empty(): ''' -def oneline_empty(): +def oneline_whitespace(): ''' ''' -def oneline_nothing(): +def empty(): """""" @@ -188,6 +188,34 @@ def my_god_its_full_of_stars_2(): "I'm sorry Dave " +def docstring_almost_at_line_limit(): + """long docstring.................................................................""" + + +def docstring_almost_at_line_limit2(): + """long docstring................................................................. + + .................................................................................. + """ + + +def docstring_at_line_limit(): + """long docstring................................................................""" + + +def multiline_docstring_at_line_limit(): + """first line----------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def stable_quote_normalization_with_immediate_inner_single_quote(self): + '''' + + + ''' + + # output class MyClass: @@ -293,16 +321,16 @@ def and_this(): "hey yah"''' -def empty(): +def multiline_whitespace(): """ """ -def oneline_empty(): +def oneline_whitespace(): """ """ -def oneline_nothing(): - """ """ +def empty(): + """""" def single_quotes(): @@ -374,4 +402,32 @@ def my_god_its_full_of_stars_1(): # the space below is actually a \u2001, removed in output def my_god_its_full_of_stars_2(): - "I'm sorry Dave" \ No newline at end of file + "I'm sorry Dave" + + +def docstring_almost_at_line_limit(): + """long docstring.................................................................""" + + +def docstring_almost_at_line_limit2(): + """long docstring................................................................. + + .................................................................................. 
+ """ + + +def docstring_at_line_limit(): + """long docstring................................................................""" + + +def multiline_docstring_at_line_limit(): + """first line----------------------------------------------------------------------- + + second line----------------------------------------------------------------------""" + + +def stable_quote_normalization_with_immediate_inner_single_quote(self): + """' + + + """ diff --git a/tests/data/empty_lines.py b/tests/data/simple_cases/empty_lines.py similarity index 100% rename from tests/data/empty_lines.py rename to tests/data/simple_cases/empty_lines.py diff --git a/tests/data/expression.diff b/tests/data/simple_cases/expression.diff similarity index 91% rename from tests/data/expression.diff rename to tests/data/simple_cases/expression.diff index 721a07d..2eaaeb4 100644 --- a/tests/data/expression.diff +++ b/tests/data/simple_cases/expression.diff @@ -11,7 +11,17 @@ True False 1 -@@ -29,63 +29,96 @@ +@@ -21,99 +21,135 @@ + Name1 or (Name2 and Name3) or Name4 + Name1 or Name2 and Name3 or Name4 + v1 << 2 + 1 >> v2 + 1 % finished +-1 + v2 - v3 * 4 ^ 5 ** v6 / 7 // 8 +-((1 + v2) - (v3 * 4)) ^ (((5 ** v6) / 7) // 8) ++1 + v2 - v3 * 4 ^ 5**v6 / 7 // 8 ++((1 + v2) - (v3 * 4)) ^ (((5**v6) / 7) // 8) + not great ~great +value -1 @@ -19,7 +29,7 @@ (~int) and (not ((v1 ^ (123 + v2)) | True)) -+really ** -confusing ** ~operator ** -precedence -flags & ~ select.EPOLLIN and waiters.write_task is not None -++(really ** -(confusing ** ~(operator ** -precedence))) +++(really ** -(confusing ** ~(operator**-precedence))) +flags & ~select.EPOLLIN and waiters.write_task is not None lambda arg: None lambda a=True: a @@ -88,15 +98,19 @@ + *more, +] {i for i in (1, 2, 3)} - {(i ** 2) for i in (1, 2, 3)} +-{(i ** 2) for i in (1, 2, 3)} -{(i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))} -+{(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} - {((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} +-{((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} ++{(i**2) for i in (1, 2, 3)} ++{(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} ++{((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} [i for i in (1, 2, 3)] - [(i ** 2) for i in (1, 2, 3)] +-[(i ** 2) for i in (1, 2, 3)] -[(i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))] -+[(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] - [((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] +-[((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] ++[(i**2) for i in (1, 2, 3)] ++[(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] ++[((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] {i: 0 for i in (1, 2, 3)} -{i: j for i, j in ((1, 'a'), (2, 'b'), (3, 'c'))} +{i: j for i, j in ((1, "a"), (2, "b"), (3, "c"))} @@ -130,8 +144,11 @@ call(**self.screen_kwargs) call(b, **self.screen_kwargs) lukasz.langa.pl -@@ -94,26 +127,29 @@ - 1.0 .real + call.me(maybe) +-1 .real +-1.0 .real ++(1).real ++(1.0).real ....__class__ list[str] dict[str, int] @@ -181,10 +198,12 @@ SomeName (Good, Bad, Ugly) (i for i in (1, 2, 3)) - ((i ** 2) for i in (1, 2, 3)) +-((i ** 2) for i in (1, 2, 3)) -((i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))) -+((i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) - (((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) +-(((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) ++((i**2) for i in (1, 2, 3)) ++((i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) ++(((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) (*starred,) -{"id": "1","type": "type","started_at": 
now(),"ended_at": now() + timedelta(days=10),"priority": 1,"import_session_id": 1,**kwargs} +{ @@ -403,13 +422,13 @@ + return True +if ( + ~aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e -+ | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n ++ | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l**aaaa.m // aaaa.n +): + return True +if ( + ~aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e + | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h -+ ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n ++ ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l**aaaaaaaa.m // aaaaaaaa.n +): + return True +if ( @@ -419,7 +438,7 @@ + | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h + ^ aaaaaaaaaaaaaaaa.i + << aaaaaaaaaaaaaaaa.k -+ >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n ++ >> aaaaaaaaaaaaaaaa.l**aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n +): + return True +( diff --git a/tests/data/expression.py b/tests/data/simple_cases/expression.py similarity index 95% rename from tests/data/expression.py rename to tests/data/simple_cases/expression.py index d13450c..06096c5 100644 --- a/tests/data/expression.py +++ b/tests/data/simple_cases/expression.py @@ -282,15 +282,15 @@ async def f(): v1 << 2 1 >> v2 1 % finished -1 + v2 - v3 * 4 ^ 5 ** v6 / 7 // 8 -((1 + v2) - (v3 * 4)) ^ (((5 ** v6) / 7) // 8) +1 + v2 - v3 * 4 ^ 5**v6 / 7 // 8 +((1 + v2) - (v3 * 4)) ^ (((5**v6) / 7) // 8) not great ~great +value -1 ~int and not v1 ^ 123 + v2 | True (~int) and (not ((v1 ^ (123 + v2)) | True)) -+(really ** -(confusing ** ~(operator ** -precedence))) ++(really ** -(confusing ** ~(operator**-precedence))) flags & ~select.EPOLLIN and waiters.write_task is not None lambda arg: None lambda a=True: a @@ -347,13 +347,13 @@ async def f(): *more, ] {i for i in (1, 2, 3)} -{(i ** 2) for i in (1, 2, 3)} -{(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} -{((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} +{(i**2) for i in (1, 2, 3)} +{(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))} +{((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)} [i for i in (1, 2, 3)] -[(i ** 2) for i in (1, 2, 3)] -[(i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] -[((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] +[(i**2) for i in (1, 2, 3)] +[(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))] +[((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)] {i: 0 for i in (1, 2, 3)} {i: j for i, j in ((1, "a"), (2, "b"), (3, "c"))} {a: b * 2 for a, b in dictionary.items()} @@ -382,8 +382,8 @@ async def f(): call(b, **self.screen_kwargs) lukasz.langa.pl call.me(maybe) -1 .real -1.0 .real +(1).real +(1.0).real ....__class__ list[str] dict[str, int] @@ -441,9 +441,9 @@ async def f(): SomeName (Good, Bad, Ugly) (i for i in (1, 2, 3)) -((i ** 2) for i in (1, 2, 3)) -((i ** 2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) -(((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) +((i**2) for i in (1, 2, 3)) +((i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))) +(((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)) (*starred,) { "id": "1", @@ -588,13 +588,13 @@ async def f(): return True if ( ~aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e - | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n + | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l**aaaa.m // aaaa.n ): return True if ( ~aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h - ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n + ^ aaaaaaaa.i << 
aaaaaaaa.k >> aaaaaaaa.l**aaaaaaaa.m // aaaaaaaa.n ): return True if ( @@ -604,7 +604,7 @@ async def f(): | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h ^ aaaaaaaaaaaaaaaa.i << aaaaaaaaaaaaaaaa.k - >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n + >> aaaaaaaaaaaaaaaa.l**aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n ): return True ( diff --git a/tests/data/fmtonoff.py b/tests/data/simple_cases/fmtonoff.py similarity index 100% rename from tests/data/fmtonoff.py rename to tests/data/simple_cases/fmtonoff.py diff --git a/tests/data/fmtonoff2.py b/tests/data/simple_cases/fmtonoff2.py similarity index 100% rename from tests/data/fmtonoff2.py rename to tests/data/simple_cases/fmtonoff2.py diff --git a/tests/data/fmtonoff3.py b/tests/data/simple_cases/fmtonoff3.py similarity index 100% rename from tests/data/fmtonoff3.py rename to tests/data/simple_cases/fmtonoff3.py diff --git a/tests/data/fmtonoff4.py b/tests/data/simple_cases/fmtonoff4.py similarity index 100% rename from tests/data/fmtonoff4.py rename to tests/data/simple_cases/fmtonoff4.py diff --git a/tests/data/simple_cases/fmtonoff5.py b/tests/data/simple_cases/fmtonoff5.py new file mode 100644 index 0000000..71b1381 --- /dev/null +++ b/tests/data/simple_cases/fmtonoff5.py @@ -0,0 +1,158 @@ +# Regression test for https://github.com/psf/black/issues/3129. +setup( + entry_points={ + # fmt: off + "console_scripts": [ + "foo-bar" + "=foo.bar.:main", + # fmt: on + ] # Includes an formatted indentation. + }, +) + + +# Regression test for https://github.com/psf/black/issues/2015. +run( + # fmt: off + [ + "ls", + "-la", + ] + # fmt: on + + path, + check=True, +) + + +# Regression test for https://github.com/psf/black/issues/3026. +def test_func(): + # yapf: disable + if unformatted( args ): + return True + # yapf: enable + elif b: + return True + + return False + + +# Regression test for https://github.com/psf/black/issues/2567. +if True: + # fmt: off + for _ in range( 1 ): + # fmt: on + print ( "This won't be formatted" ) + print ( "This won't be formatted either" ) +else: + print ( "This will be formatted" ) + + +# Regression test for https://github.com/psf/black/issues/3184. +class A: + async def call(param): + if param: + # fmt: off + if param[0:4] in ( + "ABCD", "EFGH" + ) : + # fmt: on + print ( "This won't be formatted" ) + + elif param[0:4] in ("ZZZZ",): + print ( "This won't be formatted either" ) + + print ( "This will be formatted" ) + + +# Regression test for https://github.com/psf/black/issues/2985 +class Named(t.Protocol): + # fmt: off + @property + def this_wont_be_formatted ( self ) -> str: ... + +class Factory(t.Protocol): + def this_will_be_formatted ( self, **kwargs ) -> Named: ... + # fmt: on + + +# output + + +# Regression test for https://github.com/psf/black/issues/3129. +setup( + entry_points={ + # fmt: off + "console_scripts": [ + "foo-bar" + "=foo.bar.:main", + # fmt: on + ] # Includes an formatted indentation. + }, +) + + +# Regression test for https://github.com/psf/black/issues/2015. +run( + # fmt: off + [ + "ls", + "-la", + ] + # fmt: on + + path, + check=True, +) + + +# Regression test for https://github.com/psf/black/issues/3026. +def test_func(): + # yapf: disable + if unformatted( args ): + return True + # yapf: enable + elif b: + return True + + return False + + +# Regression test for https://github.com/psf/black/issues/2567. 
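As the cases above show, `# fmt: off` suspends formatting until the matching `# fmt: on`, and everything between the two markers is emitted verbatim. A minimal sketch of the contract, using hypothetical code:

    # fmt: off
    matrix = [
        1, 0,
        0, 1,
    ]
    # fmt: on
    result = transform(matrix)  # formatted normally again

The regression case below checks the harder variant where the markers sit inside an `if`/`for` construct and the suppression has to respect block boundaries.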
+if True: + # fmt: off + for _ in range( 1 ): + # fmt: on + print ( "This won't be formatted" ) + print ( "This won't be formatted either" ) +else: + print("This will be formatted") + + +# Regression test for https://github.com/psf/black/issues/3184. +class A: + async def call(param): + if param: + # fmt: off + if param[0:4] in ( + "ABCD", "EFGH" + ) : + # fmt: on + print ( "This won't be formatted" ) + + elif param[0:4] in ("ZZZZ",): + print ( "This won't be formatted either" ) + + print("This will be formatted") + + +# Regression test for https://github.com/psf/black/issues/2985 +class Named(t.Protocol): + # fmt: off + @property + def this_wont_be_formatted ( self ) -> str: ... + + +class Factory(t.Protocol): + def this_will_be_formatted(self, **kwargs) -> Named: + ... + + # fmt: on diff --git a/tests/data/fmtskip.py b/tests/data/simple_cases/fmtskip.py similarity index 100% rename from tests/data/fmtskip.py rename to tests/data/simple_cases/fmtskip.py diff --git a/tests/data/fmtskip2.py b/tests/data/simple_cases/fmtskip2.py similarity index 100% rename from tests/data/fmtskip2.py rename to tests/data/simple_cases/fmtskip2.py diff --git a/tests/data/fmtskip3.py b/tests/data/simple_cases/fmtskip3.py similarity index 100% rename from tests/data/fmtskip3.py rename to tests/data/simple_cases/fmtskip3.py diff --git a/tests/data/fmtskip4.py b/tests/data/simple_cases/fmtskip4.py similarity index 100% rename from tests/data/fmtskip4.py rename to tests/data/simple_cases/fmtskip4.py diff --git a/tests/data/fmtskip5.py b/tests/data/simple_cases/fmtskip5.py similarity index 100% rename from tests/data/fmtskip5.py rename to tests/data/simple_cases/fmtskip5.py diff --git a/tests/data/simple_cases/fmtskip6.py b/tests/data/simple_cases/fmtskip6.py new file mode 100644 index 0000000..0a779fc --- /dev/null +++ b/tests/data/simple_cases/fmtskip6.py @@ -0,0 +1,13 @@ +class A: + def f(self): + for line in range(10): + if True: + pass # fmt: skip + +# output + +class A: + def f(self): + for line in range(10): + if True: + pass # fmt: skip diff --git a/tests/data/simple_cases/fmtskip7.py b/tests/data/simple_cases/fmtskip7.py new file mode 100644 index 0000000..15ac0ad --- /dev/null +++ b/tests/data/simple_cases/fmtskip7.py @@ -0,0 +1,11 @@ +a = "this is some code" +b = 5 #fmt:skip +c = 9 #fmt: skip +d = "thisisasuperlongstringthisisasuperlongstringthisisasuperlongstringthisisasuperlongstring" #fmt:skip + +# output + +a = "this is some code" +b = 5 # fmt:skip +c = 9 # fmt: skip +d = "thisisasuperlongstringthisisasuperlongstringthisisasuperlongstringthisisasuperlongstring" # fmt:skip diff --git a/tests/data/simple_cases/fmtskip8.py b/tests/data/simple_cases/fmtskip8.py new file mode 100644 index 0000000..38e9c2a --- /dev/null +++ b/tests/data/simple_cases/fmtskip8.py @@ -0,0 +1,62 @@ +# Make sure a leading comment is not removed. +def some_func( unformatted, args ): # fmt: skip + print("I am some_func") + return 0 + # Make sure this comment is not removed. + + +# Make sure a leading comment is not removed. +async def some_async_func( unformatted, args): # fmt: skip + print("I am some_async_func") + await asyncio.sleep(1) + + +# Make sure a leading comment is not removed. 
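`# fmt: skip` is the single-line counterpart to the `# fmt: off` / `# fmt: on` pair: the trailing comment tells Black to leave exactly that logical line as written while the surrounding code is still formatted. A short sketch with hypothetical values:

    aligned = {  'a': 1,    'bb': 22  }  # fmt: skip
    normal = {"a": 1, "bb": 22}  # reformatted as usual

The cases below verify the same marker on compound-statement headers such as class and function definitions.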
+class SomeClass( Unformatted, SuperClasses ): # fmt: skip + def some_method( self, unformatted, args ): # fmt: skip + print("I am some_method") + return 0 + + async def some_async_method( self, unformatted, args ): # fmt: skip + print("I am some_async_method") + await asyncio.sleep(1) + + +# Make sure a leading comment is not removed. +if unformatted_call( args ): # fmt: skip + print("First branch") + # Make sure this is not removed. +elif another_unformatted_call( args ): # fmt: skip + print("Second branch") +else : # fmt: skip + print("Last branch") + + +while some_condition( unformatted, args ): # fmt: skip + print("Do something") + + +for i in some_iter( unformatted, args ): # fmt: skip + print("Do something") + + +async def test_async_for(): + async for i in some_async_iter( unformatted, args ): # fmt: skip + print("Do something") + + +try : # fmt: skip + some_call() +except UnformattedError as ex: # fmt: skip + handle_exception() +finally : # fmt: skip + finally_call() + + +with give_me_context( unformatted, args ): # fmt: skip + print("Do something") + + +async def test_async_with(): + async with give_me_async_context( unformatted, args ): # fmt: skip + print("Do something") diff --git a/tests/data/fstring.py b/tests/data/simple_cases/fstring.py similarity index 100% rename from tests/data/fstring.py rename to tests/data/simple_cases/fstring.py diff --git a/tests/data/function.py b/tests/data/simple_cases/function.py similarity index 100% rename from tests/data/function.py rename to tests/data/simple_cases/function.py diff --git a/tests/data/function2.py b/tests/data/simple_cases/function2.py similarity index 52% rename from tests/data/function2.py rename to tests/data/simple_cases/function2.py index cfc259e..5bb36c2 100644 --- a/tests/data/function2.py +++ b/tests/data/simple_cases/function2.py @@ -23,6 +23,35 @@ def inner(): pass print("Inner defs should breathe a little.") + +if os.name == "posix": + import termios + def i_should_be_followed_by_only_one_newline(): + pass +elif os.name == "nt": + try: + import msvcrt + def i_should_be_followed_by_only_one_newline(): + pass + + except ImportError: + + def i_should_be_followed_by_only_one_newline(): + pass + +elif False: + + class IHopeYouAreHavingALovelyDay: + def __call__(self): + print("i_should_be_followed_by_only_one_newline") +else: + + def foo(): + pass + +with hmm_but_this_should_get_two_preceding_newlines(): + pass + # output def f( @@ -56,3 +85,37 @@ def inner(): pass print("Inner defs should breathe a little.") + + +if os.name == "posix": + import termios + + def i_should_be_followed_by_only_one_newline(): + pass + +elif os.name == "nt": + try: + import msvcrt + + def i_should_be_followed_by_only_one_newline(): + pass + + except ImportError: + + def i_should_be_followed_by_only_one_newline(): + pass + +elif False: + + class IHopeYouAreHavingALovelyDay: + def __call__(self): + print("i_should_be_followed_by_only_one_newline") + +else: + + def foo(): + pass + + +with hmm_but_this_should_get_two_preceding_newlines(): + pass diff --git a/tests/data/simple_cases/function_trailing_comma.py b/tests/data/simple_cases/function_trailing_comma.py new file mode 100644 index 0000000..429eb0e --- /dev/null +++ b/tests/data/simple_cases/function_trailing_comma.py @@ -0,0 +1,153 @@ +def f(a,): + d = {'key': 'value',} + tup = (1,) + +def f2(a,b,): + d = {'key': 'value', 'key2': 'value2',} + tup = (1,2,) + +def f(a:int=1,): + call(arg={'explode': 'this',}) + call2(arg=[1,2,3],) + x = { + "a": 1, + "b": 2, + }["a"] + if a == {"a": 1,"b": 
2,"c": 3,"d": 4,"e": 5,"f": 6,"g": 7,"h": 8,}["a"]: + pass + +def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[ + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +]: + json = {"k": {"k2": {"k3": [1,]}}} + + + +# The type annotation shouldn't get a trailing comma since that would change its type. +# Relevant bug report: https://github.com/psf/black/issues/2381. +def some_function_with_a_really_long_name() -> ( + returning_a_deeply_nested_import_of_a_type_i_suppose +): + pass + + +def some_method_with_a_really_long_name(very_long_parameter_so_yeah: str, another_long_parameter: int) -> ( + another_case_of_returning_a_deeply_nested_import_of_a_type_i_suppose_cause_why_not +): + pass + + +def func() -> ( + also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black(this_shouldn_t_get_a_trailing_comma_too) +): + pass + + +def func() -> ((also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black( + this_shouldn_t_get_a_trailing_comma_too + )) +): + pass + +# output + +def f( + a, +): + d = { + "key": "value", + } + tup = (1,) + + +def f2( + a, + b, +): + d = { + "key": "value", + "key2": "value2", + } + tup = ( + 1, + 2, + ) + + +def f( + a: int = 1, +): + call( + arg={ + "explode": "this", + } + ) + call2( + arg=[1, 2, 3], + ) + x = { + "a": 1, + "b": 2, + }["a"] + if ( + a + == { + "a": 1, + "b": 2, + "c": 3, + "d": 4, + "e": 5, + "f": 6, + "g": 7, + "h": 8, + }["a"] + ): + pass + + +def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[ + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +]: + json = { + "k": { + "k2": { + "k3": [ + 1, + ] + } + } + } + + +# The type annotation shouldn't get a trailing comma since that would change its type. +# Relevant bug report: https://github.com/psf/black/issues/2381. 
+def some_function_with_a_really_long_name() -> ( + returning_a_deeply_nested_import_of_a_type_i_suppose +): + pass + + +def some_method_with_a_really_long_name( + very_long_parameter_so_yeah: str, another_long_parameter: int +) -> ( + another_case_of_returning_a_deeply_nested_import_of_a_type_i_suppose_cause_why_not +): + pass + + +def func() -> ( + also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black( + this_shouldn_t_get_a_trailing_comma_too + ) +): + pass + + +def func() -> ( + ( + also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black( + this_shouldn_t_get_a_trailing_comma_too + ) + ) +): + pass diff --git a/tests/data/import_spacing.py b/tests/data/simple_cases/import_spacing.py similarity index 100% rename from tests/data/import_spacing.py rename to tests/data/simple_cases/import_spacing.py diff --git a/tests/data/simple_cases/power_op_spacing.py b/tests/data/simple_cases/power_op_spacing.py new file mode 100644 index 0000000..c95fa78 --- /dev/null +++ b/tests/data/simple_cases/power_op_spacing.py @@ -0,0 +1,131 @@ +def function(**kwargs): + t = a**2 + b**3 + return t ** 2 + + +def function_replace_spaces(**kwargs): + t = a **2 + b** 3 + c ** 4 + + +def function_dont_replace_spaces(): + {**a, **b, **c} + + +a = 5**~4 +b = 5 ** f() +c = -(5**2) +d = 5 ** f["hi"] +e = lazy(lambda **kwargs: 5) +f = f() ** 5 +g = a.b**c.d +h = 5 ** funcs.f() +i = funcs.f() ** 5 +j = super().name ** 5 +k = [(2**idx, value) for idx, value in pairs] +l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001) +m = [([2**63], [1, 2**63])] +n = count <= 10**5 +o = settings(max_examples=10**6) +p = {(k, k**2): v**2 for k, v in pairs} +q = [10**i for i in range(6)] +r = x**y + +a = 5.0**~4.0 +b = 5.0 ** f() +c = -(5.0**2.0) +d = 5.0 ** f["hi"] +e = lazy(lambda **kwargs: 5) +f = f() ** 5.0 +g = a.b**c.d +h = 5.0 ** funcs.f() +i = funcs.f() ** 5.0 +j = super().name ** 5.0 +k = [(2.0**idx, value) for idx, value in pairs] +l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001) +m = [([2.0**63.0], [1.0, 2**63.0])] +n = count <= 10**5.0 +o = settings(max_examples=10**6.0) +p = {(k, k**2): v**2.0 for k, v in pairs} +q = [10.5**i for i in range(6)] + + +# WE SHOULD DEFINITELY NOT EAT THESE COMMENTS (https://github.com/psf/black/issues/2873) +if hasattr(view, "sum_of_weights"): + return np.divide( # type: ignore[no-any-return] + view.variance, # type: ignore[union-attr] + view.sum_of_weights, # type: ignore[union-attr] + out=np.full(view.sum_of_weights.shape, np.nan), # type: ignore[union-attr] + where=view.sum_of_weights**2 > view.sum_of_weights_squared, # type: ignore[union-attr] + ) + +return np.divide( + where=view.sum_of_weights_of_weight_long**2 > view.sum_of_weights_squared, # type: ignore +) + + +# output + + +def function(**kwargs): + t = a**2 + b**3 + return t**2 + + +def function_replace_spaces(**kwargs): + t = a**2 + b**3 + c**4 + + +def function_dont_replace_spaces(): + {**a, **b, **c} + + +a = 5**~4 +b = 5 ** f() +c = -(5**2) +d = 5 ** f["hi"] +e = lazy(lambda **kwargs: 5) +f = f() ** 5 +g = a.b**c.d +h = 5 ** funcs.f() +i = funcs.f() ** 5 +j = super().name ** 5 +k = [(2**idx, value) for idx, value in pairs] +l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001) +m = [([2**63], [1, 2**63])] +n = count <= 10**5 +o = settings(max_examples=10**6) +p = {(k, k**2): v**2 for k, v in pairs} +q = [10**i for i in range(6)] +r = x**y + +a = 5.0**~4.0 +b = 5.0 ** f() +c = -(5.0**2.0) +d = 5.0 ** f["hi"] +e = lazy(lambda **kwargs: 5) +f = f() ** 5.0 +g = 
a.b**c.d +h = 5.0 ** funcs.f() +i = funcs.f() ** 5.0 +j = super().name ** 5.0 +k = [(2.0**idx, value) for idx, value in pairs] +l = mod.weights_[0] == pytest.approx(0.95**100, abs=0.001) +m = [([2.0**63.0], [1.0, 2**63.0])] +n = count <= 10**5.0 +o = settings(max_examples=10**6.0) +p = {(k, k**2): v**2.0 for k, v in pairs} +q = [10.5**i for i in range(6)] + + +# WE SHOULD DEFINITELY NOT EAT THESE COMMENTS (https://github.com/psf/black/issues/2873) +if hasattr(view, "sum_of_weights"): + return np.divide( # type: ignore[no-any-return] + view.variance, # type: ignore[union-attr] + view.sum_of_weights, # type: ignore[union-attr] + out=np.full(view.sum_of_weights.shape, np.nan), # type: ignore[union-attr] + where=view.sum_of_weights**2 > view.sum_of_weights_squared, # type: ignore[union-attr] + ) + +return np.divide( + where=view.sum_of_weights_of_weight_long**2 > view.sum_of_weights_squared, # type: ignore +) diff --git a/tests/data/remove_parens.py b/tests/data/simple_cases/remove_parens.py similarity index 100% rename from tests/data/remove_parens.py rename to tests/data/simple_cases/remove_parens.py diff --git a/tests/data/slices.py b/tests/data/simple_cases/slices.py similarity index 94% rename from tests/data/slices.py rename to tests/data/simple_cases/slices.py index 7a42678..165117c 100644 --- a/tests/data/slices.py +++ b/tests/data/simple_cases/slices.py @@ -9,7 +9,7 @@ slice[:c, c - 1] slice[c, c + 1, d::] slice[ham[c::d] :: 1] -slice[ham[cheese ** 2 : -1] : 1 : 1, ham[1:2]] +slice[ham[cheese**2 : -1] : 1 : 1, ham[1:2]] slice[:-1:] slice[lambda: None : lambda: None] slice[lambda x, y, *args, really=2, **kwargs: None :, None::] diff --git a/tests/data/simple_cases/string_prefixes.py b/tests/data/simple_cases/string_prefixes.py new file mode 100644 index 0000000..f86da69 --- /dev/null +++ b/tests/data/simple_cases/string_prefixes.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 + +name = "Łukasz" +(f"hello {name}", F"hello {name}") +(b"", B"") +(u"", U"") +(r"", R"") + +(rf"", fr"", Rf"", fR"", rF"", Fr"", RF"", FR"") +(rb"", br"", Rb"", bR"", rB"", Br"", RB"", BR"") + + +def docstring_singleline(): + R"""2020 was one hell of a year. The good news is that we were able to""" + + +def docstring_multiline(): + R""" + clear out all of the issues opened in that time :p + """ + + +# output + + +#!/usr/bin/env python3 + +name = "Łukasz" +(f"hello {name}", f"hello {name}") +(b"", b"") +("", "") +(r"", R"") + +(rf"", rf"", Rf"", Rf"", rf"", rf"", Rf"", Rf"") +(rb"", rb"", Rb"", Rb"", rb"", rb"", Rb"", Rb"") + + +def docstring_singleline(): + R"""2020 was one hell of a year. 
The good news is that we were able to""" + + +def docstring_multiline(): + R""" + clear out all of the issues opened in that time :p + """ diff --git a/tests/data/simple_cases/torture.py b/tests/data/simple_cases/torture.py new file mode 100644 index 0000000..2a19475 --- /dev/null +++ b/tests/data/simple_cases/torture.py @@ -0,0 +1,91 @@ +importA;() << 0 ** 101234234242352525425252352352525234890264906820496920680926538059059209922523523525 # + +assert sort_by_dependency( + { + "1": {"2", "3"}, "2": {"2a", "2b"}, "3": {"3a", "3b"}, + "2a": set(), "2b": set(), "3a": set(), "3b": set() + } +) == ["2a", "2b", "2", "3a", "3b", "3", "1"] + +importA +0;0^0# + +class A: + def foo(self): + for _ in range(10): + aaaaaaaaaaaaaaaaaaa = bbbbbbbbbbbbbbb.cccccccccc( # pylint: disable=no-member + xxxxxxxxxxxx + ) + +def test(self, othr): + return (1 == 2 and + (name, description, self.default, self.selected, self.auto_generated, self.parameters, self.meta_data, self.schedule) == + (name, description, othr.default, othr.selected, othr.auto_generated, othr.parameters, othr.meta_data, othr.schedule)) + + +assert ( + a_function(very_long_arguments_that_surpass_the_limit, which_is_eighty_eight_in_this_case_plus_a_bit_more) + == {"x": "this need to pass the line limit as well", "b": "but only by a little bit"} +) + +# output + +importA +( + () + << 0 + ** 101234234242352525425252352352525234890264906820496920680926538059059209922523523525 +) # + +assert sort_by_dependency( + { + "1": {"2", "3"}, + "2": {"2a", "2b"}, + "3": {"3a", "3b"}, + "2a": set(), + "2b": set(), + "3a": set(), + "3b": set(), + } +) == ["2a", "2b", "2", "3a", "3b", "3", "1"] + +importA +0 +0 ^ 0 # + + +class A: + def foo(self): + for _ in range(10): + aaaaaaaaaaaaaaaaaaa = bbbbbbbbbbbbbbb.cccccccccc( + xxxxxxxxxxxx + ) # pylint: disable=no-member + + +def test(self, othr): + return 1 == 2 and ( + name, + description, + self.default, + self.selected, + self.auto_generated, + self.parameters, + self.meta_data, + self.schedule, + ) == ( + name, + description, + othr.default, + othr.selected, + othr.auto_generated, + othr.parameters, + othr.meta_data, + othr.schedule, + ) + + +assert a_function( + very_long_arguments_that_surpass_the_limit, + which_is_eighty_eight_in_this_case_plus_a_bit_more, +) == {"x": "this need to pass the line limit as well", "b": "but only by a little bit"} + diff --git a/tests/data/simple_cases/trailing_comma_optional_parens1.py b/tests/data/simple_cases/trailing_comma_optional_parens1.py new file mode 100644 index 0000000..85aa8ba --- /dev/null +++ b/tests/data/simple_cases/trailing_comma_optional_parens1.py @@ -0,0 +1,63 @@ +if e1234123412341234.winerror not in (_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): + pass + +if x: + if y: + new_id = max(Vegetable.objects.order_by('-id')[0].id, + Mineral.objects.order_by('-id')[0].id) + 1 + +class X: + def get_help_text(self): + return ngettext( + "Your password must contain at least %(min_length)d character.", + "Your password must contain at least %(min_length)d characters.", + self.min_length, + ) % {'min_length': self.min_length} + +class A: + def b(self): + if self.connection.mysql_is_mariadb and ( + 10, + 4, + 3, + ) < self.connection.mysql_version < (10, 5, 2): + pass + + +# output + +if e1234123412341234.winerror not in ( + _winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY, +) or _check_timeout(t): + pass + +if x: + if y: + new_id = ( + max( + Vegetable.objects.order_by("-id")[0].id, + Mineral.objects.order_by("-id")[0].id, + ) + + 1 + 
) + + +class X: + def get_help_text(self): + return ngettext( + "Your password must contain at least %(min_length)d character.", + "Your password must contain at least %(min_length)d characters.", + self.min_length, + ) % {"min_length": self.min_length} + + +class A: + def b(self): + if self.connection.mysql_is_mariadb and ( + 10, + 4, + 3, + ) < self.connection.mysql_version < (10, 5, 2): + pass diff --git a/tests/data/simple_cases/trailing_comma_optional_parens2.py b/tests/data/simple_cases/trailing_comma_optional_parens2.py new file mode 100644 index 0000000..9541670 --- /dev/null +++ b/tests/data/simple_cases/trailing_comma_optional_parens2.py @@ -0,0 +1,12 @@ +if (e123456.get_tk_patchlevel() >= (8, 6, 0, 'final') or + (8, 5, 8) <= get_tk_patchlevel() < (8, 6)): + pass + +# output + +if e123456.get_tk_patchlevel() >= (8, 6, 0, "final") or ( + 8, + 5, + 8, +) <= get_tk_patchlevel() < (8, 6): + pass diff --git a/tests/data/simple_cases/trailing_comma_optional_parens3.py b/tests/data/simple_cases/trailing_comma_optional_parens3.py new file mode 100644 index 0000000..c0ed699 --- /dev/null +++ b/tests/data/simple_cases/trailing_comma_optional_parens3.py @@ -0,0 +1,21 @@ +if True: + if True: + if True: + return _( + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweas " + + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwegqweasdzxcqweasdzxc.", + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwe", + ) % {"reported_username": reported_username, "report_reason": report_reason} + + +# output + + +if True: + if True: + if True: + return _( + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweas " + + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwegqweasdzxcqweasdzxc.", + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwe", + ) % {"reported_username": reported_username, "report_reason": report_reason} diff --git a/tests/data/tricky_unicode_symbols.py b/tests/data/simple_cases/tricky_unicode_symbols.py similarity index 76% rename from tests/data/tricky_unicode_symbols.py rename to tests/data/simple_cases/tricky_unicode_symbols.py index 366a92f..ad8b610 100644 --- a/tests/data/tricky_unicode_symbols.py +++ b/tests/data/simple_cases/tricky_unicode_symbols.py @@ -4,3 +4,6 @@ x󠄀 = 4 មុ = 1 Q̇_per_meter = 4 + +A᧚ = 3 +A፩ = 8 diff --git a/tests/data/tupleassign.py b/tests/data/simple_cases/tupleassign.py similarity index 100% rename from tests/data/tupleassign.py rename to tests/data/simple_cases/tupleassign.py diff --git a/tests/data/string_prefixes.py b/tests/data/string_prefixes.py deleted file mode 100644 index 0ca3686..0000000 --- a/tests/data/string_prefixes.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python3.6 - -name = R"Łukasz" -F"hello {name}" -B"hello" -r"hello" -fR"hello" - -# output - - -#!/usr/bin/env python3.6 - -name = R"Łukasz" -f"hello {name}" -b"hello" -r"hello" -fR"hello" diff --git a/tests/data/stub.pyi b/tests/data/stub.pyi deleted file mode 100644 index 94ba852..0000000 --- a/tests/data/stub.pyi +++ /dev/null @@ -1,35 +0,0 @@ -X: int - -def f(): ... - -class C: - ... - -class B: - ... - -class A: - def f(self) -> int: - ... - - def g(self) -> str: ... - -def g(): - ... - -def h(): ... - -# output -X: int - -def f(): ... - -class C: ... -class B: ... - -class A: - def f(self) -> int: ... - def g(self) -> str: ... - -def g(): ... -def h(): ... 
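The deleted stub fixture above records Black's conventions for .pyi files: bodies consisting solely of `...` are collapsed onto the definition line, and blank lines between such one-liners are dropped. A minimal sketch of the resulting style, for a hypothetical stub:

    class Widget:
        def render(self) -> str: ...
        def close(self) -> None: ...

    def helper(x: int) -> int: ...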
diff --git a/tests/data/trailing_comma_optional_parens1.py b/tests/data/trailing_comma_optional_parens1.py deleted file mode 100644 index 5ad29a8..0000000 --- a/tests/data/trailing_comma_optional_parens1.py +++ /dev/null @@ -1,3 +0,0 @@ -if e1234123412341234.winerror not in (_winapi.ERROR_SEM_TIMEOUT, - _winapi.ERROR_PIPE_BUSY) or _check_timeout(t): - pass \ No newline at end of file diff --git a/tests/data/trailing_comma_optional_parens2.py b/tests/data/trailing_comma_optional_parens2.py deleted file mode 100644 index 2817073..0000000 --- a/tests/data/trailing_comma_optional_parens2.py +++ /dev/null @@ -1,3 +0,0 @@ -if (e123456.get_tk_patchlevel() >= (8, 6, 0, 'final') or - (8, 5, 8) <= get_tk_patchlevel() < (8, 6)): - pass \ No newline at end of file diff --git a/tests/data/trailing_comma_optional_parens3.py b/tests/data/trailing_comma_optional_parens3.py deleted file mode 100644 index e6a673e..0000000 --- a/tests/data/trailing_comma_optional_parens3.py +++ /dev/null @@ -1,8 +0,0 @@ -if True: - if True: - if True: - return _( - "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweas " - + "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwegqweasdzxcqweasdzxc.", - "qweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqweasdzxcqwe", - ) % {"reported_username": reported_username, "report_reason": report_reason} \ No newline at end of file diff --git a/tests/optional.py b/tests/optional.py new file mode 100644 index 0000000..8a39cc4 --- /dev/null +++ b/tests/optional.py @@ -0,0 +1,124 @@ +""" +Allows configuring optional test markers in config, see pyproject.toml. + +Run optional tests with `pytest --run-optional=...`. + +Mark tests to run only if an optional test ISN'T selected by prepending the mark with +"no_". + +You can specify a "no_" prefix straight in config, in which case you can mark tests +to run when this tests ISN'T selected by omitting the "no_" prefix. + +Specifying the name of the default behavior in `--run-optional=` is harmless. + +Adapted from https://pypi.org/project/pytest-optional-tests/, (c) 2019 Reece Hart +""" + +import itertools +import logging +import re +from functools import lru_cache +from typing import TYPE_CHECKING, FrozenSet, List, Set + +import pytest + +try: + from pytest import StashKey +except ImportError: + # pytest < 7 + from _pytest.store import StoreKey as StashKey # type: ignore[no-redef] + +log = logging.getLogger(__name__) + + +if TYPE_CHECKING: + from _pytest.config import Config + from _pytest.config.argparsing import Parser + from _pytest.mark.structures import MarkDecorator + from _pytest.nodes import Node + + +ALL_POSSIBLE_OPTIONAL_MARKERS = StashKey[FrozenSet[str]]() +ENABLED_OPTIONAL_MARKERS = StashKey[FrozenSet[str]]() + + +def pytest_addoption(parser: "Parser") -> None: + group = parser.getgroup("collect") + group.addoption( + "--run-optional", + action="append", + dest="run_optional", + default=None, + help="Optional test markers to run; comma-separated", + ) + parser.addini("optional-tests", "List of optional tests markers", "linelist") + + +def pytest_configure(config: "Config") -> None: + """Optional tests are markers. + + Use the syntax in https://docs.pytest.org/en/stable/mark.html#registering-marks. 
+ """ + ot_ini = config.inicfg.get("optional-tests") or [] + ot_markers = set() + ot_run: Set[str] = set() + if isinstance(ot_ini, str): + ot_ini = ot_ini.strip().split("\n") + marker_re = re.compile(r"^\s*(?Pno_)?(?P\w+)(:\s*(?P.*))?") + for ot in ot_ini: + m = marker_re.match(ot) + if not m: + raise ValueError(f"{ot!r} doesn't match pytest marker syntax") + + marker = (m.group("no") or "") + m.group("marker") + description = m.group("description") + config.addinivalue_line("markers", f"{marker}: {description}") + config.addinivalue_line( + "markers", f"{no(marker)}: run when `{marker}` not passed" + ) + ot_markers.add(marker) + + # collect requested optional tests + passed_args = config.getoption("run_optional") + if passed_args: + ot_run.update(itertools.chain.from_iterable(a.split(",") for a in passed_args)) + ot_run |= {no(excluded) for excluded in ot_markers - ot_run} + ot_markers |= {no(m) for m in ot_markers} + + log.info("optional tests to run:", ot_run) + unknown_tests = ot_run - ot_markers + if unknown_tests: + raise ValueError(f"Unknown optional tests wanted: {unknown_tests!r}") + + store = config._store + store[ALL_POSSIBLE_OPTIONAL_MARKERS] = frozenset(ot_markers) + store[ENABLED_OPTIONAL_MARKERS] = frozenset(ot_run) + + +def pytest_collection_modifyitems(config: "Config", items: "List[Node]") -> None: + store = config._store + all_possible_optional_markers = store[ALL_POSSIBLE_OPTIONAL_MARKERS] + enabled_optional_markers = store[ENABLED_OPTIONAL_MARKERS] + + for item in items: + all_markers_on_test = {m.name for m in item.iter_markers()} + optional_markers_on_test = all_markers_on_test & all_possible_optional_markers + if not optional_markers_on_test or ( + optional_markers_on_test & enabled_optional_markers + ): + continue + log.info("skipping non-requested optional", item) + item.add_marker(skip_mark(frozenset(optional_markers_on_test))) + + +@lru_cache() +def skip_mark(tests: FrozenSet[str]) -> "MarkDecorator": + names = ", ".join(sorted(tests)) + return pytest.mark.skip(reason=f"Marked with disabled optional tests ({names})") + + +@lru_cache() +def no(name: str) -> str: + if name.startswith("no_"): + return name[len("no_") :] + return "no_" + name diff --git a/tests/test.toml b/tests/test.toml index 405c00c..e5fb922 100644 --- a/tests/test.toml +++ b/tests/test.toml @@ -7,4 +7,11 @@ line-length = 79 target-version = ["py36", "py37", "py38"] exclude='\.pyi?$' include='\.py?$' +python-cell-magics = ["custom1", "custom2"] +[v1.0.0-syntax] +# This shouldn't break Black. 
+contributors = [ + "Foo Bar ", + { name = "Baz Qux", email = "bazqux@example.com", url = "https://example.com/bazqux" } +] diff --git a/tests/test_black.py b/tests/test_black.py index c643f27..5d0175d 100644 --- a/tests/test_black.py +++ b/tests/test_black.py @@ -1,66 +1,80 @@ #!/usr/bin/env python3 -import multiprocessing + import asyncio +import inspect +import io import logging +import multiprocessing +import os +import re +import sys +import types +import unittest from concurrent.futures import ThreadPoolExecutor -from contextlib import contextmanager +from contextlib import contextmanager, redirect_stderr from dataclasses import replace -import inspect -from io import BytesIO, TextIOWrapper -import os +from io import BytesIO from pathlib import Path from platform import system -import regex as re -import sys from tempfile import TemporaryDirectory -import types from typing import ( Any, - BinaryIO, Callable, Dict, - Generator, - List, Iterator, + List, + Optional, + Sequence, TypeVar, + Union, ) -import unittest -from unittest.mock import patch, MagicMock +from unittest.mock import MagicMock, patch import click +import pytest from click import unstyle from click.testing import CliRunner +from pathspec import PathSpec import black +import black.files from black import Feature, TargetVersion - -from pathspec import PathSpec +from black import re_compile_maybe_verbose as compile_pattern +from black.cache import get_cache_dir, get_cache_file +from black.debug import DebugVisitor +from black.output import color_diff, diff +from black.report import Report # Import other test classes from tests.util import ( - THIS_DIR, - read_data, + DATA_DIR, + DEFAULT_MODE, DETERMINISTIC_HEADER, + PROJECT_ROOT, + PY36_VERSIONS, + THIS_DIR, BlackBaseTestCase, - DEFAULT_MODE, - fs, - ff, + assert_format, + change_directory, dump_to_stderr, + ff, + fs, + get_case_path, + read_data, + read_data_from_file, ) -from .test_primer import PrimerCLITests # noqa: F401 - THIS_FILE = Path(__file__) -PY36_VERSIONS = { - TargetVersion.PY36, - TargetVersion.PY37, - TargetVersion.PY38, - TargetVersion.PY39, -} +EMPTY_CONFIG = THIS_DIR / "data" / "empty_pyproject.toml" PY36_ARGS = [f"--target-version={version.name.lower()}" for version in PY36_VERSIONS] +DEFAULT_EXCLUDE = black.re_compile_maybe_verbose(black.const.DEFAULT_EXCLUDES) +DEFAULT_INCLUDE = black.re_compile_maybe_verbose(black.const.DEFAULT_INCLUDES) T = TypeVar("T") R = TypeVar("R") +# Match the time output in a diff, but nothing else +DIFF_TIME = re.compile(r"\t[\d\-:+\. ]+") + @contextmanager def cache_dir(exists: bool = True) -> Iterator[Path]: @@ -68,7 +82,7 @@ def cache_dir(exists: bool = True) -> Iterator[Path]: cache_dir = Path(workspace) if not exists: cache_dir = cache_dir / "new" - with patch("black.CACHE_DIR", cache_dir): + with patch("black.cache.CACHE_DIR", cache_dir): yield cache_dir @@ -89,6 +103,8 @@ class FakeContext(click.Context): def __init__(self) -> None: self.default_map: Dict[str, Any] = {} + # Dummy root, since most of the tests don't care about it + self.obj: Dict[str, Any] = {"root": PROJECT_ROOT} class FakeParameter(click.Parameter): @@ -99,56 +115,32 @@ def __init__(self) -> None: class BlackRunner(CliRunner): - """Modify CliRunner so that stderr is not merged with stdout. 
- - This is a hack that can be removed once we depend on Click 7.x""" + """Make sure STDOUT and STDERR are kept separate when testing Black via its CLI.""" def __init__(self) -> None: - self.stderrbuf = BytesIO() - self.stdoutbuf = BytesIO() - self.stdout_bytes = b"" - self.stderr_bytes = b"" - super().__init__() - - @contextmanager - def isolation(self, *args: Any, **kwargs: Any) -> Generator[BinaryIO, None, None]: - with super().isolation(*args, **kwargs) as output: - try: - hold_stderr = sys.stderr - sys.stderr = TextIOWrapper(self.stderrbuf, encoding=self.charset) - yield output - finally: - self.stdout_bytes = sys.stdout.buffer.getvalue() # type: ignore - self.stderr_bytes = sys.stderr.buffer.getvalue() # type: ignore - sys.stderr = hold_stderr + super().__init__(mix_stderr=False) + + +def invokeBlack( + args: List[str], exit_code: int = 0, ignore_config: bool = True +) -> None: + runner = BlackRunner() + if ignore_config: + args = ["--verbose", "--config", str(THIS_DIR / "empty.toml"), *args] + result = runner.invoke(black.main, args, catch_exceptions=False) + assert result.stdout_bytes is not None + assert result.stderr_bytes is not None + msg = ( + f"Failed with args: {args}\n" + f"stdout: {result.stdout_bytes.decode()!r}\n" + f"stderr: {result.stderr_bytes.decode()!r}\n" + f"exception: {result.exception}" + ) + assert result.exit_code == exit_code, msg class BlackTestCase(BlackBaseTestCase): - def invokeBlack( - self, args: List[str], exit_code: int = 0, ignore_config: bool = True - ) -> None: - runner = BlackRunner() - if ignore_config: - args = ["--verbose", "--config", str(THIS_DIR / "empty.toml"), *args] - result = runner.invoke(black.main, args) - self.assertEqual( - result.exit_code, - exit_code, - msg=( - f"Failed with args: {args}\n" - f"stdout: {runner.stdout_bytes.decode()!r}\n" - f"stderr: {runner.stderr_bytes.decode()!r}\n" - f"exception: {result.exception}" - ), - ) - - @patch("black.dump_to_file", dump_to_stderr) - def test_empty(self) -> None: - source = expected = "" - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) + invokeBlack = staticmethod(invokeBlack) def test_empty_ff(self) -> None: expected = "" @@ -161,32 +153,42 @@ def test_empty_ff(self) -> None: os.unlink(tmp_file) self.assertFormatEqual(expected, actual) + def test_experimental_string_processing_warns(self) -> None: + self.assertWarns( + black.mode.Deprecated, black.Mode, experimental_string_processing=True + ) + def test_piping(self) -> None: - source, expected = read_data("src/black/__init__", data=False) + source, expected = read_data_from_file(PROJECT_ROOT / "src/black/__init__.py") result = BlackRunner().invoke( black.main, - ["-", "--fast", f"--line-length={black.DEFAULT_LINE_LENGTH}"], + [ + "-", + "--fast", + f"--line-length={black.DEFAULT_LINE_LENGTH}", + f"--config={EMPTY_CONFIG}", + ], input=BytesIO(source.encode("utf8")), ) self.assertEqual(result.exit_code, 0) self.assertFormatEqual(expected, result.output) - black.assert_equivalent(source, result.output) - black.assert_stable(source, result.output, DEFAULT_MODE) + if source != result.output: + black.assert_equivalent(source, result.output) + black.assert_stable(source, result.output, DEFAULT_MODE) def test_piping_diff(self) -> None: diff_header = re.compile( r"(STDIN|STDOUT)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d " r"\+\d\d\d\d" ) - source, _ = read_data("expression.py") - expected, _ = read_data("expression.diff") - 
config = THIS_DIR / "data" / "empty_pyproject.toml" + source, _ = read_data("simple_cases", "expression.py") + expected, _ = read_data("simple_cases", "expression.diff") args = [ "-", "--fast", f"--line-length={black.DEFAULT_LINE_LENGTH}", "--diff", - f"--config={config}", + f"--config={EMPTY_CONFIG}", ] result = BlackRunner().invoke( black.main, args, input=BytesIO(source.encode("utf8")) @@ -197,22 +199,21 @@ def test_piping_diff(self) -> None: self.assertEqual(expected, actual) def test_piping_diff_with_color(self) -> None: - source, _ = read_data("expression.py") - config = THIS_DIR / "data" / "empty_pyproject.toml" + source, _ = read_data("simple_cases", "expression.py") args = [ "-", "--fast", f"--line-length={black.DEFAULT_LINE_LENGTH}", "--diff", "--color", - f"--config={config}", + f"--config={EMPTY_CONFIG}", ] result = BlackRunner().invoke( black.main, args, input=BytesIO(source.encode("utf8")) ) actual = result.output # Again, the contents are checked in a different test, so only look for colors. - self.assertIn("\033[1;37m", actual) + self.assertIn("\033[1m", actual) self.assertIn("\033[36m", actual) self.assertIn("\033[32m", actual) self.assertIn("\033[31m", actual) @@ -220,7 +221,7 @@ def test_piping_diff_with_color(self) -> None: @patch("black.dump_to_file", dump_to_stderr) def _test_wip(self) -> None: - source, expected = read_data("wip") + source, expected = read_data("miscellaneous", "wip") sys.settrace(tracefunc) mode = replace( DEFAULT_MODE, @@ -233,73 +234,8 @@ def _test_wip(self) -> None: black.assert_equivalent(source, actual) black.assert_stable(source, actual, black.FileMode()) - @unittest.expectedFailure - @patch("black.dump_to_file", dump_to_stderr) - def test_trailing_comma_optional_parens_stability1(self) -> None: - source, _expected = read_data("trailing_comma_optional_parens1") - actual = fs(source) - black.assert_stable(source, actual, DEFAULT_MODE) - - @unittest.expectedFailure - @patch("black.dump_to_file", dump_to_stderr) - def test_trailing_comma_optional_parens_stability2(self) -> None: - source, _expected = read_data("trailing_comma_optional_parens2") - actual = fs(source) - black.assert_stable(source, actual, DEFAULT_MODE) - - @unittest.expectedFailure - @patch("black.dump_to_file", dump_to_stderr) - def test_trailing_comma_optional_parens_stability3(self) -> None: - source, _expected = read_data("trailing_comma_optional_parens3") - actual = fs(source) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_trailing_comma_optional_parens_stability1_pass2(self) -> None: - source, _expected = read_data("trailing_comma_optional_parens1") - actual = fs(fs(source)) # this is what `format_file_contents` does with --safe - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_trailing_comma_optional_parens_stability2_pass2(self) -> None: - source, _expected = read_data("trailing_comma_optional_parens2") - actual = fs(fs(source)) # this is what `format_file_contents` does with --safe - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_trailing_comma_optional_parens_stability3_pass2(self) -> None: - source, _expected = read_data("trailing_comma_optional_parens3") - actual = fs(fs(source)) # this is what `format_file_contents` does with --safe - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_pep_572(self) -> None: - source, 
expected = read_data("pep_572") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - if sys.version_info >= (3, 8): - black.assert_equivalent(source, actual) - - @patch("black.dump_to_file", dump_to_stderr) - def test_pep_572_remove_parens(self) -> None: - source, expected = read_data("pep_572_remove_parens") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - if sys.version_info >= (3, 8): - black.assert_equivalent(source, actual) - - @patch("black.dump_to_file", dump_to_stderr) - def test_pep_572_do_not_remove_parens(self) -> None: - source, expected = read_data("pep_572_do_not_remove_parens") - # the AST safety checks will fail, but that's expected, just make sure no - # parentheses are touched - actual = black.format_str(source, mode=DEFAULT_MODE) - self.assertFormatEqual(expected, actual) - def test_pep_572_version_detection(self) -> None: - source, _ = read_data("pep_572") + source, _ = read_data("py_38", "pep_572") root = black.lib2to3_parse(source) features = black.get_features_used(root) self.assertIn(black.Feature.ASSIGNMENT_EXPRESSIONS, features) @@ -307,7 +243,7 @@ def test_pep_572_version_detection(self) -> None: self.assertIn(black.TargetVersion.PY38, versions) def test_expression_ff(self) -> None: - source, expected = read_data("expression") + source, expected = read_data("simple_cases", "expression.py") tmp_file = Path(black.dump_to_file(source)) try: self.assertTrue(ff(tmp_file, write_back=black.WriteBack.YES)) @@ -321,9 +257,8 @@ def test_expression_ff(self) -> None: black.assert_stable(source, actual, DEFAULT_MODE) def test_expression_diff(self) -> None: - source, _ = read_data("expression.py") - config = THIS_DIR / "data" / "empty_pyproject.toml" - expected, _ = read_data("expression.diff") + source, _ = read_data("simple_cases", "expression.py") + expected, _ = read_data("simple_cases", "expression.diff") tmp_file = Path(black.dump_to_file(source)) diff_header = re.compile( rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d " @@ -331,7 +266,7 @@ def test_expression_diff(self) -> None: ) try: result = BlackRunner().invoke( - black.main, ["--diff", str(tmp_file), f"--config={config}"] + black.main, ["--diff", str(tmp_file), f"--config={EMPTY_CONFIG}"] ) self.assertEqual(result.exit_code, 0) finally: @@ -348,102 +283,102 @@ def test_expression_diff(self) -> None: self.assertEqual(expected, actual, msg) def test_expression_diff_with_color(self) -> None: - source, _ = read_data("expression.py") - config = THIS_DIR / "data" / "empty_pyproject.toml" - expected, _ = read_data("expression.diff") + source, _ = read_data("simple_cases", "expression.py") + expected, _ = read_data("simple_cases", "expression.diff") tmp_file = Path(black.dump_to_file(source)) try: result = BlackRunner().invoke( - black.main, ["--diff", "--color", str(tmp_file), f"--config={config}"] + black.main, + ["--diff", "--color", str(tmp_file), f"--config={EMPTY_CONFIG}"], ) finally: os.unlink(tmp_file) actual = result.output # We check the contents of the diff in `test_expression_diff`. All # we need to check here is that color codes exist in the result. 
- self.assertIn("\033[1;37m", actual) + self.assertIn("\033[1m", actual) self.assertIn("\033[36m", actual) self.assertIn("\033[32m", actual) self.assertIn("\033[31m", actual) self.assertIn("\033[0m", actual) - @patch("black.dump_to_file", dump_to_stderr) - def test_pep_570(self) -> None: - source, expected = read_data("pep_570") - actual = fs(source) - self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - if sys.version_info >= (3, 8): - black.assert_equivalent(source, actual) - def test_detect_pos_only_arguments(self) -> None: - source, _ = read_data("pep_570") + source, _ = read_data("py_38", "pep_570") root = black.lib2to3_parse(source) features = black.get_features_used(root) self.assertIn(black.Feature.POS_ONLY_ARGUMENTS, features) versions = black.detect_target_versions(root) self.assertIn(black.TargetVersion.PY38, versions) + def test_detect_debug_f_strings(self) -> None: + root = black.lib2to3_parse("""f"{x=}" """) + features = black.get_features_used(root) + self.assertIn(black.Feature.DEBUG_F_STRINGS, features) + versions = black.detect_target_versions(root) + self.assertIn(black.TargetVersion.PY38, versions) + + root = black.lib2to3_parse( + """f"{x}"\nf'{"="}'\nf'{(x:=5)}'\nf'{f(a="3=")}'\nf'{x:=10}'\n""" + ) + features = black.get_features_used(root) + self.assertNotIn(black.Feature.DEBUG_F_STRINGS, features) + + # We don't yet support feature version detection in nested f-strings + root = black.lib2to3_parse( + """f"heard a rumour that { f'{1+1=}' } ... seems like it could be true" """ + ) + features = black.get_features_used(root) + self.assertNotIn(black.Feature.DEBUG_F_STRINGS, features) + @patch("black.dump_to_file", dump_to_stderr) def test_string_quotes(self) -> None: - source, expected = read_data("string_quotes") - mode = black.Mode(experimental_string_processing=True) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, mode) + source, expected = read_data("miscellaneous", "string_quotes") + mode = black.Mode(preview=True) + assert_format(source, expected, mode) mode = replace(mode, string_normalization=False) not_normalized = fs(source, mode=mode) self.assertFormatEqual(source.replace("\\\n", ""), not_normalized) black.assert_equivalent(source, not_normalized) black.assert_stable(source, not_normalized, mode=mode) - @patch("black.dump_to_file", dump_to_stderr) - def test_docstring_no_string_normalization(self) -> None: - """Like test_docstring but with string normalization off.""" - source, expected = read_data("docstring_no_string_normalization") - mode = replace(DEFAULT_MODE, string_normalization=False) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, mode) - - def test_long_strings_flag_disabled(self) -> None: - """Tests for turning off the string processing logic.""" - source, expected = read_data("long_strings_flag_disabled") - mode = replace(DEFAULT_MODE, experimental_string_processing=False) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_stable(expected, actual, mode) - - @patch("black.dump_to_file", dump_to_stderr) - def test_numeric_literals(self) -> None: - source, expected = read_data("numeric_literals") - mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - 
black.assert_equivalent(source, actual) - black.assert_stable(source, actual, mode) + def test_skip_source_first_line(self) -> None: + source, _ = read_data("miscellaneous", "invalid_header") + tmp_file = Path(black.dump_to_file(source)) + # Full source should fail (invalid syntax at header) + self.invokeBlack([str(tmp_file), "--diff", "--check"], exit_code=123) + # So, skipping the first line should work + result = BlackRunner().invoke( + black.main, [str(tmp_file), "-x", f"--config={EMPTY_CONFIG}"] + ) + self.assertEqual(result.exit_code, 0) + with open(tmp_file, encoding="utf8") as f: + actual = f.read() + self.assertFormatEqual(source, actual) - @patch("black.dump_to_file", dump_to_stderr) - def test_numeric_literals_ignoring_underscores(self) -> None: - source, expected = read_data("numeric_literals_skip_underscores") - mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, mode) + def test_skip_source_first_line_when_mixing_newlines(self) -> None: + code_mixing_newlines = b"Header will be skipped\r\ni = [1,2,3]\nj = [1,2,3]\n" + expected = b"Header will be skipped\r\ni = [1, 2, 3]\nj = [1, 2, 3]\n" + with TemporaryDirectory() as workspace: + test_file = Path(workspace) / "skip_header.py" + test_file.write_bytes(code_mixing_newlines) + mode = replace(DEFAULT_MODE, skip_source_first_line=True) + ff(test_file, mode=mode, write_back=black.WriteBack.YES) + self.assertEqual(test_file.read_bytes(), expected) def test_skip_magic_trailing_comma(self) -> None: - source, _ = read_data("expression.py") - expected, _ = read_data("expression_skip_magic_trailing_comma.diff") + source, _ = read_data("simple_cases", "expression") + expected, _ = read_data( + "miscellaneous", "expression_skip_magic_trailing_comma.diff" + ) tmp_file = Path(black.dump_to_file(source)) diff_header = re.compile( rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d " r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" ) try: - result = BlackRunner().invoke(black.main, ["-C", "--diff", str(tmp_file)]) + result = BlackRunner().invoke( + black.main, ["-C", "--diff", str(tmp_file), f"--config={EMPTY_CONFIG}"] + ) self.assertEqual(result.exit_code, 0) finally: os.unlink(tmp_file) @@ -459,27 +394,10 @@ def test_skip_magic_trailing_comma(self) -> None: ) self.assertEqual(expected, actual, msg) - @patch("black.dump_to_file", dump_to_stderr) - def test_python2_print_function(self) -> None: - source, expected = read_data("python2_print_function") - mode = replace(DEFAULT_MODE, target_versions={TargetVersion.PY27}) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, mode) - - @patch("black.dump_to_file", dump_to_stderr) - def test_stub(self) -> None: - mode = replace(DEFAULT_MODE, is_pyi=True) - source, expected = read_data("stub.pyi") - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_stable(source, actual, mode) - @patch("black.dump_to_file", dump_to_stderr) def test_async_as_identifier(self) -> None: - source_path = (THIS_DIR / "data" / "async_as_identifier.py").resolve() - source, expected = read_data("async_as_identifier") + source_path = get_case_path("miscellaneous", "async_as_identifier") + source, expected = read_data_from_file(source_path) actual = fs(source) self.assertFormatEqual(expected, actual) major, minor = sys.version_info[:2] 
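# Illustrative sketch (annotation, not part of the diff): the reorganized
# tests/data layout behind get_case_path / read_data / read_data_from_file.
# Case files now live in per-category subdirectories (simple_cases/,
# miscellaneous/, py_37/, py_38/, ...); the helper names come from tests.util,
# but the bodies below are guesses at their behaviour, not the real code.
from pathlib import Path
from typing import Tuple

DATA_DIR = Path(__file__).parent / "data"

def get_case_path_sketch(subdir: str, name: str) -> Path:
    if "." not in name:
        name += ".py"
    return DATA_DIR / subdir / name

def read_data_from_file_sketch(path: Path) -> Tuple[str, str]:
    # Case files hold input and expected output separated by an "# output" line.
    text = path.read_text(encoding="utf8")
    source, sep, expected = text.partition("# output")
    return source, expected if sep else source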
@@ -493,8 +411,8 @@ def test_async_as_identifier(self) -> None: @patch("black.dump_to_file", dump_to_stderr) def test_python37(self) -> None: - source_path = (THIS_DIR / "data" / "python37.py").resolve() - source, expected = read_data("python37") + source_path = get_case_path("py_37", "python37") + source, expected = read_data_from_file(source_path) actual = fs(source) self.assertFormatEqual(expected, actual) major, minor = sys.version_info[:2] @@ -506,26 +424,6 @@ def test_python37(self) -> None: # but not on 3.6, because we use async as a reserved keyword self.invokeBlack([str(source_path), "--target-version", "py36"], exit_code=123) - @patch("black.dump_to_file", dump_to_stderr) - def test_python38(self) -> None: - source, expected = read_data("python38") - actual = fs(source) - self.assertFormatEqual(expected, actual) - major, minor = sys.version_info[:2] - if major > 3 or (major == 3 and minor >= 8): - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - - @patch("black.dump_to_file", dump_to_stderr) - def test_python39(self) -> None: - source, expected = read_data("python39") - actual = fs(source) - self.assertFormatEqual(expected, actual) - major, minor = sys.version_info[:2] - if major > 3 or (major == 3 and minor >= 9): - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, DEFAULT_MODE) - def test_tab_comment_indentation(self) -> None: contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t# comment\n\tpass\n" contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n" @@ -549,7 +447,7 @@ def test_tab_comment_indentation(self) -> None: self.assertFormatEqual(contents_spc, fs(contents_tab)) def test_report_verbose(self) -> None: - report = black.Report(verbose=True) + report = Report(verbose=True) out_lines = [] err_lines = [] @@ -559,7 +457,7 @@ def out(msg: str, **kwargs: Any) -> None: def err(msg: str, **kwargs: Any) -> None: err_lines.append(msg) - with patch("black.out", out), patch("black.err", err): + with patch("black.output._out", out), patch("black.output._err", err): report.done(Path("f1"), black.Changed.NO) self.assertEqual(len(out_lines), 1) self.assertEqual(len(err_lines), 0) @@ -639,19 +537,19 @@ def err(msg: str, **kwargs: Any) -> None: report.check = True self.assertEqual( unstyle(str(report)), - "2 files would be reformatted, 3 files would be left unchanged, 2 files" - " would fail to reformat.", + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) report.check = False report.diff = True self.assertEqual( unstyle(str(report)), - "2 files would be reformatted, 3 files would be left unchanged, 2 files" - " would fail to reformat.", + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) def test_report_quiet(self) -> None: - report = black.Report(quiet=True) + report = Report(quiet=True) out_lines = [] err_lines = [] @@ -661,7 +559,7 @@ def out(msg: str, **kwargs: Any) -> None: def err(msg: str, **kwargs: Any) -> None: err_lines.append(msg) - with patch("black.out", out), patch("black.err", err): + with patch("black.output._out", out), patch("black.output._err", err): report.done(Path("f1"), black.Changed.NO) self.assertEqual(len(out_lines), 0) self.assertEqual(len(err_lines), 0) @@ -733,15 +631,15 @@ def err(msg: str, **kwargs: Any) -> None: report.check = True self.assertEqual( unstyle(str(report)), - "2 files would be reformatted, 3 files would be left unchanged, 2 files" - " would fail to reformat.", + "2 
files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) report.check = False report.diff = True self.assertEqual( unstyle(str(report)), - "2 files would be reformatted, 3 files would be left unchanged, 2 files" - " would fail to reformat.", + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) def test_report_normal(self) -> None: @@ -755,7 +653,7 @@ def out(msg: str, **kwargs: Any) -> None: def err(msg: str, **kwargs: Any) -> None: err_lines.append(msg) - with patch("black.out", out), patch("black.err", err): + with patch("black.output._out", out), patch("black.output._err", err): report.done(Path("f1"), black.Changed.NO) self.assertEqual(len(out_lines), 0) self.assertEqual(len(err_lines), 0) @@ -830,15 +728,15 @@ def err(msg: str, **kwargs: Any) -> None: report.check = True self.assertEqual( unstyle(str(report)), - "2 files would be reformatted, 3 files would be left unchanged, 2 files" - " would fail to reformat.", + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) report.check = False report.diff = True self.assertEqual( unstyle(str(report)), - "2 files would be reformatted, 3 files would be left unchanged, 2 files" - " would fail to reformat.", + "2 files would be reformatted, 3 files would be left unchanged, 2" + " files would fail to reformat.", ) def test_lib2to3_parse(self) -> None: @@ -847,31 +745,22 @@ def test_lib2to3_parse(self) -> None: straddling = "x + y" black.lib2to3_parse(straddling) - black.lib2to3_parse(straddling, {TargetVersion.PY27}) black.lib2to3_parse(straddling, {TargetVersion.PY36}) - black.lib2to3_parse(straddling, {TargetVersion.PY27, TargetVersion.PY36}) py2_only = "print x" - black.lib2to3_parse(py2_only) - black.lib2to3_parse(py2_only, {TargetVersion.PY27}) with self.assertRaises(black.InvalidInput): black.lib2to3_parse(py2_only, {TargetVersion.PY36}) - with self.assertRaises(black.InvalidInput): - black.lib2to3_parse(py2_only, {TargetVersion.PY27, TargetVersion.PY36}) py3_only = "exec(x, end=y)" black.lib2to3_parse(py3_only) - with self.assertRaises(black.InvalidInput): - black.lib2to3_parse(py3_only, {TargetVersion.PY27}) black.lib2to3_parse(py3_only, {TargetVersion.PY36}) - black.lib2to3_parse(py3_only, {TargetVersion.PY27, TargetVersion.PY36}) def test_get_features_used_decorator(self) -> None: # Test the feature detection of new decorator syntax # since this makes some test cases of test_get_features_used() # fails if it fails, this is tested first so that a useful case # is identified - simples, relaxed = read_data("decorators") + simples, relaxed = read_data("miscellaneous", "decorators") # skip explanation comments at the top of the file for simple_test in simples.split("##")[1:]: node = black.lib2to3_parse(simple_test) @@ -914,7 +803,7 @@ def test_get_features_used(self) -> None: self.assertEqual(black.get_features_used(node), {Feature.NUMERIC_UNDERSCORES}) node = black.lib2to3_parse("123456\n") self.assertEqual(black.get_features_used(node), set()) - source, expected = read_data("function") + source, expected = read_data("simple_cases", "function") node = black.lib2to3_parse(source) expected_features = { Feature.TRAILING_COMMA_IN_CALL, @@ -924,11 +813,65 @@ def test_get_features_used(self) -> None: self.assertEqual(black.get_features_used(node), expected_features) node = black.lib2to3_parse(expected) self.assertEqual(black.get_features_used(node), expected_features) - source, expected = 
read_data("expression") + source, expected = read_data("simple_cases", "expression") node = black.lib2to3_parse(source) self.assertEqual(black.get_features_used(node), set()) node = black.lib2to3_parse(expected) self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("lambda a, /, b: ...") + self.assertEqual(black.get_features_used(node), {Feature.POS_ONLY_ARGUMENTS}) + node = black.lib2to3_parse("def fn(a, /, b): ...") + self.assertEqual(black.get_features_used(node), {Feature.POS_ONLY_ARGUMENTS}) + node = black.lib2to3_parse("def fn(): yield a, b") + self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("def fn(): return a, b") + self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("def fn(): yield *b, c") + self.assertEqual(black.get_features_used(node), {Feature.UNPACKING_ON_FLOW}) + node = black.lib2to3_parse("def fn(): return a, *b, c") + self.assertEqual(black.get_features_used(node), {Feature.UNPACKING_ON_FLOW}) + node = black.lib2to3_parse("x = a, *b, c") + self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("x: Any = regular") + self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("x: Any = (regular, regular)") + self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("x: Any = Complex(Type(1))[something]") + self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("x: Tuple[int, ...] = a, b, c") + self.assertEqual( + black.get_features_used(node), {Feature.ANN_ASSIGN_EXTENDED_RHS} + ) + node = black.lib2to3_parse("try: pass\nexcept Something: pass") + self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("try: pass\nexcept (*Something,): pass") + self.assertEqual(black.get_features_used(node), set()) + node = black.lib2to3_parse("try: pass\nexcept *Group: pass") + self.assertEqual(black.get_features_used(node), {Feature.EXCEPT_STAR}) + node = black.lib2to3_parse("a[*b]") + self.assertEqual(black.get_features_used(node), {Feature.VARIADIC_GENERICS}) + node = black.lib2to3_parse("a[x, *y(), z] = t") + self.assertEqual(black.get_features_used(node), {Feature.VARIADIC_GENERICS}) + node = black.lib2to3_parse("def fn(*args: *T): pass") + self.assertEqual(black.get_features_used(node), {Feature.VARIADIC_GENERICS}) + + def test_get_features_used_for_future_flags(self) -> None: + for src, features in [ + ("from __future__ import annotations", {Feature.FUTURE_ANNOTATIONS}), + ( + "from __future__ import (other, annotations)", + {Feature.FUTURE_ANNOTATIONS}, + ), + ("a = 1 + 2\nfrom something import annotations", set()), + ("from __future__ import x, y", set()), + ]: + with self.subTest(src=src, features=features): + node = black.lib2to3_parse(src) + future_imports = black.get_future_imports(node) + self.assertEqual( + black.get_features_used(node, future_imports=future_imports), + features, + ) def test_get_future_imports(self) -> None: node = black.lib2to3_parse("\n") @@ -960,9 +903,10 @@ def test_get_future_imports(self) -> None: ) self.assertEqual({"unicode_literals", "print"}, black.get_future_imports(node)) + @pytest.mark.incompatible_with_mypyc def test_debug_visitor(self) -> None: - source, _ = read_data("debug_visitor.py") - expected, _ = read_data("debug_visitor.out") + source, _ = read_data("miscellaneous", "debug_visitor") + expected, _ = read_data("miscellaneous", "debug_visitor.out") out_lines = [] err_lines = [] @@ -972,8 +916,8 @@ def 
out(msg: str, **kwargs: Any) -> None: def err(msg: str, **kwargs: Any) -> None: err_lines.append(msg) - with patch("black.out", out), patch("black.err", err): - black.DebugVisitor.show(source) + with patch("black.debug.out", out): + DebugVisitor.show(source) actual = "\n".join(out_lines) + "\n" log_name = "" if expected != actual: @@ -1010,6 +954,7 @@ def test_endmarker(self) -> None: self.assertEqual(len(n.children), 1) self.assertEqual(n.children[0].type, black.token.ENDMARKER) + @pytest.mark.incompatible_with_mypyc @unittest.skipIf(os.environ.get("SKIP_AST_PRINT"), "user set SKIP_AST_PRINT") def test_assertFormatEqual(self) -> None: out_lines = [] @@ -1021,265 +966,79 @@ def out(msg: str, **kwargs: Any) -> None: def err(msg: str, **kwargs: Any) -> None: err_lines.append(msg) - with patch("black.out", out), patch("black.err", err): + with patch("black.output._out", out), patch("black.output._err", err): with self.assertRaises(AssertionError): self.assertFormatEqual("j = [1, 2, 3]", "j = [1, 2, 3,]") out_str = "".join(out_lines) - self.assertTrue("Expected tree:" in out_str) - self.assertTrue("Actual tree:" in out_str) + self.assertIn("Expected tree:", out_str) + self.assertIn("Actual tree:", out_str) self.assertEqual("".join(err_lines), "") - def test_cache_broken_file(self) -> None: - mode = DEFAULT_MODE - with cache_dir() as workspace: - cache_file = black.get_cache_file(mode) - with cache_file.open("w") as fobj: - fobj.write("this is not a pickle") - self.assertEqual(black.read_cache(mode), {}) - src = (workspace / "test.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") - self.invokeBlack([str(src)]) - cache = black.read_cache(mode) - self.assertIn(str(src), cache) - - def test_cache_single_file_already_cached(self) -> None: - mode = DEFAULT_MODE + @event_loop() + @patch("concurrent.futures.ProcessPoolExecutor", MagicMock(side_effect=OSError)) + def test_works_in_mono_process_only_environment(self) -> None: with cache_dir() as workspace: - src = (workspace / "test.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") - black.write_cache({}, [src], mode) - self.invokeBlack([str(src)]) - with src.open("r") as fobj: - self.assertEqual(fobj.read(), "print('hello')") + for f in [ + (workspace / "one.py").resolve(), + (workspace / "two.py").resolve(), + ]: + f.write_text('print("hello")\n') + self.invokeBlack([str(workspace)]) @event_loop() - def test_cache_multiple_files(self) -> None: - mode = DEFAULT_MODE - with cache_dir() as workspace, patch( - "black.ProcessPoolExecutor", new=ThreadPoolExecutor - ): - one = (workspace / "one.py").resolve() - with one.open("w") as fobj: - fobj.write("print('hello')") - two = (workspace / "two.py").resolve() - with two.open("w") as fobj: - fobj.write("print('hello')") - black.write_cache({}, [one], mode) - self.invokeBlack([str(workspace)]) - with one.open("r") as fobj: - self.assertEqual(fobj.read(), "print('hello')") - with two.open("r") as fobj: - self.assertEqual(fobj.read(), 'print("hello")\n') - cache = black.read_cache(mode) - self.assertIn(str(one), cache) - self.assertIn(str(two), cache) + def test_check_diff_use_together(self) -> None: + with cache_dir(): + # Files which will be reformatted. + src1 = get_case_path("miscellaneous", "string_quotes") + self.invokeBlack([str(src1), "--diff", "--check"], exit_code=1) + # Files which will not be reformatted. + src2 = get_case_path("simple_cases", "composition") + self.invokeBlack([str(src2), "--diff", "--check"]) + # Multi file command. 
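# Illustrative sketch (annotation, not part of the diff): the exit-code
# contract that test_check_diff_use_together (including the multi-file
# invocation just below) relies on: 0 when nothing would change, 1 when some
# file would be reformatted, 123 when a file fails to format. Assumes a
# `black` executable on PATH and a hypothetical needs_reformat.py:
import subprocess

proc = subprocess.run(
    ["black", "--check", "--diff", "needs_reformat.py"],
    capture_output=True,
    text=True,
)
assert proc.returncode == 1  # a diff is printed; the file is left untouched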
+ self.invokeBlack([str(src1), str(src2), "--diff", "--check"], exit_code=1) - def test_no_cache_when_writeback_diff(self) -> None: - mode = DEFAULT_MODE + def test_no_src_fails(self) -> None: + with cache_dir(): + self.invokeBlack([], exit_code=1) + + def test_src_and_code_fails(self) -> None: + with cache_dir(): + self.invokeBlack([".", "-c", "0"], exit_code=1) + + def test_broken_symlink(self) -> None: with cache_dir() as workspace: - src = (workspace / "test.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") - with patch("black.read_cache") as read_cache, patch( - "black.write_cache" - ) as write_cache: - self.invokeBlack([str(src), "--diff"]) - cache_file = black.get_cache_file(mode) - self.assertFalse(cache_file.exists()) - write_cache.assert_not_called() - read_cache.assert_not_called() + symlink = workspace / "broken_link.py" + try: + symlink.symlink_to("nonexistent.py") + except (OSError, NotImplementedError) as e: + self.skipTest(f"Can't create symlinks: {e}") + self.invokeBlack([str(workspace.resolve())]) - def test_no_cache_when_writeback_color_diff(self) -> None: - mode = DEFAULT_MODE + def test_single_file_force_pyi(self) -> None: + pyi_mode = replace(DEFAULT_MODE, is_pyi=True) + contents, expected = read_data("miscellaneous", "force_pyi") with cache_dir() as workspace: - src = (workspace / "test.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") - with patch("black.read_cache") as read_cache, patch( - "black.write_cache" - ) as write_cache: - self.invokeBlack([str(src), "--diff", "--color"]) - cache_file = black.get_cache_file(mode) - self.assertFalse(cache_file.exists()) - write_cache.assert_not_called() - read_cache.assert_not_called() + path = (workspace / "file.py").resolve() + with open(path, "w") as fh: + fh.write(contents) + self.invokeBlack([str(path), "--pyi"]) + with open(path, "r") as fh: + actual = fh.read() + # verify cache with --pyi is separate + pyi_cache = black.read_cache(pyi_mode) + self.assertIn(str(path), pyi_cache) + normal_cache = black.read_cache(DEFAULT_MODE) + self.assertNotIn(str(path), normal_cache) + self.assertFormatEqual(expected, actual) + black.assert_equivalent(contents, actual) + black.assert_stable(contents, actual, pyi_mode) @event_loop() - def test_output_locking_when_writeback_diff(self) -> None: - with cache_dir() as workspace: - for tag in range(0, 4): - src = (workspace / f"test{tag}.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") - with patch("black.Manager", wraps=multiprocessing.Manager) as mgr: - self.invokeBlack(["--diff", str(workspace)], exit_code=0) - # this isn't quite doing what we want, but if it _isn't_ - # called then we cannot be using the lock it provides - mgr.assert_called() - - @event_loop() - def test_output_locking_when_writeback_color_diff(self) -> None: - with cache_dir() as workspace: - for tag in range(0, 4): - src = (workspace / f"test{tag}.py").resolve() - with src.open("w") as fobj: - fobj.write("print('hello')") - with patch("black.Manager", wraps=multiprocessing.Manager) as mgr: - self.invokeBlack(["--diff", "--color", str(workspace)], exit_code=0) - # this isn't quite doing what we want, but if it _isn't_ - # called then we cannot be using the lock it provides - mgr.assert_called() - - def test_no_cache_when_stdin(self) -> None: - mode = DEFAULT_MODE - with cache_dir(): - result = CliRunner().invoke( - black.main, ["-"], input=BytesIO(b"print('hello')") - ) - self.assertEqual(result.exit_code, 0) - cache_file = 
black.get_cache_file(mode) - self.assertFalse(cache_file.exists()) - - def test_read_cache_no_cachefile(self) -> None: - mode = DEFAULT_MODE - with cache_dir(): - self.assertEqual(black.read_cache(mode), {}) - - def test_write_cache_read_cache(self) -> None: - mode = DEFAULT_MODE - with cache_dir() as workspace: - src = (workspace / "test.py").resolve() - src.touch() - black.write_cache({}, [src], mode) - cache = black.read_cache(mode) - self.assertIn(str(src), cache) - self.assertEqual(cache[str(src)], black.get_cache_info(src)) - - def test_filter_cached(self) -> None: - with TemporaryDirectory() as workspace: - path = Path(workspace) - uncached = (path / "uncached").resolve() - cached = (path / "cached").resolve() - cached_but_changed = (path / "changed").resolve() - uncached.touch() - cached.touch() - cached_but_changed.touch() - cache = { - str(cached): black.get_cache_info(cached), - str(cached_but_changed): (0.0, 0), - } - todo, done = black.filter_cached( - cache, {uncached, cached, cached_but_changed} - ) - self.assertEqual(todo, {uncached, cached_but_changed}) - self.assertEqual(done, {cached}) - - def test_write_cache_creates_directory_if_needed(self) -> None: - mode = DEFAULT_MODE - with cache_dir(exists=False) as workspace: - self.assertFalse(workspace.exists()) - black.write_cache({}, [], mode) - self.assertTrue(workspace.exists()) - - @event_loop() - def test_failed_formatting_does_not_get_cached(self) -> None: - mode = DEFAULT_MODE - with cache_dir() as workspace, patch( - "black.ProcessPoolExecutor", new=ThreadPoolExecutor - ): - failing = (workspace / "failing.py").resolve() - with failing.open("w") as fobj: - fobj.write("not actually python") - clean = (workspace / "clean.py").resolve() - with clean.open("w") as fobj: - fobj.write('print("hello")\n') - self.invokeBlack([str(workspace)], exit_code=123) - cache = black.read_cache(mode) - self.assertNotIn(str(failing), cache) - self.assertIn(str(clean), cache) - - def test_write_cache_write_fail(self) -> None: - mode = DEFAULT_MODE - with cache_dir(), patch.object(Path, "open") as mock: - mock.side_effect = OSError - black.write_cache({}, [], mode) - - @event_loop() - @patch("black.ProcessPoolExecutor", MagicMock(side_effect=OSError)) - def test_works_in_mono_process_only_environment(self) -> None: - with cache_dir() as workspace: - for f in [ - (workspace / "one.py").resolve(), - (workspace / "two.py").resolve(), - ]: - f.write_text('print("hello")\n') - self.invokeBlack([str(workspace)]) - - @event_loop() - def test_check_diff_use_together(self) -> None: - with cache_dir(): - # Files which will be reformatted. - src1 = (THIS_DIR / "data" / "string_quotes.py").resolve() - self.invokeBlack([str(src1), "--diff", "--check"], exit_code=1) - # Files which will not be reformatted. - src2 = (THIS_DIR / "data" / "composition.py").resolve() - self.invokeBlack([str(src2), "--diff", "--check"]) - # Multi file command. - self.invokeBlack([str(src1), str(src2), "--diff", "--check"], exit_code=1) - - def test_no_files(self) -> None: - with cache_dir(): - # Without an argument, black exits with error code 0. 
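# Annotation (not part of the diff): this deleted test_no_files asserted exit
# code 0 for a bare invocation; its replacement above, test_no_src_fails, pins
# the new behaviour -- no sources (and no --code) is now an error:
from click.testing import CliRunner
import black

assert CliRunner().invoke(black.main, []).exit_code == 1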
- self.invokeBlack([]) - - def test_broken_symlink(self) -> None: - with cache_dir() as workspace: - symlink = workspace / "broken_link.py" - try: - symlink.symlink_to("nonexistent.py") - except OSError as e: - self.skipTest(f"Can't create symlinks: {e}") - self.invokeBlack([str(workspace.resolve())]) - - def test_read_cache_line_lengths(self) -> None: - mode = DEFAULT_MODE - short_mode = replace(DEFAULT_MODE, line_length=1) - with cache_dir() as workspace: - path = (workspace / "file.py").resolve() - path.touch() - black.write_cache({}, [path], mode) - one = black.read_cache(mode) - self.assertIn(str(path), one) - two = black.read_cache(short_mode) - self.assertNotIn(str(path), two) - - def test_single_file_force_pyi(self) -> None: - pyi_mode = replace(DEFAULT_MODE, is_pyi=True) - contents, expected = read_data("force_pyi") - with cache_dir() as workspace: - path = (workspace / "file.py").resolve() - with open(path, "w") as fh: - fh.write(contents) - self.invokeBlack([str(path), "--pyi"]) - with open(path, "r") as fh: - actual = fh.read() - # verify cache with --pyi is separate - pyi_cache = black.read_cache(pyi_mode) - self.assertIn(str(path), pyi_cache) - normal_cache = black.read_cache(DEFAULT_MODE) - self.assertNotIn(str(path), normal_cache) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(contents, actual) - black.assert_stable(contents, actual, pyi_mode) - - @event_loop() - def test_multi_file_force_pyi(self) -> None: - reg_mode = DEFAULT_MODE - pyi_mode = replace(DEFAULT_MODE, is_pyi=True) - contents, expected = read_data("force_pyi") + def test_multi_file_force_pyi(self) -> None: + reg_mode = DEFAULT_MODE + pyi_mode = replace(DEFAULT_MODE, is_pyi=True) + contents, expected = read_data("miscellaneous", "force_pyi") with cache_dir() as workspace: paths = [ (workspace / "file1.py").resolve(), @@ -1301,7 +1060,7 @@ def test_multi_file_force_pyi(self) -> None: self.assertNotIn(str(path), normal_cache) def test_pipe_force_pyi(self) -> None: - source, expected = read_data("force_pyi") + source, expected = read_data("miscellaneous", "force_pyi") result = CliRunner().invoke( black.main, ["-", "-q", "--pyi"], input=BytesIO(source.encode("utf8")) ) @@ -1312,7 +1071,7 @@ def test_pipe_force_pyi(self) -> None: def test_single_file_force_py36(self) -> None: reg_mode = DEFAULT_MODE py36_mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS) - source, expected = read_data("force_py36") + source, expected = read_data("miscellaneous", "force_py36") with cache_dir() as workspace: path = (workspace / "file.py").resolve() with open(path, "w") as fh: @@ -1331,7 +1090,7 @@ def test_single_file_force_py36(self) -> None: def test_multi_file_force_py36(self) -> None: reg_mode = DEFAULT_MODE py36_mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS) - source, expected = read_data("force_py36") + source, expected = read_data("miscellaneous", "force_py36") with cache_dir() as workspace: paths = [ (workspace / "file1.py").resolve(), @@ -1353,7 +1112,7 @@ def test_multi_file_force_py36(self) -> None: self.assertNotIn(str(path), normal_cache) def test_pipe_force_py36(self) -> None: - source, expected = read_data("force_py36") + source, expected = read_data("miscellaneous", "force_py36") result = CliRunner().invoke( black.main, ["-", "-q", "--target-version=py36"], @@ -1363,188 +1122,7 @@ def test_pipe_force_py36(self) -> None: actual = result.output self.assertFormatEqual(actual, expected) - def test_include_exclude(self) -> None: - path = THIS_DIR / "data" / 
"include_exclude_tests" - include = re.compile(r"\.pyi?$") - exclude = re.compile(r"/exclude/|/\.definitely_exclude/") - report = black.Report() - gitignore = PathSpec.from_lines("gitwildmatch", []) - sources: List[Path] = [] - expected = [ - Path(path / "b/dont_exclude/a.py"), - Path(path / "b/dont_exclude/a.pyi"), - ] - this_abs = THIS_DIR.resolve() - sources.extend( - black.gen_python_files( - path.iterdir(), - this_abs, - include, - exclude, - None, - None, - report, - gitignore, - ) - ) - self.assertEqual(sorted(expected), sorted(sources)) - - @patch("black.find_project_root", lambda *args: THIS_DIR.resolve()) - def test_exclude_for_issue_1572(self) -> None: - # Exclude shouldn't touch files that were explicitly given to Black through the - # CLI. Exclude is supposed to only apply to the recursive discovery of files. - # https://github.com/psf/black/issues/1572 - path = THIS_DIR / "data" / "include_exclude_tests" - include = "" - exclude = r"/exclude/|a\.py" - src = str(path / "b/exclude/a.py") - report = black.Report() - expected = [Path(path / "b/exclude/a.py")] - sources = list( - black.get_sources( - ctx=FakeContext(), - src=(src,), - quiet=True, - verbose=False, - include=re.compile(include), - exclude=re.compile(exclude), - extend_exclude=None, - force_exclude=None, - report=report, - stdin_filename=None, - ) - ) - self.assertEqual(sorted(expected), sorted(sources)) - - @patch("black.find_project_root", lambda *args: THIS_DIR.resolve()) - def test_get_sources_with_stdin(self) -> None: - include = "" - exclude = r"/exclude/|a\.py" - src = "-" - report = black.Report() - expected = [Path("-")] - sources = list( - black.get_sources( - ctx=FakeContext(), - src=(src,), - quiet=True, - verbose=False, - include=re.compile(include), - exclude=re.compile(exclude), - extend_exclude=None, - force_exclude=None, - report=report, - stdin_filename=None, - ) - ) - self.assertEqual(sorted(expected), sorted(sources)) - - @patch("black.find_project_root", lambda *args: THIS_DIR.resolve()) - def test_get_sources_with_stdin_filename(self) -> None: - include = "" - exclude = r"/exclude/|a\.py" - src = "-" - report = black.Report() - stdin_filename = str(THIS_DIR / "data/collections.py") - expected = [Path(f"__BLACK_STDIN_FILENAME__{stdin_filename}")] - sources = list( - black.get_sources( - ctx=FakeContext(), - src=(src,), - quiet=True, - verbose=False, - include=re.compile(include), - exclude=re.compile(exclude), - extend_exclude=None, - force_exclude=None, - report=report, - stdin_filename=stdin_filename, - ) - ) - self.assertEqual(sorted(expected), sorted(sources)) - - @patch("black.find_project_root", lambda *args: THIS_DIR.resolve()) - def test_get_sources_with_stdin_filename_and_exclude(self) -> None: - # Exclude shouldn't exclude stdin_filename since it is mimicing the - # file being passed directly. 
This is the same as - # test_exclude_for_issue_1572 - path = THIS_DIR / "data" / "include_exclude_tests" - include = "" - exclude = r"/exclude/|a\.py" - src = "-" - report = black.Report() - stdin_filename = str(path / "b/exclude/a.py") - expected = [Path(f"__BLACK_STDIN_FILENAME__{stdin_filename}")] - sources = list( - black.get_sources( - ctx=FakeContext(), - src=(src,), - quiet=True, - verbose=False, - include=re.compile(include), - exclude=re.compile(exclude), - extend_exclude=None, - force_exclude=None, - report=report, - stdin_filename=stdin_filename, - ) - ) - self.assertEqual(sorted(expected), sorted(sources)) - - @patch("black.find_project_root", lambda *args: THIS_DIR.resolve()) - def test_get_sources_with_stdin_filename_and_extend_exclude(self) -> None: - # Extend exclude shouldn't exclude stdin_filename since it is mimicking the - # file being passed directly. This is the same as - # test_exclude_for_issue_1572 - path = THIS_DIR / "data" / "include_exclude_tests" - include = "" - extend_exclude = r"/exclude/|a\.py" - src = "-" - report = black.Report() - stdin_filename = str(path / "b/exclude/a.py") - expected = [Path(f"__BLACK_STDIN_FILENAME__{stdin_filename}")] - sources = list( - black.get_sources( - ctx=FakeContext(), - src=(src,), - quiet=True, - verbose=False, - include=re.compile(include), - exclude=re.compile(""), - extend_exclude=re.compile(extend_exclude), - force_exclude=None, - report=report, - stdin_filename=stdin_filename, - ) - ) - self.assertEqual(sorted(expected), sorted(sources)) - - @patch("black.find_project_root", lambda *args: THIS_DIR.resolve()) - def test_get_sources_with_stdin_filename_and_force_exclude(self) -> None: - # Force exclude should exclude the file when passing it through - # stdin_filename - path = THIS_DIR / "data" / "include_exclude_tests" - include = "" - force_exclude = r"/exclude/|a\.py" - src = "-" - report = black.Report() - stdin_filename = str(path / "b/exclude/a.py") - sources = list( - black.get_sources( - ctx=FakeContext(), - src=(src,), - quiet=True, - verbose=False, - include=re.compile(include), - exclude=re.compile(""), - extend_exclude=None, - force_exclude=re.compile(force_exclude), - report=report, - stdin_filename=stdin_filename, - ) - ) - self.assertEqual([], sorted(sources)) - + @pytest.mark.incompatible_with_mypyc def test_reformat_one_with_stdin(self) -> None: with patch( "black.format_stdin_to_stdout", @@ -1562,6 +1140,7 @@ def test_reformat_one_with_stdin(self) -> None: fsts.assert_called_once() report.done.assert_called_with(path, black.Changed.YES) + @pytest.mark.incompatible_with_mypyc def test_reformat_one_with_stdin_filename(self) -> None: with patch( "black.format_stdin_to_stdout", @@ -1578,21 +1157,20 @@ def test_reformat_one_with_stdin_filename(self) -> None: mode=DEFAULT_MODE, report=report, ) - fsts.assert_called_once() - # __BLACK_STDIN_FILENAME__ should have been striped + fsts.assert_called_once_with( + fast=True, write_back=black.WriteBack.YES, mode=DEFAULT_MODE + ) + # __BLACK_STDIN_FILENAME__ should have been stripped report.done.assert_called_with(expected, black.Changed.YES) - def test_reformat_one_with_stdin_and_existing_path(self) -> None: + @pytest.mark.incompatible_with_mypyc + def test_reformat_one_with_stdin_filename_pyi(self) -> None: with patch( "black.format_stdin_to_stdout", return_value=lambda *args, **kwargs: black.Changed.YES, ) as fsts: report = MagicMock() - # Even with an existing file, since we are forcing stdin, black - # should output to stdout and not modify the file inplace - p 
= Path(str(THIS_DIR / "data/collections.py")) - # Make sure is_file actually returns True - self.assertTrue(p.is_file()) + p = "foo.pyi" path = Path(f"__BLACK_STDIN_FILENAME__{p}") expected = Path(p) black.reformat_one( @@ -1602,98 +1180,111 @@ def test_reformat_one_with_stdin_and_existing_path(self) -> None: mode=DEFAULT_MODE, report=report, ) - fsts.assert_called_once() - # __BLACK_STDIN_FILENAME__ should have been striped + fsts.assert_called_once_with( + fast=True, + write_back=black.WriteBack.YES, + mode=replace(DEFAULT_MODE, is_pyi=True), + ) + # __BLACK_STDIN_FILENAME__ should have been stripped report.done.assert_called_with(expected, black.Changed.YES) - def test_gitignore_exclude(self) -> None: - path = THIS_DIR / "data" / "include_exclude_tests" - include = re.compile(r"\.pyi?$") - exclude = re.compile(r"") - report = black.Report() - gitignore = PathSpec.from_lines( - "gitwildmatch", ["exclude/", ".definitely_exclude"] - ) - sources: List[Path] = [] - expected = [ - Path(path / "b/dont_exclude/a.py"), - Path(path / "b/dont_exclude/a.pyi"), - ] - this_abs = THIS_DIR.resolve() - sources.extend( - black.gen_python_files( - path.iterdir(), - this_abs, - include, - exclude, - None, - None, - report, - gitignore, + @pytest.mark.incompatible_with_mypyc + def test_reformat_one_with_stdin_filename_ipynb(self) -> None: + with patch( + "black.format_stdin_to_stdout", + return_value=lambda *args, **kwargs: black.Changed.YES, + ) as fsts: + report = MagicMock() + p = "foo.ipynb" + path = Path(f"__BLACK_STDIN_FILENAME__{p}") + expected = Path(p) + black.reformat_one( + path, + fast=True, + write_back=black.WriteBack.YES, + mode=DEFAULT_MODE, + report=report, ) - ) - self.assertEqual(sorted(expected), sorted(sources)) - - def test_empty_include(self) -> None: - path = THIS_DIR / "data" / "include_exclude_tests" - report = black.Report() - gitignore = PathSpec.from_lines("gitwildmatch", []) - empty = re.compile(r"") - sources: List[Path] = [] - expected = [ - Path(path / "b/exclude/a.pie"), - Path(path / "b/exclude/a.py"), - Path(path / "b/exclude/a.pyi"), - Path(path / "b/dont_exclude/a.pie"), - Path(path / "b/dont_exclude/a.py"), - Path(path / "b/dont_exclude/a.pyi"), - Path(path / "b/.definitely_exclude/a.pie"), - Path(path / "b/.definitely_exclude/a.py"), - Path(path / "b/.definitely_exclude/a.pyi"), - ] - this_abs = THIS_DIR.resolve() - sources.extend( - black.gen_python_files( - path.iterdir(), - this_abs, - empty, - re.compile(black.DEFAULT_EXCLUDES), - None, - None, - report, - gitignore, + fsts.assert_called_once_with( + fast=True, + write_back=black.WriteBack.YES, + mode=replace(DEFAULT_MODE, is_ipynb=True), ) - ) - self.assertEqual(sorted(expected), sorted(sources)) + # __BLACK_STDIN_FILENAME__ should have been stripped + report.done.assert_called_with(expected, black.Changed.YES) - def test_extend_exclude(self) -> None: - path = THIS_DIR / "data" / "include_exclude_tests" - report = black.Report() - gitignore = PathSpec.from_lines("gitwildmatch", []) - sources: List[Path] = [] - expected = [ - Path(path / "b/exclude/a.py"), - Path(path / "b/dont_exclude/a.py"), - ] - this_abs = THIS_DIR.resolve() - sources.extend( - black.gen_python_files( - path.iterdir(), - this_abs, - re.compile(black.DEFAULT_INCLUDES), - re.compile(r"\.pyi$"), - re.compile(r"\.definitely_exclude"), - None, - report, - gitignore, + @pytest.mark.incompatible_with_mypyc + def test_reformat_one_with_stdin_and_existing_path(self) -> None: + with patch( + "black.format_stdin_to_stdout", + return_value=lambda *args, 
**kwargs: black.Changed.YES, + ) as fsts: + report = MagicMock() + # Even with an existing file, since we are forcing stdin, black + # should output to stdout and not modify the file inplace + p = THIS_DIR / "data" / "simple_cases" / "collections.py" + # Make sure is_file actually returns True + self.assertTrue(p.is_file()) + path = Path(f"__BLACK_STDIN_FILENAME__{p}") + expected = Path(p) + black.reformat_one( + path, + fast=True, + write_back=black.WriteBack.YES, + mode=DEFAULT_MODE, + report=report, ) - ) - self.assertEqual(sorted(expected), sorted(sources)) + fsts.assert_called_once() + # __BLACK_STDIN_FILENAME__ should have been stripped + report.done.assert_called_with(expected, black.Changed.YES) + + def test_reformat_one_with_stdin_empty(self) -> None: + output = io.StringIO() + with patch("io.TextIOWrapper", lambda *args, **kwargs: output): + try: + black.format_stdin_to_stdout( + fast=True, + content="", + write_back=black.WriteBack.YES, + mode=DEFAULT_MODE, + ) + except io.UnsupportedOperation: + pass # StringIO does not support detach + assert output.getvalue() == "" def test_invalid_cli_regex(self) -> None: for option in ["--include", "--exclude", "--extend-exclude", "--force-exclude"]: self.invokeBlack(["-", option, "**()(!!*)"], exit_code=2) + def test_required_version_matches_version(self) -> None: + self.invokeBlack( + ["--required-version", black.__version__, "-c", "0"], + exit_code=0, + ignore_config=True, + ) + + def test_required_version_matches_partial_version(self) -> None: + self.invokeBlack( + ["--required-version", black.__version__.split(".")[0], "-c", "0"], + exit_code=0, + ignore_config=True, + ) + + def test_required_version_does_not_match_on_minor_version(self) -> None: + self.invokeBlack( + ["--required-version", black.__version__.split(".")[0] + ".999", "-c", "0"], + exit_code=1, + ignore_config=True, + ) + + def test_required_version_does_not_match_version(self) -> None: + result = BlackRunner().invoke( + black.main, + ["--required-version", "20.99b", "-c", "0"], + ) + self.assertEqual(result.exit_code, 1) + self.assertIn("required version", result.stderr) + def test_preserves_line_endings(self) -> None: with TemporaryDirectory() as workspace: test_file = Path(workspace) / "test.py" @@ -1714,89 +1305,47 @@ def test_preserves_line_endings_via_stdin(self) -> None: black.main, ["-", "--fast"], input=BytesIO(contents.encode("utf8")) ) self.assertEqual(result.exit_code, 0) - output = runner.stdout_bytes + output = result.stdout_bytes self.assertIn(nl.encode("utf8"), output) if nl == "\n": self.assertNotIn(b"\r\n", output) + def test_normalize_line_endings(self) -> None: + with TemporaryDirectory() as workspace: + test_file = Path(workspace) / "test.py" + for data, expected in ( + (b"c\r\nc\n ", b"c\r\nc\r\n"), + (b"l\nl\r\n ", b"l\nl\n"), + ): + test_file.write_bytes(data) + ff(test_file, write_back=black.WriteBack.YES) + self.assertEqual(test_file.read_bytes(), expected) + def test_assert_equivalent_different_asts(self) -> None: with self.assertRaises(AssertionError): black.assert_equivalent("{}", "None") - def test_symlink_out_of_root_directory(self) -> None: - path = MagicMock() - root = THIS_DIR.resolve() - child = MagicMock() - include = re.compile(black.DEFAULT_INCLUDES) - exclude = re.compile(black.DEFAULT_EXCLUDES) - report = black.Report() - gitignore = PathSpec.from_lines("gitwildmatch", []) - # `child` should behave like a symlink which resolved path is clearly - # outside of the `root` directory. 
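# Annotation (not part of the diff): the deleted symlink test around this
# point verified that file discovery skips a symlink whose target resolves
# outside the project root, while a regular file resolving outside the root
# raises ValueError. A minimal sketch of that containment check (illustrative,
# not Black's implementation):
from pathlib import Path

def resolves_within_root_sketch(child: Path, root: Path) -> bool:
    try:
        child.resolve().relative_to(root.resolve())
    except ValueError:
        return False
    return True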
- path.iterdir.return_value = [child] - child.resolve.return_value = Path("/a/b/c") - child.as_posix.return_value = "/a/b/c" - child.is_symlink.return_value = True - try: - list( - black.gen_python_files( - path.iterdir(), - root, - include, - exclude, - None, - None, - report, - gitignore, - ) - ) - except ValueError as ve: - self.fail(f"`get_python_files_in_dir()` failed: {ve}") - path.iterdir.assert_called_once() - child.resolve.assert_called_once() - child.is_symlink.assert_called_once() - # `child` should behave like a strange file which resolved path is clearly - # outside of the `root` directory. - child.is_symlink.return_value = False - with self.assertRaises(ValueError): - list( - black.gen_python_files( - path.iterdir(), - root, - include, - exclude, - None, - None, - report, - gitignore, - ) - ) - path.iterdir.assert_called() - self.assertEqual(path.iterdir.call_count, 2) - child.resolve.assert_called() - self.assertEqual(child.resolve.call_count, 2) - child.is_symlink.assert_called() - self.assertEqual(child.is_symlink.call_count, 2) - def test_shhh_click(self) -> None: try: from click import _unicodefun # type: ignore - except ModuleNotFoundError: + except ImportError: self.skipTest("Incompatible Click version") - if not hasattr(_unicodefun, "_verify_python3_env"): + + if not hasattr(_unicodefun, "_verify_python_env"): self.skipTest("Incompatible Click version") + # First, let's see if Click is crashing with a preferred ASCII charset. with patch("locale.getpreferredencoding") as gpe: gpe.return_value = "ASCII" with self.assertRaises(RuntimeError): - _unicodefun._verify_python3_env() + _unicodefun._verify_python_env() # Now, let's silence Click... black.patch_click() # ...and confirm it's silent. with patch("locale.getpreferredencoding") as gpe: gpe.return_value = "ASCII" try: - _unicodefun._verify_python3_env() + _unicodefun._verify_python_env() except RuntimeError as re: self.fail(f"`patch_click()` failed, exception still raised: {re}") @@ -1813,7 +1362,7 @@ def fail(*args: Any, **kwargs: Any) -> None: critical=fail, log=fail, ): - ff(THIS_FILE) + ff(THIS_DIR / "util.py") def test_invalid_config_return_code(self) -> None: tmp_file = Path(black.dump_to_file()) @@ -1834,6 +1383,7 @@ def test_parse_pyproject_toml(self) -> None: self.assertEqual(config["color"], True) self.assertEqual(config["line_length"], 79) self.assertEqual(config["target_version"], ["py36", "py37", "py38"]) + self.assertEqual(config["python_cell_magics"], ["custom1", "custom2"]) self.assertEqual(config["exclude"], r"\.pyi?$") self.assertEqual(config["include"], r"\.py?$") @@ -1851,6 +1401,7 @@ def test_read_pyproject_toml(self) -> None: self.assertEqual(config["exclude"], r"\.pyi?$") self.assertEqual(config["include"], r"\.py?$") + @pytest.mark.incompatible_with_mypyc def test_find_project_root(self) -> None: with TemporaryDirectory() as workspace: root = Path(workspace) @@ -1868,12 +1419,43 @@ def test_find_project_root(self) -> None: src_python.touch() self.assertEqual( - black.find_project_root((src_dir, test_dir)), root.resolve() + black.find_project_root((src_dir, test_dir)), + (root.resolve(), "pyproject.toml"), + ) + self.assertEqual( + black.find_project_root((src_dir,)), + (src_dir.resolve(), "pyproject.toml"), + ) + self.assertEqual( + black.find_project_root((src_python,)), + (src_dir.resolve(), "pyproject.toml"), + ) + + with change_directory(test_dir): + self.assertEqual( + black.find_project_root(("-",), stdin_filename="../src/a.py"), + (src_dir.resolve(), "pyproject.toml"), + ) + + @patch( + 
"black.files.find_user_pyproject_toml", + ) + def test_find_pyproject_toml(self, find_user_pyproject_toml: MagicMock) -> None: + find_user_pyproject_toml.side_effect = RuntimeError() + + with redirect_stderr(io.StringIO()) as stderr: + result = black.files.find_pyproject_toml( + path_search_start=(str(Path.cwd().root),) ) - self.assertEqual(black.find_project_root((src_dir,)), src_dir.resolve()) - self.assertEqual(black.find_project_root((src_python,)), src_dir.resolve()) - @patch("black.find_user_pyproject_toml", black.find_user_pyproject_toml.__wrapped__) + assert result is None + err = stderr.getvalue() + assert "Ignoring user configuration" in err + + @patch( + "black.files.find_user_pyproject_toml", + black.files.find_user_pyproject_toml.__wrapped__, + ) def test_find_user_pyproject_toml_linux(self) -> None: if system() == "Windows": return @@ -1883,7 +1465,7 @@ def test_find_user_pyproject_toml_linux(self) -> None: tmp_user_config = Path(workspace) / "black" with patch.dict("os.environ", {"XDG_CONFIG_HOME": workspace}): self.assertEqual( - black.find_user_pyproject_toml(), tmp_user_config.resolve() + black.files.find_user_pyproject_toml(), tmp_user_config.resolve() ) # Test fallback for XDG_CONFIG_HOME @@ -1891,7 +1473,7 @@ def test_find_user_pyproject_toml_linux(self) -> None: os.environ.pop("XDG_CONFIG_HOME", None) fallback_user_config = Path("~/.config").expanduser() / "black" self.assertEqual( - black.find_user_pyproject_toml(), fallback_user_config.resolve() + black.files.find_user_pyproject_toml(), fallback_user_config.resolve() ) def test_find_user_pyproject_toml_windows(self) -> None: @@ -1899,24 +1481,40 @@ def test_find_user_pyproject_toml_windows(self) -> None: return user_config_path = Path.home() / ".black" - self.assertEqual(black.find_user_pyproject_toml(), user_config_path.resolve()) + self.assertEqual( + black.files.find_user_pyproject_toml(), user_config_path.resolve() + ) def test_bpo_33660_workaround(self) -> None: if system() == "Windows": return # https://bugs.python.org/issue33660 - - old_cwd = Path.cwd() - try: - root = Path("/") - os.chdir(str(root)) + root = Path("/") + with change_directory(root): path = Path("workspace") / "project" report = black.Report(verbose=True) normalized_path = black.normalize_path_maybe_ignore(path, root, report) self.assertEqual(normalized_path, "workspace/project") - finally: - os.chdir(str(old_cwd)) + + def test_normalize_path_ignore_windows_junctions_outside_of_root(self) -> None: + if system() != "Windows": + return + + with TemporaryDirectory() as workspace: + root = Path(workspace) + junction_dir = root / "junction" + junction_target_outside_of_root = root / ".." 
+ os.system(f"mklink /J {junction_dir} {junction_target_outside_of_root}") + + report = black.Report(verbose=True) + normalized_path = black.normalize_path_maybe_ignore( + junction_dir, root, report + ) + # Manually delete for Python < 3.8 + os.system(f"rmdir {junction_dir}") + + self.assertEqual(normalized_path, None) def test_newline_comment_interaction(self) -> None: source = "class A:\\\r\n# type: ignore\n pass\n" @@ -1924,13 +1522,12 @@ def test_newline_comment_interaction(self) -> None: black.assert_stable(source, output, mode=DEFAULT_MODE) def test_bpo_2142_workaround(self) -> None: - # https://bugs.python.org/issue2142 - source, _ = read_data("missing_final_newline.py") + source, _ = read_data("miscellaneous", "missing_final_newline") # read_data adds a trailing newline source = source.rstrip() - expected, _ = read_data("missing_final_newline.diff") + expected, _ = read_data("miscellaneous", "missing_final_newline.diff") tmp_file = Path(black.dump_to_file(source, ensure_final_newline=False)) diff_header = re.compile( rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d " @@ -1945,32 +1542,659 @@ def test_bpo_2142_workaround(self) -> None: actual = diff_header.sub(DETERMINISTIC_HEADER, actual) self.assertEqual(actual, expected) - def test_docstring_reformat_for_py27(self) -> None: + @staticmethod + def compare_results( + result: click.testing.Result, expected_value: str, expected_exit_code: int + ) -> None: + """Helper method to test the value and exit code of a click Result.""" + assert ( + result.output == expected_value + ), "The output did not match the expected value." + assert result.exit_code == expected_exit_code, "The exit code is incorrect." + + def test_code_option(self) -> None: + """Test the code option with no changes.""" + code = 'print("Hello world")\n' + args = ["--code", code] + result = CliRunner().invoke(black.main, args) + + self.compare_results(result, code, 0) + + def test_code_option_changed(self) -> None: + """Test the code option when changes are required.""" + code = "print('hello world')" + formatted = black.format_str(code, mode=DEFAULT_MODE) + + args = ["--code", code] + result = CliRunner().invoke(black.main, args) + + self.compare_results(result, formatted, 0) + + def test_code_option_check(self) -> None: + """Test the code option when check is passed.""" + args = ["--check", "--code", 'print("Hello world")\n'] + result = CliRunner().invoke(black.main, args) + self.compare_results(result, "", 0) + + def test_code_option_check_changed(self) -> None: + """Test the code option when changes are required, and check is passed.""" + args = ["--check", "--code", "print('hello world')"] + result = CliRunner().invoke(black.main, args) + self.compare_results(result, "", 1) + + def test_code_option_diff(self) -> None: + """Test the code option when diff is passed.""" + code = "print('hello world')" + formatted = black.format_str(code, mode=DEFAULT_MODE) + result_diff = diff(code, formatted, "STDIN", "STDOUT") + + args = ["--diff", "--code", code] + result = CliRunner().invoke(black.main, args) + + # Remove time from diff + output = DIFF_TIME.sub("", result.output) + + assert output == result_diff, "The output did not match the expected value." + assert result.exit_code == 0, "The exit code is incorrect." 
+
+    def test_code_option_color_diff(self) -> None:
+        """Test the code option when color and diff are passed."""
+        code = "print('hello world')"
+        formatted = black.format_str(code, mode=DEFAULT_MODE)
+
+        result_diff = diff(code, formatted, "STDIN", "STDOUT")
+        result_diff = color_diff(result_diff)
+
+        args = ["--diff", "--color", "--code", code]
+        result = CliRunner().invoke(black.main, args)
+
+        # Remove time from diff
+        output = DIFF_TIME.sub("", result.output)
+
+        assert output == result_diff, "The output did not match the expected value."
+        assert result.exit_code == 0, "The exit code is incorrect."
+
+    @pytest.mark.incompatible_with_mypyc
+    def test_code_option_safe(self) -> None:
+        """Test that the code option throws an error when the sanity checks fail."""
+        # Patch black.assert_equivalent to ensure the sanity checks fail
+        with patch.object(black, "assert_equivalent", side_effect=AssertionError):
+            code = 'print("Hello world")'
+            error_msg = f"{code}\nerror: cannot format <string>: \n"
+
+            args = ["--safe", "--code", code]
+            result = CliRunner().invoke(black.main, args)
+
+            self.compare_results(result, error_msg, 123)
+
+    def test_code_option_fast(self) -> None:
+        """Test that the code option ignores errors when the sanity checks fail."""
+        # Patch black.assert_equivalent to ensure the sanity checks fail
+        with patch.object(black, "assert_equivalent", side_effect=AssertionError):
+            code = 'print("Hello world")'
+            formatted = black.format_str(code, mode=DEFAULT_MODE)
+
+            args = ["--fast", "--code", code]
+            result = CliRunner().invoke(black.main, args)
+
+            self.compare_results(result, formatted, 0)
+
+    @pytest.mark.incompatible_with_mypyc
+    def test_code_option_config(self) -> None:
+        """
+        Test that the code option finds the pyproject.toml in the current directory.
+        """
+        with patch.object(black, "parse_pyproject_toml", return_value={}) as parse:
+            args = ["--code", "print"]
+            # This is the only directory known to contain a pyproject.toml
+            with change_directory(PROJECT_ROOT):
+                CliRunner().invoke(black.main, args)
+                pyproject_path = Path(Path.cwd(), "pyproject.toml").resolve()
+
+            assert (
+                len(parse.mock_calls) >= 1
+            ), "Expected config parse to be called with the current directory."
+
+            _, call_args, _ = parse.mock_calls[0]
+            assert (
+                call_args[0].lower() == str(pyproject_path).lower()
+            ), "Incorrect config loaded."
+
+    @pytest.mark.incompatible_with_mypyc
+    def test_code_option_parent_config(self) -> None:
+        """
+        Test that the code option finds the pyproject.toml in the parent directory.
+        """
+        with patch.object(black, "parse_pyproject_toml", return_value={}) as parse:
+            with change_directory(THIS_DIR):
+                args = ["--code", "print"]
+                CliRunner().invoke(black.main, args)
+
+                pyproject_path = Path(Path().cwd().parent, "pyproject.toml").resolve()
+                assert (
+                    len(parse.mock_calls) >= 1
+                ), "Expected config parse to be called with the current directory."
+
+                _, call_args, _ = parse.mock_calls[0]
+                assert (
+                    call_args[0].lower() == str(pyproject_path).lower()
+                ), "Incorrect config loaded."
+
+    def test_for_handled_unexpected_eof_error(self) -> None:
         """
-        Check that stripping trailing whitespace from Python 2 docstrings
-        doesn't trigger a "not equivalent to source" error
+        Test that an unexpected EOF SyntaxError is nicely presented.
""" - source = ( - b'def foo():\r\n """Testing\r\n Testing """\r\n print "Foo"\r\n' + with pytest.raises(black.parsing.InvalidInput) as exc_info: + black.lib2to3_parse("print(", {}) + + exc_info.match("Cannot parse: 2:0: EOF in multi-line statement") + + def test_equivalency_ast_parse_failure_includes_error(self) -> None: + with pytest.raises(AssertionError) as err: + black.assert_equivalent("a«»a = 1", "a«»a = 1") + + err.match("--safe") + # Unfortunately the SyntaxError message has changed in newer versions so we + # can't match it directly. + err.match("invalid character") + err.match(r"\(, line 1\)") + + +class TestCaching: + def test_get_cache_dir( + self, + tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + # Create multiple cache directories + workspace1 = tmp_path / "ws1" + workspace1.mkdir() + workspace2 = tmp_path / "ws2" + workspace2.mkdir() + + # Force user_cache_dir to use the temporary directory for easier assertions + patch_user_cache_dir = patch( + target="black.cache.user_cache_dir", + autospec=True, + return_value=str(workspace1), ) - expected = 'def foo():\n """Testing\n Testing"""\n print "Foo"\n' - result = CliRunner().invoke( - black.main, - ["-", "-q", "--target-version=py27"], - input=BytesIO(source), + # If BLACK_CACHE_DIR is not set, use user_cache_dir + monkeypatch.delenv("BLACK_CACHE_DIR", raising=False) + with patch_user_cache_dir: + assert get_cache_dir() == workspace1 + + # If it is set, use the path provided in the env var. + monkeypatch.setenv("BLACK_CACHE_DIR", str(workspace2)) + assert get_cache_dir() == workspace2 + + def test_cache_broken_file(self) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace: + cache_file = get_cache_file(mode) + cache_file.write_text("this is not a pickle") + assert black.read_cache(mode) == {} + src = (workspace / "test.py").resolve() + src.write_text("print('hello')") + invokeBlack([str(src)]) + cache = black.read_cache(mode) + assert str(src) in cache + + def test_cache_single_file_already_cached(self) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace: + src = (workspace / "test.py").resolve() + src.write_text("print('hello')") + black.write_cache({}, [src], mode) + invokeBlack([str(src)]) + assert src.read_text() == "print('hello')" + + @event_loop() + def test_cache_multiple_files(self) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace, patch( + "concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor + ): + one = (workspace / "one.py").resolve() + with one.open("w") as fobj: + fobj.write("print('hello')") + two = (workspace / "two.py").resolve() + with two.open("w") as fobj: + fobj.write("print('hello')") + black.write_cache({}, [one], mode) + invokeBlack([str(workspace)]) + with one.open("r") as fobj: + assert fobj.read() == "print('hello')" + with two.open("r") as fobj: + assert fobj.read() == 'print("hello")\n' + cache = black.read_cache(mode) + assert str(one) in cache + assert str(two) in cache + + @pytest.mark.parametrize("color", [False, True], ids=["no-color", "with-color"]) + def test_no_cache_when_writeback_diff(self, color: bool) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace: + src = (workspace / "test.py").resolve() + with src.open("w") as fobj: + fobj.write("print('hello')") + with patch("black.read_cache") as read_cache, patch( + "black.write_cache" + ) as write_cache: + cmd = [str(src), "--diff"] + if color: + cmd.append("--color") + invokeBlack(cmd) + cache_file = get_cache_file(mode) + assert cache_file.exists() is False + 
write_cache.assert_not_called() + read_cache.assert_not_called() + + @pytest.mark.parametrize("color", [False, True], ids=["no-color", "with-color"]) + @event_loop() + def test_output_locking_when_writeback_diff(self, color: bool) -> None: + with cache_dir() as workspace: + for tag in range(0, 4): + src = (workspace / f"test{tag}.py").resolve() + with src.open("w") as fobj: + fobj.write("print('hello')") + with patch( + "black.concurrency.Manager", wraps=multiprocessing.Manager + ) as mgr: + cmd = ["--diff", str(workspace)] + if color: + cmd.append("--color") + invokeBlack(cmd, exit_code=0) + # this isn't quite doing what we want, but if it _isn't_ + # called then we cannot be using the lock it provides + mgr.assert_called() + + def test_no_cache_when_stdin(self) -> None: + mode = DEFAULT_MODE + with cache_dir(): + result = CliRunner().invoke( + black.main, ["-"], input=BytesIO(b"print('hello')") + ) + assert not result.exit_code + cache_file = get_cache_file(mode) + assert not cache_file.exists() + + def test_read_cache_no_cachefile(self) -> None: + mode = DEFAULT_MODE + with cache_dir(): + assert black.read_cache(mode) == {} + + def test_write_cache_read_cache(self) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace: + src = (workspace / "test.py").resolve() + src.touch() + black.write_cache({}, [src], mode) + cache = black.read_cache(mode) + assert str(src) in cache + assert cache[str(src)] == black.get_cache_info(src) + + def test_filter_cached(self) -> None: + with TemporaryDirectory() as workspace: + path = Path(workspace) + uncached = (path / "uncached").resolve() + cached = (path / "cached").resolve() + cached_but_changed = (path / "changed").resolve() + uncached.touch() + cached.touch() + cached_but_changed.touch() + cache = { + str(cached): black.get_cache_info(cached), + str(cached_but_changed): (0.0, 0), + } + todo, done = black.cache.filter_cached( + cache, {uncached, cached, cached_but_changed} + ) + assert todo == {uncached, cached_but_changed} + assert done == {cached} + + def test_write_cache_creates_directory_if_needed(self) -> None: + mode = DEFAULT_MODE + with cache_dir(exists=False) as workspace: + assert not workspace.exists() + black.write_cache({}, [], mode) + assert workspace.exists() + + @event_loop() + def test_failed_formatting_does_not_get_cached(self) -> None: + mode = DEFAULT_MODE + with cache_dir() as workspace, patch( + "concurrent.futures.ProcessPoolExecutor", new=ThreadPoolExecutor + ): + failing = (workspace / "failing.py").resolve() + with failing.open("w") as fobj: + fobj.write("not actually python") + clean = (workspace / "clean.py").resolve() + with clean.open("w") as fobj: + fobj.write('print("hello")\n') + invokeBlack([str(workspace)], exit_code=123) + cache = black.read_cache(mode) + assert str(failing) not in cache + assert str(clean) in cache + + def test_write_cache_write_fail(self) -> None: + mode = DEFAULT_MODE + with cache_dir(), patch.object(Path, "open") as mock: + mock.side_effect = OSError + black.write_cache({}, [], mode) + + def test_read_cache_line_lengths(self) -> None: + mode = DEFAULT_MODE + short_mode = replace(DEFAULT_MODE, line_length=1) + with cache_dir() as workspace: + path = (workspace / "file.py").resolve() + path.touch() + black.write_cache({}, [path], mode) + one = black.read_cache(mode) + assert str(path) in one + two = black.read_cache(short_mode) + assert str(path) not in two + + +def assert_collected_sources( + src: Sequence[Union[str, Path]], + expected: Sequence[Union[str, Path]], + *, + ctx: 
Optional[FakeContext] = None, + exclude: Optional[str] = None, + include: Optional[str] = None, + extend_exclude: Optional[str] = None, + force_exclude: Optional[str] = None, + stdin_filename: Optional[str] = None, +) -> None: + gs_src = tuple(str(Path(s)) for s in src) + gs_expected = [Path(s) for s in expected] + gs_exclude = None if exclude is None else compile_pattern(exclude) + gs_include = DEFAULT_INCLUDE if include is None else compile_pattern(include) + gs_extend_exclude = ( + None if extend_exclude is None else compile_pattern(extend_exclude) + ) + gs_force_exclude = None if force_exclude is None else compile_pattern(force_exclude) + collected = black.get_sources( + ctx=ctx or FakeContext(), + src=gs_src, + quiet=False, + verbose=False, + include=gs_include, + exclude=gs_exclude, + extend_exclude=gs_extend_exclude, + force_exclude=gs_force_exclude, + report=black.Report(), + stdin_filename=stdin_filename, + ) + assert sorted(collected) == sorted(gs_expected) + + +class TestFileCollection: + def test_include_exclude(self) -> None: + path = THIS_DIR / "data" / "include_exclude_tests" + src = [path] + expected = [ + Path(path / "b/dont_exclude/a.py"), + Path(path / "b/dont_exclude/a.pyi"), + ] + assert_collected_sources( + src, + expected, + include=r"\.pyi?$", + exclude=r"/exclude/|/\.definitely_exclude/", ) - self.assertEqual(result.exit_code, 0) - actual = result.output - self.assertFormatEqual(actual, expected) + def test_gitignore_used_as_default(self) -> None: + base = Path(DATA_DIR / "include_exclude_tests") + expected = [ + base / "b/.definitely_exclude/a.py", + base / "b/.definitely_exclude/a.pyi", + ] + src = [base / "b/"] + ctx = FakeContext() + ctx.obj["root"] = base + assert_collected_sources(src, expected, ctx=ctx, extend_exclude=r"/exclude/") + + @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None)) + def test_exclude_for_issue_1572(self) -> None: + # Exclude shouldn't touch files that were explicitly given to Black through the + # CLI. Exclude is supposed to only apply to the recursive discovery of files. 
+ # https://github.com/psf/black/issues/1572 + path = DATA_DIR / "include_exclude_tests" + src = [path / "b/exclude/a.py"] + expected = [path / "b/exclude/a.py"] + assert_collected_sources(src, expected, include="", exclude=r"/exclude/|a\.py") + + def test_gitignore_exclude(self) -> None: + path = THIS_DIR / "data" / "include_exclude_tests" + include = re.compile(r"\.pyi?$") + exclude = re.compile(r"") + report = black.Report() + gitignore = PathSpec.from_lines( + "gitwildmatch", ["exclude/", ".definitely_exclude"] + ) + sources: List[Path] = [] + expected = [ + Path(path / "b/dont_exclude/a.py"), + Path(path / "b/dont_exclude/a.pyi"), + ] + this_abs = THIS_DIR.resolve() + sources.extend( + black.gen_python_files( + path.iterdir(), + this_abs, + include, + exclude, + None, + None, + report, + gitignore, + verbose=False, + quiet=False, + ) + ) + assert sorted(expected) == sorted(sources) + def test_nested_gitignore(self) -> None: + path = Path(THIS_DIR / "data" / "nested_gitignore_tests") + include = re.compile(r"\.pyi?$") + exclude = re.compile(r"") + root_gitignore = black.files.get_gitignore(path) + report = black.Report() + expected: List[Path] = [ + Path(path / "x.py"), + Path(path / "root/b.py"), + Path(path / "root/c.py"), + Path(path / "root/child/c.py"), + ] + this_abs = THIS_DIR.resolve() + sources = list( + black.gen_python_files( + path.iterdir(), + this_abs, + include, + exclude, + None, + None, + report, + root_gitignore, + verbose=False, + quiet=False, + ) + ) + assert sorted(expected) == sorted(sources) + + def test_nested_gitignore_directly_in_source_directory(self) -> None: + # https://github.com/psf/black/issues/2598 + path = Path(DATA_DIR / "nested_gitignore_tests") + src = Path(path / "root" / "child") + expected = [src / "a.py", src / "c.py"] + assert_collected_sources([src], expected) + + def test_invalid_gitignore(self) -> None: + path = THIS_DIR / "data" / "invalid_gitignore_tests" + empty_config = path / "pyproject.toml" + result = BlackRunner().invoke( + black.main, ["--verbose", "--config", str(empty_config), str(path)] + ) + assert result.exit_code == 1 + assert result.stderr_bytes is not None + + gitignore = path / ".gitignore" + assert f"Could not parse {gitignore}" in result.stderr_bytes.decode() + + def test_invalid_nested_gitignore(self) -> None: + path = THIS_DIR / "data" / "invalid_nested_gitignore_tests" + empty_config = path / "pyproject.toml" + result = BlackRunner().invoke( + black.main, ["--verbose", "--config", str(empty_config), str(path)] + ) + assert result.exit_code == 1 + assert result.stderr_bytes is not None -with open(black.__file__, "r", encoding="utf-8") as _bf: - black_source_lines = _bf.readlines() + gitignore = path / "a" / ".gitignore" + assert f"Could not parse {gitignore}" in result.stderr_bytes.decode() + + def test_empty_include(self) -> None: + path = DATA_DIR / "include_exclude_tests" + src = [path] + expected = [ + Path(path / "b/exclude/a.pie"), + Path(path / "b/exclude/a.py"), + Path(path / "b/exclude/a.pyi"), + Path(path / "b/dont_exclude/a.pie"), + Path(path / "b/dont_exclude/a.py"), + Path(path / "b/dont_exclude/a.pyi"), + Path(path / "b/.definitely_exclude/a.pie"), + Path(path / "b/.definitely_exclude/a.py"), + Path(path / "b/.definitely_exclude/a.pyi"), + Path(path / ".gitignore"), + Path(path / "pyproject.toml"), + ] + # Setting exclude explicitly to an empty string to block .gitignore usage. 
+        assert_collected_sources(src, expected, include="", exclude="")
+
+    def test_extend_exclude(self) -> None:
+        path = DATA_DIR / "include_exclude_tests"
+        src = [path]
+        expected = [
+            Path(path / "b/exclude/a.py"),
+            Path(path / "b/dont_exclude/a.py"),
+        ]
+        assert_collected_sources(
+            src, expected, exclude=r"\.pyi$", extend_exclude=r"\.definitely_exclude"
+        )
+
+    @pytest.mark.incompatible_with_mypyc
+    def test_symlink_out_of_root_directory(self) -> None:
+        path = MagicMock()
+        root = THIS_DIR.resolve()
+        child = MagicMock()
+        include = re.compile(black.DEFAULT_INCLUDES)
+        exclude = re.compile(black.DEFAULT_EXCLUDES)
+        report = black.Report()
+        gitignore = PathSpec.from_lines("gitwildmatch", [])
+        # `child` should behave like a symlink whose resolved path is clearly
+        # outside of the `root` directory.
+        path.iterdir.return_value = [child]
+        child.resolve.return_value = Path("/a/b/c")
+        child.as_posix.return_value = "/a/b/c"
+        try:
+            list(
+                black.gen_python_files(
+                    path.iterdir(),
+                    root,
+                    include,
+                    exclude,
+                    None,
+                    None,
+                    report,
+                    gitignore,
+                    verbose=False,
+                    quiet=False,
+                )
+            )
+        except ValueError as ve:
+            pytest.fail(f"`get_python_files_in_dir()` failed: {ve}")
+        path.iterdir.assert_called_once()
+        child.resolve.assert_called_once()
+
+    @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None))
+    def test_get_sources_with_stdin(self) -> None:
+        src = ["-"]
+        expected = ["-"]
+        assert_collected_sources(src, expected, include="", exclude=r"/exclude/|a\.py")
+
+    @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None))
+    def test_get_sources_with_stdin_filename(self) -> None:
+        src = ["-"]
+        stdin_filename = str(THIS_DIR / "data/collections.py")
+        expected = [f"__BLACK_STDIN_FILENAME__{stdin_filename}"]
+        assert_collected_sources(
+            src,
+            expected,
+            exclude=r"/exclude/a\.py",
+            stdin_filename=stdin_filename,
+        )
+
+    @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None))
+    def test_get_sources_with_stdin_filename_and_exclude(self) -> None:
+        # Exclude shouldn't exclude stdin_filename since it is mimicking the
+        # file being passed directly. This is the same as
+        # test_exclude_for_issue_1572
+        path = DATA_DIR / "include_exclude_tests"
+        src = ["-"]
+        stdin_filename = str(path / "b/exclude/a.py")
+        expected = [f"__BLACK_STDIN_FILENAME__{stdin_filename}"]
+        assert_collected_sources(
+            src,
+            expected,
+            exclude=r"/exclude/|a\.py",
+            stdin_filename=stdin_filename,
+        )
+
+    @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None))
+    def test_get_sources_with_stdin_filename_and_extend_exclude(self) -> None:
+        # Extend exclude shouldn't exclude stdin_filename since it is mimicking the
+        # file being passed directly.
This is the same as + # test_exclude_for_issue_1572 + src = ["-"] + path = THIS_DIR / "data" / "include_exclude_tests" + stdin_filename = str(path / "b/exclude/a.py") + expected = [f"__BLACK_STDIN_FILENAME__{stdin_filename}"] + assert_collected_sources( + src, + expected, + extend_exclude=r"/exclude/|a\.py", + stdin_filename=stdin_filename, + ) + + @patch("black.find_project_root", lambda *args: (THIS_DIR.resolve(), None)) + def test_get_sources_with_stdin_filename_and_force_exclude(self) -> None: + # Force exclude should exclude the file when passing it through + # stdin_filename + path = THIS_DIR / "data" / "include_exclude_tests" + stdin_filename = str(path / "b/exclude/a.py") + assert_collected_sources( + src=["-"], + expected=[], + force_exclude=r"/exclude/|a\.py", + stdin_filename=stdin_filename, + ) + + +try: + with open(black.__file__, "r", encoding="utf-8") as _bf: + black_source_lines = _bf.readlines() +except UnicodeDecodeError: + if not black.COMPILED: + raise + + +def tracefunc( + frame: types.FrameType, event: str, arg: Any +) -> Callable[[types.FrameType, str, Any], Any]: """Show function calls `from black/__init__.py` as they happen. Register this with `sys.settrace()` in a test you're debugging. @@ -1990,7 +2214,3 @@ def tracefunc(frame: types.FrameType, event: str, arg: Any) -> Callable: if "black/__init__.py" in filename: print(f"{' ' * stack}{lineno}:{funcname}") return tracefunc - - -if __name__ == "__main__": - unittest.main(module="test_black") diff --git a/tests/test_blackd.py b/tests/test_blackd.py index 9127297..5b6461f 100644 --- a/tests/test_blackd.py +++ b/tests/test_blackd.py @@ -1,23 +1,37 @@ import re -import unittest +from typing import TYPE_CHECKING, Any, Callable, TypeVar from unittest.mock import patch +import pytest from click.testing import CliRunner -from tests.util import read_data, DETERMINISTIC_HEADER, skip_if_exception +from tests.util import DETERMINISTIC_HEADER, read_data try: - import blackd - from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop from aiohttp import web -except ImportError: - has_blackd_deps = False -else: - has_blackd_deps = True + from aiohttp.test_utils import AioHTTPTestCase + + import blackd +except ImportError as e: + raise RuntimeError("Please install Black with the 'd' extra") from e +if TYPE_CHECKING: + F = TypeVar("F", bound=Callable[..., Any]) -class BlackDTestCase(AioHTTPTestCase): - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") + unittest_run_loop: Callable[[F], F] = lambda x: x +else: + try: + from aiohttp.test_utils import unittest_run_loop + except ImportError: + # unittest_run_loop is unnecessary and a no-op since aiohttp 3.8, and + # aiohttp 4 removed it. To maintain compatibility we can make our own + # no-op decorator. 
+ def unittest_run_loop(func, *args, **kwargs): + return func + + +@pytest.mark.blackd +class BlackDTestCase(AioHTTPTestCase): # type: ignore[misc] def test_blackd_main(self) -> None: with patch("blackd.web.run_app"): result = CliRunner().invoke(blackd.main, []) @@ -28,10 +42,6 @@ def test_blackd_main(self) -> None: async def get_application(self) -> web.Application: return blackd.make_app() - # TODO: remove these decorators once the below is released - # https://github.com/aio-libs/aiohttp/pull/3727 - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_request_needs_formatting(self) -> None: response = await self.client.post("/", data=b"print('hello world')") @@ -39,16 +49,12 @@ async def test_blackd_request_needs_formatting(self) -> None: self.assertEqual(response.charset, "utf8") self.assertEqual(await response.read(), b'print("hello world")\n') - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_request_no_change(self) -> None: response = await self.client.post("/", data=b'print("hello world")\n') self.assertEqual(response.status, 204) self.assertEqual(await response.read(), b"") - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_request_syntax_error(self) -> None: response = await self.client.post("/", data=b"what even ( is") @@ -59,8 +65,6 @@ async def test_blackd_request_syntax_error(self) -> None: msg=f"Expected error to start with 'Cannot parse', got {repr(content)}", ) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_unsupported_version(self) -> None: response = await self.client.post( @@ -68,8 +72,6 @@ async def test_blackd_unsupported_version(self) -> None: ) self.assertEqual(response.status, 501) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_supported_version(self) -> None: response = await self.client.post( @@ -77,13 +79,13 @@ async def test_blackd_supported_version(self) -> None: ) self.assertEqual(response.status, 200) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_invalid_python_variant(self) -> None: async def check(header_value: str, expected_status: int = 400) -> None: response = await self.client.post( - "/", data=b"what", headers={blackd.PYTHON_VARIANT_HEADER: header_value} + "/", + data=b"what", + headers={blackd.PYTHON_VARIANT_HEADER: header_value}, ) self.assertEqual(response.status, expected_status) @@ -91,33 +93,32 @@ async def check(header_value: str, expected_status: int = 400) -> None: await check("ruby3.5") await check("pyi3.6") await check("py1.5") + await check("2") + await check("2.7") + await check("py2.7") await check("2.8") await check("py2.8") await check("3.0") await check("pypy3.0") await check("jython3.4") - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_pyi(self) -> None: - source, expected = read_data("stub.pyi") + source, expected = read_data("miscellaneous", 
"stub.pyi") response = await self.client.post( "/", data=source, headers={blackd.PYTHON_VARIANT_HEADER: "pyi"} ) self.assertEqual(response.status, 200) self.assertEqual(await response.text(), expected) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_diff(self) -> None: diff_header = re.compile( r"(In|Out)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d \+\d\d\d\d" ) - source, _ = read_data("blackd_diff.py") - expected, _ = read_data("blackd_diff.diff") + source, _ = read_data("miscellaneous", "blackd_diff") + expected, _ = read_data("miscellaneous", "blackd_diff.diff") response = await self.client.post( "/", data=source, headers={blackd.DIFF_HEADER: "true"} @@ -128,8 +129,6 @@ async def test_blackd_diff(self) -> None: actual = diff_header.sub(DETERMINISTIC_HEADER, actual) self.assertEqual(actual, expected) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_python_variant(self) -> None: code = ( @@ -157,17 +156,11 @@ async def check(header_value: str, expected_status: int) -> None: await check("py36,py37", 200) await check("36", 200) await check("3.6.4", 200) - - await check("2", 204) - await check("2.7", 204) - await check("py2.7", 204) await check("3.4", 204) await check("py3.4", 204) await check("py34,py36", 204) await check("34", 204) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_line_length(self) -> None: response = await self.client.post( @@ -175,18 +168,75 @@ async def test_blackd_line_length(self) -> None: ) self.assertEqual(response.status, 200) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") @unittest_run_loop async def test_blackd_invalid_line_length(self) -> None: response = await self.client.post( - "/", data=b'print("hello")\n', headers={blackd.LINE_LENGTH_HEADER: "NaN"} + "/", + data=b'print("hello")\n', + headers={blackd.LINE_LENGTH_HEADER: "NaN"}, ) self.assertEqual(response.status, 400) - @skip_if_exception("ClientOSError") - @unittest.skipUnless(has_blackd_deps, "blackd's dependencies are not installed") + @unittest_run_loop + async def test_blackd_skip_first_source_line(self) -> None: + invalid_first_line = b"Header will be skipped\r\ni = [1,2,3]\nj = [1,2,3]\n" + expected_result = b"Header will be skipped\r\ni = [1, 2, 3]\nj = [1, 2, 3]\n" + response = await self.client.post("/", data=invalid_first_line) + self.assertEqual(response.status, 400) + response = await self.client.post( + "/", + data=invalid_first_line, + headers={blackd.SKIP_SOURCE_FIRST_LINE: "true"}, + ) + self.assertEqual(response.status, 200) + self.assertEqual(await response.read(), expected_result) + + @unittest_run_loop + async def test_blackd_preview(self) -> None: + response = await self.client.post( + "/", data=b'print("hello")\n', headers={blackd.PREVIEW: "true"} + ) + self.assertEqual(response.status, 204) + @unittest_run_loop async def test_blackd_response_black_version_header(self) -> None: response = await self.client.post("/") self.assertIsNotNone(response.headers.get(blackd.BLACK_VERSION_HEADER)) + + @unittest_run_loop + async def test_cors_preflight(self) -> None: + response = await self.client.options( + "/", + headers={ + "Access-Control-Request-Method": "POST", + "Origin": "*", + 
"Access-Control-Request-Headers": "Content-Type", + }, + ) + self.assertEqual(response.status, 200) + self.assertIsNotNone(response.headers.get("Access-Control-Allow-Origin")) + self.assertIsNotNone(response.headers.get("Access-Control-Allow-Headers")) + self.assertIsNotNone(response.headers.get("Access-Control-Allow-Methods")) + + @unittest_run_loop + async def test_cors_headers_present(self) -> None: + response = await self.client.post("/", headers={"Origin": "*"}) + self.assertIsNotNone(response.headers.get("Access-Control-Allow-Origin")) + self.assertIsNotNone(response.headers.get("Access-Control-Expose-Headers")) + + @unittest_run_loop + async def test_preserves_line_endings(self) -> None: + for data in (b"c\r\nc\r\n", b"l\nl\n"): + # test preserved newlines when reformatted + response = await self.client.post("/", data=data + b" ") + self.assertEqual(await response.text(), data.decode()) + # test 204 when no change + response = await self.client.post("/", data=data) + self.assertEqual(response.status, 204) + + @unittest_run_loop + async def test_normalizes_line_endings(self) -> None: + for data, expected in ((b"c\r\nc\n", "c\r\nc\r\n"), (b"l\nl\r\n", "l\nl\n")): + response = await self.client.post("/", data=data) + self.assertEqual(await response.text(), expected) + self.assertEqual(response.status, 200) diff --git a/tests/test_format.py b/tests/test_format.py index e1335ae..01cd61e 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -1,111 +1,177 @@ +from dataclasses import replace +from typing import Any, Iterator from unittest.mock import patch -import black -from parameterized import parameterized +import pytest +import black from tests.util import ( - BlackBaseTestCase, - fs, - ff, DEFAULT_MODE, + PY36_VERSIONS, + all_data_cases, + assert_format, dump_to_stderr, read_data, - THIS_DIR, ) -SIMPLE_CASES = [ - "beginning_backslash", - "bracketmatch", - "class_blank_parentheses", - "class_methods_new_line", - "collections", - "comments", - "comments2", - "comments3", - "comments4", - "comments5", - "comments6", - "comments_non_breaking_space", - "comment_after_escaped_newline", - "composition", - "composition_no_trailing_comma", - "docstring", - "empty_lines", - "expression", - "fmtonoff", - "fmtonoff2", - "fmtonoff3", - "fmtonoff4", - "fmtskip", - "fmtskip2", - "fmtskip3", - "fmtskip4", - "fmtskip5", - "fstring", - "function", - "function2", - "function_trailing_comma", - "import_spacing", - "numeric_literals_py2", - "python2", - "python2_unicode_literals", - "remove_parens", - "slices", - "string_prefixes", - "tricky_unicode_symbols", - "tupleassign", -] - -EXPERIMENTAL_STRING_PROCESSING_CASES = [ - "cantfit", - "comments7", - "long_strings", - "long_strings__edge_case", - "long_strings__regression", - "percent_precedence", -] - - -SOURCES = [ - "tests/test_black.py", - "tests/test_format.py", - "tests/test_blackd.py", - "src/black/__init__.py", - "src/blib2to3/pygram.py", - "src/blib2to3/pytree.py", - "src/blib2to3/pgen2/conv.py", - "src/blib2to3/pgen2/driver.py", - "src/blib2to3/pgen2/grammar.py", - "src/blib2to3/pgen2/literals.py", - "src/blib2to3/pgen2/parse.py", - "src/blib2to3/pgen2/pgen.py", - "src/blib2to3/pgen2/tokenize.py", - "src/blib2to3/pgen2/token.py", - "setup.py", -] - - -class TestSimpleFormat(BlackBaseTestCase): - @parameterized.expand(SIMPLE_CASES) - @patch("black.dump_to_file", dump_to_stderr) - def test_simple_format(self, filename: str) -> None: - self.check_file(filename, DEFAULT_MODE) - - 
@parameterized.expand(EXPERIMENTAL_STRING_PROCESSING_CASES) - @patch("black.dump_to_file", dump_to_stderr) - def test_experimental_format(self, filename: str) -> None: - self.check_file(filename, black.Mode(experimental_string_processing=True)) - - @parameterized.expand(SOURCES) - @patch("black.dump_to_file", dump_to_stderr) - def test_source_is_formatted(self, filename: str) -> None: - path = THIS_DIR.parent / filename - self.check_file(str(path), DEFAULT_MODE, data=False) - self.assertFalse(ff(path)) - - def check_file(self, filename: str, mode: black.Mode, *, data: bool = True) -> None: - source, expected = read_data(filename, data=data) - actual = fs(source, mode=mode) - self.assertFormatEqual(expected, actual) - black.assert_equivalent(source, actual) - black.assert_stable(source, actual, mode) + +@pytest.fixture(autouse=True) +def patch_dump_to_file(request: Any) -> Iterator[None]: + with patch("black.dump_to_file", dump_to_stderr): + yield + + +def check_file( + subdir: str, filename: str, mode: black.Mode, *, data: bool = True +) -> None: + source, expected = read_data(subdir, filename, data=data) + assert_format(source, expected, mode, fast=False) + + +@pytest.mark.filterwarnings("ignore:invalid escape sequence.*:DeprecationWarning") +@pytest.mark.parametrize("filename", all_data_cases("simple_cases")) +def test_simple_format(filename: str) -> None: + check_file("simple_cases", filename, DEFAULT_MODE) + + +@pytest.mark.parametrize("filename", all_data_cases("preview")) +def test_preview_format(filename: str) -> None: + magic_trailing_comma = filename != "skip_magic_trailing_comma" + check_file( + "preview", + filename, + black.Mode(preview=True, magic_trailing_comma=magic_trailing_comma), + ) + + +@pytest.mark.parametrize("filename", all_data_cases("preview_39")) +def test_preview_minimum_python_39_format(filename: str) -> None: + source, expected = read_data("preview_39", filename) + mode = black.Mode(preview=True) + assert_format(source, expected, mode, minimum_version=(3, 9)) + + +@pytest.mark.parametrize("filename", all_data_cases("preview_310")) +def test_preview_minimum_python_310_format(filename: str) -> None: + source, expected = read_data("preview_310", filename) + mode = black.Mode(preview=True) + assert_format(source, expected, mode, minimum_version=(3, 10)) + + +# =============== # +# Complex cases +# ============= # + + +def test_empty() -> None: + source = expected = "" + assert_format(source, expected) + + +@pytest.mark.parametrize("filename", all_data_cases("py_36")) +def test_python_36(filename: str) -> None: + source, expected = read_data("py_36", filename) + mode = black.Mode(target_versions=PY36_VERSIONS) + assert_format(source, expected, mode, minimum_version=(3, 6)) + + +@pytest.mark.parametrize("filename", all_data_cases("py_37")) +def test_python_37(filename: str) -> None: + source, expected = read_data("py_37", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY37}) + assert_format(source, expected, mode, minimum_version=(3, 7)) + + +@pytest.mark.parametrize("filename", all_data_cases("py_38")) +def test_python_38(filename: str) -> None: + source, expected = read_data("py_38", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY38}) + assert_format(source, expected, mode, minimum_version=(3, 8)) + + +@pytest.mark.parametrize("filename", all_data_cases("py_39")) +def test_python_39(filename: str) -> None: + source, expected = read_data("py_39", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY39}) + 
assert_format(source, expected, mode, minimum_version=(3, 9)) + + +@pytest.mark.parametrize("filename", all_data_cases("py_310")) +def test_python_310(filename: str) -> None: + source, expected = read_data("py_310", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY310}) + assert_format(source, expected, mode, minimum_version=(3, 10)) + + +@pytest.mark.parametrize("filename", all_data_cases("py_310")) +def test_python_310_without_target_version(filename: str) -> None: + source, expected = read_data("py_310", filename) + mode = black.Mode() + assert_format(source, expected, mode, minimum_version=(3, 10)) + + +def test_patma_invalid() -> None: + source, expected = read_data("miscellaneous", "pattern_matching_invalid") + mode = black.Mode(target_versions={black.TargetVersion.PY310}) + with pytest.raises(black.parsing.InvalidInput) as exc_info: + assert_format(source, expected, mode, minimum_version=(3, 10)) + + exc_info.match("Cannot parse: 10:11") + + +@pytest.mark.parametrize("filename", all_data_cases("py_311")) +def test_python_311(filename: str) -> None: + source, expected = read_data("py_311", filename) + mode = black.Mode(target_versions={black.TargetVersion.PY311}) + assert_format(source, expected, mode, minimum_version=(3, 11)) + + +@pytest.mark.parametrize("filename", all_data_cases("fast")) +def test_fast_cases(filename: str) -> None: + source, expected = read_data("fast", filename) + assert_format(source, expected, fast=True) + + +def test_python_2_hint() -> None: + with pytest.raises(black.parsing.InvalidInput) as exc_info: + assert_format("print 'daylily'", "print 'daylily'") + exc_info.match(black.parsing.PY2_HINT) + + +@pytest.mark.filterwarnings("ignore:invalid escape sequence.*:DeprecationWarning") +def test_docstring_no_string_normalization() -> None: + """Like test_docstring but with string normalization off.""" + source, expected = read_data("miscellaneous", "docstring_no_string_normalization") + mode = replace(DEFAULT_MODE, string_normalization=False) + assert_format(source, expected, mode) + + +def test_preview_docstring_no_string_normalization() -> None: + """ + Like test_docstring but with string normalization off *and* the preview style + enabled. 
+ """ + source, expected = read_data( + "miscellaneous", "docstring_preview_no_string_normalization" + ) + mode = replace(DEFAULT_MODE, string_normalization=False, preview=True) + assert_format(source, expected, mode) + + +def test_long_strings_flag_disabled() -> None: + """Tests for turning off the string processing logic.""" + source, expected = read_data("miscellaneous", "long_strings_flag_disabled") + mode = replace(DEFAULT_MODE, experimental_string_processing=False) + assert_format(source, expected, mode) + + +def test_stub() -> None: + mode = replace(DEFAULT_MODE, is_pyi=True) + source, expected = read_data("miscellaneous", "stub.pyi") + assert_format(source, expected, mode) + + +def test_power_op_newline() -> None: + # requires line_length=0 + source, expected = read_data("miscellaneous", "power_op_newline") + assert_format(source, expected, mode=black.Mode(line_length=0)) diff --git a/tests/test_ipynb.py b/tests/test_ipynb.py new file mode 100644 index 0000000..7aa2e91 --- /dev/null +++ b/tests/test_ipynb.py @@ -0,0 +1,524 @@ +import contextlib +import pathlib +import re +from contextlib import ExitStack as does_not_raise +from dataclasses import replace +from typing import ContextManager + +import pytest +from _pytest.monkeypatch import MonkeyPatch +from click.testing import CliRunner + +from black import ( + Mode, + NothingChanged, + format_cell, + format_file_contents, + format_file_in_place, + main, +) +from black.handle_ipynb_magics import jupyter_dependencies_are_installed +from tests.util import DATA_DIR, get_case_path, read_jupyter_notebook + +with contextlib.suppress(ModuleNotFoundError): + import IPython +pytestmark = pytest.mark.jupyter +pytest.importorskip("IPython", reason="IPython is an optional dependency") +pytest.importorskip("tokenize_rt", reason="tokenize-rt is an optional dependency") + +JUPYTER_MODE = Mode(is_ipynb=True) + +EMPTY_CONFIG = DATA_DIR / "empty_pyproject.toml" + +runner = CliRunner() + + +def test_noop() -> None: + src = 'foo = "a"' + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +@pytest.mark.parametrize("fast", [True, False]) +def test_trailing_semicolon(fast: bool) -> None: + src = 'foo = "a" ;' + result = format_cell(src, fast=fast, mode=JUPYTER_MODE) + expected = 'foo = "a";' + assert result == expected + + +def test_trailing_semicolon_with_comment() -> None: + src = 'foo = "a" ; # bar' + result = format_cell(src, fast=True, mode=JUPYTER_MODE) + expected = 'foo = "a"; # bar' + assert result == expected + + +def test_trailing_semicolon_with_comment_on_next_line() -> None: + src = "import black;\n\n# this is a comment" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_trailing_semicolon_indented() -> None: + src = "with foo:\n plot_bar();" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_trailing_semicolon_noop() -> None: + src = 'foo = "a";' + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +@pytest.mark.parametrize( + "mode", + [ + pytest.param(JUPYTER_MODE, id="default mode"), + pytest.param( + replace(JUPYTER_MODE, python_cell_magics={"cust1", "cust1"}), + id="custom cell magics mode", + ), + ], +) +def test_cell_magic(mode: Mode) -> None: + src = "%%time\nfoo =bar" + result = format_cell(src, fast=True, mode=mode) + expected = "%%time\nfoo = bar" + assert result == expected + + +def test_cell_magic_noop() -> None: + src = "%%time\n2 + 2" + with 
pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +@pytest.mark.parametrize( + "mode", + [ + pytest.param(JUPYTER_MODE, id="default mode"), + pytest.param( + replace(JUPYTER_MODE, python_cell_magics={"cust1", "cust1"}), + id="custom cell magics mode", + ), + ], +) +@pytest.mark.parametrize( + "src, expected", + ( + pytest.param("ls =!ls", "ls = !ls", id="System assignment"), + pytest.param("!ls\n'foo'", '!ls\n"foo"', id="System call"), + pytest.param("!!ls\n'foo'", '!!ls\n"foo"', id="Other system call"), + pytest.param("?str\n'foo'", '?str\n"foo"', id="Help"), + pytest.param("??str\n'foo'", '??str\n"foo"', id="Other help"), + pytest.param( + "%matplotlib inline\n'foo'", + '%matplotlib inline\n"foo"', + id="Line magic with argument", + ), + pytest.param("%time\n'foo'", '%time\n"foo"', id="Line magic without argument"), + pytest.param( + "env = %env var", "env = %env var", id="Assignment to environment variable" + ), + pytest.param("env = %env", "env = %env", id="Assignment to magic"), + ), +) +def test_magic(src: str, expected: str, mode: Mode) -> None: + result = format_cell(src, fast=True, mode=mode) + assert result == expected + + +@pytest.mark.parametrize( + "src", + ( + "%%bash\n2+2", + "%%html --isolated\n2+2", + "%%writefile e.txt\n meh\n meh", + ), +) +def test_non_python_magics(src: str) -> None: + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +@pytest.mark.skipif( + IPython.version_info < (8, 3), + reason="Change in how TransformerManager transforms this input", +) +def test_set_input() -> None: + src = "a = b??" + expected = "??b" + result = format_cell(src, fast=True, mode=JUPYTER_MODE) + assert result == expected + + +def test_input_already_contains_transformed_magic() -> None: + src = '%time foo()\nget_ipython().run_cell_magic("time", "", "foo()\\n")' + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_magic_noop() -> None: + src = "ls = !ls" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_cell_magic_with_magic() -> None: + src = "%%timeit -n1\nls =!ls" + result = format_cell(src, fast=True, mode=JUPYTER_MODE) + expected = "%%timeit -n1\nls = !ls" + assert result == expected + + +@pytest.mark.parametrize( + "mode, expected_output, expectation", + [ + pytest.param( + JUPYTER_MODE, + "%%custom_python_magic -n1 -n2\nx=2", + pytest.raises(NothingChanged), + id="No change when cell magic not registered", + ), + pytest.param( + replace(JUPYTER_MODE, python_cell_magics={"cust1", "cust1"}), + "%%custom_python_magic -n1 -n2\nx=2", + pytest.raises(NothingChanged), + id="No change when other cell magics registered", + ), + pytest.param( + replace(JUPYTER_MODE, python_cell_magics={"custom_python_magic", "cust1"}), + "%%custom_python_magic -n1 -n2\nx = 2", + does_not_raise(), + id="Correctly change when cell magic registered", + ), + ], +) +def test_cell_magic_with_custom_python_magic( + mode: Mode, expected_output: str, expectation: ContextManager[object] +) -> None: + with expectation: + result = format_cell( + "%%custom_python_magic -n1 -n2\nx=2", + fast=True, + mode=mode, + ) + assert result == expected_output + + +def test_cell_magic_nested() -> None: + src = "%%time\n%%time\n2+2" + result = format_cell(src, fast=True, mode=JUPYTER_MODE) + expected = "%%time\n%%time\n2 + 2" + assert result == expected + + +def test_cell_magic_with_magic_noop() -> None: + src = "%%t -n1\nls = !ls" + with 
pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_automagic() -> None: + src = "pip install black" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_multiline_magic() -> None: + src = "%time 1 + \\\n2" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_multiline_no_magic() -> None: + src = "1 + \\\n2" + result = format_cell(src, fast=True, mode=JUPYTER_MODE) + expected = "1 + 2" + assert result == expected + + +def test_cell_magic_with_invalid_body() -> None: + src = "%%time\nif True" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_empty_cell() -> None: + src = "" + with pytest.raises(NothingChanged): + format_cell(src, fast=True, mode=JUPYTER_MODE) + + +def test_entire_notebook_empty_metadata() -> None: + content = read_jupyter_notebook("jupyter", "notebook_empty_metadata") + result = format_file_contents(content, fast=True, mode=JUPYTER_MODE) + expected = ( + "{\n" + ' "cells": [\n' + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {\n' + ' "tags": []\n' + " },\n" + ' "outputs": [],\n' + ' "source": [\n' + ' "%%time\\n",\n' + ' "\\n",\n' + ' "print(\\"foo\\")"\n' + " ]\n" + " },\n" + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {},\n' + ' "outputs": [],\n' + ' "source": []\n' + " }\n" + " ],\n" + ' "metadata": {},\n' + ' "nbformat": 4,\n' + ' "nbformat_minor": 4\n' + "}\n" + ) + assert result == expected + + +def test_entire_notebook_trailing_newline() -> None: + content = read_jupyter_notebook("jupyter", "notebook_trailing_newline") + result = format_file_contents(content, fast=True, mode=JUPYTER_MODE) + expected = ( + "{\n" + ' "cells": [\n' + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {\n' + ' "tags": []\n' + " },\n" + ' "outputs": [],\n' + ' "source": [\n' + ' "%%time\\n",\n' + ' "\\n",\n' + ' "print(\\"foo\\")"\n' + " ]\n" + " },\n" + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {},\n' + ' "outputs": [],\n' + ' "source": []\n' + " }\n" + " ],\n" + ' "metadata": {\n' + ' "interpreter": {\n' + ' "hash": "e758f3098b5b55f4d87fe30bbdc1367f20f246b483f96267ee70e6c40cb185d8"\n' # noqa:B950 + " },\n" + ' "kernelspec": {\n' + ' "display_name": "Python 3.8.10 64-bit (\'black\': venv)",\n' + ' "name": "python3"\n' + " },\n" + ' "language_info": {\n' + ' "name": "python",\n' + ' "version": ""\n' + " }\n" + " },\n" + ' "nbformat": 4,\n' + ' "nbformat_minor": 4\n' + "}\n" + ) + assert result == expected + + +def test_entire_notebook_no_trailing_newline() -> None: + content = read_jupyter_notebook("jupyter", "notebook_no_trailing_newline") + result = format_file_contents(content, fast=True, mode=JUPYTER_MODE) + expected = ( + "{\n" + ' "cells": [\n' + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {\n' + ' "tags": []\n' + " },\n" + ' "outputs": [],\n' + ' "source": [\n' + ' "%%time\\n",\n' + ' "\\n",\n' + ' "print(\\"foo\\")"\n' + " ]\n" + " },\n" + " {\n" + ' "cell_type": "code",\n' + ' "execution_count": null,\n' + ' "metadata": {},\n' + ' "outputs": [],\n' + ' "source": []\n' + " }\n" + " ],\n" + ' "metadata": {\n' + ' "interpreter": {\n' + ' "hash": "e758f3098b5b55f4d87fe30bbdc1367f20f246b483f96267ee70e6c40cb185d8"\n' # noqa: B950 + " },\n" + ' "kernelspec": {\n' + ' "display_name": "Python 3.8.10 64-bit (\'black\': 
venv)",\n' + ' "name": "python3"\n' + " },\n" + ' "language_info": {\n' + ' "name": "python",\n' + ' "version": ""\n' + " }\n" + " },\n" + ' "nbformat": 4,\n' + ' "nbformat_minor": 4\n' + "}" + ) + assert result == expected + + +def test_entire_notebook_without_changes() -> None: + content = read_jupyter_notebook("jupyter", "notebook_without_changes") + with pytest.raises(NothingChanged): + format_file_contents(content, fast=True, mode=JUPYTER_MODE) + + +def test_non_python_notebook() -> None: + content = read_jupyter_notebook("jupyter", "non_python_notebook") + + with pytest.raises(NothingChanged): + format_file_contents(content, fast=True, mode=JUPYTER_MODE) + + +def test_empty_string() -> None: + with pytest.raises(NothingChanged): + format_file_contents("", fast=True, mode=JUPYTER_MODE) + + +def test_unparseable_notebook() -> None: + path = get_case_path("jupyter", "notebook_which_cant_be_parsed.ipynb") + msg = rf"File '{re.escape(str(path))}' cannot be parsed as valid Jupyter notebook\." + with pytest.raises(ValueError, match=msg): + format_file_in_place(path, fast=True, mode=JUPYTER_MODE) + + +def test_ipynb_diff_with_change() -> None: + result = runner.invoke( + main, + [ + str(get_case_path("jupyter", "notebook_trailing_newline.ipynb")), + "--diff", + f"--config={EMPTY_CONFIG}", + ], + ) + expected = "@@ -1,3 +1,3 @@\n %%time\n \n-print('foo')\n+print(\"foo\")\n" + assert expected in result.output + + +def test_ipynb_diff_with_no_change() -> None: + result = runner.invoke( + main, + [ + str(get_case_path("jupyter", "notebook_without_changes.ipynb")), + "--diff", + f"--config={EMPTY_CONFIG}", + ], + ) + expected = "1 file would be left unchanged." + assert expected in result.output + + +def test_cache_isnt_written_if_no_jupyter_deps_single( + monkeypatch: MonkeyPatch, tmp_path: pathlib.Path +) -> None: + # Check that the cache isn't written to if Jupyter dependencies aren't installed. + jupyter_dependencies_are_installed.cache_clear() + nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + tmp_nb = tmp_path / "notebook.ipynb" + with open(nb) as src, open(tmp_nb, "w") as dst: + dst.write(src.read()) + monkeypatch.setattr( + "black.jupyter_dependencies_are_installed", lambda verbose, quiet: False + ) + result = runner.invoke( + main, [str(tmp_path / "notebook.ipynb"), f"--config={EMPTY_CONFIG}"] + ) + assert "No Python files are present to be formatted. Nothing to do" in result.output + jupyter_dependencies_are_installed.cache_clear() + monkeypatch.setattr( + "black.jupyter_dependencies_are_installed", lambda verbose, quiet: True + ) + result = runner.invoke( + main, [str(tmp_path / "notebook.ipynb"), f"--config={EMPTY_CONFIG}"] + ) + assert "reformatted" in result.output + + +def test_cache_isnt_written_if_no_jupyter_deps_dir( + monkeypatch: MonkeyPatch, tmp_path: pathlib.Path +) -> None: + # Check that the cache isn't written to if Jupyter dependencies aren't installed. + jupyter_dependencies_are_installed.cache_clear() + nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + tmp_nb = tmp_path / "notebook.ipynb" + with open(nb) as src, open(tmp_nb, "w") as dst: + dst.write(src.read()) + monkeypatch.setattr( + "black.files.jupyter_dependencies_are_installed", lambda verbose, quiet: False + ) + result = runner.invoke(main, [str(tmp_path), f"--config={EMPTY_CONFIG}"]) + assert "No Python files are present to be formatted. 
Nothing to do" in result.output + jupyter_dependencies_are_installed.cache_clear() + monkeypatch.setattr( + "black.files.jupyter_dependencies_are_installed", lambda verbose, quiet: True + ) + result = runner.invoke(main, [str(tmp_path), f"--config={EMPTY_CONFIG}"]) + assert "reformatted" in result.output + + +def test_ipynb_flag(tmp_path: pathlib.Path) -> None: + nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + tmp_nb = tmp_path / "notebook.a_file_extension_which_is_definitely_not_ipynb" + with open(nb) as src, open(tmp_nb, "w") as dst: + dst.write(src.read()) + result = runner.invoke( + main, + [ + str(tmp_nb), + "--diff", + "--ipynb", + f"--config={EMPTY_CONFIG}", + ], + ) + expected = "@@ -1,3 +1,3 @@\n %%time\n \n-print('foo')\n+print(\"foo\")\n" + assert expected in result.output + + +def test_ipynb_and_pyi_flags() -> None: + nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + result = runner.invoke( + main, + [ + str(nb), + "--pyi", + "--ipynb", + "--diff", + f"--config={EMPTY_CONFIG}", + ], + ) + assert isinstance(result.exception, SystemExit) + expected = "Cannot pass both `pyi` and `ipynb` flags!\n" + assert result.output == expected + + +def test_unable_to_replace_magics(monkeypatch: MonkeyPatch) -> None: + src = "%%time\na = 'foo'" + monkeypatch.setattr("black.handle_ipynb_magics.TOKEN_HEX", lambda _: "foo") + with pytest.raises( + AssertionError, match="Black was not able to replace IPython magic" + ): + format_cell(src, fast=True, mode=JUPYTER_MODE) diff --git a/tests/test_no_ipynb.py b/tests/test_no_ipynb.py new file mode 100644 index 0000000..3e0b159 --- /dev/null +++ b/tests/test_no_ipynb.py @@ -0,0 +1,37 @@ +import pathlib + +import pytest +from click.testing import CliRunner + +from black import jupyter_dependencies_are_installed, main +from tests.util import get_case_path + +pytestmark = pytest.mark.no_jupyter + +runner = CliRunner() + + +def test_ipynb_diff_with_no_change_single() -> None: + jupyter_dependencies_are_installed.cache_clear() + path = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + result = runner.invoke(main, [str(path)]) + expected_output = ( + "Skipping .ipynb files as Jupyter dependencies are not installed.\n" + "You can fix this by running ``pip install black[jupyter]``\n" + ) + assert expected_output in result.output + + +def test_ipynb_diff_with_no_change_dir(tmp_path: pathlib.Path) -> None: + jupyter_dependencies_are_installed.cache_clear() + runner = CliRunner() + nb = get_case_path("jupyter", "notebook_trailing_newline.ipynb") + tmp_nb = tmp_path / "notebook.ipynb" + with open(nb) as src, open(tmp_nb, "w") as dst: + dst.write(src.read()) + result = runner.invoke(main, [str(tmp_path)]) + expected_output = ( + "Skipping .ipynb files as Jupyter dependencies are not installed.\n" + "You can fix this by running ``pip install black[jupyter]``\n" + ) + assert expected_output in result.output diff --git a/tests/test_primer.py b/tests/test_primer.py deleted file mode 100644 index a8ad8a7..0000000 --- a/tests/test_primer.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/env python3 - -import asyncio -import sys -import unittest -from contextlib import contextmanager -from copy import deepcopy -from io import StringIO -from os import getpid -from pathlib import Path -from platform import system -from subprocess import CalledProcessError -from tempfile import TemporaryDirectory, gettempdir -from typing import Any, Callable, Generator, Iterator, Tuple -from unittest.mock import Mock, patch - -from click.testing 
-
-from black_primer import cli, lib
-
-
-EXPECTED_ANALYSIS_OUTPUT = """\
--- primer results 📊 --
-
-68 / 69 succeeded (98.55%) ✅
-1 / 69 FAILED (1.45%) 💩
- - 0 projects disabled by config
- - 0 projects skipped due to Python version
- - 0 skipped due to long checkout
-
-Failed projects:
-
-## black:
- - Returned 69
- - stdout:
-Black didn't work
-
-"""
-FAKE_PROJECT_CONFIG = {
-    "cli_arguments": ["--unittest"],
-    "expect_formatting_changes": False,
-    "git_clone_url": "https://github.com/psf/black.git",
-}
-
-
-@contextmanager
-def capture_stdout(command: Callable, *args: Any, **kwargs: Any) -> Generator:
-    old_stdout, sys.stdout = sys.stdout, StringIO()
-    try:
-        command(*args, **kwargs)
-        sys.stdout.seek(0)
-        yield sys.stdout.read()
-    finally:
-        sys.stdout = old_stdout
-
-
-@contextmanager
-def event_loop() -> Iterator[None]:
-    policy = asyncio.get_event_loop_policy()
-    loop = policy.new_event_loop()
-    asyncio.set_event_loop(loop)
-    if sys.platform == "win32":
-        asyncio.set_event_loop(asyncio.ProactorEventLoop())
-    try:
-        yield
-    finally:
-        loop.close()
-
-
-async def raise_subprocess_error_1(*args: Any, **kwargs: Any) -> None:
-    raise CalledProcessError(1, ["unittest", "error"], b"", b"")
-
-
-async def raise_subprocess_error_123(*args: Any, **kwargs: Any) -> None:
-    raise CalledProcessError(123, ["unittest", "error"], b"", b"")
-
-
-async def return_false(*args: Any, **kwargs: Any) -> bool:
-    return False
-
-
-async def return_subproccess_output(*args: Any, **kwargs: Any) -> Tuple[bytes, bytes]:
-    return (b"stdout", b"stderr")
-
-
-async def return_zero(*args: Any, **kwargs: Any) -> int:
-    return 0
-
-
-class PrimerLibTests(unittest.TestCase):
-    def test_analyze_results(self) -> None:
-        fake_results = lib.Results(
-            {
-                "disabled": 0,
-                "failed": 1,
-                "skipped_long_checkout": 0,
-                "success": 68,
-                "wrong_py_ver": 0,
-            },
-            {"black": CalledProcessError(69, ["black"], b"Black didn't work", b"")},
-        )
-        with capture_stdout(lib.analyze_results, 69, fake_results) as analyze_stdout:
-            self.assertEqual(EXPECTED_ANALYSIS_OUTPUT, analyze_stdout)
-
-    @event_loop()
-    def test_black_run(self) -> None:
-        """Pretend to run Black to ensure we cater for all scenarios"""
-        loop = asyncio.get_event_loop()
-        repo_path = Path(gettempdir())
-        project_config = deepcopy(FAKE_PROJECT_CONFIG)
-        results = lib.Results({"failed": 0, "success": 0}, {})
-
-        # Test a successful Black run
-        with patch("black_primer.lib._gen_check_output", return_subproccess_output):
-            loop.run_until_complete(lib.black_run(repo_path, project_config, results))
-        self.assertEqual(1, results.stats["success"])
-        self.assertFalse(results.failed_projects)
-
-        # Test a fail based on expecting formatting changes but not getting any
-        project_config["expect_formatting_changes"] = True
-        results = lib.Results({"failed": 0, "success": 0}, {})
-        with patch("black_primer.lib._gen_check_output", return_subproccess_output):
-            loop.run_until_complete(lib.black_run(repo_path, project_config, results))
-        self.assertEqual(1, results.stats["failed"])
-        self.assertTrue(results.failed_projects)
-
-        # Test a fail based on returning 1 and not expecting formatting changes
-        project_config["expect_formatting_changes"] = False
-        results = lib.Results({"failed": 0, "success": 0}, {})
-        with patch("black_primer.lib._gen_check_output", raise_subprocess_error_1):
-            loop.run_until_complete(lib.black_run(repo_path, project_config, results))
-        self.assertEqual(1, results.stats["failed"])
-        self.assertTrue(results.failed_projects)
-
-        # Test a formatting error based on returning 123
-        with patch("black_primer.lib._gen_check_output", raise_subprocess_error_123):
-            loop.run_until_complete(lib.black_run(repo_path, project_config, results))
-        self.assertEqual(2, results.stats["failed"])
-
-    @event_loop()
-    def test_gen_check_output(self) -> None:
-        loop = asyncio.get_event_loop()
-        stdout, stderr = loop.run_until_complete(
-            lib._gen_check_output([lib.BLACK_BINARY, "--help"])
-        )
-        self.assertTrue("The uncompromising code formatter" in stdout.decode("utf8"))
-        self.assertEqual(None, stderr)
-
-        # TODO: Add a test to see failure works on Windows
-        if lib.WINDOWS:
-            return
-
-        false_bin = "/usr/bin/false" if system() == "Darwin" else "/bin/false"
-        with self.assertRaises(CalledProcessError):
-            loop.run_until_complete(lib._gen_check_output([false_bin]))
-
-        with self.assertRaises(asyncio.TimeoutError):
-            loop.run_until_complete(
-                lib._gen_check_output(["/bin/sleep", "2"], timeout=0.1)
-            )
-
-    @event_loop()
-    def test_git_checkout_or_rebase(self) -> None:
-        loop = asyncio.get_event_loop()
-        project_config = deepcopy(FAKE_PROJECT_CONFIG)
-        work_path = Path(gettempdir())
-
-        expected_repo_path = work_path / "black"
-        with patch("black_primer.lib._gen_check_output", return_subproccess_output):
-            returned_repo_path = loop.run_until_complete(
-                lib.git_checkout_or_rebase(work_path, project_config)
-            )
-        self.assertEqual(expected_repo_path, returned_repo_path)
-
-    @patch("sys.stdout", new_callable=StringIO)
-    @event_loop()
-    def test_process_queue(self, mock_stdout: Mock) -> None:
-        loop = asyncio.get_event_loop()
-        config_path = Path(lib.__file__).parent / "primer.json"
-        with patch("black_primer.lib.git_checkout_or_rebase", return_false):
-            with TemporaryDirectory() as td:
-                return_val = loop.run_until_complete(
-                    lib.process_queue(str(config_path), td, 2)
-                )
-                self.assertEqual(0, return_val)
-
-
-class PrimerCLITests(unittest.TestCase):
-    @event_loop()
-    def test_async_main(self) -> None:
-        loop = asyncio.get_event_loop()
-        work_dir = Path(gettempdir()) / f"primer_ut_{getpid()}"
-        args = {
-            "config": "/config",
-            "debug": False,
-            "keep": False,
-            "long_checkouts": False,
-            "rebase": False,
-            "workdir": str(work_dir),
-            "workers": 69,
-        }
-        with patch("black_primer.cli.lib.process_queue", return_zero):
-            return_val = loop.run_until_complete(cli.async_main(**args))
-            self.assertEqual(0, return_val)
-
-    def test_handle_debug(self) -> None:
-        self.assertTrue(cli._handle_debug(None, None, True))
-
-    def test_help_output(self) -> None:
-        runner = CliRunner()
-        result = runner.invoke(cli.main, ["--help"])
-        self.assertEqual(result.exit_code, 0)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/tests/test_trans.py b/tests/test_trans.py
new file mode 100644
index 0000000..dce8a93
--- /dev/null
+++ b/tests/test_trans.py
@@ -0,0 +1,51 @@
+from typing import List, Tuple
+
+from black.trans import iter_fexpr_spans
+
+
+def test_fexpr_spans() -> None:
+    def check(
+        string: str, expected_spans: List[Tuple[int, int]], expected_slices: List[str]
+    ) -> None:
+        spans = list(iter_fexpr_spans(string))
+
+        # Checking slices isn't strictly necessary, but it's easier to verify at
+        # a glance than only spans
+        assert len(spans) == len(expected_slices)
+        for (i, j), slice in zip(spans, expected_slices):
+            assert len(string[i:j]) == j - i
+            assert string[i:j] == slice
+
+        assert spans == expected_spans
+
+    # Most of these test cases omit the leading 'f' and leading / closing quotes
+    # for convenience
+    # Some additional property-based tests can be found in
+    # https://github.com/psf/black/pull/2654#issuecomment-981411748
+    check("""{var}""", [(0, 5)], ["{var}"])
+    check("""f'{var}'""", [(2, 7)], ["{var}"])
+    check("""f'{1 + f() + 2 + "asdf"}'""", [(2, 24)], ["""{1 + f() + 2 + "asdf"}"""])
+    check("""text {var} text""", [(5, 10)], ["{var}"])
+    check("""text {{ {var} }} text""", [(8, 13)], ["{var}"])
+    check("""{a} {b} {c}""", [(0, 3), (4, 7), (8, 11)], ["{a}", "{b}", "{c}"])
+    check("""f'{a} {b} {c}'""", [(2, 5), (6, 9), (10, 13)], ["{a}", "{b}", "{c}"])
+    check("""{ {} }""", [(0, 6)], ["{ {} }"])
+    check("""{ {{}} }""", [(0, 8)], ["{ {{}} }"])
+    check("""{ {{{}}} }""", [(0, 10)], ["{ {{{}}} }"])
+    check("""{{ {{{}}} }}""", [(5, 7)], ["{}"])
+    check("""{{ {{{var}}} }}""", [(5, 10)], ["{var}"])
+    check("""{f"{0}"}""", [(0, 8)], ["""{f"{0}"}"""])
+    check("""{"'"}""", [(0, 5)], ["""{"'"}"""])
+    check("""{"{"}""", [(0, 5)], ["""{"{"}"""])
+    check("""{"}"}""", [(0, 5)], ["""{"}"}"""])
+    check("""{"{{"}""", [(0, 6)], ["""{"{{"}"""])
+    check("""{''' '''}""", [(0, 9)], ["""{''' '''}"""])
+    check("""{'''{'''}""", [(0, 9)], ["""{'''{'''}"""])
+    check("""{''' {'{ '''}""", [(0, 13)], ["""{''' {'{ '''}"""])
+    check(
+        '''f\'\'\'-{f"""*{f"+{f'.{x}.'}+"}*"""}-'y\\'\'\'\'''',
+        [(5, 33)],
+        ['''{f"""*{f"+{f'.{x}.'}+"}*"""}'''],
+    )
+    check(r"""{}{""", [(0, 2)], ["{}"])
+    check("""f"{'{'''''''''}\"""", [(2, 15)], ["{'{'''''''''}"])
diff --git a/tests/util.py b/tests/util.py
index ad98669..d65c2e6 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -1,67 +1,122 @@
 import os
+import sys
 import unittest
 from contextlib import contextmanager
+from functools import partial
 from pathlib import Path
-from typing import List, Tuple, Iterator, Any
+from typing import Any, Iterator, List, Optional, Tuple
+
 import black
-from functools import partial
+from black.debug import DebugVisitor
+from black.mode import TargetVersion
+from black.output import diff, err, out
+
+PYTHON_SUFFIX = ".py"
+ALLOWED_SUFFIXES = (PYTHON_SUFFIX, ".pyi", ".out", ".diff", ".ipynb")
 
 THIS_DIR = Path(__file__).parent
+DATA_DIR = THIS_DIR / "data"
 PROJECT_ROOT = THIS_DIR.parent
 EMPTY_LINE = "# EMPTY LINE WITH WHITESPACE" + " (this comment will be removed)"
 DETERMINISTIC_HEADER = "[Deterministic header]"
+PY36_VERSIONS = {
+    TargetVersion.PY36,
+    TargetVersion.PY37,
+    TargetVersion.PY38,
+    TargetVersion.PY39,
+}
 
 DEFAULT_MODE = black.Mode()
 ff = partial(black.format_file_in_place, mode=DEFAULT_MODE, fast=True)
 fs = partial(black.format_str, mode=DEFAULT_MODE)
 
 
+def _assert_format_equal(expected: str, actual: str) -> None:
+    if actual != expected and not os.environ.get("SKIP_AST_PRINT"):
+        bdv: DebugVisitor[Any]
+        out("Expected tree:", fg="green")
+        try:
+            exp_node = black.lib2to3_parse(expected)
+            bdv = DebugVisitor()
+            list(bdv.visit(exp_node))
+        except Exception as ve:
+            err(str(ve))
+        out("Actual tree:", fg="red")
+        try:
+            exp_node = black.lib2to3_parse(actual)
+            bdv = DebugVisitor()
+            list(bdv.visit(exp_node))
+        except Exception as ve:
+            err(str(ve))
+
+    if actual != expected:
+        out(diff(expected, actual, "expected", "actual"))
+
+    assert actual == expected
+
+
+def assert_format(
+    source: str,
+    expected: str,
+    mode: black.Mode = DEFAULT_MODE,
+    *,
+    fast: bool = False,
+    minimum_version: Optional[Tuple[int, int]] = None,
+) -> None:
+    """Convenience function to check that Black formats as expected.
+
+    You can pass @minimum_version if you're passing code with newer syntax to guard
+    the safety checks so they don't just crash with a SyntaxError. Please note this
+    is separate from the TargetVersion Mode configuration.
+    """
+    actual = black.format_str(source, mode=mode)
+    _assert_format_equal(expected, actual)
+    # It's not useful to run safety checks if we're expecting no changes anyway. The
+    # assertion right above will raise if reality does actually make changes. This just
+    # avoids wasted CPU cycles.
+    if not fast and source != expected:
+        # Unfortunately the AST equivalence check relies on the built-in ast module
+        # being able to parse the code being formatted. This doesn't always work out
+        # when checking modern code on older versions.
+        if minimum_version is None or sys.version_info >= minimum_version:
+            black.assert_equivalent(source, actual)
+        black.assert_stable(source, actual, mode=mode)
+
+
 def dump_to_stderr(*output: str) -> str:
     return "\n" + "\n".join(output) + "\n"
 
 
 class BlackBaseTestCase(unittest.TestCase):
-    maxDiff = None
-    _diffThreshold = 2 ** 20
-
     def assertFormatEqual(self, expected: str, actual: str) -> None:
-        if actual != expected and not os.environ.get("SKIP_AST_PRINT"):
-            bdv: black.DebugVisitor[Any]
-            black.out("Expected tree:", fg="green")
-            try:
-                exp_node = black.lib2to3_parse(expected)
-                bdv = black.DebugVisitor()
-                list(bdv.visit(exp_node))
-            except Exception as ve:
-                black.err(str(ve))
-            black.out("Actual tree:", fg="red")
-            try:
-                exp_node = black.lib2to3_parse(actual)
-                bdv = black.DebugVisitor()
-                list(bdv.visit(exp_node))
-            except Exception as ve:
-                black.err(str(ve))
-        self.assertMultiLineEqual(expected, actual)
+        _assert_format_equal(expected, actual)
 
 
-@contextmanager
-def skip_if_exception(e: str) -> Iterator[None]:
-    try:
-        yield
-    except Exception as exc:
-        if exc.__class__.__name__ == e:
-            unittest.skip(f"Encountered expected exception {exc}, skipping")
-        else:
-            raise
+def get_base_dir(data: bool) -> Path:
+    return DATA_DIR if data else PROJECT_ROOT
+
+
+def all_data_cases(subdir_name: str, data: bool = True) -> List[str]:
+    cases_dir = get_base_dir(data) / subdir_name
+    assert cases_dir.is_dir()
+    return [case_path.stem for case_path in cases_dir.iterdir()]
 
 
+def get_case_path(
+    subdir_name: str, name: str, data: bool = True, suffix: str = PYTHON_SUFFIX
+) -> Path:
+    """Get case path from name"""
+    case_path = get_base_dir(data) / subdir_name / name
+    if not name.endswith(ALLOWED_SUFFIXES):
+        case_path = case_path.with_suffix(suffix)
+    assert case_path.is_file(), f"{case_path} is not a file."
+    return case_path
 
-def read_data(name: str, data: bool = True) -> Tuple[str, str]:
+
+def read_data(subdir_name: str, name: str, data: bool = True) -> Tuple[str, str]:
     """read_data('test_name') -> 'input', 'output'"""
-    if not name.endswith((".py", ".pyi", ".out", ".diff")):
-        name += ".py"
-    base_dir = THIS_DIR / "data" if data else PROJECT_ROOT
-    return read_data_from_file(base_dir / name)
+    return read_data_from_file(get_case_path(subdir_name, name, data))
 
 
 def read_data_from_file(file_name: Path) -> Tuple[str, str]:
@@ -81,3 +136,26 @@ def read_data_from_file(file_name: Path) -> Tuple[str, str]:
     # If there's no output marker, treat the entire file as already pre-formatted.
         _output = _input[:]
     return "".join(_input).strip() + "\n", "".join(_output).strip() + "\n"
+
+
+def read_jupyter_notebook(subdir_name: str, name: str, data: bool = True) -> str:
+    return read_jupyter_notebook_from_file(
+        get_case_path(subdir_name, name, data, suffix=".ipynb")
+    )
+
+
+def read_jupyter_notebook_from_file(file_name: Path) -> str:
+    with open(file_name, mode="rb") as fd:
+        content_bytes = fd.read()
+    return content_bytes.decode()
+
+
+@contextmanager
+def change_directory(path: Path) -> Iterator[None]:
+    """Context manager to temporarily chdir to a different directory."""
+    previous_dir = os.getcwd()
+    try:
+        os.chdir(path)
+        yield
+    finally:
+        os.chdir(previous_dir)
diff --git a/tox.ini b/tox.ini
index 71b02e9..4934514 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,15 +1,83 @@
 [tox]
-envlist = py{36,37,38,39},fuzz
+isolated_build = true
+envlist = {,ci-}py{37,38,39,310,311,py3},fuzz,run_self
 
 [testenv]
 setenv = PYTHONPATH = {toxinidir}/src
 skip_install = True
+# We use `recreate=True` because otherwise, on the second run of `tox -e py`,
+# the `no_jupyter` tests would run with the jupyter extra dependencies installed.
+# See https://github.com/psf/black/issues/2367.
+recreate = True
 deps =
    -r{toxinidir}/test_requirements.txt
+; parallelization is disabled on CI because pytest-dev/pytest-xdist#620 occurs too frequently
+; local runs can stay parallelized since they aren't rolling the dice so many times as on CI
 commands =
-    pip install -e .[d,python2]
+    pip install -e .[d]
+    coverage erase
+    pytest tests --run-optional no_jupyter \
+        !ci: --numprocesses auto \
+        --cov {posargs}
+    pip install -e .[jupyter]
+    pytest tests --run-optional jupyter \
+        -m jupyter \
+        !ci: --numprocesses auto \
+        --cov --cov-append {posargs}
+    coverage report
+
+[testenv:{,ci-}pypy3]
+setenv = PYTHONPATH = {toxinidir}/src
+skip_install = True
+recreate = True
+deps =
+    -r{toxinidir}/test_requirements.txt
+; a separate worker is required in ci due to https://foss.heptapod.net/pypy/pypy/-/issues/3317,
+; which seems to cause tox to wait forever
+; remove this when pypy releases the bugfix
+commands =
+    pip install -e .[d]
     coverage erase
-    coverage run -m pytest tests
+    pytest tests \
+        --run-optional no_jupyter \
+        !ci: --numprocesses auto \
+        ci: --numprocesses 1 \
+        --cov {posargs}
+    pip install -e .[jupyter]
+    pytest tests --run-optional jupyter \
+        -m jupyter \
+        !ci: --numprocesses auto \
+        ci: --numprocesses 1 \
+        --cov --cov-append {posargs}
+    coverage report
+
+[testenv:{,ci-}311]
+setenv =
+    PYTHONPATH = {toxinidir}/src
+    AIOHTTP_NO_EXTENSIONS = 1
+skip_install = True
+recreate = True
+deps =
+; for 3.11 we currently need a newer aiohttp than the 3.8.1 release on PyPI
+    git+https://github.com/aio-libs/aiohttp
+    -r{toxinidir}/test_requirements.txt
+; a separate worker is required in ci due to https://foss.heptapod.net/pypy/pypy/-/issues/3317,
+; which seems to cause tox to wait forever
+; remove this when pypy releases the bugfix
+commands =
+    pip install -e .[d]
+    coverage erase
+    pytest tests \
+        --run-optional no_jupyter \
+        !ci: --numprocesses auto \
+        ci: --numprocesses 1 \
+        --cov {posargs}
+    pip install -e .[jupyter]
+    pytest tests --run-optional jupyter \
+        -m jupyter \
+        !ci: --numprocesses auto \
+        ci: --numprocesses 1 \
+        --cov --cov-append {posargs}
     coverage report
 
 [testenv:fuzz]
@@ -17,10 +85,16 @@
 skip_install = True
 deps =
     -r{toxinidir}/test_requirements.txt
     hypothesmith
-    lark-parser < 0.10.0
-; lark-parser's version is pinned due to a bug in hypothesis. Once it's solved, this will be fixed.
+    lark-parser
 commands =
     pip install -e .[d]
     coverage erase
-    coverage run fuzz.py
+    coverage run {toxinidir}/scripts/fuzz.py
     coverage report
+
+[testenv:run_self]
+setenv = PYTHONPATH = {toxinidir}/src
+skip_install = True
+commands =
+    pip install -e .[d]
+    black --check {toxinidir}/src {toxinidir}/tests
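
A note on the Jupyter tests earlier in this patch: they drive Black through the CLI runner, but the underlying primitive is black.format_cell, which formats one notebook cell while masking IPython magics. A minimal sketch of calling it directly, modeled on those tests; the sample cell source is illustrative, not taken from this patch:

import black

# JUPYTER_MODE in the tests is simply a Mode with notebook handling enabled.
mode = black.Mode(is_ipynb=True)

# format_cell masks the %%time cell magic, formats the Python payload, and
# raises black.NothingChanged when the cell is already formatted.
src = "%%time\nprint('foo')"
result = black.format_cell(src, fast=True, mode=mode)
assert result == '%%time\nprint("foo")'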
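tests/test_trans.py pins down the contract of iter_fexpr_spans: it yields (start, end) offsets for each outermost {...} expression, braces included, and it accepts bare strings without the f prefix or quotes. A short usage sketch; the sample string here is made up:

from black.trans import iter_fexpr_spans

s = "hello {name}, you are {age} years old"
spans = list(iter_fexpr_spans(s))
assert spans == [(6, 12), (22, 27)]

# Slicing with a span yields the expression with its braces, which is exactly
# what the check() helper in the new test verifies.
assert [s[i:j] for i, j in spans] == ["{name}", "{age}"]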
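Most of the tests/util.py rewrite funnels into the new assert_format helper. A sketch of the intended call patterns; the first source/expected pair is Black's well-known README example, and the walrus-operator case with its (3, 8) gate is an illustrative assumption rather than a test from this patch:

from tests.util import assert_format

# With fast=False (the default), a successful comparison is followed by the
# AST-equivalence and stability safety checks.
assert_format(
    "x = {  'a':37,'b':42,\n\n'c':927}\n",
    'x = {"a": 37, "b": 42, "c": 927}\n',
)

# minimum_version only gates the ast-based equivalence check: on interpreters
# older than 3.8 it is skipped instead of crashing with a SyntaxError, while
# assert_stable still runs.
assert_format(
    "if (n:=10) > 5:\n    pass\n",
    "if (n := 10) > 5:\n    pass\n",
    minimum_version=(3, 8),
)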
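get_case_path, all_data_cases, and the new subdir_name parameter of read_data exist so test modules can enumerate case files per data subdirectory instead of hard-coding names. A hypothetical data-driven test built from them; the "simple_cases" subdirectory and the test name are assumptions for illustration:

import pytest

from tests.util import all_data_cases, assert_format, read_data

# Collect one test per case file under tests/data/simple_cases; each file
# carries its input and, after the optional output marker, its expected output.
@pytest.mark.parametrize("filename", all_data_cases("simple_cases"))
def test_simple_format(filename: str) -> None:
    source, expected = read_data("simple_cases", filename)
    assert_format(source, expected)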