From c595eebba35f3fd9a937c45c5ce38b34dcf664a1 Mon Sep 17 00:00:00 2001
From: artem
Date: Thu, 3 Mar 2022 16:14:36 +0300
Subject: [PATCH] Iteration 'stable beta 1 - new docs' (#13)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* chore: :heavy_plus_sign: Add 'furo' (2022.2.14) RTD theme and remove 'sphinx-rtd-theme'

  Also updated:
  • markupsafe (2.0.1 -> 2.1.0)
  • docutils (0.16 -> 0.17.1)
  • filelock (3.5.1 -> 3.6.0)

* build: :construction_worker: Integrate Furo theme into Nox sessions and update docs build config

* chore: :arrow_up: Upgrade python to ^3.8.0

  Installed:
  • zipp (3.7.0)
  • importlib-metadata (4.11.1)

  Also updated:
  • sphinx (4.3.2 -> 4.4.0)

* docs: :memo: Update boilerplate README

* docs: :memo: Add manpage (renamed usage) and update generated help output (only for the download command)

* docs: :memo: Add 'Usage Examples' page

* docs: :memo: Add Motivation page

  Add two meta tags on index page

* build: :memo: Update reference.rst by adding auto-generated descriptions for 4 main modules

* docs: :memo: Update Readme using anonymous references ('double underscores') for license links

* docs: :memo: Add detailed docstrings for 'lep.py'

* test(console): :white_check_mark: Update two tests to fix concurrency issues during parallel test execution

* docs(downloader): :memo: Add docstrings for 'downloader.py'

* docs(parser): :memo: Add docstrings for 'parser.py'

* docs: :memo: Add docstrings for 'exceptions.py'

* docs: :memo: Fix two little mistakes in lep's docstrings

* style: :rotating_light: Fix linter errors found by the 'pre-commit' session

* chore: :wrench: Update .flake8 config to set max-line-length = 80

* docs(console): :memo: Update metavar values for path options and the '--episode' option

* docs: :memo: Update links to Example Usage and Man Page; add link to RTD website only for GitHub

* chore: :wrench: Update config constants with production URLs

* fix: :bug: Correct 'short_date' property for LepEpisode (must be the original date, not the date after conversion to UTC)

* docs: :memo: Add LEP YouTube link to Readme and update the required time and storage for downloading all episodes

* fix: :bug: Fix wrong short date again (episode datetime is not converted to UTC now)

* test: :white_check_mark: Add tests checking the date filter for dates close to midnight; update a test to assign the date as datetime

* chore: Bump to 3.0.0b1

* test(console): :white_check_mark: Update version checking (just "3.")
---
 .flake8                              |   2 +-
 README.rst                           | 156 ++++++++++--
 docs/_static/logo.png                | Bin 0 -> 23023 bytes
 docs/conf.py                         |   5 +-
 docs/index.rst                       |  14 +-
 docs/manpage.rst                     |  19 ++
 docs/motivation.rst                  |  16 ++
 docs/reference.rst                   |  43 +++-
 docs/requirements.txt                |   5 +-
 docs/usage.rst                       | 206 +++++++++++++++-
 noxfile.py                           |   4 +-
 poetry.lock                          | 216 +++++++----------
 pyproject.toml                       |   8 +-
 src/lep_downloader/cli_shared.py     |  10 +-
 src/lep_downloader/commands/parse.py |   6 +-
 src/lep_downloader/config.py         |   6 +-
 src/lep_downloader/downloader.py     | 290 ++++++++++++++++++----
 src/lep_downloader/exceptions.py     |  59 +++--
 src/lep_downloader/lep.py            | 345 +++++++++++++++++++++------
 src/lep_downloader/parser.py         | 312 ++++++++++++++++++++----
 tests/test_cli.py                    |   2 +-
 tests/test_cli_download.py           |  32 +++
 tests/test_parser.py                 |   2 +-
 23 files changed, 1369 insertions(+), 389 deletions(-)
 create mode 100644 docs/_static/logo.png
 create mode 100644 docs/manpage.rst
 create mode 100644 docs/motivation.rst

diff --git a/.flake8 b/.flake8
index 79413d0..c6f1705 100644
--- a/.flake8
+++ b/.flake8
@@ -1,7 +1,7 @@
 [flake8]
 select = B,B9,C,D,DAR,E,F,N,RST,S,W
 ignore = E203,E501,RST201,RST203,RST301,W503,B950
-max-line-length = 120
+max-line-length = 80
 max-complexity = 10
 docstring-convention = google
 per-file-ignores = tests/*:S101
diff --git a/README.rst b/README.rst
index ab08333..531322d 100644
--- a/README.rst
+++ b/README.rst
@@ -1,7 +1,9 @@
 LEP Downloader
 ==============

-|PyPI| |Python Version| |License|
+.. badges-begin
+
+|PyPI| |Status| |Python Version| |License|

 |Read the Docs| |Tests| |Codecov|

@@ -10,6 +12,9 @@ LEP Downloader
 .. |PyPI| image:: https://img.shields.io/pypi/v/lep-downloader.svg
    :target: https://pypi.org/project/lep-downloader/
    :alt: PyPI
+.. |Status| image:: https://img.shields.io/pypi/status/lep-downloader.svg
+   :target: https://pypi.org/project/lep-downloader/
+   :alt: Status
 .. |Python Version| image:: https://img.shields.io/pypi/pyversions/lep-downloader
    :target: https://pypi.org/project/lep-downloader
    :alt: Python Version
@@ -32,68 +37,171 @@ LEP Downloader
    :target: https://github.com/psf/black
    :alt: Black

+=========

-Features
---------
+.. badges-end

-* TODO
+.. raw:: html
+

+   <p align="center">
+      <img alt="logo" src="docs/_static/logo.png">
+   </p>
+
+   <p align="center">
+      📚
+      <a href="https://lep-downloader.readthedocs.io/">Read the full documentation</a>
+      📚
+   </p>
+
-Requirements
-------------

-* TODO
+.. after-image
+
+About
+------
+LEP Downloader is a script for downloading all FREE episodes of `Luke's ENGLISH Podcast`_.

-Installation
-------------
+It lets you get all audio files (including audio tracks of video episodes)
+and also PDF files for each episode page.
+
+Even though this script was written for convenient episode downloading,
+I don't want to financially harm Luke in any way.
+I just want to make my life a bit easier (as usual for a lazy IT person =).
+So consider `donating`_ to Luke's English Podcast and `becoming`_ his premium subscriber.
+And of course, subscribe to his `YouTube channel`_.
+
+
+🚀 Features
+-------------
+
+* Download a range of episodes, filtered by episode number or by episode date
+* Download only the last episode
+* Download PDF files of episode web pages
+* Save files to a specified folder on your hard / solid-state / flash drive
+* Run the script in quiet mode for automated routines
+* Write a log file in debug mode
+
+
+🛠️ Requirements
+----------------
+
+* Python 3.8+
+* Internet connection
+
+
+💻 Installation
+----------------

 You can install *LEP Downloader* via pip_ from PyPI_:

-.. code:: console
+.. code:: none

-   $ pip install lep-downloader
+   pip install lep-downloader

+I do recommend using pipx_ for any Python CLI package.
+It lets you install and run Python applications in isolated environments.

-Usage
------
+.. code:: none

-Please see the `Command-line Reference <Usage_>`_ for details.
+   python -m pip install --user pipx
+   pipx install lep-downloader
+   lep-downloader --help

-Contributing
-------------
+🕹 Usage
+--------
+
+.. code:: none
+
+   lep-downloader -ep 758
+
+You can also use the short script name:
+
+.. code:: none
+
+   lep-dl --last
+
+Please see the `Usage Examples <Usage_>`_ for details.
+
+Or skim the `Man Page <Manpage_>`_ for available options
+(if the terminal is your best friend).
+
+
+✨ What's new in version 3
+---------------------------
+
+The third version was completely re-written by me (again),
+but this time with a more fundamental and mature approach.
+I applied some OOP (object-oriented programming) principles
+and covered almost all functions with fully isolated unit tests.
+
+The code base became more extensible and maintainable *(I believe)*.
+I dropped support for file naming from old script versions.
+I also removed (for a while) video and add-on downloads
+*(I plan to add them again in the future, however - no promises)*.
+
+Archive parsing was improved (it no longer skips several episodes).
+I also added a built-in ability to download files from a reserve server
+if the primary link is not available (for any reason).
+
+And many little internal things.
+You can read the descriptions of pre-releases on the `Releases`_ page (if you wish).
+
+
+✊ Contributing
+---------------

 Contributions are very welcome.
 To learn more, see the `Contributor Guide`_.

-License
--------
+📝 License
+-----------

-Distributed under the terms of the `MIT license`_,
+Distributed under the terms of the `MIT license <https://opensource.org/licenses/MIT>`_,
 *LEP Downloader* is free and open source software.
+It means you can modify it, redistribute it, or use it however you like,
+as long as you mention the author of the original script.

-Issues
-------
+🐞 Issues
+----------

 If you encounter any problems,
 please `file an issue`_ along with a detailed description.

-Credits
--------
+🙏🏻 Credits
+------------

 This project was generated from `@cjolowicz`_'s `Hypermodern Python Cookiecutter`_ template.
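As context for the "reserve server" fallback mentioned under *✨ What's new in version 3*: conceptually it is just a second download attempt against a backup URL when the primary link fails. Below is a minimal sketch of that idea using requests (one of the script's dependencies, credited below); the function name and parameters are illustrative, not the script's actual API.

.. code:: python

   # Illustrative sketch only -- not the script's real implementation.
   import requests


   def download_with_fallback(primary_url: str, reserve_url: str, path: str) -> None:
       """Try the primary link first; fall back to the reserve server."""
       for url in (primary_url, reserve_url):
           try:
               response = requests.get(url, timeout=60)
               response.raise_for_status()
           except requests.RequestException:
               continue  # link is not available (for any reason) -- try the next one
           with open(path, "wb") as file:
               file.write(response.content)
           return
       raise OSError(f"No working link for {path}")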
+Script uses the following packages / libraries under the hood:
+
+* `click `_ (`BSD-3-Clause License `__)
+* `requests `_ (`Apache-2.0 License `__)
+* `beautifulsoup4 `_ (`MIT License `__)
+* `lxml `_ (`BSD-3-Clause License `__)
+* `loguru `_ (`MIT License `__)
+* `single-source `_ (`MIT License `__)
+
+and other amazing Python packages for development and testing.
+See the full list of them in the 'dependencies' section of the ``pyproject.toml``
+`file `_.
+
+.. _Luke's ENGLISH Podcast: https://teacherluke.co.uk/archive-of-episodes-1-149/
+.. _donating: https://www.paypal.com/donate/?cmd=_s-xclick&hosted_button_id=CA2KNZNBFGKC6
+.. _becoming: https://teacherluke.co.uk/premium/premiuminfo/
+.. _YouTube channel: https://www.youtube.com/c/LukesEnglishPodcast
 .. _@cjolowicz: https://github.com/cjolowicz
 .. _Cookiecutter: https://github.com/audreyr/cookiecutter
-.. _MIT license: https://opensource.org/licenses/MIT
 .. _PyPI: https://pypi.org/
 .. _Hypermodern Python Cookiecutter: https://github.com/cjolowicz/cookiecutter-hypermodern-python
 .. _file an issue: https://github.com/hotenov/lep-downloader/issues
 .. _pip: https://pip.pypa.io/
+.. _pipx: https://pipxproject.github.io/pipx/
+.. _Releases: https://github.com/hotenov/LEP-downloader/releases
+
 .. github-only
 .. _Contributor Guide: CONTRIBUTING.rst
 .. _Usage: https://lep-downloader.readthedocs.io/en/latest/usage.html
+.. _Manpage: https://lep-downloader.readthedocs.io/en/latest/manpage.html
diff --git a/docs/_static/logo.png b/docs/_static/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..3dad379b005fd53497a94ff01a4717ae439ad753
GIT binary patch
[binary patch data for docs/_static/logo.png (23023 bytes) omitted]
[diff hunks for docs/conf.py and the start of docs/index.rst omitted (lost in binary patch data); recovered docs/index.rst fragment follows]
+   Man Page (--help)
+   Why? (Motivation)
    reference
    contributing
    Code of Conduct
diff --git a/docs/manpage.rst b/docs/manpage.rst
new file mode 100644
index 0000000..08a6596
--- /dev/null
+++ b/docs/manpage.rst
@@ -0,0 +1,19 @@
+Man Page
+========
+
+.. important::
+   This page is auto-generated from the ``--help`` output
+   of the 'download' command and of the script itself *(without a command)*.
+
+   To download files, you can use just the options (without the 'download' command).
+   They behave the same.
+
+   If you don't know how to read and use ``--help`` output,
+   you should visit the `Usage Examples`_ page (with "copy-paste" instructions).
+
+.. click:: lep_downloader.cli:cli
+   :prog: lep-downloader
+   :nested: full
+   :commands: download
+
+.. _Usage Examples: usage.html
diff --git a/docs/motivation.rst b/docs/motivation.rst
new file mode 100644
index 0000000..06fcb9f
--- /dev/null
+++ b/docs/motivation.rst
@@ -0,0 +1,16 @@
+Why? (Motivation)
+=================
+
+I'm learning two languages at the moment: Python and English.
+
+And I've decided to *kill two birds with one stone*.
+
+Learning a programming language is better on a real project.
+For real users. Even if that user is only you.
+I was lucky - I had a broken one *(a project, not a user =)*.
+
+While developing version 3 of the script, I've learned a lot of new things
+and managed to improve it significantly *(I hope so)*.
+
+In addition, the **LEP Downloader** enables us to have
+an offline collection of this great podcast for English learners.
diff --git a/docs/reference.rst b/docs/reference.rst
index 62a2fe9..f589d5f 100644
--- a/docs/reference.rst
+++ b/docs/reference.rst
@@ -1,13 +1,42 @@
-Reference
+Developer Reference
+===================
+
+Class and function definitions for the main modules.
+All descriptions are generated automatically
+from source code docstrings.
+
+
+
+lep_downloader.lep
+--------------------------
+
+.. automodule:: lep_downloader.lep
+   :members:
+   :show-inheritance:
+
 =========

-.. contents::
-   :local:
-   :backlinks: none
+lep_downloader.downloader
+--------------------------

+.. automodule:: lep_downloader.downloader
+   :members:
+   :show-inheritance:
+
+=========

-lep_downloader.cli
------------------------
+lep_downloader.parser
+--------------------------
+
+.. automodule:: lep_downloader.parser
+   :members:
+   :show-inheritance:
+
+=========

-.. automodule:: lep_downloader.cli
+lep_downloader.exceptions
+--------------------------

+.. automodule:: lep_downloader.exceptions
    :members:
+   :show-inheritance:
diff --git a/docs/requirements.txt b/docs/requirements.txt
index f47f3eb..b35849e 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,3 +1,2 @@
-sphinx==4.0.2
-sphinx-click==3.0.1
-sphinx-rtd-theme==0.5.2
+sphinx==4.4.0
+sphinx-click==3.0.2
diff --git a/docs/usage.rst b/docs/usage.rst
index 049a3cb..a6e916c 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -1,6 +1,202 @@
-Usage
-=====
+Usage Examples
+==============

-.. click:: lep_downloader.cli:cli
-   :prog: lep-downloader
-   :nested: full
+.. meta::
+   :description: Usage Examples. How to download all LEP episodes using Python 3.8+
+   :keywords: english, podcast, LEP, downloader, episodes, app, quick start, usage
+
+
+.. important::
+   Some definitions are used throughout this guide:
+
+   **app folder** - the folder (directory) where ``lep-downloader.exe`` is located (installed)
+
+   **destination folder** - the folder (directory) where downloaded files (.mp3, .pdf)
+   will be saved. By default, it's the same as the *app folder*.
+
+You can find the full list of script options on the `Man Page`_.
+
+For all commands in this guide,
+you can substitute the script name ``lep-downloader`` with its short version ``lep-dl``.
+It is also assumed that:
+
+* all episodes (and the database) are available;
+* you have not downloaded these episodes before;
+* you answer "Yes" to the download confirmation.
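The examples below are shell commands. If you prefer driving the same CLI from Python (for instance, in automated tests such as ``tests/test_cli_download.py``, which this patch touches), click's testing runner can invoke the ``lep_downloader.cli:cli`` entry point documented on the Man Page above. A minimal sketch; the episode number, destination path, and confirmation input are illustrative only:

.. code:: python

   # Sketch: invoking the CLI in-process via click's testing utilities.
   from click.testing import CliRunner

   from lep_downloader.cli import cli

   runner = CliRunner()
   # Equivalent to: lep-downloader -ep 707 -d "C:/English/podcasts/LEP"
   # input="y\n" answers "Yes" to the download confirmation prompt.
   result = runner.invoke(
       cli, ["-ep", "707", "-d", "C:/English/podcasts/LEP"], input="y\n"
   )
   print(result.exit_code, result.output)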
+
+=========
+
+How to download an episode by its number
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To get a certain episode, run the script with the ``--episode`` option:
+
+.. code:: none
+
+   lep-downloader --episode 707
+
+or with its short form:
+
+.. code:: none
+
+   lep-downloader -ep 707
+
+The episode (mp3 file) will be downloaded to the **app folder**.
+
+How to change the destination folder
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To save files to a more familiar location,
+you can specify the destination folder with the ``--dest`` option:
+
+.. code:: none
+
+   lep-downloader -ep 707 --dest "C:\English\podcasts\LEP"
+
+If the path is writable, the episode will be downloaded to this folder.
+Otherwise, '*Error: Invalid value...*' will be displayed.
+
+The option has the short version ``-d``:
+
+.. code:: none
+
+   lep-downloader -ep 707 -d "C:/English/podcasts/LEP"
+
+
+.. note::
+   On Windows, you can use both path styles:
+   with backslashes (native) or with forward slashes.
+
+   On macOS and Linux, forward slashes are preferable.
+
+   All intermediate sub-directories of the destination folder
+   will be created automatically. You don't have to worry about it.
+
+
+How to download an episode by its date
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you know the exact day when an episode was posted on the archive page,
+you can specify a date interval like this:
+
+.. code:: none
+
+   lep-downloader -S 2019-09-17 -E 2019-09-17 -d "C:/English/podcasts/LEP"
+
+Episode #615 will be downloaded.
+
+All episodes for the year 2011:
+
+.. code:: none
+
+   lep-downloader -S 2011-01-01 -E 2011-12-31 -d "C:/English/podcasts/LEP"
+
+
+You can specify only one of the bounds (start or end).
+In this case, the other bound is set to its default value
+(the first episode ever, or the latest episode at the moment of running).
+For example, let's download all episodes starting from *2022*:
+
+.. code:: none
+
+   lep-downloader -S 2022-01-01 --dest "C:\English\podcasts\LEP"
+
+How to download a range of episodes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Let's assume that you've skipped episodes from 707 to 711.
+You can download them with one command:
+
+.. code:: none
+
+   lep-downloader -ep 707-711 -d "C:\English\podcasts\LEP"
+
+All five episodes will be downloaded.
+
+You can omit one bound, leaving the hyphen:
+
+.. code:: none
+
+   lep-downloader --episode 755- -d "C:\English\podcasts\LEP"
+
+All episodes from #755 to the last will be downloaded.
+The same goes for ``-ep -10`` (episodes from the first to #10).
+
+.. note::
+   If you specify the range option \--episode / -ep
+   and a date filter option -S / -E together,
+   the **range option will be ignored**.
+
+   You **cannot** specify a random (comma-separated) range,
+   i.e. ``-ep 3,117,513`` is an invalid option value.
+
+
+How to download the last episode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code:: none
+
+   lep-downloader --last -d "C:\English\podcasts\LEP"
+
+
+How to download PDF along with MP3
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each episode web page has been exported to a PDF file
+*(don't confuse it with a separate transcript file)*.
+You can download it along with the episode audio file (.mp3)
+using this command:
+
+.. code:: none
+
+   lep-downloader -ep 122 --with-pdf -d "C:\English\podcasts\LEP"
+
+or with the short option ``-pdf``:
+
+.. hint::
+   You can specify options in any order you like.
+
+.. code:: none
+
+   lep-downloader -d "C:\English\podcasts\LEP" -pdf --last
+
+If you want to download PDF files for all "TEXT" episodes (without any audio),
+you should combine two options:
+
+.. code:: none
+
+   lep-downloader -ep 0-0 -pdf -d "C:\English\podcasts\LEP"
+
+Such episodes have number **0** under the hood;
+that's why we've set the range ``0-0`` in this command.
+
+
+How to download all episodes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It's very simple. Run the script without options.
+For convenience, specify only the destination folder:
+
+.. code:: none
+
+   lep-downloader -d "C:\English\podcasts\LEP"
+
+.. attention::
+   Be careful when running this command.
+
+   ALL episodes (audio + pdf) will take up more than 45 GB
+   on your drive (HDD, SSD, flash)
+   *(accurate as of the moment when #758 is the latest episode)*,
+   and the downloading process will take at least ~20 hours
+   (depending on the speed of your Internet connection).
+   You must have enough free space to download all of them.
+
+=========
+
+.. hint::
+   Didn't find your answer? Let me know about it by
+   creating a new `Discussion`_
+   or writing me a letter at qa[at]hotenov.com
+
+.. _Man Page: manpage.html
+.. _Discussion: https://github.com/hotenov/LEP-downloader/discussions
diff --git a/noxfile.py b/noxfile.py
index a011635..e4e62f7 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -173,7 +173,7 @@ def docs_build(session: Session) -> None:
     """Build the documentation."""
     args = session.posargs or ["docs", "docs/_build"]
     session.install(".")
-    session.install("sphinx", "sphinx-click", "sphinx-rtd-theme")
+    session.install("sphinx", "sphinx-click", "furo")

     build_dir = Path("docs", "_build")
     if build_dir.exists():
@@ -187,7 +187,7 @@ def docs(session: Session) -> None:
     """Build and serve the documentation with live reloading on file changes."""
     args = session.posargs or ["--open-browser", "docs", "docs/_build"]
     session.install(".")
-    session.install("sphinx", "sphinx-autobuild", "sphinx-click", "sphinx-rtd-theme")
+    session.install("sphinx", "sphinx-autobuild", "sphinx-click", "furo")

     build_dir = Path("docs", "_build")
     if build_dir.exists():
diff --git a/poetry.lock b/poetry.lock
index e738fd6..a10da03 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -98,7 +98,6 @@ mypy-extensions = ">=0.4.3"
 pathspec = ">=0.9.0"
 platformdirs = ">=2"
 tomli = ">=1.1.0"
-typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
 typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}

 [package.extras]
@@ -152,7 +151,6 @@ python-versions = ">=3.6"

 [package.dependencies]
 colorama = {version = "*", markers = "platform_system == \"Windows\""}
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}

 [[package]]
 name = "colorama"
@@ -194,7 +192,7 @@ python-versions = "*"

 [[package]]
 name = "docutils"
-version = "0.16"
+version = "0.17.1"
 description = "Docutils -- Python Documentation Utilities"
 category = "dev"
 optional = false
@@ -218,7 +216,7 @@ pipenv = ["pipenv"]

 [[package]]
 name = "filelock"
-version = "3.5.1"
+version = "3.6.0"
 description = "A platform independent file lock."
 category = "dev"
 optional = false
@@ -237,7 +235,6 @@ optional = false
 python-versions = ">=3.6"

 [package.dependencies]
-importlib-metadata = {version = "<4.3", markers = "python_version < \"3.8\""}
 mccabe = ">=0.6.0,<0.7.0"
 pycodestyle = ">=2.8.0,<2.9.0"
 pyflakes = ">=2.4.0,<2.5.0"
@@ -331,6 +328,23 @@ flake8 = ">=3.0.0"
 pygments = "*"
 restructuredtext-lint = "*"

+[[package]]
+name = "furo"
+version = "2022.2.14.1"
+description = "A clean customisable Sphinx documentation theme."
+category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +beautifulsoup4 = "*" +pygments = ">=2.7,<3.0" +sphinx = ">=4.0,<5.0" + +[package.extras] +test = ["pytest", "pytest-cov", "pytest-xdist"] +doc = ["myst-parser", "sphinx-copybutton", "sphinx-design", "sphinx-inline-tabs"] + [[package]] name = "gitdb" version = "4.0.9" @@ -352,7 +366,6 @@ python-versions = ">=3.7" [package.dependencies] gitdb = ">=4.0.1,<5" -typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} [[package]] name = "identify" @@ -383,19 +396,19 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "importlib-metadata" -version = "3.10.1" +version = "4.11.1" description = "Read metadata from Python packages" -category = "main" +category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] -typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +perf = ["ipython"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] [[package]] name = "iniconfig" @@ -462,11 +475,11 @@ source = ["Cython (>=0.29.7)"] [[package]] name = "markupsafe" -version = "2.0.1" +version = "2.1.0" description = "Safely add untrusted strings to HTML/XML markup." 
category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "mccabe" @@ -487,7 +500,6 @@ python-versions = ">=3.5" [package.dependencies] mypy-extensions = ">=0.4.3,<0.5.0" toml = "*" -typed-ast = {version = ">=1.4.0,<1.5.0", markers = "python_version < \"3.8\""} typing-extensions = ">=3.7.4" [package.extras] @@ -569,9 +581,6 @@ category = "dev" optional = false python-versions = ">=3.6" -[package.dependencies] -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} - [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] @@ -587,7 +596,6 @@ python-versions = ">=3.6.1" [package.dependencies] cfgv = ">=2.0.0" identify = ">=1.0.0" -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} nodeenv = ">=0.11.1" pyyaml = ">=5.1" toml = "*" @@ -674,7 +682,6 @@ python-versions = ">=3.6" atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" @@ -826,9 +833,6 @@ category = "main" optional = false python-versions = ">=3.6,<4.0" -[package.dependencies] -importlib_metadata = {version = ">=3.0,<4.0", markers = "python_version < \"3.8\""} - [[package]] name = "six" version = "1.16.0" @@ -863,7 +867,7 @@ python-versions = ">=3.6" [[package]] name = "sphinx" -version = "4.3.2" +version = "4.4.0" description = "Python documentation generator" category = "dev" optional = false @@ -875,6 +879,7 @@ babel = ">=1.3" colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""} docutils = ">=0.14,<0.18" imagesize = "*" +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} Jinja2 = ">=2.3" packaging = "*" Pygments = ">=2.0" @@ -889,7 +894,7 @@ sphinxcontrib-serializinghtml = ">=1.1.5" [package.extras] docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=3.5.0)", "isort", "mypy (>=0.920)", "docutils-stubs", "types-typed-ast", "types-pkg-resources", "types-requests"] +lint = ["flake8 (>=3.5.0)", "isort", "mypy (>=0.931)", "docutils-stubs", "types-typed-ast", "types-requests"] test = ["pytest", "pytest-cov", "html5lib", "cython", "typed-ast"] [[package]] @@ -921,21 +926,6 @@ click = ">=7.0" docutils = "*" sphinx = ">=2.0" -[[package]] -name = "sphinx-rtd-theme" -version = "0.5.2" -description = "Read the Docs theme for Sphinx" -category = "dev" -optional = false -python-versions = "*" - -[package.dependencies] -docutils = "<0.17" -sphinx = "*" - -[package.extras] -dev = ["transifex-client", "sphinxcontrib-httpdomain", "bump2version"] - [[package]] name = "sphinxcontrib-applehelp" version = "1.0.2" @@ -1016,7 +1006,6 @@ optional = false python-versions = ">=3.6" [package.dependencies] -importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""} pbr = ">=2.0.0,<2.1.0 || >2.1.0" [[package]] @@ -1043,14 +1032,6 @@ category = "dev" optional = false python-versions = ">= 3.5" -[[package]] -name = "typed-ast" -version = "1.4.3" -description = "a fork of Python 2 and 3 ast modules with type comment support" -category = "dev" -optional = false -python-versions = "*" - [[package]] name = "typeguard" version = "2.13.3" @@ -1067,7 +1048,7 @@ test = ["pytest", "typing-extensions", "mypy"] name = "typing-extensions" version = "4.1.1" description = "Backported and Experimental Type Hints for Python 3.6+" -category = 
"main" +category = "dev" optional = false python-versions = ">=3.6" @@ -1095,7 +1076,6 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" [package.dependencies] distlib = ">=0.3.1,<1" filelock = ">=3.2,<4" -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} platformdirs = ">=2,<3" six = ">=1.9.0,<2" @@ -1146,7 +1126,7 @@ python-versions = "*" name = "zipp" version = "3.7.0" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" +category = "dev" optional = false python-versions = ">=3.7" @@ -1156,8 +1136,8 @@ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest- [metadata] lock-version = "1.1" -python-versions = "^3.7.0" -content-hash = "77d9cb695272405b4f0ac8e5318921913556dd2796fb991d93da7cc7c834b24e" +python-versions = "^3.8.0" +content-hash = "c95f08d8e982ab22b2ae1652e320ecfb1497656207be5017d7e3ed64d5424805" [metadata.files] alabaster = [ @@ -1289,16 +1269,16 @@ distlib = [ {file = "distlib-0.3.4.zip", hash = "sha256:e4b58818180336dc9c529bfb9a0b58728ffc09ad92027a3f30b7cd91e3458579"}, ] docutils = [ - {file = "docutils-0.16-py2.py3-none-any.whl", hash = "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af"}, - {file = "docutils-0.16.tar.gz", hash = "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"}, + {file = "docutils-0.17.1-py2.py3-none-any.whl", hash = "sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61"}, + {file = "docutils-0.17.1.tar.gz", hash = "sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125"}, ] dparse = [ {file = "dparse-0.5.1-py3-none-any.whl", hash = "sha256:e953a25e44ebb60a5c6efc2add4420c177f1d8404509da88da9729202f306994"}, {file = "dparse-0.5.1.tar.gz", hash = "sha256:a1b5f169102e1c894f9a7d5ccf6f9402a836a5d24be80a986c7ce9eaed78f367"}, ] filelock = [ - {file = "filelock-3.5.1-py3-none-any.whl", hash = "sha256:7b23620a293cf3e19924e469cb96672dc72b36c26e8f80f85668310117fcbe4e"}, - {file = "filelock-3.5.1.tar.gz", hash = "sha256:d1eccb164ed020bc84edd9e45bf6cdb177f64749f6b8fe066648832d2e98726d"}, + {file = "filelock-3.6.0-py3-none-any.whl", hash = "sha256:f8314284bfffbdcfa0ff3d7992b023d4c628ced6feb957351d4c48d059f56bc0"}, + {file = "filelock-3.6.0.tar.gz", hash = "sha256:9cd540a9352e432c7246a48fe4e8712b10acb1df2ad1f30e8c070b82ae1fed85"}, ] flake8 = [ {file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"}, @@ -1331,6 +1311,10 @@ flake8-rst-docstrings = [ {file = "flake8-rst-docstrings-0.2.5.tar.gz", hash = "sha256:4fe93f997dea45d9d3c8bd220f12f0b6c359948fb943b5b48021a3f927edd816"}, {file = "flake8_rst_docstrings-0.2.5-py3-none-any.whl", hash = "sha256:b99d9041b769b857efe45a448dc8c71b1bb311f9cacbdac5de82f96498105082"}, ] +furo = [ + {file = "furo-2022.2.14.1-py3-none-any.whl", hash = "sha256:d7cb8126034637212332350ec8490cb95732d36506b024318a58cee2e7de0fda"}, + {file = "furo-2022.2.14.1.tar.gz", hash = "sha256:1af3a3053e594666e27eefd347b84beae5d74d6d20f6294cc47777d46f5761a7"}, +] gitdb = [ {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"}, {file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"}, @@ -1352,8 +1336,8 @@ imagesize = [ {file = "imagesize-1.3.0.tar.gz", hash = "sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d"}, ] importlib-metadata = [ - {file = 
"importlib_metadata-3.10.1-py3-none-any.whl", hash = "sha256:2ec0faae539743ae6aaa84b49a169670a465f7f5d64e6add98388cc29fd1f2f6"}, - {file = "importlib_metadata-3.10.1.tar.gz", hash = "sha256:c9356b657de65c53744046fa8f7358afe0714a1af7d570c00c3835c2d724a7c1"}, + {file = "importlib_metadata-4.11.1-py3-none-any.whl", hash = "sha256:e0bc84ff355328a4adfc5240c4f211e0ab386f80aa640d1b11f0618a1d282094"}, + {file = "importlib_metadata-4.11.1.tar.gz", hash = "sha256:175f4ee440a0317f6e8d81b7f8d4869f93316170a65ad2b007d2929186c8052c"}, ] iniconfig = [ {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, @@ -1434,40 +1418,46 @@ lxml = [ {file = "lxml-4.8.0.tar.gz", hash = "sha256:f63f62fc60e6228a4ca9abae28228f35e1bd3ce675013d1dfb828688d50c6e23"}, ] markupsafe = [ - {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = 
"sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, - {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e104c0c2b4cd765b4e83909cde7ec61a1e313f8a75775897db321450e928cce"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24c3be29abb6b34052fd26fc7a8e0a49b1ee9d282e3665e8ad09a0a68faee5b3"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204730fd5fe2fe3b1e9ccadb2bd18ba8712b111dcabce185af0b3b5285a7c989"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d3b64c65328cb4cd252c94f83e66e3d7acf8891e60ebf588d7b493a55a1dbf26"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:96de1932237abe0a13ba68b63e94113678c379dca45afa040a17b6e1ad7ed076"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75bb36f134883fdbe13d8e63b8675f5f12b80bb6627f7714c7d6c5becf22719f"}, + {file = 
"MarkupSafe-2.1.0-cp310-cp310-win32.whl", hash = "sha256:4056f752015dfa9828dce3140dbadd543b555afb3252507348c493def166d454"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:d4e702eea4a2903441f2735799d217f4ac1b55f7d8ad96ab7d4e25417cb0827c"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f0eddfcabd6936558ec020130f932d479930581171368fd728efcfb6ef0dd357"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ddea4c352a488b5e1069069f2f501006b1a4362cb906bee9a193ef1245a7a61"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09c86c9643cceb1d87ca08cdc30160d1b7ab49a8a21564868921959bd16441b8"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a0abef2ca47b33fb615b491ce31b055ef2430de52c5b3fb19a4042dbc5cadb"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:736895a020e31b428b3382a7887bfea96102c529530299f426bf2e636aacec9e"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:679cbb78914ab212c49c67ba2c7396dc599a8479de51b9a87b174700abd9ea49"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:84ad5e29bf8bab3ad70fd707d3c05524862bddc54dc040982b0dbcff36481de7"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-win32.whl", hash = "sha256:8da5924cb1f9064589767b0f3fc39d03e3d0fb5aa29e0cb21d43106519bd624a"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:454ffc1cbb75227d15667c09f164a0099159da0c1f3d2636aa648f12675491ad"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:142119fb14a1ef6d758912b25c4e803c3ff66920635c44078666fe7cc3f8f759"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2a5a856019d2833c56a3dcac1b80fe795c95f401818ea963594b345929dffa7"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d1fb9b2eec3c9714dd936860850300b51dbaa37404209c8d4cb66547884b7ed"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62c0285e91414f5c8f621a17b69fc0088394ccdaa961ef469e833dbff64bd5ea"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc3150f85e2dbcf99e65238c842d1cfe69d3e7649b19864c1cc043213d9cd730"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f02cf7221d5cd915d7fa58ab64f7ee6dd0f6cddbb48683debf5d04ae9b1c2cc1"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5653619b3eb5cbd35bfba3c12d575db2a74d15e0e1c08bf1db788069d410ce8"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d2f5d97fcbd004c03df8d8fe2b973fe2b14e7bfeb2cfa012eaa8759ce9a762f"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-win32.whl", hash = "sha256:3cace1837bc84e63b3fd2dfce37f08f8c18aeb81ef5cf6bb9b51f625cb4e6cd8"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:fabbe18087c3d33c5824cb145ffca52eccd053061df1d79d4b66dafa5ad2a5ea"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:023af8c54fe63530545f70dd2a2a7eed18d07a9a77b94e8bf1e2ff7f252db9a3"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d66624f04de4af8bbf1c7f21cc06649c1c69a7f84109179add573ce35e46d448"}, + {file = 
"MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c532d5ab79be0199fa2658e24a02fce8542df196e60665dd322409a03db6a52c"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ec74fada3841b8c5f4c4f197bea916025cb9aa3fe5abf7d52b655d042f956"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c653fde75a6e5eb814d2a0a89378f83d1d3f502ab710904ee585c38888816c"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:961eb86e5be7d0973789f30ebcf6caab60b844203f4396ece27310295a6082c7"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:598b65d74615c021423bd45c2bc5e9b59539c875a9bdb7e5f2a6b92dfcfc268d"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:599941da468f2cf22bf90a84f6e2a65524e87be2fce844f96f2dd9a6c9d1e635"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-win32.whl", hash = "sha256:e6f7f3f41faffaea6596da86ecc2389672fa949bd035251eab26dc6697451d05"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:b8811d48078d1cf2a6863dafb896e68406c5f513048451cd2ded0473133473c7"}, + {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"}, ] mccabe = [ {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, @@ -1683,8 +1673,8 @@ soupsieve = [ {file = "soupsieve-2.3.1.tar.gz", hash = "sha256:b8d49b1cd4f037c7082a9683dfa1801aa2597fb11c3a1155b7a5b94829b4f1f9"}, ] sphinx = [ - {file = "Sphinx-4.3.2-py3-none-any.whl", hash = "sha256:6a11ea5dd0bdb197f9c2abc2e0ce73e01340464feaece525e64036546d24c851"}, - {file = "Sphinx-4.3.2.tar.gz", hash = "sha256:0a8836751a68306b3fe97ecbe44db786f8479c3bf4b80e3a7f5c838657b4698c"}, + {file = "Sphinx-4.4.0-py3-none-any.whl", hash = "sha256:5da895959511473857b6d0200f56865ed62c31e8f82dd338063b84ec022701fe"}, + {file = "Sphinx-4.4.0.tar.gz", hash = "sha256:6caad9786055cb1fa22b4a365c1775816b876f91966481765d7d50e9f0dd35cc"}, ] sphinx-autobuild = [ {file = "sphinx-autobuild-2021.3.14.tar.gz", hash = "sha256:de1ca3b66e271d2b5b5140c35034c89e47f263f2cd5db302c9217065f7443f05"}, @@ -1694,10 +1684,6 @@ sphinx-click = [ {file = "sphinx-click-3.1.0.tar.gz", hash = "sha256:36dbf271b1d2600fb05bd598ddeed0b6b6acf35beaf8bc9d507ba7716b232b0e"}, {file = "sphinx_click-3.1.0-py3-none-any.whl", hash = "sha256:8fb0b048a577d346d741782e44d041d7e908922858273d99746f305870116121"}, ] -sphinx-rtd-theme = [ - {file = "sphinx_rtd_theme-0.5.2-py2.py3-none-any.whl", hash = "sha256:4a05bdbe8b1446d77a01e20a23ebc6777c74f43237035e76be89699308987d6f"}, - {file = "sphinx_rtd_theme-0.5.2.tar.gz", hash = "sha256:32bd3b5d13dc8186d7a42fc816a23d32e83a4827d7d9882948e7b837c232da5a"}, -] sphinxcontrib-applehelp = [ {file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"}, {file = "sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a"}, @@ -1777,38 +1763,6 @@ tornado = [ {file = "tornado-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4"}, {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"}, ] -typed-ast = [ - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_i686.whl", hash 
= "sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6"}, - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075"}, - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528"}, - {file = "typed_ast-1.4.3-cp35-cp35m-win32.whl", hash = "sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428"}, - {file = "typed_ast-1.4.3-cp35-cp35m-win_amd64.whl", hash = "sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3"}, - {file = "typed_ast-1.4.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f"}, - {file = "typed_ast-1.4.3-cp36-cp36m-win32.whl", hash = "sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363"}, - {file = "typed_ast-1.4.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7"}, - {file = "typed_ast-1.4.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899"}, - {file = "typed_ast-1.4.3-cp37-cp37m-win32.whl", hash = "sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c"}, - {file = "typed_ast-1.4.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805"}, - {file = "typed_ast-1.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39"}, - {file = "typed_ast-1.4.3-cp38-cp38-win32.whl", hash = "sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927"}, - {file = "typed_ast-1.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40"}, - {file = "typed_ast-1.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux1_x86_64.whl", hash = 
"sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3"}, - {file = "typed_ast-1.4.3-cp39-cp39-win32.whl", hash = "sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808"}, - {file = "typed_ast-1.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c"}, - {file = "typed_ast-1.4.3.tar.gz", hash = "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"}, -] typeguard = [ {file = "typeguard-2.13.3-py3-none-any.whl", hash = "sha256:5e3e3be01e887e7eafae5af63d1f36c849aaa94e3a0112097312aabfa16284f1"}, {file = "typeguard-2.13.3.tar.gz", hash = "sha256:00edaa8da3a133674796cf5ea87d9f4b4c367d77476e185e80251cc13dfbb8c4"}, diff --git a/pyproject.toml b/pyproject.toml index 0eddc50..cacb1fd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "lep-downloader" -version = "3.0.0a5" +version = "3.0.0b1" description = "LEP Downloader - CLI app for parsing and downloading episodes of Luke's English Podcast" authors = ["Artem Hotenov "] license = "MIT" @@ -9,7 +9,7 @@ homepage = "https://github.com/hotenov/LEP-downloader" repository = "https://github.com/hotenov/LEP-downloader" documentation = "https://lep-downloader.readthedocs.io" classifiers = [ - "Development Status :: 3 - Alpha", + "Development Status :: 4 - Beta", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", @@ -19,7 +19,7 @@ classifiers = [ Changelog = "https://github.com/hotenov/LEP-downloader/releases" [tool.poetry.dependencies] -python = "^3.7.0" +python = "^3.8.0" click = "^8.0.1" requests = "^2.26.0" beautifulsoup4 = "^4.9.3" @@ -47,7 +47,6 @@ pep8-naming = "^0.12.1" darglint = "^1.8.1" reorder-python-imports = "^2.6.0" pre-commit-hooks = "^4.0.1" -sphinx-rtd-theme = "^0.5.2" sphinx-click = "^3.0.2" Pygments = "^2.10.0" requests-mock = "^1.9.3" @@ -56,6 +55,7 @@ flake8-import-order = "^0.18.1" rope = "^0.20.1" yapf = "^0.31.0" pytest-mock = "^3.6.1" +furo = ">=2022.2.14" [tool.poetry.scripts] lep-downloader = "lep_downloader.__main__:main" diff --git a/src/lep_downloader/cli_shared.py b/src/lep_downloader/cli_shared.py index cb3324a..397cbe5 100644 --- a/src/lep_downloader/cli_shared.py +++ b/src/lep_downloader/cli_shared.py @@ -83,7 +83,7 @@ def common_options(f: Callable[..., Any]) -> Callable[..., Any]: "Episode number for downloading. " "To specify range of episodes use hyphen, i.e. -." ), - metavar="", + metavar="", ) @click.option( "--with-pdf", @@ -106,14 +106,14 @@ def common_options(f: Callable[..., Any]) -> Callable[..., Any]: "start_date", type=click.UNPROCESSED, callback=validate_date, - help="To specify 'START DATE' for date range filtering. Format 'YYYY-MM-DD'", + help="To specify 'START_DATE' for date range filtering. Format 'YYYY-MM-DD'", ) @click.option( "-E", "end_date", type=click.UNPROCESSED, callback=validate_date, - help="To specify 'END DATE' for date range filtering. Format 'YYYY-MM-DD'", + help="To specify 'END_DATE' for date range filtering. 
Format 'YYYY-MM-DD'", ) @click.option( "--dest", @@ -122,7 +122,7 @@ def common_options(f: Callable[..., Any]) -> Callable[..., Any]: callback=validate_dir, default=Path(), help="Directory path (absolute or relative) to LEP files destination.", - metavar="", + metavar="", ) @click.option( "--db-url", @@ -130,7 +130,7 @@ def common_options(f: Callable[..., Any]) -> Callable[..., Any]: "db_url", default=conf.JSON_DB_URL, help="URL to custom JSON database file.", - metavar="", + metavar="", ) @click.option( "--quiet", diff --git a/src/lep_downloader/commands/parse.py b/src/lep_downloader/commands/parse.py index ffb0761..45f3676 100644 --- a/src/lep_downloader/commands/parse.py +++ b/src/lep_downloader/commands/parse.py @@ -50,7 +50,7 @@ "Directory path (absolute or relative) for storing HTML files. " "It makes sense only if option '--with-html' is provided." ), - metavar="", + metavar="", ) @click.option( "--dest", @@ -59,7 +59,7 @@ callback=validate_dir, default=Path(), help="Directory path (absolute or relative) to JSON result file destination.", - metavar="", + metavar="", ) @click.option( "--db-url", @@ -67,7 +67,7 @@ "db_url", default=conf.JSON_DB_URL, help="URL to custom JSON database file.", - metavar="", + metavar="", ) @click.pass_context def cli( diff --git a/src/lep_downloader/config.py b/src/lep_downloader/config.py index f4b2aff..71c8e4b 100644 --- a/src/lep_downloader/config.py +++ b/src/lep_downloader/config.py @@ -22,10 +22,10 @@ """App configuration module.""" -ARCHIVE_URL = "https://hotenov.com" +ARCHIVE_URL = "https://teacherluke.co.uk/archive-of-episodes-1-149/" -JSON_DB_URL = "https://hotenov.com/some_json.json" -DEFAULT_JSON_NAME = "lep-db.min.json" +JSON_DB_URL = "https://hotenov.com/d/lep/v3-lep-db.min.json" +DEFAULT_JSON_NAME = "v3-lep-db.min.json" DOWNLOADS_BASE_URL = "https://hotenov.com/d/lep/" diff --git a/src/lep_downloader/downloader.py b/src/lep_downloader/downloader.py index 5f8c376..621cdd1 100644 --- a/src/lep_downloader/downloader.py +++ b/src/lep_downloader/downloader.py @@ -43,28 +43,59 @@ # COMPILED REGEX PATTERNS # URL_ENCODED_CHARS_PATTERN = re.compile(r"%[0-9A-Z]{2}") +"""re.Pattern: Pattern for matching %-encoded Unicode characters.""" @dataclass class LepFile: - """Represent base class for LEP file object.""" + """Represent base class for LEP file object. + + Args: + ep_id (int): Episode index. Defaults to 0. + name (str): File name (without extension). Defaults to empty str. + ext (str): File extension. Defaults to empty str. + short_date (str): Episode date (format "YYYY-MM-DD"). Defaults to empty str. + filename (str): File name + extension. Defaults to empty str. + primary_url (str): Primary URL to download file. Defaults to empty str. + secondary_url (str): Secondary URL to download file. Defaults to empty str. + tertiary_url (str): Tertiary URL to download file. Defaults to empty str. + """ - ep_id: int = 0 - name: str = "" - ext: str = "" - short_date: str = "" - filename: str = "" - primary_url: str = "" - secondary_url: str = "" - tertiary_url: str = "" + ep_id: int = 0 #: Episode index. + name: str = "" #: File name (without extension). + ext: str = "" #: File extension. + short_date: str = "" #: Episode date (format "YYYY-MM-DD"). + filename: str = "" #: File name + extension. + primary_url: str = "" #: Primary URL to download file. + secondary_url: str = "" #: Secondary URL to download file. + tertiary_url: str = "" #: Tertiary URL to download file. 
 @dataclass
 class Audio(LepFile):
-    """Represent episode (or part of it) audio object."""
+    """Represent an audio object for an episode (or a part of it).
+
+    Args:
+        ep_id (int): Episode index. Defaults to 0.
+        name (str): File name (without extension). Defaults to empty str.
+        ext (str): File extension. Defaults to ".mp3".
+        short_date (str): Episode date (format "YYYY-MM-DD"). Defaults to empty str.
+        filename (str): File name + extension. Defaults to empty str.
+        primary_url (str): Primary URL to download file. Defaults to empty str.
+        secondary_url (str): Secondary URL to download file. Defaults to empty str.
+        tertiary_url (str): Tertiary URL to download file. Defaults to empty str.
+        part_no (int): Part number. Defaults to 0.
+
+    Notes:
+        Filename depends on part number.
+        - If `part_no` = 0, composed as ``f"[{short_date}] # {name}" + ext``
+        - If `part_no` > 0, ``f"[{short_date}] # {name}" + " [Part NN]" + ext``
+
+        For other attrs, see :class:`LepFile`.
+    """

-    ext: str = ".mp3"
-    part_no: int = 0
+    ext: str = ".mp3"  #: Extension for audio file.
+    part_no: int = 0  #: Part number.

     def __post_init__(self) -> None:
         """Compose filename for this instance."""
@@ -79,9 +110,26 @@ def __post_init__(self) -> None:

 @dataclass
 class PagePDF(LepFile):
-    """Represent PDF file of episode page."""
+    """Represent a PDF file of an episode page.
+
+    Args:
+        ep_id (int): Episode index. Defaults to 0.
+        name (str): File name (without extension). Defaults to empty str.
+        ext (str): File extension. Defaults to ".pdf".
+        short_date (str): Episode date (format "YYYY-MM-DD"). Defaults to empty str.
+        filename (str): File name + extension. Defaults to empty str.
+        primary_url (str): Primary URL to download file. Defaults to empty str.
+        secondary_url (str): Secondary URL to download file. Defaults to empty str.
+        tertiary_url (str): Tertiary URL to download file. Defaults to empty str.
+
+    Notes:
+        Filename is composed after initialization of other attrs as:
+        ``f"[{short_date}] # {name}" + ext``
+
+        For other attrs, see :class:`LepFile`.
+    """

-    ext: str = ".pdf"
+    ext: str = ".pdf"  #: Extension for PDF file.

     def __post_init__(self) -> None:
         """Compose filename for this instance."""
@@ -90,10 +138,31 @@ def __post_init__(self) -> None:

 @dataclass
 class ATrack(LepFile):
-    """Represent audio track to episode (or part of it) object."""
+    """Represent an audio track object (for an episode video or a part of it).
+
+    Args:
+        ep_id (int): Episode index. Defaults to 0.
+        name (str): File name (without extension). Defaults to empty str.
+        ext (str): File extension. Defaults to ".mp3".
+        short_date (str): Episode date (format "YYYY-MM-DD"). Defaults to empty str.
+        filename (str): File name + extension. Defaults to empty str.
+        primary_url (str): Primary URL to download file. Defaults to empty str.
+        secondary_url (str): Secondary URL to download file. Defaults to empty str.
+        tertiary_url (str): Tertiary URL to download file. Defaults to empty str.
+        part_no (int): Part number. Defaults to 0.
+
+    Notes:
+        Filename depends on part number.
+        - If `part_no` = 0,
+          composed as ``f"[{short_date}] # {name}" + " _aTrack_" + ext``
+        - If `part_no` > 0,
+          ``f"[{short_date}] # {name}" + " [Part NN]" + " _aTrack_" + ext``
+
+        For other attrs, see :class:`LepFile`.
+    """

-    ext: str = ".mp3"
-    part_no: int = 0
+    ext: str = ".mp3"  #: Extension for audio track file.
+    part_no: int = 0  #: Part number.
     def __post_init__(self) -> None:
         """Compose filename for this instance."""
@@ -113,14 +182,31 @@ class LepFileList(List[Any]):
     """Represent list of LepFile objects."""

     def filter_by_type(self, *file_types: Any) -> Any:
-        """Return new filtered list by file type(s)."""
+        """Filter list by file type(s).
+
+        Args:
+            file_types (Any): Variable length argument list of file types
+                (Audio, PagePDF, ATrack, and others).
+
+        Returns:
+            :class:`LepFileList`: New filtered LepFileList.
+        """
         file_types = tuple(file_types)
         filtered = LepFileList(file for file in self if isinstance(file, file_types))
         return filtered


 def crawl_list(links: List[str]) -> Tuple[str, str, str]:
-    """Crawl list of links and return tuple of three links."""
+    """Crawl a list of links and return a tuple of three links.
+
+    For absent URLs, empty strings are assigned.
+
+    Args:
+        links (list[str]): List of URLs (for one file).
+
+    Returns:
+        Tuple[str, str, str]: A tuple of three strings (URLs).
+    """
     primary_url = secondary_url = tertiary_url = ""
     links_number = len(links)
     if links_number == 1:
@@ -136,16 +222,23 @@ def crawl_list(links: List[str]) -> Tuple[str, str, str]:
     return primary_url, secondary_url, tertiary_url


-def add_each_audio_to_shared_list(
+def append_each_audio_to_container_list(
     ep_id: int,
     name: str,
     short_date: str,
     audios: List[List[str]],
     file_class: Union[Type[Audio], Type[ATrack]],
 ) -> None:
-    """Gather data for each episode audio.
+    """Associate links for each audio file with an episode.
+
+    Then put the audio as an 'Audio' or 'ATrack' object into the container
+    list of LepFile objects.

-    Then add it as 'Audio' or 'ATrack' object to shared list of LepFile objects.
+    Args:
+        ep_id (int): Episode number.
+        name (str): File name (without extension).
+        short_date (str): Date (format "YYYY-MM-DD").
+        audios (list[list[str]]): List of lists of URLs, one list per audio part.
+        file_class (:class:`Audio` | :class:`ATrack`): LepFile subclass (audio type).
     """
     is_multi_part = False if len(audios) < 2 else True
     start = int(is_multi_part)
@@ -166,15 +259,21 @@ def add_each_audio_to_shared_list(
     files_box.append(audio_file)


-def add_page_pdf_file(
+def append_page_pdf_file_to_container_list(
     ep_id: int,
     name: str,
     short_date: str,
     page_pdf: List[str],
 ) -> None:
-    """Gather page PDF for episode.
+    """Associate links for the page PDF file with an episode.

-    Then add it as 'PagePDF' object to shared 'files' list of LepFile objects.
+    Then put it as a 'PagePDF' object into the container list of LepFile objects.
+
+    Args:
+        ep_id (int): Episode number.
+        name (str): File name (without extension).
+        short_date (str): Date (format "YYYY-MM-DD").
+        page_pdf (list[str]): List of URLs for page PDF file.
     """
     global files_box
     if not page_pdf:
@@ -198,12 +297,18 @@ def add_page_pdf_file(

 files_box = LepFileList()
+""":class:`LepFileList`: Module level container list of LepFile objects."""


 def gather_all_files(lep_episodes: LepEpisodeList) -> LepFileList:
-    """Skim passed episode list and collect all files.
+    """Skim a list of episodes and collect all files.
+
+    Args:
+        lep_episodes (LepEpisodeList): List of LepEpisode objects.

-    Return module's 'files_box' list.
+    Returns:
+        :class:`LepFileList`: Module's container list
+        :const:`files_box`.
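+
+    Example:
+        A minimal doctest-style sketch (assuming an episode with the default
+        date and a single audio link; names and URL are illustrative):
+
+        >>> from lep_downloader.lep import LepEpisode, LepEpisodeList
+        >>> ep = LepEpisode(episode=3, post_title="3. Music")
+        >>> ep.files["audios"] = [["https://example.com/3.mp3"]]
+        >>> gather_all_files(LepEpisodeList([ep]))[0].filename
+        '[2000-01-01] # 3. Music.mp3'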
""" global files_box files_box = LepFileList() @@ -213,16 +318,18 @@ def gather_all_files(lep_episodes: LepEpisodeList) -> LepFileList: if ep.files: audios = ep.files.setdefault("audios", []) if audios: - add_each_audio_to_shared_list( - ep.index, ep.post_title, ep._short_date, audios, Audio + append_each_audio_to_container_list( + ep.index, ep.post_title, ep.short_date, audios, Audio ) audio_tracks = ep.files.setdefault("atrack", []) if audio_tracks: - add_each_audio_to_shared_list( - ep.index, ep.post_title, ep._short_date, audio_tracks, ATrack + append_each_audio_to_container_list( + ep.index, ep.post_title, ep.short_date, audio_tracks, ATrack ) page_pdf = ep.files.setdefault("page_pdf", []) - add_page_pdf_file(ep.index, ep.post_title, ep._short_date, page_pdf) + append_page_pdf_file_to_container_list( + ep.index, ep.post_title, ep.short_date, page_pdf + ) return files_box @@ -230,7 +337,22 @@ def detect_existing_files( save_dir: Path, files: LepFileList, ) -> Tuple[LepFileList, LepFileList]: - """Separate lists for existing and non-existing files.""" + """Separate list for existing and non-existing files. + + Method scans all files in the directory and composes + list of filtered files by extensions: mp3, pdf, mp4. + Then it separates 'files' list on two: + existed files and non-existed files + (iterating over filtered files in the directory, not all). + + Args: + save_dir (Path): Path to destination folder. + files (LepFileList): List of LepFile objects. + + Returns: + Tuple[LepFileList, LepFileList]: A tuple with + two lists: existed, non_existed. + """ existed = LepFileList() non_existed = LepFileList() only_files_by_ext: List[str] = [] @@ -253,7 +375,18 @@ def download_and_write_file( filename: str, log: LepLog, ) -> bool: - """Downloads file by URL and returns operation status.""" + """Download a file by URL and save it. + + Args: + url (str): URL to file. + session (requests.Session): Session to send request. + save_dir (Path): Folder where to save file. + filename (str): Filename (with extension). + log (LepLog): Log object where to print messages. + + Returns: + bool: Status operation. True for success, False otherwise. + """ is_writing_started = False file_path: Path = save_dir / filename try: @@ -279,7 +412,14 @@ def download_and_write_file( class LepDL(Lep): - """Represent downloader object.""" + """Represent downloader object. + + Args: + json_url (str): URL to JSON database + session (requests.Session): Requests session object. + If None defaults to global session :const:`lep.PROD_SES`. + log (LepLog): Log instance where to output messages. + """ def __init__( self, @@ -287,26 +427,39 @@ def __init__( session: requests.Session = None, log: Optional[LepLog] = None, ) -> None: - """Initialize LepDL object. - - Args: - json_url (str): URL to JSON datavase - session (requests.Session): Requests session object - if None, get default global session. - log (LepLog): Log instance of LepLog class where to output message. - """ + """Initialize LepDL object.""" super().__init__(session, log) - self.json_url = json_url + + #: URL to JSON database. + self.json_url: str = json_url + + #: List of episodes in JSON database. self.db_episodes: LepEpisodeList = LepEpisodeList() + + #: Dictionary "URL - post title". self.db_urls: Dict[str, str] = {} + + #: List of all files (gathered for downloading). self.files: LepFileList = LepFileList() + + #: List of downloaded files. self.downloaded: LepFileList = LepFileList() + + #: List of unavailable files. 
         self.not_found: LepFileList = LepFileList()
+
+        #: List of existing files on disk.
         self.existed: LepFileList = LepFileList()
+
+        #: List of non-existing files on disk.
         self.non_existed: LepFileList = LepFileList()

     def get_remote_episodes(self) -> None:
-        """Get database episodes from remote JSON database."""
+        """Get database episodes from remote JSON database.
+
+        After retrieving episodes, also extract all URLs and their titles
+        and store them in the 'db_urls' attribute.
+        """
         self.db_episodes = Lep.get_db_episodes(self.json_url)
         self.db_urls = extract_urls_from_episode_list(self.db_episodes)

@@ -315,14 +468,22 @@ def detach_existed_files(
         self,
         save_dir: Path,
         files: Optional[LepFileList] = None,
     ) -> None:
-        """Detach 'existed' files from non 'non_existed'."""
+        """Detach 'existed' files from 'non_existed' ones.
+
+        Args:
+            save_dir (Path): Folder for saving files.
+            files (LepFileList, optional): List of files.
+                If None, defaults to the instance's 'files' attribute.
+        """
         files = files if files else self.files
         self.existed, self.non_existed = detect_existing_files(save_dir, files)

     def populate_default_url(self) -> None:
-        """Fill in download url (if it is empty) with default value.
+        """Fill in the secondary download URL (if it is empty) with a default value.

-        Operate with 'files' shared list.
+        Iterate over the 'files' attribute list.
+        The default value is composed as: :const:`config.DOWNLOADS_BASE_URL` +
+        url-encoded filename.
         """
         populated_files = LepFileList()
         for file in self.files:
@@ -337,7 +498,14 @@ def download_files(
         self,
         save_dir: Path,
     ) -> None:
-        """Download files from passed links bunch."""
+        """Download files from the 'non_existed' attribute list.
+
+        For reliability: if the primary link is not available,
+        the method will try the other two links (if they are present).
+
+        Args:
+            save_dir (Path): Path to the folder where files are saved.
+        """
         for file_obj in self.non_existed:
             filename = file_obj.filename
             primary_link = file_obj.primary_url
@@ -377,7 +545,20 @@ def download_files(

 def url_encoded_chars_to_lower_case(url: str) -> str:
-    """Change %-escaped chars in string to lower case."""
+    """Change %-escaped chars in a string to lower case.
+
+    Args:
+        url (str): URL with uppercase %-encoded characters.
+
+    Returns:
+        str: URL with lowercase %-encoded characters.
+
+    Example:
+        >>> import lep_downloader.downloader
+        >>> url = "https://teacherluke.co.uk/2016/03/01/333-more-misheard-lyrics-%E2%99%AC/"
+        >>> lep_downloader.downloader.url_encoded_chars_to_lower_case(url)
+        'https://teacherluke.co.uk/2016/03/01/333-more-misheard-lyrics-%e2%99%ac/'
+    """  # noqa: E501,B950
     lower_url = URL_ENCODED_CHARS_PATTERN.sub(
         lambda matchobj: matchobj.group(0).lower(), url
     )
@@ -385,7 +566,14 @@ def url_encoded_chars_to_lower_case(url: str) -> str:
     return lower_url


 def extract_urls_from_episode_list(episodes: LepEpisodeList) -> Dict[str, str]:
-    """Extract page URL and its title for each episode object in list."""
+    """Extract page URL and its title for each episode object in a list.
+
+    Args:
+        episodes (LepEpisodeList): List of episodes.
+
+    Returns:
+        dict[str, str]: Dictionary "URL - post title".
+    """
     urls_titles = {
         url_encoded_chars_to_lower_case(ep.url): ep.post_title for ep in episodes
     }
diff --git a/src/lep_downloader/exceptions.py b/src/lep_downloader/exceptions.py
index 2b0dd0d..4467b40 100644
--- a/src/lep_downloader/exceptions.py
+++ b/src/lep_downloader/exceptions.py
@@ -20,6 +20,7 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
"""Module for LEP custom exceptions.""" +from typing import Any class LepExceptionError(Exception): @@ -31,65 +32,77 @@ class LepExceptionError(Exception): class NoEpisodeLinksError(LepExceptionError): """Raised when no valid episode links on page. - Attributes: - url (str): URL which has no episode links. Default is '' - message (str): Explanation of the error. Default is '' + Args: + url (str): URL which has no episode links. Default is empty string. + message (str): Explanation of the error. Default is empty string. """ def __init__(self, url: str = "", message: str = "") -> None: """Initialize NoEpisodeLinksError exception.""" - self.url = url - self.message = message + #: URL which has no episode links. + self.url: str = url + #: Explanation of the error. + self.message: str = message class NotEpisodeURLError(LepExceptionError): """Raised when given URL is not episode / archive URL. - Attributes: - url (str): URL which has
tag. Default is '' - message (str): Explanation of the error. Default is '' + Args: + url (str): URL which has no
tag. Default is empty string. + message (str): Explanation of the error. Default is empty string. """ def __init__(self, url: str = "", message: str = "") -> None: """Initialize NotEpisodeURLError exception.""" - self.url = url - self.message = message + #: URL which has no
tag. + self.url: str = url + #: Explanation of the error. + self.message: str = message class LepEpisodeNotFoundError(LepExceptionError): """Raised when given episode URL is not available. - Attributes: - episode (LepEpisode): Episode object. - Partially filled to add as 'bad' episode. - message (str): Explanation of the error. Default is '' + First argument serves to pass partially filled episode instance, + in order to add it as 'bad' episode. + + Args: + episode (LepEpisode): Episode instance. + message (str): Explanation of the error. Default is empty string. """ - def __init__(self, episode: object, message: str = "") -> None: + def __init__(self, episode: Any, message: str = "") -> None: """Initialize NotEpisodeURLError exception.""" - self.bad_episode = episode - self.message = message + from lep_downloader.lep import LepEpisode + + #: Episode instance. + self.bad_episode: LepEpisode = episode + #: Explanation of the error. + self.message: str = message class DataBaseUnavailableError(LepExceptionError): """Raised when JSON database file is not available. - Attributes: - message (str): Explanation of the error. Default is '' + Args: + message (str): Explanation of the error. Default is empty string. """ def __init__(self, message: str = "") -> None: """Initialize DataBaseUnavailable exception.""" - self.message = message + #: Explanation of the error. + self.message: str = message class NoEpisodesInDataBaseError(LepExceptionError): # pragma: no cover for Python 3.10 """Raised when JSON database has no any valid episode. - Attributes: - message (str): Explanation of the error. Default is '' + Args: + message (str): Explanation of the error. Default is empty string. """ def __init__(self, message: str = "") -> None: """Initialize NoEpisodesInDataBase exception.""" - self.message = message + #: Explanation of the error. + self.message: str = message diff --git a/src/lep_downloader/lep.py b/src/lep_downloader/lep.py index 15643c3..8df1766 100644 --- a/src/lep_downloader/lep.py +++ b/src/lep_downloader/lep.py @@ -25,8 +25,6 @@ import sys from dataclasses import dataclass from datetime import datetime -from datetime import time -from datetime import timedelta from datetime import timezone from functools import partial from functools import total_ordering @@ -37,6 +35,7 @@ from typing import Dict from typing import List from typing import Optional +from typing import Tuple from typing import Union import requests @@ -46,6 +45,8 @@ from lep_downloader.exceptions import DataBaseUnavailableError +default_episode_datetime = datetime(2000, 1, 1, tzinfo=timezone.utc) + # COMPILED REGEX PATTERNS # INVALID_PATH_CHARS_PATTERN = re.compile(conf.INVALID_PATH_CHARS_RE) @@ -63,16 +64,44 @@ def stdout_formatter(record: Any) -> str: - """Return formatter string for console sink.""" + """Return formatter string for console sink. + + Args: + record (Any): Loguru's record dict. + + Returns: + Format string for stdout log + ``"{message}" + end`` + + Notes: + Controling ending character for log message by + storing it in the 'extra' dict and changing later via bind(). + Default is the newline character. + """ end: str = record["extra"].get("end", "\n") return "{message}" + end def logfile_formatter(record: Any) -> str: - """Return formatter string for console sink.""" + """Return formatter string for log file sink. + + Args: + record (Any): Loguru's record dict. + + Returns: + Format string for log file + ``{time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} | "{message}" + LF`` + LF - newline character here. 
+
+    Note:
+        .. code-block:: text
+
+            2022-02-25 07:20:48.909 | PRINT | Running script...⏎
+            2022-02-25 07:20:48.917 | PRINT | Starting parsing...
+
+    """
     date = "{time:YYYY-MM-DD HH:mm:ss.SSS} | "
     level = "{level: <8} | "
-    # name = "{name} - "  # Always the same in my case
     return date + level + "{message}" + "\n"
@@ -80,8 +109,16 @@ def init_lep_log(
     debug: bool = False,
     logfile: str = conf.DEBUG_FILENAME,
 ) -> Any:
-    """Create custom log after modules initialization."""
-    # global lep_log
+    """Create custom logger object.
+
+    Args:
+        debug (bool): Debug log or not. Defaults to False.
+        logfile (str): Name of the logfile.
+            Defaults to :const:`config.DEBUG_FILENAME` = "_lep_debug_.log".
+
+    Returns:
+        Custom loguru.logger object.
+    """
     lep_log = logger
     lep_log.remove()
     file_log = Path(logfile)
@@ -107,45 +144,47 @@ class LepEpisode:

     Args:
         episode (int): Episode number.
-        date (str | datetime | None): Post datetime (default 2000-01-01T00:00:00+00:00).
-            It will be converted to UTC timezone. For None value default is set.
-        url (str): Final location of post URL.
+        date (str | datetime): Post datetime.
+            It will be converted to an aware `datetime` object (with timezone).
+            Defaults to `datetime` equaling "2000-01-01T00:00:00+00:00".
+        url (str): Final location of web post URL.
         post_title (str): Post title
-            extracted from <a> tag text and converted to be safe for Windows path.
+            extracted from link text (unsafe).
         post_type (str): Post type ("AUDIO", "TEXT", etc.).
-        files (dict | None): Dictionary with files for episode.
-            Category file ("audios", "audiotrack", "page_pdf", etc) as 'files' key.
+        files (dict | None): Dictionary with files for episode. Each key of it is
+            a file category ("audios", "audiotrack", "page_pdf", etc.).
+            If None, defaults to an empty dict.
         parsed_at (str): Parsing datetime in UTC timezone, with microseconds.
-        index (int): Parsing index
-            concatenation of URL date and increment (for several posts in a day).
+        index (int): Parsing index,
+            concatenation of date from URL and increment (for several posts in a day).
         admin_note (str): Note for administrator
-            and storing error message (for bad response)
+            and for storing an error message (for a bad response during parsing).
         updated_at (str): Datetime in UTC when episode was updated
-            (usually manually by admin)
-        html_title (str): Page title in HTML tag <title>.
-            Important: Not stored in JSON database.
+            (usually manually by admin).
+        html_title (str): Page title extracted from HTML tag <title>.
+            **Important:** Not stored in JSON database.
     """

-    def _convert_date(self, date: Union[datetime, str, None]) -> datetime:
-        """Convert string date to datetime object and UTC timezone.
+    def _convert_date(self, date: Union[datetime, str]) -> Tuple[datetime, str]:
+        """Convert string datetime to an aware datetime object.

-        Input format: 2000-01-01T00:00:00+00:00
-        If datetime is passed, then only convert date to UTC timezone.
+        String datetime format: 2000-01-01T00:00:00+00:00.
+        If a `datetime` is passed, it is set "as-is".
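+
+        Example:
+            A minimal doctest-style sketch (private method, shown only for
+            illustration; the input value is arbitrary):
+
+            >>> LepEpisode()._convert_date("2021-08-11T10:30:00+02:00")[1]
+            '2021-08-11'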
""" + converted_date = default_episode_datetime + short_date: str = converted_date.strftime(r"%Y-%m-%d") if isinstance(date, str): converted_date = datetime.strptime(date, "%Y-%m-%dT%H:%M:%S%z") - converted_date = converted_date.astimezone(timezone.utc) + short_date = converted_date.strftime(r"%Y-%m-%d") else: - if date is not None: # To satisfy 'typeguard' check - converted_date = date.astimezone(timezone.utc) - else: - converted_date = datetime(2000, 1, 1, tzinfo=timezone.utc) - return converted_date + short_date = date.strftime(r"%Y-%m-%d") + converted_date = date + return converted_date, short_date def __init__( self, episode: int = 0, - date: Union[datetime, str, None] = None, + date: Any = default_episode_datetime, url: str = "", post_title: str = "", post_type: str = "", @@ -169,22 +208,35 @@ def __init__( self.admin_note = admin_note self.updated_at = updated_at self._title = html_title - self._short_date = "" @property def date(self) -> Any: - """Episode date.""" + """Episode datetime (with timezone). + + To be accurate, posting datetime on the website. + """ return self._date @date.setter - def date(self, new_post_date: Union[datetime, str, None]) -> None: + def date(self, new_post_date: Union[datetime, str]) -> None: """Episode date setter.""" - self._date = self._convert_date(new_post_date) - self._short_date = self._date.strftime(r"%Y-%m-%d") + self._date, self._short_date = self._convert_date(new_post_date) + + @property + def short_date(self) -> str: + """Episode short date. + + It's the same as posting date in the episode URL, + just formatted as "YYYY-MM-DD". + """ + return self._short_date @property def post_title(self) -> str: - """Post title (safe to use as filename).""" + """Post title converted to be safe for Windows path (filename). + + Conversion via :func:`replace_unsafe_chars`. + """ return self._post_title @post_title.setter @@ -217,22 +269,55 @@ def __repr__(self) -> str: class LepEpisodeList(List[Any]): - """Represent list of LepEpisode objects.""" + """Represent list of LepEpisode objects. + + Attributes: + default_start_date (datetime): Min date. + It's equal to "1999-01-01T00:01:00+00:00" + default_end_date (datetime): Max date. + It's equal to "2999-12-31T23:55:00+00:00" + """ def desc_sort_by_date_and_index(self) -> Any: - """Return new sorted list by post datetime, then index.""" + """Sort LepEpisodeList by post datetime. + + Returns: + :class:`LepEpisodeList`: New sorted LepEpisodeList. + + Notes: + Sort is descending (last by date will be first). + Sort goes by two attrs: "date" and "index". + """ sorted_episodes = LepEpisodeList( sorted(self, key=attrgetter("date", "index"), reverse=True) ) return sorted_episodes def filter_by_type(self, type: str) -> Any: - """Return new filtered list with passed episode type (AUDIO, TEXT, etc).""" + """Filter list by episode type. + + Args: + type (str): Episode type ("AUDIO", "TEXT", etc) + + Returns: + :class:`LepEpisodeList`: New filtered LepEpisodeList. + """ filtered = LepEpisodeList(ep for ep in self if ep.post_type == type) return filtered def filter_by_number(self, start: int, end: int) -> Any: - """Return new filtered list by episode number.""" + """Filter list by episode number. + + Args: + start (int): Episode number (left bound) + end (int): Episode number (right bound) + + Returns: + :class:`LepEpisodeList`: New filtered LepEpisodeList. + + Notes: + If end < start - they are swapped. 
+ """ if start > end: start, end = end, start filtered = LepEpisodeList( @@ -248,25 +333,34 @@ def filter_by_date( start: Optional[datetime] = None, end: Optional[datetime] = None, ) -> Any: - """Return new filtered list by episode (post) date.""" + """Filter list by episode date. + + Args: + start (datetime, optional): Episode date (left bound). + If start is None, defaults to min date + :class:`LepEpisodeList.default_start_date`. + end (datetime, optional): Episode date (right bound). + If end is None, defaults to max date + :class:`LepEpisodeList.default_end_date`. + + Returns: + :class:`LepEpisodeList`: New filtered LepEpisodeList. + + Notes: + If end < start - they are swapped. + """ start = start if start else self.default_start_date end = end if end else self.default_end_date + if start.date() > end.date(): start, end = end, start - start_aware = datetime.combine( - start.date(), - time(0, 1), # Begining of a day - tzinfo=timezone(timedelta(hours=2)), - ) - end_aware = datetime.combine( - end.date(), - time(23, 55), # End (almost) of a day - tzinfo=timezone(timedelta(hours=2)), - ) filtered = LepEpisodeList( - ep for ep in self if ep.date >= start_aware and ep.date <= end_aware + ep + for ep in self + if ep.date.date() >= start.date() and ep.date.date() <= end.date() ) + return filtered @@ -274,12 +368,19 @@ class LepJsonEncoder(json.JSONEncoder): """Custom JSONEncoder for LepEpisode objects.""" def default(self, obj: Any) -> Any: - """Override 'default' method for encoding JSON objects.""" + """Override 'default' method for encoding JSON objects. + + Args: + obj (Any): Object for encoding. + + Returns: + Any: If object is :class:`LepEpisode` returns dict. + Otherwise, TypeError exception is raised. + """ if isinstance(obj, LepEpisode): - date_0200_zone = obj.date.astimezone(timezone(timedelta(hours=2))) return { "episode": obj.episode, - "date": date_0200_zone.strftime(r"%Y-%m-%dT%H:%M:%S%z"), + "date": obj.date.strftime(r"%Y-%m-%dT%H:%M:%S%z"), "url": obj.url, "post_title": obj.post_title, "post_type": obj.post_type, @@ -294,12 +395,24 @@ def default(self, obj: Any) -> Any: def as_lep_episode_obj(dct: Dict[str, Any]) -> Any: - """Specialize JSON objects decoding.""" + """Specialize JSON objects decoding. + + Args: + dct (dict): Dictionary object from JSON + (including nested dictionaries). + + Returns: + Any: :class:`LepEpisode` object or None. + + Notes: + If dictionary is empty or has "audios" key it's returned "as-is". + Returns None if TypeError was raised. + + """ if dct == {} or ("audios" in dct): return dct try: lep_ep = LepEpisode(**dct) - lep_ep._short_date = lep_ep.date.strftime(r"%Y-%m-%d") except TypeError: # Message only to log file Lep.cls_lep_log.msg("Invalid object in JSON: {dct}", dct=dct, msg_lvl="WARNING") @@ -310,10 +423,22 @@ def as_lep_episode_obj(dct: Dict[str, Any]) -> Any: @dataclass class LepLog: - """Represent LepLog object.""" + """Represent LepLog object. + + Args: + debug (bool): Debug mode flag. Defaults to False. + logfile (str): Name of log file. + Defaults to :const:`config.DEBUG_FILENAME` = "_lep_debug_.log". + + Attributes: + debug (bool): Debug mode flag (True / False). + logfile (str): Name of log file. + lep_log (loguru.logger): Custom *loguru.logger* object, + which is returned from :func:`init_lep_log`. 
+ """ debug: bool = False - logfile: str = conf.DEBUG_FILENAME # Default is '_lep_debug_.log' + logfile: str = conf.DEBUG_FILENAME def __post_init__(self) -> None: """Create logger for LepLog instance.""" @@ -331,9 +456,27 @@ def msg( ) -> None: """Output message to console or log file. - If DEBUG = True duplicates all console messaged to log file (level PRINT). - Also add records (messages) for other log levels. - """ + Args: + msg (str): Message to output. Supports + `loguru <https://loguru.readthedocs.io/en/stable/api/logger.html#color>`__ + color markups. + skip_file (bool): Flag to skip writing to logfile (even in Debug mode). + Defaults to False. + one_line (bool): Flag to replace new line character + with Unicode char of it, i.e. ⏎. Defaults to True. + msg_lvl (str): Message level. Defaults to "PRINT". + wait_input (bool): Flag to stay on line after printing message to console. + Defaults to False. + kwargs (Any): Arbitrary keyword arguments. + + Notes: + If Debug mode is False and message level is "PRINT", + method outputs to console only. + Otherwise, it duplicates all console messages to log file too + (with level PRINT). + Also records (messages) for other log levels goes into file + (if `skip_file` is not True). + """ # noqa: E501,B950 if msg_lvl == "PRINT" and not self.debug: if wait_input: self.lep_log.bind(to_console=True, end="").info(msg, **kwargs) @@ -358,7 +501,19 @@ def msg( class Lep: - """Represent base class for general attributes.""" + """Represent base class for LEP's general attributes and methods. + + Args: + session (requests.Session, optional): Global session for descendants. + log (:class:`LepLog`, optional): Log object where to output messages. + + Attributes: + cls_session (requests.Session): Class session. + Default is taken from module variable :const:`PROD_SES` + cls_lep_log (LepLog): Class log object where to output messages. + Default is LepLog() - only **stdout** output. + json_body (str): Content of JSON database file. + """ cls_session: ClassVar[requests.Session] = requests.Session() json_body: ClassVar[str] = "" @@ -369,12 +524,7 @@ def __init__( session: Optional[requests.Session] = None, log: Optional[LepLog] = None, ) -> None: - """Default instance of LepTemplate. - - Args: - session (requests.Session): Global session for descendants. - log (LepLog): Log instance of LepLog class where to output message. - """ + """Default instance of Lep class.""" self.session = session if session else PROD_SES self.lep_log = log if log else LepLog() Lep.cls_session = self.session @@ -385,8 +535,20 @@ def get_web_document( cls, page_url: str, session: Optional[requests.Session] = None, - ) -> Any: - """Return text content of web document (HTML, JSON, etc.).""" + ) -> Tuple[str, str, bool]: + """Get text content of web document (HTML, JSON, etc.). + + Args: + page_url (str): URL for getting text response. + session (requests.Session, optional): Session object + to send request. Default is :class:`Lep.cls_session`. 
+
+        Returns:
+            A tuple (resp.text, final_location, is_url_ok) where
+
+            - resp.text (str) is the text content of the URL response
+            - final_location (str) is the location after all redirections
+            - is_url_ok (bool) is a flag of the URL status
+        """
         session = session if session else cls.cls_session
         final_location = page_url
         is_url_ok = False
@@ -419,7 +581,16 @@ def extract_only_valid_episodes(
         json_body: str,
         json_url: Optional[str] = None,
     ) -> LepEpisodeList:
-        """Return list of valid (not None) LepEpisode objects."""
+        """Return list of valid (not None) LepEpisode objects.
+
+        Args:
+            json_body (str): Content of JSON database file.
+            json_url (str, optional): JSON URL, only for printing it to output.
+
+        Returns:
+            :class:`LepEpisodeList`: List of :class:`LepEpisode` objects.
+            It's empty if there are no valid objects at all.
+        """
         db_episodes = LepEpisodeList()
         try:
             db_episodes = json.loads(json_body, object_hook=as_lep_episode_obj)
@@ -448,7 +619,19 @@ def get_db_episodes(
         json_url: str,
         session: Optional[requests.Session] = None,
     ) -> LepEpisodeList:
-        """Get valid episode list by passed URL."""
+        """Get valid episodes from JSON.
+
+        Args:
+            json_url (str): URL to JSON database file.
+            session (requests.Session, optional): Session object
+                to send request. Default is :class:`Lep.cls_session`.
+
+        Returns:
+            :class:`LepEpisodeList`: List of valid episodes.
+
+        Raises:
+            DataBaseUnavailableError: If JSON database is unavailable.
+        """
         session = session if session else cls.cls_session
         db_episodes = LepEpisodeList()
         cls.json_body, _, status_db_ok = Lep.get_web_document(json_url, session)
@@ -460,5 +643,19 @@

 def replace_unsafe_chars(filename: str) -> str:
-    """Replace most common invalid path characters with '_'."""
+    """Replace most common invalid path characters with '_'.
+
+    Args:
+        filename (str): Filename (should be a string representing
+            the final path component) without the drive and root.
+
+    Returns:
+        Safe name for writing a file on Windows OS (and others).
+
+    Example:
+        >>> import lep_downloader.lep
+        >>> unsafe = "What/ will: be* replaced?.mp3"
+        >>> lep_downloader.lep.replace_unsafe_chars(unsafe)
+        'What_ will_ be_ replaced_.mp3'
+    """
     return INVALID_PATH_CHARS_PATTERN.sub("_", filename)
diff --git a/src/lep_downloader/parser.py b/src/lep_downloader/parser.py
index 634c7a8..d3da689 100644
--- a/src/lep_downloader/parser.py
+++ b/src/lep_downloader/parser.py
@@ -53,7 +53,20 @@

 class Archive(Lep):
-    """Represent archive page object."""
+    """Represent archive page object.
+
+    Args:
+        url (str): URL to LEP Archive page. Defaults to :const:`config.ARCHIVE_URL`.
+        session (requests.Session): Session to send requests.
+            If None, defaults to super's (global) session from :const:`lep.PROD_SES`.
+        mode (str): Parsing mode ("raw" | "fetch" | "pull"). Defaults to "fetch".
+        with_html (bool): Flag to save an HTML file for the parsed web page.
+            Defaults to False.
+        html_path (str, optional): Path to folder where HTML files will be saved.
+            If None, it will later be replaced with :const:`config.PATH_TO_HTML_FILES`.
+        log (LepLog, optional): Log instance. If None, global (super's) value LepLog()
+            will be set (output to console only).
+    """

     def __init__(
         self,
         url: str = conf.ARCHIVE_URL,
         session: Optional[requests.Session] = None,
         mode: str = "fetch",
         with_html: bool = False,
         html_path: Optional[str] = None,
         log: Optional[LepLog] = None,
     ) -> None:
         """Initialize an archive instance."""
         super().__init__(session, log)
-        self.url = url
-        self.parser = ArchiveParser(self, self.url, log=self.lep_log)
+        #: URL to LEP Archive page.
+        self.url: str = url
+
+        #: Parser instance.
+        self.parser: ArchiveParser = ArchiveParser(self, self.url, log=self.lep_log)
+
+        #: Valid episode links on archive page.
         self.collected_links: Dict[str, str] = {}
+
+        #: Deleted (invalid) links.
         self.deleted_links: Set[str] = set()
+
+        #: Set of indexes.
         self.used_indexes: Set[int] = set()
+
+        #: List of archive episodes.
         self.episodes: LepEpisodeList = LepEpisodeList()
-        self.mode = mode
-        self.with_html = with_html
-        self.html_path = html_path

-    def fetch_updates(
+        #: Parsing mode.
+        self.mode: str = mode
+
+        #: Flag to save HTML files.
+        self.with_html: bool = with_html
+
+        #: Path to folder for saving HTMLs.
+        self.html_path: Optional[str] = html_path
+
+    def take_updates(
         self,
         db_urls: Dict[str, str],
         archive_urls: Optional[Dict[str, str]] = None,
         mode: str = "fetch",
     ) -> Any:
-        """Fetch only new URLs between database and archive page."""
+        """Take differing URLs between database and archive page.
+
+        The difference is determined according to the parsing mode:
+        "fetch" or "pull".
+
+        Args:
+            db_urls (Dict[str, str]): URL dictionary of the database.
+            archive_urls (Dict[str, str], optional): URL dictionary of the archive.
+                If None, takes the attribute dictionary 'collected_links'.
+            mode (str): Parsing mode. Defaults to "fetch".
+
+        Returns:
+            Any: Difference dictionary or None (for "fetch"
+            mode when the database contains more episodes than the archive).
+        """
         archive_urls = archive_urls if archive_urls else self.collected_links
         if mode == "pull":
             # Take any archive url which is not in database urls
@@ -108,14 +152,13 @@ def parse_each_episode(
         self,
         urls: Dict[str, str],
     ) -> None:
-        """Parse each episode collected from archive page.
+        """Parse each episode in the dictionary of URLs.

-        In for cycle: reversed collected links in order to start
-        from first episode to last in parsing action.
+        Args:
+            urls (Dict[str, str]): Dictionary of differing URLs
+                (or all URLs in case of "raw" mode).
         """
-        # mypy warns only for Python 3.7.0 and below.
-        # https://docs.python.org/3/library/typing.html#typing.Reversible
-        for url, text in reversed(urls.items()):
+        for url, text in reversed(urls.items()):  # from first episode to last
             try:
                 ep_parser = EpisodeParser(self, url, post_title=text, log=self.lep_log)
                 ep_parser.parse_url()
@@ -124,7 +167,7 @@
                     "<g>done:</g> {title}", title=ep_parser.episode.post_title
                 )
                 if self.with_html:
-                    short_date = ep_parser.episode._short_date
+                    short_date = ep_parser.episode.short_date
                     post_title = ep_parser.episode.post_title
                     file_stem = f"[{short_date}] # {post_title}"
                     self.write_text_to_html(
@@ -158,7 +201,18 @@ def do_parsing_actions(
         self,
         json_url: str,
         json_name: str = "",
     ) -> None:
-        """Main methdod to do parsing job."""
+        """Do the parsing job.
+
+        Args:
+            json_url (str): URL to remote JSON database.
+            json_name (str): Name for the local JSON file (with parsing results).
+
+        Returns:
+            None.
+
+        Raises:
+            NoEpisodesInDataBaseError: If JSON database has no episodes at all.
+        """
         updates: Optional[Dict[str, str]] = {}
         all_episodes = LepEpisodeList()

@@ -174,7 +228,7 @@
         lep_dl.get_remote_episodes()

         if lep_dl.db_episodes:
-            updates = self.fetch_updates(
+            updates = self.take_updates(
                 lep_dl.db_urls, self.collected_links, self.mode
             )
             if updates is None:  # For fetch mode this is not good.
@@ -209,7 +263,16 @@ def write_text_to_html(
         self,
         text: str,
         file_stem: str,
         path: Optional[str] = None,
         ext: str = ".html",
     ) -> None:
-        """Write text (content) to HTML file."""
+        """Write text to HTML file.
+
+        Args:
+            text (str): Text (HTML content) to be written to file.
+            file_stem (str): Name of the file (without extension).
+            path (str, optional): Folder path where HTML files will be saved.
+                If None, defaults to :const:`config.PATH_TO_HTML_FILES`
+                (it's the nested folder ``./data_dump`` in the app folder).
+            ext (str): Extension for HTML file. Defaults to ".html".
+        """
         path = path if path else conf.PATH_TO_HTML_FILES
         filename = file_stem + ext
         filename = lep.replace_unsafe_chars(filename)
@@ -235,7 +298,16 @@

 def is_tag_a_repeated(tag_a: Tag) -> bool:
-    """Returns True for appropriate link to episode."""
+    """Check an episode link for repetition.
+
+    Known repetitions were identified in advance and placed in a regex.
+
+    Args:
+        tag_a (Tag): Tag object (<a>).
+
+    Returns:
+        bool: True for a repeated link, False otherwise.
+    """
     tag_text = tag_a.get_text()
     is_repeated = False
     match = DUPLICATED_EP_PATTERN.search(tag_text.strip())
@@ -244,7 +316,15 @@

 def parse_post_publish_datetime(soup: BeautifulSoup) -> str:
-    """Returns post datetime as string."""
+    """Extract value from HTML's <time> tag.
+
+    Args:
+        soup (BeautifulSoup): Parsed HTML document.
+
+    Returns:
+        str: Post datetime. If the <time> tag is not found,
+        returns the default value ``1999-01-01T01:01:01+02:00``.
+    """
     date_value: str = ""
     tag_entry_datetime = soup.find("time", class_="entry-date")
     if tag_entry_datetime is not None:
@@ -255,7 +335,15 @@

 def parse_episode_number(post_title: str) -> int:
-    """Returns episode number."""
+    """Parse episode number from post title.
+
+    Args:
+        post_title (str): Post title (link text).
+
+    Returns:
+        int: Episode number. If the number is not found,
+        returns 0.
+    """
     match = BEGINING_DIGITS_PATTERN.match(post_title)
     if match:
         return int(match.group())
@@ -264,7 +352,16 @@

 def generate_post_index(post_url: str, indexes: Set[int]) -> int:
-    """Returns index number for post."""
+    """Generate index number for a post from its URL.
+
+    Args:
+        post_url (str): URL to episode.
+        indexes (Set[int]): Already used indexes.
+
+    Returns:
+        int: Index number. If the URL is not valid,
+        returns 0.
+    """
     match = EP_LINK_PATTERN.match(post_url)
     if match:
         groups_dict = match.groupdict()
@@ -286,7 +383,16 @@

 def has_tag_a_appropriate_audio(tag_a: Tag) -> bool:
-    """Returns True for appropriate link to audio."""
+    """Check whether link text indicates a "download" audio link.
+
+    Key words were identified in advance and placed in a regex.
+
+    Args:
+        tag_a (Tag): Tag object (<a>).
+
+    Returns:
+        bool: True for an appropriate link, False otherwise.
+    """
     tag_text = tag_a.get_text()
     if "http" in tag_text:
         return False
@@ -298,7 +404,15 @@

 def parse_post_audio(soup: BeautifulSoup) -> List[List[str]]:
-    """Returns list of lists with links to audio."""
+    """Find links to audio(s) on episode page.
+
+    Args:
+        soup (BeautifulSoup): Parsed HTML document of episode page.
+
+    Returns:
+        List[List[str]]: List of lists (for a multi-part episode)
+        with links to each audio (part).
+    """
     audios: List[List[str]] = []
     soup_a_only = BeautifulSoup(
@@ -324,7 +438,15 @@

 def extract_date_from_url(url: str) -> str:
-    """Parse date from URL in YYYY/MM/DD format."""
+    """Parse date from URL.
+
+    Args:
+        url (str): URL to episode.
+
+    Returns:
+        str: Date in YYYY/MM/DD format.
+        If the date is not found,
+        returns an empty string.
+    """
     match = EP_LINK_PATTERN.match(url)
     if match:
         groups_dict = match.groupdict()
@@ -335,7 +457,14 @@

 def convert_date_from_url(url: str) -> datetime:
-    """Convert URL string with date YYYY/MM/DD to datetime object."""
+    """Extract date from URL and then convert it to a 'datetime' object.
+
+    Args:
+        url (str): URL to episode.
+
+    Returns:
+        datetime: `Naive` datetime.
+    """
     url_date = extract_date_from_url(url)
     return datetime.strptime(url_date, r"%Y/%m/%d")

@@ -344,18 +473,31 @@ def write_parsed_episodes_to_json(
     lep_objects: LepEpisodeList,
     json_path: str = "",
 ) -> None:
-    """Write list of LepEpisode objects to file."""
+    """Serialize list of episodes to JSON file.
+
+    Args:
+        lep_objects (LepEpisodeList): List of LepEpisode objects.
+        json_path (str): Path to JSON file. Defaults to empty string.
+    """
     if Path(json_path).is_dir():
         filepath = Path(json_path) / conf.DEFAULT_JSON_NAME
     else:
         filepath = Path(json_path)
     with open(filepath, "w") as outfile:
-        # json.dump(lep_objects, outfile, indent=4, cls=LepJsonEncoder)
         json.dump(lep_objects, outfile, separators=(",", ":"), cls=LepJsonEncoder)


 class LepParser(Lep):
-    """Base class for LEP archive parsers."""
+    """Base class for LEP parsers.
+
+    Args:
+        archive_obj (Archive): Archive instance.
+        url (str): Target page URL.
+        session (requests.Session): Parsing session. Defaults to None.
+            If None, takes the global session from :const:`lep.PROD_SES`.
+        log (LepLog, optional): Log instance to output parsing messages.
+            Defaults to None.
+    """

     def __init__(
         self,
         archive_obj: Archive,
         url: str,
         session: Optional[requests.Session] = None,
         log: Optional[LepLog] = None,
     ) -> None:
         """Initialize LepParser object.

         Args:
             archive_obj (Archive): Archive instance.
             url (str): Target page URL.
             session (requests.Session): Requests session object.
                 If None, get default global session.
             log (LepLog): Log instance of LepLog class where to output message.
         """
         super().__init__(session, log)
+
+        #: Archive instance.
         self.archive = archive_obj
+
+        #: Target page URL.
         self.url = url
+
+        #: Page content.
         self.content: str = ""
+
+        #: Parsed HTML as BeautifulSoup object.
         self.soup: BeautifulSoup = None
+
+        #: Final location of target URL (in case of redirects).
         self.final_location: str = self.url
+
+        #: URL retrieval status.
         self.is_url_ok: bool = False

     def get_url(self) -> None:
-        """Retrive text content of archive web page."""
+        """Retrieve target web page.
+
+        Method results are saved in attributes:
+
+        - content
+        - final_location
+        - is_url_ok
+        """
         get_result = Lep.get_web_document(self.url, self.session)
         self.content = get_result[0]
         self.final_location = get_result[1]
         self.is_url_ok = get_result[2]

     def do_pre_parsing(self) -> None:
-        """Extract useful data before and prepare for parsing.
+        """Prepare for parsing.
+
+        It might be: extracting data from the URL, clearing / replacing tags, etc.

-        For archive page - Substitute link with '.ukm' misspelled TLD.
-        For episode page - Generate index, parsed_at; parse episode number, etc.
+        Raises:
+            NotImplementedError: This method must be implemented.
         """
         raise NotImplementedError()

     def parse_dom_for_article_container(self) -> None:
-        """Parse DOM for <article> tag only."""
+        """Parse DOM for HTML's <article> tag only.
+
+        This is a common step for all parsers.
+
+        Raises:
+            NotEpisodeURLError: If the target page has no HTML's <article> tag.
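+
+        Example:
+            A sketch of the underlying idea rather than the exact
+            implementation (markup is illustrative):
+
+            .. code-block:: python
+
+                from bs4 import BeautifulSoup, SoupStrainer
+
+                only_article = SoupStrainer("article")
+                soup = BeautifulSoup(
+                    "<!DOCTYPE html><article>...</article>",
+                    "lxml",
+                    parse_only=only_article,
+                )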
@@ -344,18 +473,31 @@ def write_parsed_episodes_to_json(
     lep_objects: LepEpisodeList,
     json_path: str = "",
 ) -> None:
-    """Write list of LepEpisode objects to file."""
+    """Serialize list of episodes to JSON file.
+
+    Args:
+        lep_objects (LepEpisodeList): List of LepEpisode objects.
+        json_path (str): Path to JSON file. Defaults to empty string.
+    """
     if Path(json_path).is_dir():
         filepath = Path(json_path) / conf.DEFAULT_JSON_NAME
     else:
         filepath = Path(json_path)
     with open(filepath, "w") as outfile:
-        # json.dump(lep_objects, outfile, indent=4, cls=LepJsonEncoder)
         json.dump(lep_objects, outfile, separators=(",", ":"), cls=LepJsonEncoder)


 class LepParser(Lep):
-    """Base class for LEP archive parsers."""
+    """Base class for LEP parsers.
+
+    Args:
+        archive_obj (Archive): Archive instance.
+        url (str): Target page URL.
+        session (requests.Session, optional): Parsing session. Defaults to None.
+            If None, takes global session from :const:`lep.PROD_SES`.
+        log (LepLog, optional): Log instance to output parsing messages.
+            Defaults to None.
+    """

     def __init__(
         self,
@@ -375,30 +517,57 @@ def __init__(
             log (LepLog): Log instance of LepLog class where to output message.
         """
         super().__init__(session, log)
+
+        #: Archive instance.
         self.archive = archive_obj
+
+        #: Target page URL.
         self.url = url
+
+        #: Page content.
         self.content: str = ""
+
+        #: Parsed HTML as BeautifulSoup object.
         self.soup: BeautifulSoup = None
+
+        #: Final location of target URL (in case of redirects).
         self.final_location: str = self.url
+
+        #: URL getting status.
         self.is_url_ok: bool = False

     def get_url(self) -> None:
-        """Retrive text content of archive web page."""
+        """Retrieve target web page.
+
+        Method results are saved in attributes:
+
+        - content
+        - final_location
+        - is_url_ok
+        """
         get_result = Lep.get_web_document(self.url, self.session)
         self.content = get_result[0]
         self.final_location = get_result[1]
         self.is_url_ok = get_result[2]

     def do_pre_parsing(self) -> None:
-        """Extract useful data before and prepare for parsing.
+        """Prepare for parsing.
+
+        It might be: extracting data from the URL, clearing / replacing tags, etc.

-        For archive page - Substitute link with '.ukm' misspelled TLD.
-        For episode page - Generate index, parsed_at; parse episode number, etc.
+        Raises:
+            NotImplementedError: This method must be implemented.
         """
         raise NotImplementedError()

     def parse_dom_for_article_container(self) -> None:
-        """Parse DOM for <article> tag only."""
+        """Parse DOM for HTML's <article> tag only.
+
+        This is a common step for all parsers.
+
+        Raises:
+            NotEpisodeURLError: If target page has no HTML <article> tag.
+        """
         self.soup = BeautifulSoup(self.content, "lxml", parse_only=only_article_content)
         if len(self.soup) < 2:  # tag DOCTYPE always at [0] position
             self.lep_log.msg("No 'DOCTYPE' or 'article' tag", msg_lvl="CRITICAL")
@@ -408,15 +577,23 @@ def parse_dom_for_article_container(self) -> None:
             )

     def collect_links(self) -> None:
-        """Parse all links matching regex pattern."""
+        """Parse all links by the parser's own rules.
+
+        Raises:
+            NotImplementedError: This method must be implemented.
+        """
         raise NotImplementedError()

     def do_post_parsing(self) -> None:
-        """Finalize and process parsing results."""
+        """Finalize and process parsing results.
+
+        Raises:
+            NotImplementedError: This method must be implemented.
+        """
         raise NotImplementedError()

     def parse_url(self) -> None:
-        """Perform parsing."""
+        """Perform parsing steps."""
         self.get_url()
         self.do_pre_parsing()
         self.parse_dom_for_article_container()
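LepParser.parse_url() is a template method: the base class fixes the order of
the five steps while subclasses override the NotImplementedError ones. A toy
illustration of the flow (DemoParser and DemoArchiveParser are hypothetical
stand-ins, not classes from this codebase):

    class DemoParser:
        """Toy stand-in for LepParser showing the fixed step order."""

        def parse_url(self) -> None:
            self.get_url()
            self.do_pre_parsing()
            self.parse_dom_for_article_container()
            self.collect_links()
            self.do_post_parsing()

        def get_url(self) -> None:
            # The real class fetches the page over HTTP; we fake the content.
            self.content = "<html><article>demo</article></html>"

        def parse_dom_for_article_container(self) -> None:
            # The real class builds a BeautifulSoup object from self.content.
            self.soup = self.content

        def do_pre_parsing(self) -> None:
            raise NotImplementedError()

        def collect_links(self) -> None:
            raise NotImplementedError()

        def do_post_parsing(self) -> None:
            raise NotImplementedError()

    class DemoArchiveParser(DemoParser):
        def do_pre_parsing(self) -> None:
            # E.g., fix the '.ukm' misspelled TLD before parsing.
            self.content = self.content.replace(".ukm", ".uk")

        def collect_links(self) -> None:
            self.links = ["https://teacherluke.co.uk/2021/04/11/714-robin-from-hamburg/"]

        def do_post_parsing(self) -> None:
            pass

    DemoArchiveParser().parse_url()  # runs all five steps in the fixed order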
@@ -425,7 +602,16 @@ def parse_url(self) -> None:

 class ArchiveParser(LepParser):
-    """Parser object for archive page."""
+    """Parser for archive page.
+
+    Args:
+        archive_obj (Archive): Instance of Archive object
+            to store and use data in its container attributes.
+        url (str): URL for parsing.
+        session (requests.Session): Requests session object.
+            If None, get default global session.
+        log (LepLog): Log instance of LepLog class where to output message.
+    """

     def do_pre_parsing(self) -> None:
         """Substitute link with '.ukm' misspelled TLD in HTML content."""
@@ -434,8 +620,13 @@ def do_pre_parsing(self) -> None:
     def collect_links(self) -> None:
         """Parse all links matching episode URL and their texts.

-        Also remove duplicated links. If an archive page consists completely of
-        duplicated links - silently go further (as if there are no episodes at all).
+        Repeated links are ignored.
+        In the unlikely case that an archive page consists
+        **completely** of repeated links, the method silently
+        skips them (as if there were no episodes at all).
+
+        Raises:
+            NoEpisodeLinksError: If there are no episode links on archive page.
         """
         soup_a_only = BeautifulSoup(
             str(self.soup),
@@ -460,9 +651,9 @@ def collect_links(self) -> None:
     def remove_irrelevant_links(self) -> None:
         """Delete known irrelevant links from dictionary.

-        First, write irrelevant links into 'deleted_links'
-        before deletion them from dictionary.
-        Then rebuild dictionary skipping irrelevant links.
+        First, irrelevant links are saved into the 'deleted_links'
+        attribute before being deleted from the dictionary.
+        Then the dictionary is rebuilt, ignoring irrelevant links.
         """
         self.archive.deleted_links = {
             link
@@ -476,7 +667,7 @@ def remove_irrelevant_links(self) -> None:
     }

     def substitute_short_links(self) -> None:
-        """Paste final URL destination instead of short links."""
+        """Paste final URL location instead of short links."""
         for short, final in conf.SHORT_LINKS_MAPPING_DICT.items():
             # Rebuild dictionary changing only matched key
             self.archive.collected_links = {
@@ -491,7 +682,17 @@ def do_post_parsing(self) -> None:

 class EpisodeParser(LepParser):
-    """Parser object for episode page."""
+    """Parser for episode page.
+
+    Args:
+        archive_obj (Archive): Archive instance.
+        page_url (str): Target page URL.
+        session (requests.Session, optional): Parsing session. Defaults to None.
+            If None, takes global session from :const:`lep.PROD_SES`.
+        post_title (str): Link text for this episode.
+        log (LepLog, optional): Log instance to output parsing messages.
+            Defaults to None.
+    """

     def __init__(
         self,
@@ -503,12 +704,21 @@ def __init__(
     ) -> None:
         """Initialize EpisodeParser object."""
         super().__init__(archive_obj, page_url, session, log)
+
+        #: Episode instance.
         self.episode = LepEpisode()
         self.episode.post_title = post_title
+
+        #: Used indexes from archive instance.
         self.used_indexes = archive_obj.used_indexes

     def do_pre_parsing(self) -> None:
-        """Parse episode date, number, index."""
+        """Parse episode date, number, HTML title and generate index.
+
+        Raises:
+            NotEpisodeURLError: If URL does not contain date.
+            LepEpisodeNotFoundError: If URL is not available.
+        """
         self.episode.index = generate_post_index(self.final_location, self.used_indexes)
         if self.episode.index == 0:
             raise NotEpisodeURLError(self.final_location, "Can't parse episode number")
@@ -533,7 +743,10 @@ def do_pre_parsing(self) -> None:
             raise LepEpisodeNotFoundError(self.episode)

     def collect_links(self) -> None:
-        """Parse link(s) to episode audio(s)."""
+        """Parse link(s) to episode audio(s).
+
+        Also parse the publish datetime of the episode.
+        """
         self.episode.date = parse_post_publish_datetime(self.soup)
         self.episode.files["audios"] = parse_post_audio(self.soup)
         if not self.episode.files["audios"]:
@@ -542,5 +755,8 @@ def collect_links(self) -> None:
             self.episode.post_type = "AUDIO"

     def do_post_parsing(self) -> None:
-        """Post parsing actions for EpisodeParser."""
+        """Post parsing actions for EpisodeParser.
+
+        No actions, just pass.
+        """
         pass
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 6e53fca..19a8c96 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -21,7 +21,7 @@ def test_cli_prints_version(
     """It prints version."""
     result = run_cli_with_args(["--version"])
     assert result.exit_code == 0
-    assert "lep-downloader, version 3.0.0a" in result.output
+    assert "lep-downloader, version 3." in result.output


 def test_cli_when_no_such_command(
diff --git a/tests/test_cli_download.py b/tests/test_cli_download.py
index 9a41553..46919ac 100644
--- a/tests/test_cli_download.py
+++ b/tests/test_cli_download.py
@@ -94,6 +94,7 @@ def test_continue_prompt_yes(
     mp3_file1_mock: bytes,
     tmp_path: Path,
     run_cli_with_args: Any,
+    monkeypatch: MonkeyPatch,
 ) -> None:
     """It downloads files if user answers 'Yes'."""
     requests_mock.get(
@@ -104,6 +105,7 @@ def test_continue_prompt_yes(
         "https://traffic.libsyn.com/secure/teacherluke/703._Walaa_from_Syria_-_WISBOLEP_Competition_Winner_.mp3",  # noqa: E501,B950
         content=mp3_file1_mock,
     )
+    monkeypatch.chdir(tmp_path)

     result = run_cli_with_args(
         ["download", "-ep", "703", "-pdf", "-d", f"{tmp_path}"],
@@ -124,6 +126,7 @@ def test_continue_prompt_no(
     json_db_mock: str,
     tmp_path: Path,
     run_cli_with_args: Any,
+    monkeypatch: MonkeyPatch,
 ) -> None:
     """It exits if user answers 'No'."""
     requests_mock.get(
         conf.JSON_DB_URL,
         text=json_db_mock,
     )

+    monkeypatch.chdir(tmp_path)
+
     result = run_cli_with_args(["download", "-ep", "714"], input="No")
     assert "Do you want to continue? [y/N]: No\n" in result.output
     assert "Your answer is 'NO'. Exit." in result.output
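The monkeypatch.chdir(tmp_path) calls added above presumably isolate
relative-path side effects per test, which matters when tests run in
parallel. The idiom in a self-contained, hypothetical test (MonkeyPatch is
imported here from pytest, available since pytest 6.2):

    from pathlib import Path

    from pytest import MonkeyPatch

    def test_relative_writes_are_isolated(
        tmp_path: Path, monkeypatch: MonkeyPatch
    ) -> None:
        """Relative-path writes land in tmp_path, not in the repo cwd."""
        monkeypatch.chdir(tmp_path)
        Path("side_effect.txt").write_text("demo")
        assert (tmp_path / "side_effect.txt").exists()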
@@ -247,6 +252,33 @@ def test_filtering_for_one_day(
     assert expected_file_2.exists()


+def test_filtering_for_one_day_close_to_midnight(
+    requests_mock: rm_Mocker,
+    json_db_mock: str,
+    mp3_file1_mock: bytes,
+    tmp_path: Path,
+    run_cli_with_args: Any,
+) -> None:
+    """It downloads all episodes for a certain day (date close to midnight)."""
+    requests_mock.get(
+        conf.JSON_DB_URL,
+        text=json_db_mock,
+    )
+    requests_mock.get(
+        "https://traffic.libsyn.com/secure/teacherluke/714._Robin_from_Hamburg__WISBOLEP_Runner-Up.mp3",  # noqa: E501,B950
+        content=mp3_file1_mock,
+    )
+
+    run_cli_with_args(
+        ["download", "-S", "2021-04-11", "-E", "2021-04-11", "-q", "-d", f"{tmp_path}"]
+    )
+
+    expected_filename_1 = "[2021-04-11] # 714. Robin from Hamburg (WISBOLEP Runner-Up).mp3"  # noqa: E501,B950
+    expected_file_1 = tmp_path / expected_filename_1
+    assert len(list(tmp_path.iterdir())) == 1
+    assert expected_file_1.exists()
+
+
 def test_filtering_by_start_date(
     requests_mock: rm_Mocker,
     json_db_mock: str,
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 0ac443e..2acd2e5 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -912,7 +912,7 @@ def test_setting_episode_date_as_datetime() -> None:
     new_date = datetime(2021, 12, 24, 17, 18, 19, tzinfo=t_zone)
     ep = LepEpisode(date=new_date)
     as_str = ep.date.strftime(r"%Y-%m-%dT%H:%M:%S%z")
-    except_date = "2021-12-24T14:18:19+0000"
+    except_date = "2021-12-24T17:18:19+0300"
     assert as_str == except_date
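The corrected expected value follows from how aware datetimes format:
strftime() keeps the original wall time and UTC offset, while the old
expectation corresponds to a conversion to UTC. A quick check, assuming
t_zone is a fixed UTC+3 zone as in the test fixture:

    from datetime import datetime, timedelta, timezone

    t_zone = timezone(timedelta(hours=3))  # assumed definition of the test's zone
    new_date = datetime(2021, 12, 24, 17, 18, 19, tzinfo=t_zone)

    # An aware datetime formats with its own wall time and offset...
    assert new_date.strftime(r"%Y-%m-%dT%H:%M:%S%z") == "2021-12-24T17:18:19+0300"
    # ...whereas converting to UTC is what produced the old (wrong) value.
    utc = new_date.astimezone(timezone.utc)
    assert utc.strftime(r"%Y-%m-%dT%H:%M:%S%z") == "2021-12-24T14:18:19+0000"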