From 8115afd58dd485e34f3d1dec7e33fe103fb28d23 Mon Sep 17 00:00:00 2001
From: liwenjian-sensetime <109193776+liwenjian-sensetime@users.noreply.github.com>
Date: Wed, 19 Jun 2024 17:25:44 +0800
Subject: [PATCH 1/7] add badges (#19)

---
 README.ENG.md         | 5 +++++
 README.md             | 5 +++++
 docs/LazyLLM-logo.png | Bin 0 -> 67482 bytes
 3 files changed, 10 insertions(+)
 create mode 100644 docs/LazyLLM-logo.png

diff --git a/README.ENG.md b/README.ENG.md
index 41b27054..1ee46a16 100644
--- a/README.ENG.md
+++ b/README.ENG.md
@@ -1,9 +1,14 @@
+
+ +
+
 # LazyLLM: A Low-code Development Tool For Building Multi-agent LLMs Applications.
 
 [中文](README.md) | [EN](README.ENG.md)
 
 [![CI](https://github.com/LazyAGI/LazyLLM/actions/workflows/main.yml/badge.svg)](https://github.com/LazyAGI/LazyLLM/actions/workflows/main.yml)
 [![License](https://img.shields.io/badge/License-Apache_2.0-yellow.svg)](https://opensource.org/license/apache-2-0)
 [![GitHub star chart](https://img.shields.io/github/stars/LazyAGI/LazyLLM?style=flat-square)](https://star-history.com/#LazyAGI/LazyLLM)
+[![](https://dcbadge.vercel.app/api/server/cDSrRycuM6?compact=true&style=flat)](https://discord.gg/cDSrRycuM6)
 
 ## What is LazyLLM?
diff --git a/README.md b/README.md
index 33eb6b4e..522101d8 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,14 @@
+
+ +
+
 # LazyLLM: 低代码构建多Agent大模型应用的开发工具
 
 [中文](README.md) | [EN](README.ENG.md)
 
 [![CI](https://github.com/LazyAGI/LazyLLM/actions/workflows/main.yml/badge.svg)](https://github.com/LazyAGI/LazyLLM/actions/workflows/main.yml)
 [![License](https://img.shields.io/badge/License-Apache_2.0-yellow.svg)](https://opensource.org/license/apache-2-0)
 [![GitHub star chart](https://img.shields.io/github/stars/LazyAGI/LazyLLM?style=flat-square)](https://star-history.com/#LazyAGI/LazyLLM)
+[![](https://dcbadge.vercel.app/api/server/cDSrRycuM6?compact=true&style=flat)](https://discord.gg/cDSrRycuM6)
 
 ## 一、简介
diff --git a/docs/LazyLLM-logo.png b/docs/LazyLLM-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..a569d14fa395e430e368a282dc546d026f9b93d7
GIT binary patch
literal 67482
[... base85-encoded binary image data for docs/LazyLLM-logo.png (67482 bytes) elided ...]
zMs#vz#c-!)05hO@(-T?J9epX;Jc%hh8R=Xpzk8MRzohJUwd{$i8{A6kP>ouP1UKe> zU0I<#@o3=VtEFM0Ny&z~-!tawDf@Fzg5?nLe|@?ocJAKpYal_CuC1JeM!TpuGl6KY zxHTRl1@P;zbe#fp>%K65{8eh9PQ&|&;1b@}y4S>M&vX;);O>}e#gyUjTU@cUqYFYF z3Jpk59fVG0GXFgvHadSh8s8jGk$)|-(Z+?1#fOPJPk^*K3430!eUcaC9Ej4Ps)k6@ zvCfsZOp({Kk=l7&H~pSmReJh%y#&8eiRHmhcb21nA<&-^_!j&AsCKRO@OHjRO`}?6 zDcpRWN$Vn&4y>KT+|pGCNKBk;dop`txJcOcMf>)}NKizHkwuB)hYaE zvt^{RrN=Y6vzc7Kznom2jHW(oH#TB7GG?J$;|B*7jg9w>51ASp?*Rb9j-nzFTG!G_ z5pIUAt}-=W_impTCix@NV*bWC3EG{75v{5arpBkKh1 zaL!lKxGm=UL@b%3^SMp4;Jjwras#K<>TtU?ZpMPRW7;?0x*cds>(NrU zUinN}$xLavOnJFX3Cdgv%1rfrX1qRBlnx}!p8V~dS@Dlqsq!lUi!*&7lUgy4pP7!b z^|C>A89InYAj+1)iEiOSOJ~4OWJOO64;OU%2xZKd4>RlTyG#iC+fbSinF{phz1Vn# ze(eazI~*H3e8VOxmYbu_(6uceKpI4Y^}1#Hzb>dVkg@q<+3pM0Kz<(+t6rQ0KiyT!a#&&VSjoSk zN~F7VuqH!E!T2;Tth5ePuQZ8!OA_jyi4Bz!b1Ed})sR0yop)4VISxOD7zGRu9h6@U z)M3)*40P;&R@d0&*2JjF^^8v+A0FP`;Zt+F(IYbN`AkU&5MBFhNogzz-*uKYM6OO$ z3~oSwd;|<;aO*q;zD8yQMP|G zr*YHJyGkyjx}oOnFQ$^Y^Xl)l#YGk%#>vXc%E(yP((>|u=7S+t!o?*B2zc@zVF9J` zts7}SF>W30L`mZSvfhiAIq_))-}anlk(I@UiAsneLYvCihDC|>a}gSrz9UL~rk7b= z3J~)uH0H$$=7ov{<*Ef0>g6Q5WxoU()kIohk#It-gKc zJ9a?FQLqRCc1_2O(znqOjsUjX#!(^ql%V6hAYAhDYs~U?KMp?9gmi=o`k}+Ap;KM3 zGrJ-X#4O9@1Rx?Hlm-7R!7q9UuTf8JYik3nY=ii7(|yC*zO32#KMwCucfYwQ*EY2^ zQO1u$fc?&<+rKaTKpVZQqntyFK|{K6tqG9&l@Ku)ssvj{nOvRk$ePp4;Polp^$Pt1 z;%-i!=ZC(5!5+}bpTOs3cMEt9+u_mB+-%Da#LlyleU*K|A;4anAy)!F(4eq^Kg#gG zCWN05>4#DIe{TT}AnKI=>+yjDuvJQ6ef;0R-wN#n@V^88CfNVBdNAN;G$Db|?067M?^-rN>DH0e*lHoQY!!e literal 0 HcmV?d00001 From ee149a2712f1e132d2bbfb32adc2b31a1f570e40 Mon Sep 17 00:00:00 2001 From: YangChen134 Date: Wed, 19 Jun 2024 19:49:05 +0800 Subject: [PATCH 2/7] exports all examples (#22) Co-authored-by: Yang Chen --- lazyllm/docs/components.py | 3 +++ lazyllm/docs/flow.py | 16 ++++++++++++++-- lazyllm/docs/module.py | 22 ++++++++++++++++++++++ lazyllm/docs/tools.py | 8 ++++++++ lazyllm/docs/utils.py | 23 +++++++++++++++++++++-- 5 files changed, 68 insertions(+), 4 deletions(-) diff --git a/lazyllm/docs/components.py b/lazyllm/docs/components.py index 68d9ddbb..4cbe8054 100644 --- a/lazyllm/docs/components.py +++ b/lazyllm/docs/components.py @@ -77,6 +77,7 @@ def test_prompter(): ''') add_example('register', ['''\ +>>> import lazyllm >>> @lazyllm.component_register('mygroup') ... def myfunc(input): ... return input @@ -84,6 +85,7 @@ def test_prompter(): >>> lazyllm.mygroup.myfunc()(1) 1 ''', '''\ +>>> import lazyllm >>> @lazyllm.component_register.cmd('mygroup') ... def mycmdfunc(input): ... return f'echo {input}' @@ -648,6 +650,7 @@ def test_prompter(): ''') add_example('ChatPrompter', '''\ +>>> from lazyllm import ChatPrompter >>> p = ChatPrompter('hello world') >>> p.generate_prompt('this is my input') '<|start_system|>You are an AI-Agent developed by LazyLLM.hello world\\n\\n<|end_system|>\\n\\n\\n<|Human|>:\\nthis is my input\\n<|Assistant|>:\\n' diff --git a/lazyllm/docs/flow.py b/lazyllm/docs/flow.py index 03171188..055902dd 100644 --- a/lazyllm/docs/flow.py +++ b/lazyllm/docs/flow.py @@ -109,13 +109,14 @@ """) add_example('FlowBase.for_each', """\ +>>> import lazyllm >>> def test1(): print('1') ... >>> def test2(): print('2') ... >>> def test3(): print('3') ... 
->>> flow = pipeline(test1, pipeline(test2, test3)) +>>> flow = lazyllm.pipeline(test1, lazyllm.pipeline(test2, test3)) >>> flow.for_each(lambda x: callable(x), lambda x: print(x)) @@ -189,6 +190,7 @@ """) add_example('Parallel', '''\ +>>> import lazyllm >>> test1 = lambda a: return a + 1 >>> test2 = lambda a: return a * 4 >>> test3 = lambda a: return a / 2 @@ -233,6 +235,7 @@ """) add_example('Pipeline', """\ +>>> import lazyllm >>> ppl = lazyllm.pipeline( ... stage1=lambda x: x+1, ... stage2=lambda x: f'get {x}' @@ -325,10 +328,11 @@ ''') add_example('IFS', '''\ +>>> import lazyllm >>> cond = lambda x: x > 0 >>> tpath = lambda x: x * 2 >>> fpath = lambda x: -x ->>> ifs_flow = IFS(cond, tpath, fpath) +>>> ifs_flow = lazyllm.ifs(cond, tpath, fpath) >>> ifs_flow(10) 20 >>> ifs_flow(-5) @@ -380,7 +384,13 @@ """) add_example('Switch', """\ +>>> import lazyllm +>>> def is_positive(x): return x > 0 +... +>>> def is_negative(x): return x < 0 +... >>> switch = lazyllm.switch(is_positive, lambda x: 2 * x, is_negative, lambda x : -x, 'default', lambda x : '000') +>>> >>> switch(1) 2 >>> switch(0) @@ -439,6 +449,7 @@ """) add_example('Diverter', """\ +>>> import lazyllm >>> div = lazyllm.diverter(lambda x: x+1, lambda x: x*2, lambda x: -x) >>> div(1, 2, 3) (2, 4, -3) @@ -490,6 +501,7 @@ """) add_example('Warp', """\ +>>> import lazyllm >>> warp = lazyllm.warp(lambda x: x * 2) >>> warp(1, 2, 3, 4) (2, 4, 6, 8) diff --git a/lazyllm/docs/module.py b/lazyllm/docs/module.py index 55a30d99..6655224f 100644 --- a/lazyllm/docs/module.py +++ b/lazyllm/docs/module.py @@ -33,6 +33,7 @@ ''') add_example('ModuleBase', '''\ +>>> import lazyllm >>> class Module(lazyllm.module.ModuleBase): ... pass ... @@ -58,6 +59,7 @@ ''') add_example('ModuleBase.forward', '''\ +>>> import lazyllm >>> class MyModule(lazyllm.module.ModuleBase): ... def forward(self, input): ... return input + 1 @@ -75,6 +77,7 @@ ''') add_example('ModuleBase.start', '''\ +>>> import lazyllm >>> m = lazyllm.module.TrainableModule().deploy_method(deploy.dummy) >>> m.start() >>> m(1) @@ -90,6 +93,7 @@ ''') add_example('ModuleBase.restart', '''\ +>>> import lazyllm >>> m = lazyllm.module.TrainableModule().deploy_method(deploy.dummy) >>> m.restart() >>> m(1) @@ -179,6 +183,7 @@ ''') add_example('ModuleBase.eval', '''\ +>>> import lazyllm >>> class MyModule(lazyllm.module.ModuleBase): ... def forward(self, input): ... return f'reply for input' @@ -198,6 +203,7 @@ ''') add_example('ModuleBase._get_train_tasks', '''\ +>>> import lazyllm >>> class MyModule(lazyllm.module.ModuleBase): ... def _get_train_tasks(self): ... return lazyllm.pipeline(lambda : 1, lambda x: print(x)) @@ -215,6 +221,7 @@ ''') add_example('ModuleBase._get_deploy_tasks', '''\ +>>> import lazyllm >>> class MyModule(lazyllm.module.ModuleBase): ... def _get_deploy_tasks(self): ... return lazyllm.pipeline(lambda : 1, lambda x: print(x)) @@ -238,6 +245,7 @@ ''') add_example('ActionModule', '''\ +>>> import lazyllm >>> def myfunc(input): return input + 1 ... 
>>> class MyModule1(lazyllm.module.ModuleBase): @@ -356,15 +364,18 @@ add_example('TrainableModule', ['''\ ''', '''\ +>>> import lazyllm >>> m = lazyllm.module.TrainableModule().finetune_method(finetune.dummy).trainset('/file/to/path').deploy_method(None).mode('finetune') >>> m.update() INFO: (lazyllm.launcher) PID: dummy finetune!, and init-args is {} ''', '''\ ''', '''\ +>>> import lazyllm >>> m = lazyllm.module.TrainableModule().finetune_method(finetune.dummy).deploy_method(None).mode('finetune') >>> m.update() INFO: (lazyllm.launcher) PID: dummy finetune!, and init-args is {} ''', '''\ +>>> import lazyllm >>> m = lazyllm.module.TrainableModule().deploy_method(deploy.dummy).mode('finetune') >>> m.evalset([1, 2, 3]) >>> m.update() @@ -372,6 +383,7 @@ >>> m.eval_result ["reply for 1, and parameters is {'do_sample': False, 'temperature': 0.1}", "reply for 2, and parameters is {'do_sample': False, 'temperature': 0.1}", "reply for 3, and parameters is {'do_sample': False, 'temperature': 0.1}"] ''', '''\ +>>> import lazyllm >>> m = lazyllm.module.TrainableModule().finetune_method(finetune.dummy).deploy_method(None).mode('finetune') >>> m.update() INFO: (lazyllm.launcher) PID: dummy finetune!, and init-args is {} @@ -396,6 +408,7 @@ ''') add_example('UrlModule', '''\ +>>> import lazyllm >>> def demo(input): return input * 2 ... >>> s = lazyllm.ServerModule(demo, launcher=launchers.empty(sync=False)) @@ -431,6 +444,7 @@ ''') add_example('ServerModule', '''\ +>>> import lazyllm >>> def demo(input): return input * 2 ... >>> s = lazyllm.ServerModule(demo, launcher=launchers.empty(sync=False)) @@ -474,6 +488,7 @@ ''') add_example('TrialModule', '''\ +>>> import lazyllm >>> m = lazyllm.TrainableModule(lazyllm.Option(['b1', 'b2', 'b3']), 't').finetune_method(finetune.dummy, **dict(a=lazyllm.Option(['f1', 'f2']))).deploy_method(deploy.dummy).mode('finetune') >>> s = lazyllm.ServerModule(m, post=lambda x, *, ori: f'post2({x})') >>> t = lazyllm.TrialModule(s) @@ -505,6 +520,7 @@ ''') add_example('OnlineChatModule', '''\ +>>> import lazyllm >>> m = lazyllm.OnlineChatModule(source="sensenova", stream=True) >>> query = "Hello!" >>> resp = m(query) @@ -565,6 +581,7 @@ ''') add_example('OnlineEmbeddingModule', '''\ +>>> import lazyllm >>> m = lazyllm.OnlineEmbeddingModule(source="sensenova") >>> emb = m("hello world") >>> print(f"emb: {emb}") @@ -594,6 +611,9 @@ ''') add_example('OnlineChatModuleBase', '''\ +>>> import lazyllm +>>> from lazyllm.module import OnlineChatModuleBase +>>> from lazyllm.module.onlineChatModule.fileHandler import FileHandlerBase >>> class NewPlatformChatModule(OnlineChatModuleBase): ... def __init___(self, ... base_url: str = "", @@ -670,6 +690,8 @@ ''') add_example('OnlineEmbeddingModuleBase', ''' +>>> import lazyllm +>>> from lazyllm.module import OnlineEmbeddingModuleBase >>> class NewPlatformEmbeddingModule(OnlineEmbeddingModuleBase): ... def __init__(self, ... 
embed_url: str = '', diff --git a/lazyllm/docs/tools.py b/lazyllm/docs/tools.py index 36aff1e2..5f3c7ff3 100644 --- a/lazyllm/docs/tools.py +++ b/lazyllm/docs/tools.py @@ -33,6 +33,8 @@ """) add_example('Document', r""" + >>> import lazyllm + >>> from lazyllm.tools.rag.docment import Document >>> m = lazyllm.OnlineEmbeddingModule(source="glm") >>> documents = Document(dataset_path='your_doc_path', embed=m, create_ui=False) """) @@ -65,6 +67,9 @@ """) add_example('Reranker', r""" + >>> import lazyllm + >>> from lazyllm.tools.rag.base import Reranker, Retriever + >>> from lazyllm.tools.rag.docment import Document >>> m = lazyllm.OnlineEmbeddingModule(source="glm") >>> documents = Document(dataset_path='your_doc_path', embed=m, create_ui=False) >>> rm = Retriever(documents, similarity='chinese_bm25', parser='SentenceDivider', similarity_top_k=6) @@ -130,6 +135,9 @@ """) add_example('Retriever', r""" + >>> import lazyllm + >>> from lazyllm.tools.rag.base import Retriever + >>> from lazyllm.tools.rag.docment import Document >>> m = lazyllm.OnlineEmbeddingModule(source="glm") >>> documents = Document(dataset_path='your_doc_path', embed=m, create_ui=False) >>> rm = Retriever(documents, similarity='chinese_bm25', parser='SentenceDivider', similarity_top_k=6) diff --git a/lazyllm/docs/utils.py b/lazyllm/docs/utils.py index 1557e29c..8655fb3e 100644 --- a/lazyllm/docs/utils.py +++ b/lazyllm/docs/utils.py @@ -22,6 +22,22 @@ } ''' +all_examples = [] + +def get_all_examples(): # Examples are not always exported, so process them in case of need. + result = [] + for example in all_examples: + if len(example.strip()) == 0: continue + example_lines = [] + code_lines = example.splitlines() + for code_line in code_lines: + if code_line.strip().startswith('>>>') or code_line.strip().startswith('...'): + example_lines.append(code_line.strip()[4:]) + else: + if len(code_line.strip()) != 0: example_lines.append("# " + code_line) + result.append("\n".join(example_lines)) + return result + lazyllm.config.add('language', str, 'CHINESE', 'LANGUAGE') def add_doc(obj_name, docstr, module, append=''): @@ -58,9 +74,12 @@ def add_english_doc(obj_name, docstr, module=lazyllm): def add_example(obj_name, docstr, module=lazyllm): if isinstance(docstr, str): - docstr = '\n'.join([f' {d}' for d in docstr.split('\n')]) + docstr = "\n".join([f' {d}' for d in docstr.split('\n')]) + all_examples.append(docstr) else: - docstr = ['\n'.join([f' {d}' for d in doc.split('\n')]) for doc in docstr] + docstr = ["\n".join([f' {d}' for d in doc.split('\n')]) for doc in docstr] + all_examples.extend(docstr) + if lazyllm.config['language'].upper() == 'CHINESE': add_doc(obj_name, docstr, module, '\n\nExample::\n') if lazyllm.config['language'].upper() == 'ENGLISH': From 2c0b94e217b938a6c7e46e74def3356b5493007d Mon Sep 17 00:00:00 2001 From: SunXiaoye <31361630+JingofXin@users.noreply.github.com> Date: Wed, 19 Jun 2024 19:51:01 +0800 Subject: [PATCH 3/7] Add auto path for data and model (#21) --- lazyllm/components/utils/downloader/model_downloader.py | 1 + lazyllm/module/module.py | 2 +- lazyllm/tools/rag/docment.py | 6 ++++++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/lazyllm/components/utils/downloader/model_downloader.py b/lazyllm/components/utils/downloader/model_downloader.py index 35238d94..13dc7f31 100644 --- a/lazyllm/components/utils/downloader/model_downloader.py +++ b/lazyllm/components/utils/downloader/model_downloader.py @@ -8,6 +8,7 @@ 'MODEL_CACHE_DIR') lazyllm.config.add('model_path', str, '', 'MODEL_PATH') 
 lazyllm.config.add('model_source_token', str, '', 'MODEL_SOURCE_TOKEN')
+lazyllm.config.add('data_path', str, '', 'DATA_PATH')
 
 
 class ModelDownloader():
diff --git a/lazyllm/module/module.py b/lazyllm/module/module.py
index 734e5b24..3ebc6496 100644
--- a/lazyllm/module/module.py
+++ b/lazyllm/module/module.py
@@ -379,7 +379,7 @@ class TrainableModule(UrlModule):
     __enable_request__ = False
 
     def __init__(self, base_model: Option = '', target_path='', *, stream=False, return_trace=False):
-        self.base_model = base_model
+        self.base_model = ModelDownloader(lazyllm.config['model_source']).download(base_model)
         self._stop_words = None
         super().__init__(url=None, stream=stream, meta=TrainableModule, return_trace=return_trace)
         # Fake base_model and target_path for dummy
diff --git a/lazyllm/tools/rag/docment.py b/lazyllm/tools/rag/docment.py
index 68fa0108..43739784 100644
--- a/lazyllm/tools/rag/docment.py
+++ b/lazyllm/tools/rag/docment.py
@@ -1,3 +1,5 @@
+import os
+
 import lazyllm
 from lazyllm import ModuleBase, ServerModule
 
@@ -9,6 +11,10 @@ class Document(ModuleBase):
 
     def __init__(self, dataset_path: str, embed, create_ui: bool = True, launcher=None):
         super().__init__()
+        if not os.path.exists(dataset_path):
+            default_path = os.path.join(lazyllm.config['data_path'], dataset_path)
+            if os.path.exists(default_path):
+                dataset_path = default_path
         self._create_ui = create_ui
         launcher = launcher if launcher else lazyllm.launchers.remote(sync=False)
 
From 47af82727a0462e5bc6aaa9d60a898d8a4c850cc Mon Sep 17 00:00:00 2001
From: wangzhihong
Date: Thu, 20 Jun 2024 11:41:47 +0800
Subject: [PATCH 4/7] add `aslist`, `astuple` and `join` for parallel and warp
 (#23)

---
 lazyllm/docs/flow.py | 61 +++++++++++++++++++++++++++++++++++++++++---
 lazyllm/flow.py      | 47 ++++++++++++++++++++++++----------
 2 files changed, 91 insertions(+), 17 deletions(-)

diff --git a/lazyllm/docs/flow.py b/lazyllm/docs/flow.py
index 055902dd..009edb57 100644
--- a/lazyllm/docs/flow.py
+++ b/lazyllm/docs/flow.py
@@ -153,7 +153,27 @@
 
 .. property::
     asdict
 
-    标记Parellel,使得Parallel每次调用时的返回值由tuple变为dict。当使用 ``asdict`` 时,请务必保证parallel的元素被取了名字,例如: ``parallel(name=value)`` 。
+    标记Parallel,使得Parallel每次调用时的返回值由package变为dict。当使用 ``asdict`` 时,请务必保证parallel的元素被取了名字,例如: ``parallel(name=value)`` 。
+
+.. property::
+    tuple
+
+    标记Parallel,使得Parallel每次调用时的返回值由package变为tuple。
+
+.. property::
+    list
+
+    标记Parallel,使得Parallel每次调用时的返回值由package变为list。
+
+.. property::
+    sum
+
+    标记Parallel,使得Parallel每次调用时的返回值做一次累加。
+
+.. function::
+    join(self, string)
+
+    标记Parallel,使得Parallel每次调用时的返回值通过 ``string`` 做一次join。
 """)
 
 add_english_doc('Parallel', """\
@@ -187,19 +207,52 @@
 asdict
 
     Tag ``Parallel`` so that the return value of each call to ``Parallel`` is changed from a tuple to a dict. When using ``asdict``, make sure that the elements of ``parallel`` are named, for example: ``parallel(name=value)``.
+
+.. property::
+    tuple
+
+    Mark Parallel so that the return value of Parallel changes from package to tuple each time it is called.
+
+.. property::
+    list
+
+    Mark Parallel so that the return value of Parallel changes from package to list each time it is called.
+
+.. property::
+    sum
+
+    Mark Parallel so that the return value of Parallel is accumulated each time it is called.
+
+.. function::
+    join(self, string)
+
+    Mark Parallel so that the return value of Parallel is joined by ``string`` each time it is called.
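The doctests added below exercise `asdict`, `astuple`, `aslist` and `join`, but not `sum`. To make the `sum` semantics above concrete, here is a minimal sketch; it assumes a checkout with this patch applied, and the expected outputs follow from `sum(o, type(o[0])())` in the new `_post_process`:

```python
import lazyllm

inc = lambda x: x + 1   # branch 1
quad = lambda x: x * 4  # branch 2
half = lambda x: x / 2  # branch 3

# Without a post-processor, Parallel returns a package of branch results.
print(lazyllm.parallel(inc, quad, half)(1))      # (2, 4, 0.5)

# `sum` folds the branch results together, starting from type(first_result)().
print(lazyllm.parallel(inc, quad, half).sum(1))  # 6.5
```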
 """)
 
 add_example('Parallel', '''\
 >>> import lazyllm
->>> test1 = lambda a: return a + 1
->>> test2 = lambda a: return a * 4
->>> test3 = lambda a: return a / 2
+>>> test1 = lambda a: a + 1
+>>> test2 = lambda a: a * 4
+>>> test3 = lambda a: a / 2
 >>> ppl = lazyllm.parallel(test1, test2, test3)
 >>> ppl(1)
 (2, 4, 0.5)
 >>> ppl = lazyllm.parallel(a=test1, b=test2, c=test3)
 >>> ppl(1)
 {'a': 2, 'b': 4, 'c': 0.5}
+>>> ppl = lazyllm.parallel(a=test1, b=test2, c=test3).asdict
+>>> ppl(2)
+{'a': 3, 'b': 8, 'c': 1.0}
+>>> ppl = lazyllm.parallel(a=test1, b=test2, c=test3).astuple
+>>> ppl(-1)
+(0, -4, -0.5)
+>>> ppl = lazyllm.parallel(a=test1, b=test2, c=test3).aslist
+>>> ppl(0)
+[1, 0, 0.0]
+>>> ppl = lazyllm.parallel(a=test1, b=test2, c=test3).join('\\n')
+>>> ppl(1)
+'2\\n4\\n0.5'
 ''')
 
 add_chinese_doc('Pipeline', """\
diff --git a/lazyllm/flow.py b/lazyllm/flow.py
index 50ed0205..24dd1d11 100644
--- a/lazyllm/flow.py
+++ b/lazyllm/flow.py
@@ -3,6 +3,8 @@
 from lazyllm import Thread, ReadOnlyWrapper, LOG
 from lazyllm import LazyLlmRequest, LazyLlmResponse, ReqResHelper
 from .common.common import _MetaBind
+from functools import partial
+from enum import Enum
 import types
 import inspect
 import threading
@@ -218,24 +220,37 @@ def _hook(v): _barr.impl = v
 # input -> module21 -> ... -> module2N -> out2 -> (out1, out2, out3)
 #       \> module31 -> ... -> module3N -> out3 /
 class Parallel(LazyLLMFlowsBase):
+
+    class PostProcessType(Enum):
+        NONE = 0
+        DICT = 1
+        TUPLE = 2
+        LIST = 3
+        SUM = 4
+        JOIN = 5
+
     def __init__(self, *args, _scatter=False, _concurrent=True, auto_capture=False, **kw):
         super().__init__(*args, **kw, auto_capture=auto_capture)
-        self._return_dict = False
-        self._sum_result = False
+        self._post_process_type = Parallel.PostProcessType.NONE
+        self._post_process_args = None
         self._concurrent = _concurrent
         self._scatter = _scatter
 
-    @property
-    def asdict(self):
-        assert not self._sum_result, 'Cannor set asdict and sum at the same time'
-        self._return_dict = True
+    @staticmethod
+    def _set_status(self, type, args=None):
+        assert self._post_process_type is Parallel.PostProcessType.NONE, 'Cannot set post process twice'
+        self._post_process_type = type
+        self._post_process_args = args
         return self
 
-    @property
-    def sum(self):
-        assert not self._return_dict, 'Cannor set asdict and sum at the same time'
-        self._sum_result = True
-        return self
+    asdict = property(partial(_set_status, type=PostProcessType.DICT))
+    astuple = property(partial(_set_status, type=PostProcessType.TUPLE))
+    aslist = property(partial(_set_status, type=PostProcessType.LIST))
+    sum = property(partial(_set_status, type=PostProcessType.SUM))
+
+    def join(self, string):
+        assert isinstance(string, str), 'argument of join should be str'
+        return Parallel._set_status(self, type=Parallel.PostProcessType.JOIN, args=string)
 
     @classmethod
     def sequential(cls, *args, **kw):
@@ -265,11 +280,17 @@ def _run(self, input, items=None):
 
     def _post_process(self, output):
         o = output.messages if isinstance(output, LazyLlmResponse) else output
-        if self._return_dict:
+        if self._post_process_type == Parallel.PostProcessType.DICT:
             assert self._item_names, 'Item name should be set when you want to return dict.'
o = {k: v for k, v in zip(self._item_names, o)} - elif self._sum_result: + elif self._post_process_type == Parallel.PostProcessType.TUPLE: + o = tuple(o) + elif self._post_process_type == Parallel.PostProcessType.LIST: + o = list(o) + elif self._post_process_type == Parallel.PostProcessType.SUM: o = sum(o, type(o[0])()) + elif self._post_process_type == Parallel.PostProcessType.JOIN: + o = self._post_process_args.join([str(i) for i in o]) if isinstance(output, LazyLlmResponse): output.messages = o return output From 42162021b08bbfb4c2f5c50783a3973e768988ba Mon Sep 17 00:00:00 2001 From: wangjian052163 Date: Thu, 20 Jun 2024 19:53:12 +0800 Subject: [PATCH 5/7] Online chat formatter (#8) Co-authored-by: wangjian --- README.ENG.md | 20 ++-- README.md | 20 ++-- docs/source/api/components.rst | 15 +++ docs/source/best_practice/prompt.rst | 16 ++-- lazyllm/__init__.py | 4 +- lazyllm/common/common.py | 6 +- lazyllm/common/logger.py | 2 +- lazyllm/components/__init__.py | 6 +- lazyllm/components/formatter/__init__.py | 10 ++ lazyllm/components/formatter/formatterBase.py | 50 ++++++++++ lazyllm/components/formatter/jsonFormatter.py | 57 ++++++++++++ lazyllm/components/prompter/alpacaPrompter.py | 12 ++- lazyllm/components/prompter/builtinPrompt.py | 38 ++++++-- lazyllm/components/prompter/chatPrompter.py | 10 +- lazyllm/docs/components.py | 91 +++++++++++++++++-- lazyllm/module/module.py | 2 +- .../onlineChatModule/onlineChatModule.py | 6 +- .../onlineChatModule/onlineChatModuleBase.py | 84 +++++++++++++++-- .../onlineChatModule/sensenovaModule.py | 37 +++++--- 19 files changed, 404 insertions(+), 82 deletions(-) create mode 100644 lazyllm/components/formatter/__init__.py create mode 100644 lazyllm/components/formatter/formatterBase.py create mode 100644 lazyllm/components/formatter/jsonFormatter.py diff --git a/README.ENG.md b/README.ENG.md index 1ee46a16..2a17f6a8 100644 --- a/README.ENG.md +++ b/README.ENG.md @@ -93,9 +93,8 @@ mweb = lazyllm.WebModule(ppl, port=23456).start().wait() ```python import lazyllm -from lazyllm import pipeline, parallel, Identity, warp, package -import time -import re, json +from lazyllm import pipeline, warp, bind +from lazyllm.components.formatter import JsonFormatter toc_prompt=""" You are now an intelligent assistant. Your task is to understand the user's input and convert the outline into a list of nested dictionaries. Each dictionary contains a `title` and a `describe`, where the `title` should clearly indicate the level using Markdown format, and the `describe` is a description and writing guide for that section. @@ -134,19 +133,18 @@ This is the expanded content for writing. 
Receive as follows: """ + +writer_prompt = {"system": completion_prompt, "user": '{"title": {title}, "describe": {describe}}'} ``` ```python -t1 = lazyllm.OnlineChatModule(source="openai", stream=False, prompter=ChatPrompter(instruction=toc_prompt)) -t2 = lazyllm.OnlineChatModule(source="openai", stream=False, prompter=ChatPrompter(instruction=completion_prompt)) - -spliter = lambda s: tuple(eval(re.search(r'\[\s*\{.*\}\s*\]', s['message']['content'], re.DOTALL).group())) -writter = pipeline(lambda d: json.dumps(d, ensure_ascii=False), t2, lambda d : d['message']['content']) -collector = lambda dict_tuple, repl_tuple: "\n".join([v for d in [{**d, "describe": repl_tuple[i]} for i, d in enumerate(dict_tuple)] for v in d.values()]) -m = pipeline(t1, spliter, parallel(Identity, warp(writter)), collector) +with pipeline() as ppl: + ppl.outline_writer = lazyllm.OnlineChatModule(source="openai", stream=False).formatter(JsonFormatter()).prompt(toc_prompt) + ppl.story_generater = warp(lazyllm.OnlineChatModule(source="openai", stream=False).prompt(writer_prompt)) + ppl.synthesizer = (lambda *storys, outlines: "\n".join([f"{o['title']}\n{s}" for s, o in zip(storys, outlines)])) | bind(outlines=ppl.outline_writer) -print(m({'query': 'Please help me write an article about the application of artificial intelligence in the medical field.'})) +print(ppl({'query': 'Please help me write an article about the application of artificial intelligence in the medical field.'})) ``` ## What can LazyLLM do diff --git a/README.md b/README.md index 522101d8..3d88235c 100644 --- a/README.md +++ b/README.md @@ -90,9 +90,8 @@ mweb = lazyllm.WebModule(ppl, port=23456).start().wait() ```python import lazyllm -from lazyllm import pipeline, parallel, Identity, warp, package -import time -import re, json +from lazyllm import pipeline, warp, bind +from lazyllm.components.formatter import JsonFormatter toc_prompt=""" 你现在是一个智能助手。你的任务是理解用户的输入,将大纲以列表嵌套字典的列表。每个字典包含一个 `title` 和 `describe`,其中 `title` 中需要用Markdown格式标清层级,`describe` `describe` 是对该段的描述和写作指导。 @@ -129,19 +128,18 @@ completion_prompt=""" 接收如下: """ + +writer_prompt = {"system": completion_prompt, "user": '{"title": {title}, "describe": {describe}}'} ``` ```python -t1 = lazyllm.OnlineChatModule(source="openai", stream=False, prompter=ChatPrompter(instruction=toc_prompt)) -t2 = lazyllm.OnlineChatModule(source="openai", stream=False, prompter=ChatPrompter(instruction=completion_prompt)) - -spliter = lambda s: tuple(eval(re.search(r'\[\s*\{.*\}\s*\]', s['message']['content'], re.DOTALL).group())) -writter = pipeline(lambda d: json.dumps(d, ensure_ascii=False), t2, lambda d : d['message']['content']) -collector = lambda dict_tuple, repl_tuple: "\n".join([v for d in [{**d, "describe": repl_tuple[i]} for i, d in enumerate(dict_tuple)] for v in d.values()]) -m = pipeline(t1, spliter, parallel(Identity, warp(writter)), collector) +with pipeline() as ppl: + ppl.outline_writer = lazyllm.OnlineChatModule(source="openai", stream=False).formatter(JsonFormatter()).prompt(toc_prompt) + ppl.story_generater = warp(lazyllm.OnlineChatModule(source="openai", stream=False).prompt(writer_prompt)) + ppl.synthesizer = (lambda *storys, outlines: "\n".join([f"{o['title']}\n{s}" for s, o in zip(storys, outlines)])) | bind(outlines=ppl.outline_writer) -print(m({'query':'请帮我写一篇关于人工智能在医疗领域应用的文章。'})) +print(ppl({'query':'请帮我写一篇关于人工智能在医疗领域应用的文章。'})) ``` ## 四、功能点 diff --git a/docs/source/api/components.rst b/docs/source/api/components.rst index 82505a3f..d2cff17e 100644 --- a/docs/source/api/components.rst 
+++ b/docs/source/api/components.rst @@ -60,3 +60,18 @@ ModelDownloader .. autoclass:: lazyllm.components.ModelDownloader :members: :exclude-members: + +Formatter +========== + +.. autoclass:: lazyllm.components.formatter.LazyLLMFormatterBase + :members: + :exclude-members: + +.. autoclass:: lazyllm.components.JsonFormatter + :members: + :exclude-members: + +.. autoclass:: lazyllm.components.EmptyFormatter + :members: + :exclude-members: diff --git a/docs/source/best_practice/prompt.rst b/docs/source/best_practice/prompt.rst index 05356a9d..9f7058a1 100644 --- a/docs/source/best_practice/prompt.rst +++ b/docs/source/best_practice/prompt.rst @@ -53,9 +53,10 @@ LazyLLM Prompter的设计思路 - PrompterTemplate中可选的字段有: - system: 系统提示,一般会读取模型的归属信息并进行设置,如不设置默认为 ``You are an AI-Agent developed by LazyLLM.`` 。 - - instruction: 任务指令,由 ``InstructionTemplate`` 拼接用户的输入得到。这个是应用开发者需要着重了解的字段。 + - instruction: 任务指令,由 ``InstructionTemplate`` 拼接用户的输入得到。这个是应用开发者需要着重了解的字段。如果instruction是字符串,则默认是系统指令,如果是字典,且其键值只能是 ``system`` 和 ``user`` 。``system`` 指定的是系统级指令, ``user`` 指定的是用户级指令。 - history: 历史对话,由用户的输入得到,格式为 ``[[a, b], [c, d]]`` 或 ``[{"role": "user", "content": ""}, {"role": "assistant", "content": ""}]`` - tools: 可以使用的工具,在构造 ``prompter`` 时传入或者由用户使用时传入,当构造 ``prompter`` 时定义了工具之后,将禁止用户使用时再次传入。格式为 ``[{"type": "function", "function": {"name": "", "description": "", "parameters": {}, "required": []}]`` + - user: 用户级指令,可选指令,由用户通过instruction指定。 - sos: ``start of system`` , 标志着系统提示的开始,该符号由模型填入,开发者和用户均无需考虑 - eos: ``end of system`` , 标志着系统提示的结束,该符号由模型填入,开发者和用户均无需考虑 - soh: ``start of human`` , 标志着用户输入的开始,常用于多轮对话中作为分隔符。该符号由模型填入,开发者和用户均无需考虑 @@ -63,8 +64,8 @@ LazyLLM Prompter的设计思路 - soa: ``start of assistant`` , 标志着模型输出的开始,常用于多轮对话中作为分隔符。该符号由模型填入,开发者和用户均无需考虑 - eoa: ``end of assistant`` , 标志着模型输出的结束,常用于多轮对话中作为分隔符。该符号由模型填入,开发者和用户均无需考虑 - ``TrainableModule`` 所使用的内置的Prompt的拼接规则如下: - - AlpacaPrompter: ``{system}\n{instruction}\n{tools}### Response:\n`` - - ChatPrompter: ``{sos}{system}{instruction}{tools}{eos}\n\n{history}\n{soh}\n{input}\n{eoh}{soa}\n`` + - AlpacaPrompter: ``{system}\n{instruction}\n{tools}\n{user}### Response:\n`` + - ChatPrompter: ``{sos}{system}{instruction}{tools}{eos}\n\n{history}\n{soh}\n{user}{input}\n{eoh}{soa}\n`` - ``OnlineChatModule`` 的输出格式为: ``dict(messages=[{"role": "system", "content": ""}, {"role": "user", "content": ""}, ...], tools=[])`` .. note:: @@ -74,7 +75,7 @@ LazyLLM Prompter的设计思路 **InstructionTemplate**: 每个Prompter内置的,用于结合用户输入的 ``instruction`` ,产生最终的 ``instruction`` 的模板。 ``InstructionTemplate`` 中的用到的2个字段是: -- ``instruction`` : 由开发者在构造 ``Prompter`` 时传入,可带若干个待填充的槽位,用于填充用户的输入。 +- ``instruction`` : 由开发者在构造 ``Prompter`` 时传入,可带若干个待填充的槽位,用于填充用户的输入。或者指定系统级指令和用户级指令,当指定用户级指令时,需要使用字典类型,且键值为 ``user`` 和 ``system`` 。 - ``extro_keys`` : 需要用户调用大模型时额外提供的信息,有开发者在构造 ``Prompter`` 时传入,会自动转换成 ``instruction`` 中的槽位。 .. note:: @@ -105,11 +106,11 @@ Prompt生成过程解析 "Below is an instruction that describes a task, paired with extra messages such as input that provides " "further context if possible. Write a response that appropriately completes the request.\\n\\n ### " "Instruction:\\n 你是一个由LazyLLM开发的知识问答助手,你的任务是根据提供的上下文信息来回答用户的问题。上下文信息是背景," - "用户的问题是输入, 现在请你做出回答。### Response:\\n}" + "用户的问题是问题, 现在请你做出回答。### Response:\\n}" 4. ``AlpacaPrompter`` 读取 ``system`` 和 ``tools`` 字段,其中 ``system`` 字段由 ``Module`` 设置,而 ``tools`` 字段则会在后面的 :ref:`bestpractice.prompt.tools` 一节中介绍。 5. 
如果 ``prompter`` 的结果用于线上模型( ``OnlineChatModule`` ),则不会再进一步拼接 ``PromptTemplate`` ,而是会直接得到一个dict,即 ``{'messages': [{'role': 'system', 'content': 'You are an AI-Agent developed by LazyLLM.\nBelow is an instruction that describes a task, paired with extra messages such as input that provides further context if possible. Write a response that appropriately completes the request.\n\n ### Instruction:\n你是一个由LazyLLM开发的知识问答助手,你的任务是根据提供的上下文信息来回答用户的问题。上下文信息是背景,用户的问题是输入,现在请你做出回答。\n\n'}, {'role': 'user', 'content': ''}]}`` -6. 如果 ``prompter`` 的结果用于线下模型( ``TrainableModule`` ),则会通过 ``PromptTemplate`` 得到最终的结果: ``You are an AI-Agent developed by LazyLLM.\nBelow is an instruction that describes a task, paired with extra messages such as input that provides further context if possible. Write a response that appropriately completes the request.\n\n ### Instruction:\n你是一个由LazyLLM开发的知识问答助手,你的任务是根据提供的上下文信息来回答用户的问题。上下文信息是背景,用户的问题是输入,现在请你做出回答。\n\n\n### Response:\n`` +6. 如果 ``prompter`` 的结果用于线下模型( ``TrainableModule`` ),则会通过 ``PromptTemplate`` 得到最终的结果: ``You are an AI-Agent developed by LazyLLM.\nBelow is an instruction that describes a task, paired with extra messages such as input that provides further context if possible. Write a response that appropriately completes the request.\n\n ### Instruction:\n你是一个由LazyLLM开发的知识问答助手,你的任务是根据提供的上下文信息来回答用户的问题。上下文信息是背景,用户的问题是问题,现在请你做出回答。\n\n\n### Response:\n`` 定义和使用Prompter ------------------------- @@ -153,6 +154,7 @@ Query为string,而非dict - 当使用 ``ChatPrompter`` 时,不同于 ``AlpacaPrompter`` ,在 ``instruction`` 中定义槽位不是必须的。 - 如果不定义槽位,则输入会放到对话中作为用户的输入,在 ```` 和 ```` 之间。 - 如果像 ``AlpacaPrompter`` 一样定义了槽位,也可以任意取一个名字,此时输入会放到 ```` 字段中。 + - 如果 ``instruction`` 中指定了系统级指令和用户级指令,则在拼接完成后,系统级指令放在prompt_template中的{instruction}位置,用户级指令放在{user}位置。 .. _bestpractice.prompt.tools: @@ -286,4 +288,4 @@ Query为string,而非dict - ``TrainableModule`` 需要手动调用 ``start`` 以启动服务,想了解更多关于 ``TrainableModule`` 的用法,可以参考 :ref:`api.module` LazyLLM中内置的场景Prompt -------------------------- \ No newline at end of file +------------------------- diff --git a/lazyllm/__init__.py b/lazyllm/__init__.py index 080b6b97..4486e124 100644 --- a/lazyllm/__init__.py +++ b/lazyllm/__init__.py @@ -7,7 +7,8 @@ Loop as loop, Switch as switch, IFS as ifs, Warp as warp) from .components import (LazyLLMDataprocBase, LazyLLMFinetuneBase, LazyLLMDeployBase, LazyLLMValidateBase, register as component_register, Prompter, - AlpacaPrompter, ChatPrompter, FastapiApp) + AlpacaPrompter, ChatPrompter, FastapiApp, JsonFormatter) + from .module import (ModuleBase, UrlModule, TrainableModule, ActionModule, ServerModule, TrialModule, register as module_register, OnlineChatModule, OnlineEmbeddingModule) @@ -33,6 +34,7 @@ 'AlpacaPrompter', 'ChatPrompter', 'FastapiApp', + 'JsonFormatter', # flow 'LazyLLMFlowsBase', # pipeline, parallel diff --git a/lazyllm/common/common.py b/lazyllm/common/common.py index 637b64d8..a6fb5114 100644 --- a/lazyllm/common/common.py +++ b/lazyllm/common/common.py @@ -334,11 +334,11 @@ class LazyLlmRequest(struct): def split(self, flag=None): if flag is None: - assert len(self.kwargs) == 0 and isinstance(self.input, tuple), ( + assert len(self.kwargs) == 0 and isinstance(self.input, (tuple, list)), ( f'Only tuple input can be split automatically, your input is {self.input} <{type(self.input)}>') return [LazyLlmRequest(input=inp, global_parameters=self.global_parameters) for inp in self.input] elif isinstance(flag, int): - assert len(self.kwargs) == 0 and isinstance(self.input, tuple), ( + assert len(self.kwargs) == 0 and 
isinstance(self.input, (tuple, list)), ( f'Only tuple input can be split automatically, your input is {self.input} <{type(self.input)}>') assert flag == len(self.input), 'input size mismatch with split number' return [LazyLlmRequest(input=inp, global_parameters=self.global_parameters) for inp in self.input] @@ -346,7 +346,7 @@ def split(self, flag=None): if isinstance(self.input, dict): assert len(self.kwargs) == 0, 'Cannot provived input and kwargs at the same time for split' d = self.input - elif isinstance(self.input, tuple): + elif isinstance(self.input, (tuple, list)): return self.split(len(flag)) else: assert not self.input, 'Cannot provived input and kwargs at the same time for split' diff --git a/lazyllm/common/logger.py b/lazyllm/common/logger.py index 8a9cc15e..9f951f46 100644 --- a/lazyllm/common/logger.py +++ b/lazyllm/common/logger.py @@ -18,7 +18,7 @@ "log_format", str, "{process}: {time:YYYY-MM-DD HH:mm:ss} {extra[name]} " - "{level}: ({name}) {message}", + "{level}: ({name}:{line}) {message}", "LOG_FORMAT", ) lazyllm.config.add("log_dir", str, "~/.lazyllm", "LOG_DIR") diff --git a/lazyllm/components/__init__.py b/lazyllm/components/__init__.py index 7a93c8a0..183feafa 100644 --- a/lazyllm/components/__init__.py +++ b/lazyllm/components/__init__.py @@ -6,6 +6,7 @@ from .validate import LazyLLMValidateBase from .auto import AutoDeploy, AutoFinetune from .utils import ModelDownloader +from .formatter import FormatterBase, EmptyFormatter, JsonFormatter __all__ = [ 'register', @@ -19,5 +20,8 @@ 'FastapiApp', 'AutoDeploy', 'AutoFinetune', - 'ModelDownloader' + 'ModelDownloader', + 'FormatterBase', + 'EmptyFormatter', + 'JsonFormatter' ] diff --git a/lazyllm/components/formatter/__init__.py b/lazyllm/components/formatter/__init__.py new file mode 100644 index 00000000..f40c2d42 --- /dev/null +++ b/lazyllm/components/formatter/__init__.py @@ -0,0 +1,10 @@ +from .formatterBase import LazyLLMFormatterBase, LazyLLMFormatterBase as FormatterBase, EmptyFormatter +from .jsonFormatter import JsonFormatter + + +__all__ = [ + 'LazyLLMFormatterBase', + 'FormatterBase', + 'EmptyFormatter', + 'JsonFormatter' +] diff --git a/lazyllm/components/formatter/formatterBase.py b/lazyllm/components/formatter/formatterBase.py new file mode 100644 index 00000000..9889d1d7 --- /dev/null +++ b/lazyllm/components/formatter/formatterBase.py @@ -0,0 +1,50 @@ +from ...common import LazyLLMRegisterMetaClass + +def is_number(s: str): + try: + int(s) + return True + except ValueError: + if s == "None" or len(s) == 0: + return False + else: + raise ValueError("Invalid number: " + s + ". 
You can enter an integer, None or an empty string.")
+
+class LazyLLMFormatterBase(metaclass=LazyLLMRegisterMetaClass):
+    def __init__(self, formatter: str = None):
+        self._formatter = formatter
+        if self._formatter:
+            self._parse_formatter()
+        else:
+            self._slices = None
+
+    def _parse_formatter(self):
+        # Remove the surrounding brackets
+        slice_str = self._formatter.strip()[1:-1]
+        dimensions = slice_str.split(",")
+        slices = []
+
+        for dim in dimensions:
+            if ":" in dim:
+                parts = dim.split(":")
+                start = int(parts[0]) if is_number(parts[0]) else None
+                end = int(parts[1]) if len(parts) > 1 and is_number(parts[1]) else None
+                step = int(parts[2]) if len(parts) > 2 and is_number(parts[2]) else None
+                slices.append(slice(start, end, step))
+            else:
+                slices.append(dim.strip())
+        self._slices = slices
+
+    def _load(self, msg: str):
+        raise NotImplementedError("The str parsing function is not implemented.")
+
+    def _parse_py_data_by_formatter(self, py_data):
+        raise NotImplementedError("The data parsing function is not implemented.")
+
+    def format(self, msg):
+        if isinstance(msg, str): msg = self._load(msg)
+        return self._parse_py_data_by_formatter(msg)
+
+class EmptyFormatter(LazyLLMFormatterBase):
+    def format(self, msg):
+        return msg
diff --git a/lazyllm/components/formatter/jsonFormatter.py b/lazyllm/components/formatter/jsonFormatter.py
new file mode 100644
index 00000000..cd79c7ba
--- /dev/null
+++ b/lazyllm/components/formatter/jsonFormatter.py
@@ -0,0 +1,57 @@
+import json
+from .formatterBase import LazyLLMFormatterBase as FormatterBase
+import lazyllm
+
+class JsonFormatter(FormatterBase):
+    def _extract_json_from_string(self, mixed_str: str):
+        json_objects = []
+        brace_level = 0
+        current_json = ""
+        in_string = False
+
+        for char in mixed_str:
+            if char == '"' and (len(current_json) == 0 or current_json[-1] != '\\'):
+                in_string = not in_string
+
+            if not in_string:
+                if char == '{':
+                    if brace_level == 0:
+                        current_json = ""
+                    brace_level += 1
+                elif char == '}':
+                    brace_level -= 1
+
+            if brace_level > 0 or (brace_level == 0 and char == '}'):
+                current_json += char
+
+            if brace_level == 0 and current_json:
+                try:
+                    json.loads(current_json)
+                    json_objects.append(current_json)
+                    current_json = ""
+                except json.JSONDecodeError:
+                    continue
+
+        return json_objects
+
+    def _load(self, msg: str):
+        # Convert str to json format
+        assert msg.count("{") == msg.count("}"), f"{msg} is not a valid json string."
+        try:
+            json_strs = self._extract_json_from_string(msg)
+            if len(json_strs) == 0:
+                raise TypeError(f"{msg} is not a valid json string.")
+            res = []
+            for json_str in json_strs:
+                res.append(json.loads(json_str))
+            return res if len(res) > 1 else res[0]
+        except Exception as e:
+            lazyllm.LOG.info(f"Error: {e}")
+            return ""
+
+    def _parse_py_data_by_formatter(self, data, *, slices=None):
+        if slices is None: slices = self._slices
+        if not slices: return data
+        if isinstance(slices[0], slice): return [self._parse_py_data_by_formatter(d, slices=slices[1:])
+                                                 for d in data[slices[0]]]
+        else: return self._parse_py_data_by_formatter(data[slices[0]], slices=slices[1:])
diff --git a/lazyllm/components/prompter/alpacaPrompter.py b/lazyllm/components/prompter/alpacaPrompter.py
index 9bcbaf8f..6231750d 100644
--- a/lazyllm/components/prompter/alpacaPrompter.py
+++ b/lazyllm/components/prompter/alpacaPrompter.py
@@ -1,15 +1,21 @@
-from typing import List, Union, Optional
+from typing import List, Union, Optional, Dict
 from .builtinPrompt import LazyLLMPrompterBase
 
 class AlpacaPrompter(LazyLLMPrompterBase):
-    def __init__(self, instruction: Union[None, str] = None,
+    def __init__(self, instruction: Union[None, str, Dict[str, str]] = None,
                  extro_keys: Union[None, List[str]] = None, show: bool = False, tools: Optional[List] = None):
         super(__class__, self).__init__(show, tools=tools)
+        if isinstance(instruction, dict):
+            splice_instruction = instruction.get("system", "") + \
+                AlpacaPrompter.ISA + instruction.get("user", "") + AlpacaPrompter.ISE
+            instruction = splice_instruction
         instruction_template = ("Below is an instruction that describes a task, paired with extra messages such as "
                                 "input that provides further context if possible. Write a response that "
                                 f"appropriately completes the request.\n\n ### Instruction:\n{instruction}"
                                 "\n\n" + LazyLLMPrompterBase._get_extro_key_template(extro_keys))
-        self._init_prompt("{system}\n{instruction}\n{tools}### Response:\n", instruction_template, "### Response:")
+        self._init_prompt("{system}\n{instruction}\n{tools}\n{user}### Response:\n",
+                          instruction_template,
+                          "### Response:")
 
     def _check_values(self, instruction, input, history, tools):
         assert not history, f"Chat history is not supported in {__class__}."
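Putting the two new formatter files together, here is a short end-to-end sketch of the intended slice grammar; the model reply below is hypothetical, and the `"[:, title]"` syntax follows `_parse_formatter` and `_parse_py_data_by_formatter` above:

```python
from lazyllm.components.formatter import JsonFormatter

# A hypothetical model reply that embeds JSON objects in surrounding prose.
reply = ('Here is the outline: '
         '[{"title": "# Intro", "describe": "overview"}, '
         '{"title": "## Body", "describe": "details"}]')

# "[:, title]" reads: every extracted object (":"), then its "title" field.
fmt = JsonFormatter("[:, title]")
print(fmt.format(reply))  # ['# Intro', '## Body']
```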
diff --git a/lazyllm/components/prompter/builtinPrompt.py b/lazyllm/components/prompter/builtinPrompt.py index b3a0d1be..455ba97a 100644 --- a/lazyllm/components/prompter/builtinPrompt.py +++ b/lazyllm/components/prompter/builtinPrompt.py @@ -1,10 +1,14 @@ from typing import Dict, Union, Any, List, Callable, Optional from ...common import LazyLLMRegisterMetaClass from lazyllm import LOG +from functools import reduce import json import re class LazyLLMPrompterBase(metaclass=LazyLLMRegisterMetaClass): + ISA = "" + ISE = "" + def __init__(self, show=False, tools=None): self._set_model_configs(system='You are an AI-Agent developed by LazyLLM.', sos='<|start_system|>', soh='<|Human|>:', soa='<|Assistant|>:', eos='<|end_system|>', eoh='', eoa='') @@ -73,21 +77,25 @@ def _get_instruction_and_input(self, input): assert len(prompt_keys) == 0 return self._instruction_template, input assert isinstance(input, dict) + input = input.copy() kwargs = {k: input.pop(k) for k in prompt_keys} - assert len(input) <= 1, f'Unexpected keys found in input: {list(input.keys())}' - return (self._instruction_template.format(**kwargs) if len(kwargs) > 0 else self._instruction_template, - list(input.values())[0] if input else '') + assert len(input) <= 1, f"Unexpected keys found in input: {list(input.keys())}" + return (reduce(lambda s, kv: s.replace(f"{{{kv[0]}}}", kv[1]), + kwargs.items(), + self._instruction_template) + if len(kwargs) > 0 else self._instruction_template, + list(input.values())[0] if input else "") def _check_values(self, instruction, input, history, tools): pass # Used for TrainableModule(local deployed) - def _generate_prompt_impl(self, instruction, input, history, tools, label): - params = dict(system=self._system, instruction=instruction, input=input, history=history, tools=tools, + def _generate_prompt_impl(self, instruction, input, user, history, tools, label): + params = dict(system=self._system, instruction=instruction, input=input, user=user, history=history, tools=tools, sos=self._sos, eos=self._eos, soh=self._soh, eoh=self._eoh, soa=self._soa, eoa=self._eoa) return self._template.format(**params) + (label if label else '') # Used for OnlineChatModule - def _generate_prompt_dict_impl(self, instruction, input, history, tools, label): + def _generate_prompt_dict_impl(self, instruction, input, user, history, tools, label): if not history: history = [] if isinstance(input, str): history.append({"role": "user", "content": input}) @@ -96,6 +104,9 @@ def _generate_prompt_dict_impl(self, instruction, input, history, tools, label): else: raise TypeError("input must be a string or a dict") + if user: + history[-1]["content"].insert(0, user) + history.insert(0, {"role": "system", "content": self._system + "\n" + instruction if instruction else self._system}) @@ -105,6 +116,18 @@ def pre_hook(self, func: Optional[Callable] = None): self._pre_hook = func return self + def _split_instruction(self, instruction: str): + system_instruction = instruction + user_instruction = "" + if LazyLLMPrompterBase.ISA in instruction and LazyLLMPrompterBase.ISE in instruction: + # The instruction includes system prompts and/or user prompts + pattern = re.compile(r"%s(.*)%s" % (LazyLLMPrompterBase.ISA, LazyLLMPrompterBase.ISE)) + ret = re.split(pattern, instruction) + system_instruction = ret[0] + user_instruction = ret[1] + + return system_instruction, user_instruction + def generate_prompt(self, input: Union[str, Dict[str, str], None] = None, history: List[Union[List[str], Dict[str, Any]]] = None, tools: 
Union[List[Dict[str, Any]], None] = None, @@ -116,8 +139,9 @@ def generate_prompt(self, input: Union[str, Dict[str, str], None] = None, history = self._get_histories(history, return_dict=return_dict) tools = self._get_tools(tools, return_dict=return_dict) self._check_values(instruction, input, history, tools) + instruction, user_instruction = self._split_instruction(instruction) func = self._generate_prompt_dict_impl if return_dict else self._generate_prompt_impl - result = func(instruction, input, history, tools, label) + result = func(instruction, input, user_instruction, history, tools, label) if self._show or show: LOG.info(result) return result diff --git a/lazyllm/components/prompter/chatPrompter.py b/lazyllm/components/prompter/chatPrompter.py index dff1dd29..d7e828b2 100644 --- a/lazyllm/components/prompter/chatPrompter.py +++ b/lazyllm/components/prompter/chatPrompter.py @@ -1,13 +1,17 @@ -from typing import List, Union, Optional +from typing import List, Union, Optional, Dict from .builtinPrompt import LazyLLMPrompterBase class ChatPrompter(LazyLLMPrompterBase): - def __init__(self, instruction: Union[None, str] = None, + def __init__(self, instruction: Union[None, str, Dict[str, str]] = None, extro_keys: Union[None, List[str]] = None, show: bool = False, tools: Optional[List] = None): super(__class__, self).__init__(show, tools=tools) + if isinstance(instruction, dict): + splice_instruction = instruction.get("system", "") + \ + ChatPrompter.ISA + instruction.get("user", "") + ChatPrompter.ISE + instruction = splice_instruction instruction_template = f'{instruction}\n{{extro_keys}}\n'.replace( '{extro_keys}', LazyLLMPrompterBase._get_extro_key_template(extro_keys)) - self._init_prompt("{sos}{system}{instruction}{tools}{eos}\n\n{history}\n{soh}\n{input}\n{eoh}{soa}\n", + self._init_prompt("{sos}{system}{instruction}{tools}{eos}\n\n{history}\n{soh}\n{user}{input}\n{eoh}{soa}\n", instruction_template) @property diff --git a/lazyllm/docs/components.py b/lazyllm/docs/components.py index 4cbe8054..3368bedc 100644 --- a/lazyllm/docs/components.py +++ b/lazyllm/docs/components.py @@ -520,20 +520,84 @@ def test_prompter(): >>> downloader.download('GLM3-6B') ''') +# ============= Formatter + +# FormatterBase +add_chinese_doc('formatter.FormatterBase', '''\ +此类是格式化器的基类,格式化器是模型输出结果的格式化器,用户可以自定义格式化器,也可以使用LazyLLM提供的格式化器。 +主要方法:_parse_formatter:解析索引内容。_load:解析str对象,其中包含python对象的部分被解析出来,比如list,dict等对象。_parse_py_data_by_formatter:根据自定义的格式化器和索引对python对象进行格式化。format:对传入的内容进行格式化,如果内容是字符串类型,先将字符串转化为python对象,再进行格式化。如果内容是python对象,直接进行格式化。 +''') + +add_english_doc('formatter.FormatterBase', '''\ +This class is the base class of the formatter. The formatter is the formatter of the model output result. Users can customize the formatter or use the formatter provided by LazyLLM. +Main methods: _parse_formatter: parse the index content. _load: Parse the str object, and the part containing Python objects is parsed out, such as list, dict and other objects. _parse_py_data_by_formatter: format the python object according to the custom formatter and index. format: format the passed content. If the content is a string type, convert the string into a python object first, and then format it. If the content is a python object, format it directly. +''') + +add_example('formatter.FormatterBase', '''\ +>>> from lazyllm.components.formatter import FormatterBase +>>> class MyFormatter(LazyLLMFormatterBase): +... def _load(self, data): +... return str_to_list(data) +... +... 
def _parse_py_data_by_formatter(self, data, formatter): +... return extract_data_by_formatter(data, formatter) +... +>>> fmt = MyFormatter("[1:3]") # 取列表中索引为1和2的元素 +>>> fmt.format("[1,2,3,4,5]") # 输入为字符串"[1,2,3,4,5]" +[2,3] +''') + +# JsonFormatter +add_chinese_doc('JsonFormatter', '''\ +此类是JSON格式化器,即用户希望模型输出的内容格式为JSON,还可以通过索引方式对输出内容中的某个字段进行选择。 +''') + +add_english_doc('JsonFormatter', '''\ +This class is a JSON formatter, that is, the user wants the model to output content is JSON format, and can also select a field in the output content by indexing. +''') + +add_example('JsonFormatter', '''\ +>>> from lazyllm.components import JsonFormatter +>>> # Assume that the model output without specifying a formatter is as follows: +"Based on your input, here is the corresponding list of nested dictionaries:\\n\\n```python\\n[\\n {\\n \"title\": \"# Introduction\",\\n \"describe\": \"Provide an overview of the topic and set the stage for the article. Discuss what the reader can expect to learn from this article.\"\\n },\\n {\\n \"title\": \"## What is Artificial Intelligence?\",\\n \"describe\": \"Define Artificial Intelligence and discuss its importance in various fields, including the medical industry.\"\\n },\\n {\\n \"title\": \"## Applications of AI in Medical Field\",\\n \"describe\": \"Outline the ways AI is used in the medical field, such as diagnosis, drug discovery, and patient treatment.\"\\n },\\n {\\n \"title\": \"### Medical Image Analysis\",\\n \"describe\": \"Discuss how AI-powered image analysis tools help in detecting diseases and analyzing medical images, such as X-rays, MRIs, and CT scans.\"\\n },\\n {\\n \"title\": \"### Personalized Medicine\",\\n \"describe\": \"Explain how AI algorithms can assist in genetic testing and tailor treatment plans based on an individual's genetic makeup.\"\\n },\\n {\\n \"title\": \"### Electronic Health Records (EHRs) and Medical Data Management\",\\n \"describe\": \"Discuss the role of AI in managing and analyzing large amounts of medical data, such as electronic health records, for improved patient care and population health management.\"\\n },\\n {\\n \"title\": \"## Challenges in AI Adoption\",\\n \"describe\": \"Highlight potential challenges to AI implementation, including data privacy, ethical concerns, and regulatory issues.\"\\n },\\n {\\n \"title\": \"## Future of AI in Medicine\",\\n \"describe\": \"Investigate the evolving role of AI in medicine, the anticipated advancements in the field, and their potential impact on medical professionals and patients.\"\\n },\\n {\\n \"title\": \"# Conclusion\",\\n \"describe\": \"Summarize the key points of the article and emphasize the potential for AI in revolutionizing the medical field.\"\\n }\\n]\\n```\\n\\nPlease use the provided `title` and `describe` information to write your article, leveraging Markdown format to denote hierarchical levels. Each `title` should reflect its corresponding level in a Markdown format, including \"#\" for level 1, \"##\" for level 2, and \"###\" for level 3. The `describe` text provides a guide for developing each section of the article, ensuring it aligns with the overarching discussion on the application of AI in the medical field." +>>> jsonFormatter=JsonFormatter("[:, title]") # ":" represents all elements in a list. "title" represents the "title" field in the json data. 
+>>> model.formatter(jsonFormatter) +>>> # The model output of the specified formatter is as follows +["# Introduction", "## What is Artificial Intelligence?", "## Applications of AI in Medical Field", "### Medical Image Analysis", "### Personalized Medicine", "### Electronic Health Records (EHRs) and Medical Data Management", "## Challenges in AI Adoption", "## Future of AI in Medicine", "# Conclusion"] +''') + +# EmptyFormatter +add_chinese_doc('EmptyFormatter', '''\ +此类是空的格式化器,即用户希望对模型的输出不做格式化,用户可以对模型指定该格式化器,也可以不指定(模型默认的格式化器就是空格式化器) +''') + +add_english_doc('EmptyFormatter', '''\ +This type is the system default formatter. When the user does not specify anything or does not want to format the model output, this type is selected. The model output will be in the same format. +''') + +add_example('EmptyFormatter', '''\ +>>> from lazyllm.components import EmptyFormatter +>>> # Assume that the model output without specifying a formatter is as follows: +"Here's a nested list of dictionaries based on your user input:\\n\\n```json\\n[\\n {\\n \"title\": \"# AI in Medical Field\",\\n \"describe\": \"Please provide a detailed introduction to the use of artificial intelligence in the medical field, emphasizing its potential benefits and challenges.\"\\n },\\n {\\n \"title\": \"## Applications of AI in Medical Diagnosis\",\\n \"describe\": \"Please discuss the utilization of AI in medical diagnosis, including its advantages over traditional methods and notable achievements.\"\\n },\\n {\\n \"title\": \"### AI-assisted Diagnosis Tools\",\\n \"describe\": \"Please elaborate on specific AI-assisted diagnostic tools used in medical practice, such as image analysis, predictive analytics, and decision support systems.\"\\n },\\n {\\n \"title\": \"#### Image Analysis Tools\",\\n \"describe\": \"Please provide a comprehensive overview of AI-powered image analysis tools and their role in enhancing disease detection and treatment planning.\"\\n },\\n {\\n \"title\": \"#### Predictive Analytics\",\\n \"describe\": \"Please explain how predictive analytics leverages AI to forecast diseases, identify risk factors, and develop personalized treatment protocols.\"\\n },\\n {\\n \"title\": \"#### Decision Support Systems\",\\n \"describe\": \"Please discuss the role of decision support systems in facilitating clinical decision-making and improving patient outcomes.\"\\n },\\n {\\n \"title\": \"## Advantages and Limitations of AI in Medical Field\",\\n \"describe\": \"Please identify and elaborate on the key advantages and limitations of employing AI in the medical field, including ethical, legal, and practical considerations.\"\\n },\\n {\\n \"title\": \"## Future Perspectives and Innovations\",\\n \"describe\": \"Please provide a forward-looking view of the progression of AI in the healthcare sector, predicting future developments, and discussing the potential impact on medical professionals and patients.\"\\n },\\n {\\n \"title\": \"### New AI Technologies\",\\n \"describe\": \"Please discuss emerging AI technologies that could reshape the medical field, such as machine learning, natural language processing, and robotics.\"\\n },\\n {\\n \"title\": \"#### Machine Learning\",\\n \"describe\": \"Please explain how machine learning algorithms are being used to enhance medical research, such as in drug discovery, genomics, and epidemiology.\"\\n },\\n {\\n \"title\": \"#### Natural Language Processing\",\\n \"describe\": \"Please discuss the role of natural language processing in extracting and analyzing medical data 
from various sources, such as electronic health records and scientific literature.\"\\n },\\n {\\n \"title\": \"#### Robotics\",\\n \"describe\": \"Please elaborate on the incorporation of AI-driven robots in medical procedures and surgeries, emphasizing their potential to revolutionize patient care and treatment options.\"\\n },\\n {\\n \"title\": \"### Ethical Considerations\",\\n \"describe\": \"Please address the ethical concerns surrounding AI in healthcare, such as data privacy, transparency, and patient autonomy, and discuss potential methods to mitigate these issues.\"\\n }\\n]\\n```\\n\\nThis outline provides a comprehensive structure for your article, addressing various aspects of the application of AI in the medical field, from specific AI technologies to ethical considerations. You can start by elaborating on each section, providing detailed descriptions, examples, and relevant information. Be sure to include scientific research, case studies, and expert opinions to support your arguments and provide a comprehensive understanding of the subject." +>>> emptyFormatter = EmptyFormatter() +>>> model.formatter(emptyFormatter) +>>> # The model output of the specified formatter is as follows +"Here's a nested list of dictionaries based on your user input:\\n\\n```json\\n[\\n {\\n \"title\": \"# AI in Medical Field\",\\n \"describe\": \"Please provide a detailed introduction to the use of artificial intelligence in the medical field, emphasizing its potential benefits and challenges.\"\\n },\\n {\\n \"title\": \"## Applications of AI in Medical Diagnosis\",\\n \"describe\": \"Please discuss the utilization of AI in medical diagnosis, including its advantages over traditional methods and notable achievements.\"\\n },\\n {\\n \"title\": \"### AI-assisted Diagnosis Tools\",\\n \"describe\": \"Please elaborate on specific AI-assisted diagnostic tools used in medical practice, such as image analysis, predictive analytics, and decision support systems.\"\\n },\\n {\\n \"title\": \"#### Image Analysis Tools\",\\n \"describe\": \"Please provide a comprehensive overview of AI-powered image analysis tools and their role in enhancing disease detection and treatment planning.\"\\n },\\n {\\n \"title\": \"#### Predictive Analytics\",\\n \"describe\": \"Please explain how predictive analytics leverages AI to forecast diseases, identify risk factors, and develop personalized treatment protocols.\"\\n },\\n {\\n \"title\": \"#### Decision Support Systems\",\\n \"describe\": \"Please discuss the role of decision support systems in facilitating clinical decision-making and improving patient outcomes.\"\\n },\\n {\\n \"title\": \"## Advantages and Limitations of AI in Medical Field\",\\n \"describe\": \"Please identify and elaborate on the key advantages and limitations of employing AI in the medical field, including ethical, legal, and practical considerations.\"\\n },\\n {\\n \"title\": \"## Future Perspectives and Innovations\",\\n \"describe\": \"Please provide a forward-looking view of the progression of AI in the healthcare sector, predicting future developments, and discussing the potential impact on medical professionals and patients.\"\\n },\\n {\\n \"title\": \"### New AI Technologies\",\\n \"describe\": \"Please discuss emerging AI technologies that could reshape the medical field, such as machine learning, natural language processing, and robotics.\"\\n },\\n {\\n \"title\": \"#### Machine Learning\",\\n \"describe\": \"Please explain how machine learning algorithms are being used to enhance 
medical research, such as in drug discovery, genomics, and epidemiology.\"\\n },\\n {\\n \"title\": \"#### Natural Language Processing\",\\n \"describe\": \"Please discuss the role of natural language processing in extracting and analyzing medical data from various sources, such as electronic health records and scientific literature.\"\\n },\\n {\\n \"title\": \"#### Robotics\",\\n \"describe\": \"Please elaborate on the incorporation of AI-driven robots in medical procedures and surgeries, emphasizing their potential to revolutionize patient care and treatment options.\"\\n },\\n {\\n \"title\": \"### Ethical Considerations\",\\n \"describe\": \"Please address the ethical concerns surrounding AI in healthcare, such as data privacy, transparency, and patient autonomy, and discuss potential methods to mitigate these issues.\"\\n }\\n]\\n```\\n\\nThis outline provides a comprehensive structure for your article, addressing various aspects of the application of AI in the medical field, from specific AI technologies to ethical considerations. You can start by elaborating on each section, providing detailed descriptions, examples, and relevant information. Be sure to include scientific research, case studies, and expert opinions to support your arguments and provide a comprehensive understanding of the subject."
+''')

 # ============= Prompter

 add_chinese_doc('prompter.PrompterBase', '''\
 Prompter的基类,自定义的Prompter需要继承此基类,并通过基类提供的 ``_init_prompt`` 函数来设置Prompt模板和Instruction的模板,以及截取结果所使用的字符串。可以查看 :doc:`/best_practice/prompt` 进一步了解Prompt的设计思想和使用方式。
-Prompt模板和Instruction模板都用 ``{}`` 表示要填充的字段,其中Prompt可包含的字段有 ``system``, ``history``, ``tools``等,而instruction_template可包含的字段为 ``instruction`` 和 ``extro_keys`` 。
-``instruction`` 由应用的开发者传入, ``instruction`` 中也可以带有 ``{}`` 用于让定义可填充的字段,方便用户填入额外的信息。
+Prompt模板和Instruction模板都用 ``{}`` 表示要填充的字段,其中Prompt可包含的字段有 ``system``, ``history``, ``tools``, ``user`` 等,而instruction_template可包含的字段为 ``instruction`` 和 ``extro_keys`` 。
+``instruction`` 由应用的开发者传入, ``instruction`` 中也可以带有 ``{}`` 用于定义可填充的字段,方便用户填入额外的信息。如果 ``instruction`` 字段为字符串,则认为是系统instruction;如果是字典,则它包含的key只能是 ``user`` 和 ``system`` 两种选择。 ``user`` 表示用户输入的instruction,在prompt中放在用户输入前面, ``system`` 表示系统instruction,在prompt中放在system prompt后面。
 ''')

 add_english_doc('prompter.PrompterBase', '''\
 The base class of Prompter. A custom Prompter needs to inherit from this base class and set the Prompt template and the Instruction template using the `_init_prompt` function provided by the base class, as well as the string used to capture results. Refer to :doc:`/best_practice/prompt.rst` for further understanding of the design philosophy and usage of Prompts.
-Both the Prompt template and the Instruction template use ``{}`` to indicate the fields to be filled in. The fields that can be included in the Prompt are `system`, `history`, `tools`, etc., while the fields that can be included in the instruction_template are `instruction` and `extro_keys`.
+Both the Prompt template and the Instruction template use ``{}`` to indicate the fields to be filled in. The fields that can be included in the Prompt are `system`, `history`, `tools`, `user`, etc., while the fields that can be included in the instruction_template are `instruction` and `extro_keys`. If the ``instruction`` field is a string, it is considered as a system instruction; if it is a dictionary, it can only contain the keys ``user`` and ``system``. ``user`` represents the user input instruction, which is placed before the user input in the prompt, and ``system`` represents the system instruction, which is placed after the system prompt in the prompt.
 ``instruction`` is passed in by the application developer, and the ``instruction`` can also contain ``{}`` to define fillable fields, making it convenient for users to input additional information.
 ''')
@@ -598,7 +662,7 @@ def test_prompter():
 Alpaca格式的Prompter,支持工具调用,不支持历史对话。

 Args:
-    instruction (Option[str]): 大模型的任务指令,至少带一个可填充的槽位(如 ``{instruction}``)。
+    instruction (Option[str]): 大模型的任务指令,至少带一个可填充的槽位(如 ``{instruction}``)。或者使用字典指定 ``system`` 和 ``user`` 的指令。
     extro_keys (Option[List]): 额外的字段,用户的输入会填充这些字段。
     show (bool): 标志是否打印生成的Prompt,默认为False
     tools (Option[list]): 大模型可以使用的工具集合,默认为None
@@ -610,7 +674,7 @@ def test_prompter():

 Args:
-    instruction (Option[str]): Task instructions for the large model, with at least one fillable slot (e.g. ``{instruction}``).
+    instruction (Option[str]): Task instructions for the large model, with at least one fillable slot (e.g. ``{instruction}``). Or use a dictionary to specify the ``system`` and ``user`` instructions.
     extro_keys (Option[List]): Additional fields that will be filled with user input.
     show (bool): Flag indicating whether to print the generated Prompt, default is False.
     tools (Option[list]): Tool-set which is provided for LLMs, default is None.
@@ -629,13 +693,20 @@ def test_prompter():
 'You are an AI-Agent developed by LazyLLM.\\nBelow is an instruction that describes a task, paired with extra messages such as input that provides further context if possible. Write a response that appropriately completes the request.\\n\\n ### Instruction:\\nhello world hello world, my input\\n\\nHere are some extra messages you can referred to:\\n\\n### knowledge:\\nlazyllm\\n\\n\\n### Response:\\n'
 >>> p.generate_prompt(dict(instruction='hello world', input='my input', knowledge='lazyllm'), return_dict=True)
 {'messages': [{'role': 'system', 'content': 'You are an AI-Agent developed by LazyLLM.\\nBelow is an instruction that describes a task, paired with extra messages such as input that provides further context if possible. Write a response that appropriately completes the request.\\n\\n ### Instruction:\\nhello world hello world, my input\\n\\nHere are some extra messages you can referred to:\\n\\n### knowledge:\\nlazyllm\\n\\n'}, {'role': 'user', 'content': ''}]}
+>>>
+>>> p = AlpacaPrompter(dict(system="hello world", user="this is user instruction {input}"))
+>>> p.generate_prompt(dict(input="my input"))
+'You are an AI-Agent developed by LazyLLM.\\nBelow is an instruction that describes a task, paired with extra messages such as input that provides further context if possible. Write a response that appropriately completes the request.\\n\\n ### Instruction:\\nhello world\\n\\n\\n\\nthis is user instruction my input### Response:\\n'
+>>> p.generate_prompt(dict(input="my input"), return_dict=True)
+{'messages': [{'role': 'system', 'content': 'You are an AI-Agent developed by LazyLLM.\\nBelow is an instruction that describes a task, paired with extra messages such as input that provides further context if possible. Write a response that appropriately completes the request.\\n\\n ### Instruction:\\nhello world'}, {'role': 'user', 'content': 'this is user instruction my input'}]}
+
 ''')

 add_chinese_doc('ChatPrompter', '''\
 多轮对话的Prompt,支持工具调用和历史对话

 Args:
-    instruction (Option[str]): 大模型的任务指令,可以带0到多个待填充的槽位,用 ``{}`` 表示。
+    instruction (Option[str]): 大模型的任务指令,可以带0到多个待填充的槽位,用 ``{}`` 表示。针对用户instruction可以通过字典传递,字段为 ``user`` 和 ``system`` 。
     extro_keys (Option[List]): 额外的字段,用户的输入会填充这些字段。
     show (bool): 标志是否打印生成的Prompt,默认为False
 ''')
@@ -644,7 +715,7 @@ def test_prompter():
 chat prompt, supports tool calls and historical dialogue.

 Args:
-    instruction (Option[str]): Task instructions for the large model, with 0 to multiple fillable slot, represented by ``{}``.
+    instruction (Option[str]): Task instructions for the large model, with 0 to multiple fillable slots, represented by ``{}``. For user instructions, you can pass a dictionary with fields ``user`` and ``system``.
     extro_keys (Option[List]): Additional fields that will be filled with user input.
     show (bool): Flag indicating whether to print the generated Prompt, default is False.
 ''')
@@ -664,6 +735,12 @@ def test_prompter():
 {'messages': [{'role': 'system', 'content': 'You are an AI-Agent developed by LazyLLM.\\nhello world this is my ins\\nHere are some extra messages you can referred to:\\n\\n### knowledge:\\nLazyLLM-Knowledge\\n\\n\\n'}, {'role': 'user', 'content': 'this is my inp'}]}
 >>> p.generate_prompt(dict(instruction='this is my ins', input='this is my inp', knowledge='LazyLLM-Knowledge'), history=[['s1', 'e1'], ['s2', 'e2']])
 '<|start_system|>You are an AI-Agent developed by LazyLLM.hello world this is my ins\\nHere are some extra messages you can referred to:\\n\\n### knowledge:\\nLazyLLM-Knowledge\\n\\n\\n<|end_system|>\\n\\n<|Human|>:s1<|Assistant|>:e1<|Human|>:s2<|Assistant|>:e2\\n<|Human|>:\\nthis is my inp\\n<|Assistant|>:\\n'
+>>>
+>>> p = ChatPrompter(dict(system="hello world", user="this is user instruction {input} "))
+>>> p.generate_prompt(dict(input="my input", query="this is user query"))
+'<|start_system|>You are an AI-Agent developed by LazyLLM.hello world\\n\\n<|end_system|>\\n\\n\\n<|Human|>:\\nthis is user instruction my input this is user query\\n<|Assistant|>:\\n'
+>>> p.generate_prompt(dict(input="my input", query="this is user query"), return_dict=True)
+{'messages': [{'role': 'system', 'content': 'You are an AI-Agent developed by LazyLLM.\\nhello world\\n\\n'}, {'role': 'user', 'content': 'this is user instruction my input this is user query'}]}
 ''')

 # ============= Launcher
diff --git a/lazyllm/module/module.py b/lazyllm/module/module.py
index 3ebc6496..bdbfd1bd 100644
--- a/lazyllm/module/module.py
+++ b/lazyllm/module/module.py
@@ -273,7 +273,7 @@ def prompt(self, prompt=None):
             self._prompt = EmptyPrompter()
         elif isinstance(prompt, PrompterBase):
             self._prompt = prompt
-        elif isinstance(prompt, str):
+        elif isinstance(prompt, (str, dict)):
             self._prompt = ChatPrompter(prompt)
         return self

diff --git a/lazyllm/module/onlineChatModule/onlineChatModule.py b/lazyllm/module/onlineChatModule/onlineChatModule.py
index a9a6cba5..201ca97f 100644
--- a/lazyllm/module/onlineChatModule/onlineChatModule.py
+++ b/lazyllm/module/onlineChatModule/onlineChatModule.py
@@ -20,7 +20,6 @@ class OnlineChatModule(metaclass=_ChatModuleMeta):
     @staticmethod
     def _encapsulate_parameters(base_url: str,
                                 model: str,
-                                system_prompt: str,
                                 stream: bool,
                                 return_trace: bool,
                                 **kwargs) -> Dict[str, Any]:
@@ -29,8 +28,6 @@ def _encapsulate_parameters(base_url: str,
             params['base_url'] = base_url
         if model is not None:
             params['model'] = model
-        if system_prompt is not None:
-            params['system_prompt'] = system_prompt
         params.update(kwargs)

         return params
@@ -39,11 +36,10 @@ def __new__(self,
                 source: str,
                 base_url: str = None,
                 model: str = None,
-                system_prompt: str = None,
                 stream: bool = True,
                 return_trace: bool = False,
                 **kwargs):
-        params = OnlineChatModule._encapsulate_parameters(base_url, model, system_prompt, stream, return_trace, **kwargs)
+        params = OnlineChatModule._encapsulate_parameters(base_url, model, stream, return_trace, **kwargs)

         if source.lower() == "openai":
             return OpenAIModule(**params)
diff --git a/lazyllm/module/onlineChatModule/onlineChatModuleBase.py b/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
index 85a2fd10..edb75ce4 100644
--- a/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
+++ b/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
@@ -1,10 +1,12 @@
 import json
 import os
 import requests
+import re
 from typing import Tuple, List, Dict, Union, Any
 import time
 import lazyllm
 from lazyllm.components.prompter import PrompterBase, ChatPrompter
+from lazyllm.components.formatter import FormatterBase, EmptyFormatter
 from ..module import ModuleBase, Pipeline

 class OnlineChatModuleBase(ModuleBase):
@@ -31,13 +33,16 @@ def __init__(self,
         self._set_chat_url()
         self.prompt()
         self._is_trained = False
+        self.formatter()
+        self.field_extractor()
+        self._stream_end_token = "[DONE]"

     def prompt(self, prompt=None):
         if prompt is None:
             self._prompt = ChatPrompter()
         elif isinstance(prompt, PrompterBase):
             self._prompt = prompt
-        elif isinstance(prompt, str):
+        elif isinstance(prompt, (str, dict)):
             self._prompt = ChatPrompter(prompt)
         else:
             raise TypeError(f"{prompt} type is not supported.")
@@ -70,14 +75,80 @@ def _get_models_list(self):
         res_json = r.json()
         return res_json

+    def _parse_output_by_key(self, key: str, data: Dict[str, Any]):
+        if "choices" in data and isinstance(data["choices"], list):
+            item = data['choices'][0]
+            data = item.get("delta", {}) if "delta" in item else item.get("message", {})
+            return data if not key else data.get(key, "")
+        else:
+            raise ValueError(f"The response {data} does not contain a 'choices' field.")
+
+    def _synthetic_output(self, response: Dict[str, Any]):
+        if len(self._extractor_fields) == 1:
+            key = self._extractor_fields[0]
+            content = self._parse_output_by_key(key, response) if key else ""
+            return self._formatter.format(content) if content else ""
+        elif len(self._extractor_fields) > 1:
+            res = {}
+            for key in self._extractor_fields:
+                content = self._parse_output_by_key(key, response) if key else ""
+                res[key] = self._formatter.format(content) if content else ""
+            return res
+        else:
+            content = self._parse_output_by_key(".", response)
+            return self._formatter.format(content) if content else ""
+
+    def _stream_post_process(self, response: str) -> Dict[str, Any]:
+        try:
+            chunk = json.loads(response)
+            return chunk
+        except ValueError:
+            return response
+        except Exception as e:
+            lazyllm.LOG.error(e)
+            return ""
+
     def _parse_response_stream(self, response: str) -> str:
-        chunk = response.decode('utf-8')[6:]
-        return chunk
+        pattern = re.compile(r"^data:\s*")
+        response = re.sub(pattern, "", response.decode('utf-8'))
+        chunk = self._stream_post_process(response)
+        if self._stream_end_token == chunk: return self._stream_end_token
+        return self._synthetic_output(chunk)
+
+    def _nonstream_post_process(self, response: str) -> Dict[str, Any]:
+        try:
+            chunk = json.loads(response)
+            return chunk
+        except Exception as e:
+            lazyllm.LOG.error(e)
+            return ""

     def _parse_response_non_stream(self, response: str) -> Dict[str, Any]:
         """Parse the response from the interface"""
-        cur_msg = json.loads(response)["choices"][0]
-        return cur_msg
+        cur_msg = self._nonstream_post_process(response)
+        return self._synthetic_output(cur_msg)
+
+    def formatter(self, format: FormatterBase = None):
+        if isinstance(format, FormatterBase):
+            self._formatter = format
+        elif format is None:
+            self._formatter = EmptyFormatter()
+        else:
+            raise TypeError("format must be a FormatterBase")
+
+        return self
+
+    def field_extractor(self, key: Union[str, List[str]] = None):
+        if key is None:
+            self._extractor_fields = ["content"]
+        elif isinstance(key, str):
+            self._extractor_fields = [key]
+        elif isinstance(key, list):
+            self._extractor_fields = key
+        else:
+            raise TypeError(f"Unsupported type: {type(key)}")
+
+        return self

     def forward(self, __input: Union[Dict, str] = None, llm_chat_history: List[List[str]] = None, tools: List[Dict[str, Any]] = None, **kw): # noqa C901
         """LLM inference interface"""
@@ -101,8 +172,9 @@ def _impl_stream():
             for line in r.iter_lines():
                 if len(line) == 0:
                     continue
+
                 chunk = self._parse_response_stream(line)
-                if chunk == "[DONE]": return
+                if self._stream_end_token == chunk: return
                 yield chunk

         def _impl_non_stream():
diff --git a/lazyllm/module/onlineChatModule/sensenovaModule.py b/lazyllm/module/onlineChatModule/sensenovaModule.py
index ba4566f0..5d6aef16 100644
--- a/lazyllm/module/onlineChatModule/sensenovaModule.py
+++ b/lazyllm/module/onlineChatModule/sensenovaModule.py
@@ -1,7 +1,7 @@
 import json
 import os
 import requests
-from typing import Tuple
+from typing import Tuple, Dict, Any
 import uuid
 import lazyllm
 from .onlineChatModuleBase import OnlineChatModuleBase
@@ -53,27 +53,34 @@ def encode_jwt_token(ak: str, sk: str) -> str:
     def _set_chat_url(self):
         self._url = os.path.join(self._base_url, 'chat-completions')

-    def _parse_response_stream(self, response: str) -> str:
-        chunk = response.decode('utf-8')[5:]
+    def _stream_post_process(self, response: str) -> Dict[str, Any]:
         try:
-            chunk = json.loads(chunk)["data"]
+            chunk = json.loads(response)["data"]
             content = chunk['choices'][0]['delta']
-            chunk['choices'][0]['delta'] = {"content": content}
-            return json.dumps(chunk, ensure_ascii=False)
-        except Exception:
+            role = chunk['choices'][0].pop("role")
+            chunk['choices'][0]['delta'] = {"content": content, "role": role}
+            if "tool_calls" in chunk["choices"][0]:
+                tool_calls = chunk["choices"][0].pop("tool_calls")
+                chunk["choices"][0]["delta"]["tool_calls"] = tool_calls
+            chunk["model"] = self._model_name
             return chunk
+        except ValueError:
+            return response
+        except Exception as e:
+            lazyllm.LOG.error(e)
+            return ""

-    def _parse_response_non_stream(self, response: str) -> str:
+    def _nonstream_post_process(self, response: str) -> Dict[str, Any]:
         try:
             resp = json.loads(response)['data']
-            content = resp["choices"][0].get("message", "")
+            content = resp['choices'][0].get('message', '')
             msg = {"role": resp['choices'][0].pop("role"), "content": content}
-            resp['choices'][0]['message'] = msg
-            if 'tool_calls' in resp['choices'][0]:
-                tool_calls = resp['choices'][0].pop("tool_calls")
-                resp['choices'][0]['message']['tool_calls'] = tool_calls
-            resp['model'] = self._model_name
-            return resp["choices"][0]
+            resp["choices"][0]["message"] = msg
+            if "tool_calls" in resp["choices"][0]:
+                tool_calls = resp["choices"][0].pop("tool_calls")
+                resp["choices"][0]["message"]["tool_calls"] = tool_calls
+            resp["model"] = self._model_name
+            return resp
         except Exception as e:
             lazyllm.LOG.error(e)
             return ""

From d7ac613e49618c58c3e50bc7c6cda69b9bbfa864 Mon Sep 17 00:00:00 2001
From: wangjian052163
Date: Fri, 21 Jun 2024 10:22:34 +0800
Subject: [PATCH 6/7] fix bug for online chat formatter (#24)

Co-authored-by: wangjian
---
 lazyllm/components/prompter/builtinPrompt.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lazyllm/components/prompter/builtinPrompt.py b/lazyllm/components/prompter/builtinPrompt.py
index 455ba97a..12143753 100644
--- a/lazyllm/components/prompter/builtinPrompt.py
+++ b/lazyllm/components/prompter/builtinPrompt.py
@@ -105,7 +105,7 @@ def _generate_prompt_dict_impl(self, instruction, input, user, history, tools, l
             raise TypeError("input must be a string or a dict")

         if user:
-            history[-1]["content"].insert(0, user)
+            history[-1]["content"] = user + history[-1]['content']

         history.insert(0, {"role": "system", "content": self._system + "\n" + instruction if instruction else self._system})

From 727085cb1e540b7caf8087b7f90e9ec6f27ff956 Mon Sep 17 00:00:00 2001
From: Peter Ye <44945378+yewentao256@users.noreply.github.com>
Date: Fri, 21 Jun 2024 10:44:02 +0800
Subject: [PATCH 7/7] Fixed the bug caused by the given path not existing in
 the logging system. (#25)

---
 lazyllm/common/logger.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lazyllm/common/logger.py b/lazyllm/common/logger.py
index 9f951f46..0985dcd2 100644
--- a/lazyllm/common/logger.py
+++ b/lazyllm/common/logger.py
@@ -34,6 +34,7 @@ class _Log:
     def __init__(self):
         self._name = lazyllm.config["log_name"]
         self._pid = getpid()
+        self._log_dir_path = check_path(lazyllm.config["log_dir"], exist=False, file=False)

         if getenv("LOGURU_AUTOINIT", "true").lower() in ("1", "true") and stderr:
             try:
@@ -101,9 +102,8 @@ def __getattr__(self, attr):
 def add_file_sink():
     name = lazyllm.config["log_name"]
     pid = getpid()
-    log_dir_path = lazyllm.config["log_dir"]
+    log_dir_path = LOG._log_dir_path
     if log_dir_path:
-        log_dir_path = check_path(log_dir_path, exist=False, file=False)
         log_file_mode = lazyllm.config["log_file_mode"]
         if log_file_mode == "merge":
             log_file_name = f"{name}.json.log"