From 8f436cc2cb85efeb0002606b3655275ca352e231 Mon Sep 17 00:00:00 2001
From: wxywb
Date: Tue, 18 Oct 2022 14:41:50 +0800
Subject: [PATCH] init the operator.

Signed-off-by: wxywb
---
 __init__.py                         |  18 +
 demo_coco_tokens.pickle             | Bin 0 -> 243887 bytes
 expansionnet_v2.py                  |  55 +++
 models/End_ExpansionNet_v2.py       | 187 ++++
 models/ExpansionNet_v2.py           | 103 +++++
 models/captioning_model.py          | 241 ++++
 models/ensemble_captioning_model.py | 187 ++++
 models/layers.py                    | 286 ++++++
 models/swin_transformer_mod.py      | 655 ++++++++++++++++++++++++++++
 9 files changed, 1732 insertions(+)
 create mode 100644 __init__.py
 create mode 100644 demo_coco_tokens.pickle
 create mode 100644 expansionnet_v2.py
 create mode 100644 models/End_ExpansionNet_v2.py
 create mode 100644 models/ExpansionNet_v2.py
 create mode 100644 models/captioning_model.py
 create mode 100644 models/ensemble_captioning_model.py
 create mode 100644 models/layers.py
 create mode 100644 models/swin_transformer_mod.py

diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..af9ed2f
--- /dev/null
+++ b/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2021 Zilliz. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .expansionnet_v2 import ExpansionNetV2
+
+def expansionnet_v2(model_name: str):
+    return ExpansionNetV2(model_name)
diff --git a/demo_coco_tokens.pickle b/demo_coco_tokens.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..8130d6119f698e9b8dec3e10bd20e5d8f0720536
GIT binary patch
literal 243887
[base85-encoded binary data for demo_coco_tokens.pickle (243887 bytes) elided]
z{31f+?ebFOA`?nR&LI}-aw-9R+YEuLmy0#v(#zeH=a&Ew_HKF7ju@SS*Bt`$z zCe(V<60$^_T*5aaH<;4PG;D~Cxg1yT3N$fGN1l-fapF$t^ z3M^fIe(E&#gej~0M|cK#(kq1^zd&iwv*=U0r6fCB%NL~h(|R|Fmw|DpGhUh5U3(X> zXT1_|1=#adT*ROADg-CTOUUzHsYa^Hs0;dllc($}h>N;W=^wd@xuj#+-~L`hT{f{I z)pf)beH`~tH`uL%uzPQ{);;}+_=jufeGvwcJFk4XEYy><>W&kIS9%W+cb#-aaO^z9-E#^k&ZQntz#%R>@RRn zjguQ*85Dx>~-H(i_Z6ouL?C^%nKYOS#lL)N9>l66QVP zjZVlI3GTE`ZGNF9*v<+h_716{pwG|H!b8A?ib>_&W&?cu=5{F8n z+eV(3QXrC=D$34IiAiRXw|~f~aLIKje;IF^8k0h&?a#RUG^ms&keIZXRN3vm(g;hY zFtmGy{=}v>;pDoW4wFWgN`|4ON2JwdaKC)|3-_l<mO{=RbT7sV7ZMutQb!xVOQfh<*SpIeI~i|9P%#i*>4iYn?fnm@{=O5%#?a>x^E zDMWD{&wfBHjVj^PPR4YVVY{T>wp3ZROX)OZG*>x9X&t~{m1nz*&e%UFMD}$BTv=TW zf2l@AOgWuQpe%=qD6gAD20wK4V@kBj=nBrS++S-M42Q4i6X_v0XPySQy1Jo*RYSJx=`zTfz7e9nj^yqYZ;Wf8 z%Mk1v#LM6&$cE0(-QO)`odj$nU5sE3zhHW@}KP@rMJppADupGmx6}p9|L@rs=N6dUJLUmDjDh2K4xV|A?JliWOq+6 zSt9`xp%b@nynCT~XeAG&(!+*|)bU(0%Bl~hr~Va62OHbHbO;_C`(b+PCgH(RRzk(~ z(dCx&-vC5k6V38b12O$PM%F~Y^w%|U?(hdA2k1gFwV4`%8fY?bJSj;FBh{@c>p+@VxgVlJp$LJt-uh9h5Sm)Okf?}-0MC>?k%fa~%cD!@4u*2H7 zzmw1toQANqOu2@es4FF3yi7s-qq7cS7}iwOBrg@LrlBS~`y(X8*G_*ra*D2Mh@9eP zV5WLvx@4|3)HF{_88aN9W+A70V#<&<%nVbmKs5(3(&B{laQGC zm^nHj+0P3Qb9E~y`=((bZk~?Dm%fWI^G&n>wHUELx3miA=*Rb9ORx)d5oBcr8OerP z=}p%B5&MW{qitAhiLp)&!Aj_(se+6C_w| zLaf)vo|222eYV<+-k@WKOX6EF8;y~R_EyX$Z5SWA4YS#)fV>dxV)trq$8K@*Wa+mZ zh^NB>94N(RVdx-Ps^IfXf{QxqegzsNjn z*b^q4T=>plPU`x}1C6YDiaMpM;(j%F4tLt*%9&Yu(2!?50U0`c0e99D&_0J>M4r>x zhR9yKggI}F6y!4If=Si5SJYh;qR}mBd%n(Ux`Vt z8CzG~|L(ywA2$)#-2aYLx7fPwE<~h^mY1~(FgM(-VH=magSzQ1L!?iDy36)0cbc*- z-;g41>o`F^a#g&Kx}(F$Z|?!(t_~v~KFXS=sC(}EM*5(n%M5p4$CESQW5fd^g4Gkm zLmex^?mqt)_efVGLM~EIF^`>S^w>Q^JaK~9y>rho|2hddpCiA(J#|VcPVS{&qMo@A zqexrHxNr3e`&=i-#GbD)FPv7An!I8ArQQxxZ`pq3#ISFk-(g-mWt341>OJC(lY2;) za2ZGPAL^|RBUhgfh<7@SoOb=xN7Q>Kr;u>KppAE{4o}@g4ESeUM8y`9fQ6I({I(nvCUe{fYUeOB>Ef=NIa` z6DXVy<$hy+xJ^&#(3Hv5a)|lqj%FJfjfVQ=uA~gTRnge~?F6AWM5*YgC^_ujF?)0_ zx0NxFQFGYcN^U7D5a6PD94FD3xac|T9x@5%Qn3&*a{L`ry48Ay`1;Gd+p$r9XeHms z_^CLkm`+k5D4Ays7t2YC&ko`tV&|}X=z5B9srZ;UIqY7Ko-#{y0=DDkuzQ7MTYd^u zC?PhU&QH28rAG`EU+3rUy-bWtppy@gA6pVkLZ>BU#WK0ul!?}miE@PJ47a~T@%Qaf zSng+h+soJEvL1o_v#m-l%xOipVpxAzF|AluY%7ix*NSJww-Q(htwdI0D~XlVN@gXu zQdlXiR90#$jg{8=(@JNhxBjv+SQ)KMR%R=UmDS2-Ww&xzIjvk)ZYz(K*UD$*w+dJV ztwL5|tB6(9DrOb8N?0YWQdViJj8)buXO*`qSQV{GR%NS-Rn@9yRkvzbHLY4!ZL5w| z*Q#gLw;EUttwvU3tBKXrYGyUJT3CNuEv;5oYpadb)@o<9w>nrpR!6Io~3bJIX5i8W{W+}^Mg<0WNcPql`VMSU!tzK4dtB=*!>Sy)023P~FLDpbv zh&9w2W(~JSSR<`b)@W;tHP#wujkhLP6Rm%&N!Da*iZ#`mW=*$dSTn6z)@*BzHP@PF z&9@d<3#~=gVrz-D)LLdOw^mpytyR`)YmK$mT4$}dHdq_2P1a^>i?!9-W^K22SUas< z)^2N$wb$Bb?Y9nC2dzWaVe5!>)H-Gzw@z3mty9)%>x^~QI%l1?E?5_>OV(xUignex zW?i>#SU0U()@|#Kb=SIQ-M1cC53NVmW9y0aul3Y=W<9rFSTC(t)@$pH_11c4y|@0e zK3E^EPu6Gai}ls|W_`DQSU;^_*6*%SWF?KRQ9(2i9mD{CfS4c_hz;U^xF8;g4-$Zc zAQ4Cml7OTj8AuLNfRrE=NDb0}wBS#W4x|Tvfeauc$OJNjEFde$2C{=3AScKLa)Ue| zFUSY-g94x+CEGP%cg9@M`s01p5DxfN;2C9P^peCpV zYJ)nUE~p3Ug9e}>XapLACZH*32AYEwfQxwKo|%I-9ZHC0U|+9&@Ag9TtA zSOgY>C15F729|>rU?o@uR)aNQEm#NEgAHIK*aS9%Enq9y2DXD8U?6a~dV zaZm!31f@V}PzIC*zj41|O3AOiFNk)S8&1$u)%pfBhL`hx*rAQ%J&gCSrj7zT!e5nv=31xAB0U@RC1 z#)Ao9BKQYP0+YcMFcnM#)4>cd6U+j$!5lCb%meem0CuoNr<%fSk;608EN z!5Xj@tOM)82Cxxq0-M1WuoY|r+rbX76YK)J!5**|>;wD30dNo;0*Ap7a1`>QlK;_1ImJOpggDm zDuPO&GN=Nof@+{Tr~zt%TA((l1L}f$pgw2-8iGckF=zssf@YvOXaRVR?Aj8v0K~zgo18B0T&1Z;h;N+06jn?=m~m( z-k=ZY3;KcnU;r2h27$p~2p9^6f#F~T7zsv!(O?W13&w%*U;>y3{sEJ~WH1Fx1=GNE zFayj4v%qXH2h0WYz3-OumkJ_yTER+2kZs=zT-1;@Z~Z~~kJr@(1&2Al=wz1Tm+ZE zWpD*t1=qlJa0A=~x4><12iyhszDo3Xl?{0;xe7kQV$2(t-5gFOUIb1eri)kOgD~*+6!X1LOp` zKyHu+QwIyf;T7x#AEocYYgATw4bOfD% 
zFYp7MK^I^Ff6x^KfItugfJxGE)WL7L3a=VdVomK6Z8VTK_Ac;^aK6D05A{? z0)xR2Fcb^}!@&qJ5{v?)!5A z5CTF$H=uwEgn@9-9Yla0AQJQhy+Ci!2lNH~Kz}d*31;fB_FanGOqrhk| z28;#cz<4kLOa%XcNnkRV0;Ym#U^i_zr%6pWqkx4Wh{FzOGS0 zG!Pxc0Dpj(AQp%X;()jy9*7STfP^3sNDPvIq#zkc4pM-WAQear(txz!Pmm6z2Y-PK zAS1{GGJ`B2E64`2gB&0y$OUqPJRmQ~2l9ggpdcs&3WFk`C@2PsgA$-5C3_A7VrmMK>!E@K_D1}fKbp4DBuENARKfD5ugW% z1U*47&>QpteL+9a9}EBk!5}ag3;{#IFfbg903*RDFdB>jW5GBu9!vle!9QRUm<*2AS=770i9+(dnfQ4WYSPYhcrC=FY4pxAbU=>&m)_}EO9as-GfQ?`i z*bKIStzaA24t9W@U>Dd8_JF-$AJ`8LfP>%=I1G+}qu>}g4o-lR;1oCw&VaMv95@dy zfQ#S~xD2j?F24dQ^fARdSh z5`cst5l9S@fTSQ9NDfkflpqyI4bp(L;7^baqz8Y23?L)O1TupxAS=iQvV$BTC&&eI zgFGNF$OrO+0-zu$1PX&9peQH?ih~lMBq#++gEF8jCas)HJ! zCa48!gF2uts0ZqU2B0Bm1R8@Tpebkunu8XA@hx3jf>xk4Xam}UcA!1z0DM44&su{1Hm9L z7z_bJ!7wl!i~u9SC@>m~0b{{9Fdj?*6Tv@V5||98fT>^_m=0!unP3)}4d#HkU>=wc z7J!9d5m*eCfTds=SPoWzm0%TE4c36QU>#TwHh_&_6W9#4fURH~*ba7ponRN(4fcS& zU?12I4uFH;5I78ufTQ3TI1WyLli(CM4bFhG;2by)E(oM2?jnC*0++!Ra1~qw*TD^N z6Wju~!5wfH+ynQ)1Mm<$0*}EH@Gp1@o`L7!1$YTwf!E*-cnjWv_uxP90el3Xz-RCU zdREYgArgP7zIXy zF<>kh2gZX5U?TVjOaha^6fhM`1Jl6_FcZuIv%wrN7t90m!2+-lECP$c60j631Ixh* zuoA2StHBzu7OVs7!3MAqYyz9X7O)j;1KYt4uoLV8yTKl?7wiN3!2xg(90G^I5pWb7 z1INJ$a1xvXr@t??1J}U~a1-1Dx4|877u*B)!2|FRJOYow z6Ywv13Z8-I;01UIUV+!(4R{OQf%o7)@Bw@TpTKAE1$+hHz<2Ni`~<(iZxAJpAS#Fk zqJtRV4-gZ?0Do3Xl?{0;xe7kQV$2(t-5gFOUIb z1eri)kOgD~*+6!X1LOp`KyHu+;{hJz7cBp3xogE3$%7zf6K31A}l2TTH!!4xnR zOas%w3@{VS0<*y!Fc-`N^T7hJ5G(?V!4j|(ECb8I3a}Ea0;|CquokQX>%j)F5o`jR z!4|L;Yy;cD4zLsK0=vN;uovtD`@sQl5F7%B!4Ys290SL}32+je0;jjF6ail3=9V&z(_C(j0R)CSTGKZ2NS?V@DG>-CW9$pDwqbQgBf5Zm<48oIbbfB2j+tX zU?Erp7K0^VDOd)UgB4&USOr#tHDE1R2iAiPU?bQBHiIo-E7%6MgB@Te*adcjJzy`` z2lj&l;2<~z4ud1$C^!a=gA?E+I0a6FGvF*Z2hM{F;3BvLE`uxJD!2x&gB#!`xCL&5 zJK!$32kwIh;30Sf9)l;~U+@$>1JA(=@DjWNufZGe7Q6%R!GGWb_y|6M&)^IA3ci8w z;0O2#eu3X0N<2YS5Di2JF~A=nCWr-MgE$~AhzH_>1RxA_zh1IP$6fy^Ka$O^K7>>vlo337qlAP>k3@`3!I04N9wfx@5&C<=;! z;-Ca52}*&|pbRJr%7OBr0;mWofy$r?s0ylq>YxUw32K4bpbn@D>Vf*80cZ#sfyST- zXbPHv=AZ>&{-v%hK`YQ2v;l2FJJ23<06w52=mdO$ALtCa01Nnot{?yef*=qKLO>|! 
z1{83CFc1#9g9y+AM1r267w8T8fWDv~=nn>ffnX3A42FQAU>Fz}_JRH205}K^fy3YkI0}w| z|uxC*X;>)-~s32uSg;10M8{;#EbS^@)WmjD{u zwr!ge+sVYXZ6_1kwr$(CZQK5{PMw=oUHu4M_3eEZ_i!H%@DPvi7*FsN&+r^C@Di`^ z8gK9x@9-WU@DZQz8DH=f-|!tj@DsoA8-EZW%wGZ`5CS6zf+84#BLqSs6hb2m!Xg~P zBLX5K5+WlCq9PiiBL-q37Gfg~;vyd6BLNa35fUQ_k|G(BBLz|-6;dM&(jpzwBLgxb z6EY(UvLYL@BL{LK7jh#H@**GdqW}ux9~44i6hToGLvfTqNt8lqltEdPLwQs{MN~p% zR6$i#gNKJ(R7VZeL@m@t9n?iV)JFp}L?bjt6EsCLG)D`xL@TsL8?;3`v_}VYL??7c z7j#88bVm>LL@)G4AM`~(^v3`U#2^gDe;9(H7>3~(fsq)6(HMiV7>DtgfQgud$(Vwv zn1<Q~(IEVANfQz_<%eaE8xQ6Svft$F6 z+qi?fxQF|AfQNX5$9RILc!uYAftPrN*LZ`sc!&4+fRFfu&-j9`_=fNJfuHz=-}r+7 zVgC{kfe;u$5EQ`>93c=Ap%5Bj5EkJO9uW``kq{YC5Eao79Wf9Su@D<^5Et$k7>c6=N}?1>qYTQT9Ll2tDxwl9qYA2`8l3gFsE!(_iCU6P zCTNOgXpRXpau)h)(E?F6fGG=#C!fiC*Z9KIn^n=#K#yh(Q>P|1bnY zF$}{o0wXaBqcH|!F%IJ~0TVF^lQ9KTF%8o(12ZuTvoQyAF%R>x01L4Qi?IYtu?)+x z0xPi!tFZ=au@3980UNOio3RC3u?^d?13R$`yRip*u@C!k00(ghhj9c)aSX?C0w-|_ zr*Q^naSrEk0T*!zmvIGGaShjT12=ICw{Zt|aS!+L01xp9kMRUg@eI%L0x$6juki+N z@ec3t0Uz-RpYa7>@eSYc13&Q#zwrkF!u=&60wFMhASi+%I6@#KLLoH5AS}WmJR%?> zA|W!OAS$9EI$|IuVj(u-ATHt|J`x}y5+N~?ASsd|IZ_}cQXw_cAT81%Ju)C8G9fdv zAS<#VJ8~c=av?YJATRPEKMJ5A{y`xWMiCT6F%(A$ltd|%Mj4bvIh02QR753IMio>= zHTV~`MRn9bP1Hhd)InX;Lwz(rLo`BTG(l4|Lvyr1OSD33v_V_6Lwj^UM|47GbU{~i zLwEE*PxL}>^g&zL)i*Xo_37CjUn2afyifNdR z8JLM#n2kA@i+Pxj1z3nhSd1lDie*@i63?3if{OiANYx1_>DgZ5dJR# z5eR`11VIrD!4U!>5elIZ24N8n;Sm855ebnI1yK5%~$kqMcR1zC{|*^vV|kqfzz2YHbX`B4A`@ec~2Fp8ik zilI14pd?D6G|HeX%Aq_epdu=vGOC~|s^MQ$M-9|OE!0LG)I~kiM*}oOBQ!=6G(|Hs zM+>w>E3`%%v_(6#M+bC7Cv-*^bVWCGM-TKwFZ4zq^hH1P#{dk(APmNT7=ob~hT#~2 zkr;*17=y7Ghw+$ziI{}Rn1ZR8hUu7rnV5yyn1i{Phxvc`yTmP!7h(|>V+odG8J1%O zR$>)aV-40~9oAz5HewStV+*!o8@6Kyc48NHV-NOXANJz_4&o3F;|Px87>?rvPT~|! z;|$K?9M0ncF5(g{;|i|g8m{98ZsHbh;|}iP9`54-9^w%m;|ZSP8J^<>Ug8yA;|<>8 z9p2*uKH?KT;|spx8@}TQe&QE?;|~Hv_)96(G1Pe0xi)BtPU@g{RJvLw?HeoZiU@Nv^J9c0vc40U6 zU@!JzKMvp^4&gA4;3$saI8NXsPT@4p;4IGJJTBlOF5xn+;3}@+I&R=5Zs9iW;4bdr zJ|5s99^o;b;3=NrIbPr;Ug0&~;4R+aJwD(gKH)RI;48l2JAU9Ne&IL%AV9>w1VkVN zMi2x=Fa$>kghVKWMi_)eID|(8L_{P+MifLvG(<-X#6&E_MjXUNJj6!=Bt#-4MiL}N zG9*U|q(myDMjE6=I;2MiWJD%pMiyj6He^Q*St$ z60Oi0ZO|6&&>kJo5uMN(UCcO{6TQ$Ieb5*E&>sUZ5Q8un|6vG*Vi<;F1V&;M zMq>=dVjRX}0w!V-CSwYwVj8An24-RwW@8TKVjkvW0TyBr7GnvPVi}fW1y*7eR$~p; zVjb3F12$q4He(C6VjH$&2X2Y%uge&Y`UMEXlW1VUg0K~MxkaD+feghFV9L0E)Cctk)%L_%alK~zLT zbi_bR#6oPuL0rT`d?Y|ZBtl{&K~f|`a-={?q(W+>L0Y6kdSpOGWI|?SK~`i#cH}@# zaA|_!nreG?jVLE1DCT3wa=3p-7 zVLldMAr@gVmS8ECVL4V{C01cI)?h8xVLdirBQ{|(wqPr^VLNtUCw5^s_FymeVLuMw zAP(U$j^HSc;W$pMCT`(2?%*!&;XWSVAs*o| zp5Q5-;W=L5C0^k*-rz0X;XOX!BR=6XzThjq;X8idCw}2K{vbf)zXU`e1V#`9MKAMKUBu3Zz6T zq(&N~MLMKM24qAgWJVTbMK)wd4&+2Gf~u&7e^DJZP!qLK8+A|@^-v!T&=8H#7){U=&Cnbz&=RfC8g0-P z?a&?_&=H-`8C}p7-OwF9&=bAT8-36h{m>r+Fc5<<82@1ihGH0o|K;xzH$onXQ5cOe z7>jWjj|rHFNtlc&n2Kqbjv1JVS(uGEn2ULsj|EtWMOcg_Sc+v>julvmRalKRSc`R7 zj}6#}P1uYr*otk~jvd&EUD%C1*o%GGj{`V}LpY2hIErI9juSYEQ#g$?IE!;Qj|;en zOSp_HxQc7IjvKg%TeyuoxQlzZj|X^&M|g}Uc#3Cuju&`|S9py#c#C&4 z_=<1%jvx4mU-*qb2oU8j0TBp+5d=XI48aisArT6p5e8uq4&f025fKTI5d~2Z4bc$; zF%b)~5eIP*5Al%z36ThikpxMR49SrKDUk}Pkp^jz4(X8r8IcK@kp)?i4cULL@)G4AM`~( z^v3`U#2^gDe;9(H7>3~(fsq)6(HMiV7>DtgfQgud$(Vwvn1<Q~(IEVANfQz_<%eaE8xQ6Svft$F6+qi?fxQF|AfQNX5$9RIL zc!uYAftPrN*LZ`sc!&4+fRFfu&-j9`_=fNJfuHz=-}r+7QU4MUfe;u$5EQ`>93c=A zp%5Bj5EkJO9uW``kq{YC5Eao79Wf9Su@D<^5Et$k7>c6=N}?1>qYTQT z9Ll2tDxwl9qYA2`8vaFf)Id$tLT%JRUDQK;G(bZ%LSr;RQ#3*GbiBTAhF&K++7>@~< zh)I}?DVU0Bn2s5kiCLJ9Ihc!in2!Zmh(%b8C0L4OSdJA~iB(vQHCT&vSdR_Zh)vjx zE!c`}*p408iCx%@J=lwV*pCA^h(kDxBRGmTh(~ygCwPiyc#ao%iC1`yH+YM8c#jYGh)?*8FZhaY_>Ld= ziC_4QKL`-*F98t#Th1FPtwOEJs*no}Lgw5E3t=NX`*nyqch27YLz1WBSIDmsVgu^(3qd11+ 
zIDwNmh0{2Lvp9$IxPXhegv+>stGI^ixPhCvh1$q8N&!1WKY5N}~+Qq8!Sj0xF^s zDx(Ujq8k21b<{vj)Ix34L0!~CeKbHrG(uxEK~pqCbF@H9v_fmNL0hy#dvy5A-zBc2 z+zFl01zph%-O&R*(F?uN2Yt~G{V@OoF$jb4ABJEkhG95HU?fIiG{#^o#$h}rU?L`A zGNxcEreQi}U?yf^Hs)Y1=3zb-U?CP^F_vH{mSH(oU?o;zHP&D))?qz1U?VnRGqzwW zwqZMVU?+BAH}+sJ_F+E`;2;j+Fpl6Tj^Q{?;3Q7rG|u2G&fz>R;36*JGOpk%uHiav z;3jV2Htygq?%_Tj;2|F2F`nQlp5ZxO;3Zz+HQwMY-r+qy;3GcaGrr&}zTrE5;3t0J zH~t_%jK2g#AOuDb1Vu0eM+k&OD1=5Bghe=nM+8JfBt%9OL`5`2M-0S7EW}0}#6>*B zM*<{7A|yrCS*nyWJNY)M-JpfF62fY4JD1)*nhw`X^il~IjsDi4fhJR5VHBb|^P#bkn7xhpd4bTvc z&=^h76wS~aEzlCJ&>C&f7VXd;9ncY-&>3CO72VJsJMZw7yZy5127PSFc|+~ z2!>)9hGPUqViZPW48~#{#$y5|ViG1}3Z`Njreg+XVism&4(4JW=3@aCVi6W&36^3R zmSY80Vii_n4c1~E)?))UViPuF3$|h#wqpl&Vi$H}5B6do_TvB!;t&qw2#(?yj^hMQ z;uKEf49?;l&f@|u;u0?73a;WBuHy!7;udb>4({R}?&AR-;t?L>37+B^p5p~x;uT)w z4c_7%-s1y4;uAjO3%=qTzT*de;un774+6yeOF#rdU<5%>1VeCyKuCl_XoNvnghO~l zKtx1BWJEz!L_>7MKup9!Y{Wra#6x@}Ktd!!VkALQBtvqfKuV-SYNSD0q(gdSKt^Oj zW@JHDWJ7l3Ku+XBZsb8;xV zVK??*FZN+S4&WdT;V_QiD30McPT(X?;WWWJeCfQqPu%BX^>sD^)09W_uBwNM*%P#5)39}UnDjnEiP&=k$k z94*iitvC9|JHDgD@EXVF-p|7=~j6 zMq(63V+_V(9L8e;CSnpMV+y8X8m40gW?~j*V-DtG9_C{K7Ge<=V+odG8J1%OR$>)a zV-40~9oAz5HewStV+*!o8@6Kyc48NHV-NOXANJz_4&o3F;|Px87>?rvPT~|!;|$K? z9M0ncF5(g{;|i|g8m{98ZsHbh;|}iP9`54-9^w%m;|ZSP8J^<>Ug8yA;|<>89p2*u zKH?KT;|spx8@}TQe&QE?;|~JF{!2gvLSO_zPy|D8gg{7yLTH3RScF4(L_kDDLS#fi zR768`#6V2MLTtoAT*O0sBtSwWLSiIAQY1riq(DlfLTaQzTBJjIWI#q_LS|$^R%AnV z1WMLV=d2XsUybVe6+MK^Ru z5A;MY^hO`_ML+b%01U(+490&Lf}t3O;TVCD7=_UogRvNg@tA;#n1sogf~lB>>6n3; zn1$JxgSnW8`B;F3ScJt`f~8o7$riNxP{xegS)tg z`*?tdc!bAzf~R5v{7kP(@X8Cj4O*^nJMkQ2F(8+niy`H&w4P!Rv15DKFRilP{b zqXbH#6iTBE%Ay>~qXH_T5-Ot#s-hbHMRn9bP1Hhd)InX;Lwz(rLo`BTG(l4|Lvyr1 zOSD33v_V_6Lwj^UM|47GbU{~iLwEE*PxL}>^g&zL)i*Xo_37CjUn2afyifNdR8JLM#n2kA@i+Pxj1z3nhSd1lDie*@i63?3if{OiANYx1_>DgZ5ce+u5eR`11VIrD!4U!>5elIZ24N8n;Sm855ebnI1yK5%~$kqMcR1zC{|*^vV| zkqfzz2YHbX`B4A`@ec~2Fp8ikilI14pd?D6G|HeX%Aq_epdu=vGOC~|s^MQ$M-9|O zE!0LG)I~kiM*}oOBQ!=6G(|HsM+>w>E3`%%v_(6#M+bC7Cv-*^bVWCGM-TKwFZ4zq z^hH1P#{dk(APmNT7=ob~hT#~2kr;*17=y7Ghw+$ziI{}Rn1ZR8hUu7rnV5yyn1i{P zhxu55g;<2eSc0WkhUHj+l~{$;hy6H! zgE)l4ID(@%hT}MalQ@ObID@k|hx53Ai@1c#xPq&=hU>V2o4AGBxP!a6hx>Sdhj@g? 
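The binary file added above presumably carries the demo COCO vocabulary that the captioning models below consume as output_word2idx / output_idx2word, together with the special-token indices. A minimal loading sketch, assuming the pickle stores a word-to-index dict and an index-to-word list; the key and token names are illustrative assumptions, not confirmed by this patch:

import pickle

with open('demo_coco_tokens.pickle', 'rb') as f:
    coco_tokens = pickle.load(f)

# Key names below are assumptions for illustration only.
word2idx = coco_tokens['word2idx_dict']   # e.g. {'a': 4, 'dog': 512, ...}
idx2word = coco_tokens['idx2word_list']   # e.g. ['PAD', 'SOS', 'EOS', ...]
sos_idx = word2idx['SOS']
eos_idx = word2idx['EOS']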
zc!H;RhUa*Jmw1KOc!Rfihxhn^kNAYo_=2zahVS@+pZJB}_=5oP{t^&@5Ewxa6u}T2 zArKOw5E@|+7U2*c5fBlP5E)Sr710nKF%T265F2q27x54u36KzpkQhmj6v>brDUcGW zkQ!-_7U_^48ITc~kQrH!71@v-Igk^%kQ;fB7x|DM1yB(Gpb!e92#TT@ilYQdq7+J_ z49cP$%A*1*q7o{j3aX+S{zY}vKuy#_ZPYCfiG(&T=KufejYqUXI zv_pGzKu2^!XLLbVbVGOaKu`2SZ}dT5^h19Pz(5SbVEl(67>Z#Sju9A%Q5cOe7>jWj zj|rHFNtlc&n2Kqbjv1JVS(uGEn2ULsj|EtWMOcg_Sc+v>julvmRalKRSc`R7j}6#} zP1uYr*otk~jvd&EUD%C1*o%GGj{`V}LpY2hIErI9juSYEQ#g$?IE!;Qj|;enOSp_H zxQc7IjvKg%TeyuoxQlzZj|X^&M|g}Uc#3Cuju&`|S9py#c#C&4_=<1% zjvx4mU-*qb2oV1-0TBp+5d=XI48aisArT6p5e8uq4&f025fKTI5d~2Z4bc$;F%b)~ z5eIP*5Al%z36ThikpxMR49SrKDUk}Pkp^jz4(X8r8IcK@kp)?i4cUSt$60Oi0ZO|6&&>kJo5uMN(UCcO{6TQ$Ieb5*E z&>sUZ5Q8un|6vG*Vi<;F1V&;MMq>=dVjRX}0w!V-CSwYwVj8An24-RwW@8TKVjkvW z0TyBr7GnvPVi}fW1y*7eR$~p;Vjb3F12$q4He(C6VjH$&2X2Y%uge&Y`UB=}1}1VUg0K~MxkaD+fe zghFV9L0E)Cctk)%L_%alK~zLTbi_bR#6oPuL0rT`d?Y|ZBtl{&K~f|`a-={?q(W+> zL0Y6kdSpOGWI|?SK~`i#cH}@#a zA|_!nreG?jVLE1DCT3wa=3p-7VLldMAr@gVmS8ECVL4V{C01cI)?h8xVLdirBQ{|( zwqPr^VLNtUCw5^s_FymeVLuMwAP(U$j^HSc;W$pMCT`(2?%*!&;XWSVAs*o|p5Q5-;W=L5C0^k*-rz0X;XOX!BR=6XzThjq;X8id zCw}2K{vbfYzXU`e1V#`9MKAMKUBu3Zz6Tq(&N~MLMKM24qAgWJVTbMK)wd4&+2Gf~u&7e^DJZP!qLK8+A|@^-v!T z&=8H#7){U=&Cnbz&=RfC8g0-P?a&?_&=H-`8C}p7-OwF9&=bAT8-36h{m>r+Fc5<< z82@1ihGH0oV+2NG6h>nV#$p`CV*(~(5+-8`reYeVV+Lko7G`4(=3*Y^V*wUo5f)o_0a$g(Fl#v1WnNl&Cvoa(F(2625r#}?a=`p(FvW= z1zph%-O&R*(F?uN2Yt~G{V@OoF$jb4ABJEkhG95HU?fIiG{#^o#$h}rU?L`AGNxcE zreQi}U?yf^Hs)Y1=3zb-U?CP^F_vH{mSH(oU?o;zHP&D))?qz1U?VnRGqzwWwqZMV zU?+BAH}+sJ_F+E`;2;j+Fpl6Tj^Q{?;3Q7rG|u2G&fz>R;36*JGOpk%uHiav;3jV2 zHtygq?%_Tj;2|F2F`nQlp5ZxO;3Zz+HQwMY-r+qy;3GcaGrr&}zTrE5;3t0JH~t_% z;=cq$AOuDb1Vu0eM+k&OD1=5Bghe=nM+8JfBt%9OL`5`2M-0S7EW}0}#6>*BM*<{7 zA|yr$k7>c6=N}?1>qYTQT9Ll2tDxwl9qYA2`8vaFf)Id$tLT%JRUDQK;G(bZ% zLSr;RQ#3*GbiBTAhF&K++7>@~Th(~ygCwPiyc#ao%iC1`y zH+YM8c#jYGh)?*8FZhaY_>Ld=iC_4QKM0WIF98t#Th1FPtwOEJs*no}Lgw5E3t=NX`*nyqc zh27YLz1WBSIDmsVgu^(3qd11+IDwNmh0{2Lvp9$IxPXhegv+>stGI^ixPhCvh1$ zq8N&!1WKY5N}~+Qq8!Sj0xF^sDx(Ujq8k21b<{vj)Ix34L0!~CeKbHrG(uxEK~pqC zbF@H9v_fmNL0hy#dvriYbV6rzL05D`cl1C{^g?g+L0|Mke+VHl1P z7>Q9BjWHODaTt#Yn21T3j47CkX_$@~n2A}KjX9W$d6pfzIEhm@jWall zb2yI+xQI)*j4QZ`Yq*XZxQSc1jXSuDd$^AWc!)=Mj3;=CXLybmc!^hdjW>9UcX*Ev z_=r#Vj4$|#Z}^TM_=#WmjXwyG>@NWk2!RmX^{@;kpUTz37L@vS&1WMLV=d2XsUybVe6+MK^Ru5A;MY z^hO`_ML+b%01U(+490&Lf}t3O;TVCD7=_UogRvNg@tA;#n1sogf~lB>>6n3;n1$Jx zgSnW8`B;F3ScJt`f~8o7$riNxP{xegS)tg`*?td zc!bAzf~R 1: + nn.init.xavier_uniform_(p) + + self.trained_steps = 0 + self.rank = rank + + self.check_required_attributes() + + def forward_enc(self, enc_input, enc_input_num_pads): + + assert (enc_input_num_pads is None or enc_input_num_pads == ([0] * enc_input.size(0))), "End to End case have no padding" + x = self.swin_transf(enc_input) + # --------------- Normale parte di Captioning --------------------------------- + enc_input = self.input_embedder_dropout(self.input_linear(x)) + x = enc_input + enc_input_num_pads = [0] * enc_input.size(0) + + max_num_enc = sum(self.num_exp_enc_list) + pos_x = torch.arange(max_num_enc).unsqueeze(0).expand(enc_input.size(0), max_num_enc).to(self.rank) + pad_mask = create_pad_mask(mask_size=(enc_input.size(0), max_num_enc, enc_input.size(1)), + pad_along_row_input=[0] * enc_input.size(0), + pad_along_column_input=enc_input_num_pads, + rank=self.rank) + + x_list = [] + for i in range(self.N_enc): + x = self.encoders[i](x=x, n_indexes=pos_x, mask=pad_mask) + x_list.append(x) + x_list = torch.cat(x_list, dim=-1) + x = x + self.out_enc_dropout(self.enc_reduce_group(x_list)) + x = self.enc_reduce_norm(x) + + return x + + def 
forward_dec(self, cross_input, enc_input_num_pads, dec_input, dec_input_num_pads, apply_log_softmax=False): + assert (enc_input_num_pads is None or enc_input_num_pads == ([0] * cross_input.size(0))), "enc_input_num_pads should be no None" + + enc_input_num_pads = [0] * dec_input.size(0) + no_peak_and_pad_mask = create_no_peak_and_pad_mask( + mask_size=(dec_input.size(0), dec_input.size(1), dec_input.size(1)), + num_pads=dec_input_num_pads, + rank=self.rank) + pad_mask = create_pad_mask(mask_size=(dec_input.size(0), dec_input.size(1), cross_input.size(1)), + pad_along_row_input=dec_input_num_pads, + pad_along_column_input=enc_input_num_pads, + rank=self.rank) + + y = self.out_embedder(dec_input) + pos_x = torch.arange(self.num_exp_dec).unsqueeze(0).expand(dec_input.size(0), self.num_exp_dec).to(self.rank) + pos_y = torch.arange(dec_input.size(1)).unsqueeze(0).expand(dec_input.size(0), dec_input.size(1)).to(self.rank) + y = y + self.pos_encoder(pos_y) + y_list = [] + for i in range(self.N_dec): + y = self.decoders[i](x=y, + n_indexes=pos_x, + cross_connection_x=cross_input, + input_attention_mask=no_peak_and_pad_mask, + cross_attention_mask=pad_mask) + y_list.append(y) + y_list = torch.cat(y_list, dim=-1) + y = y + self.out_dec_dropout(self.dec_reduce_group(y_list)) + y = self.dec_reduce_norm(y) + + y = self.vocab_linear(y) + + if apply_log_softmax: + y = self.log_softmax(y) + + return y + + + def get_batch_multiple_sampled_prediction(self, enc_input, enc_input_num_pads, num_outputs, + sos_idx, eos_idx, max_seq_len): + + bs = enc_input.size(0) + x = self.forward_enc(enc_input=enc_input, enc_input_num_pads=enc_input_num_pads) + enc_seq_len = x.size(1) + x = x.unsqueeze(1).expand(-1, num_outputs, -1, -1).reshape(bs * num_outputs, enc_seq_len, x.shape[-1]) + + upperbound_vector = torch.tensor([max_seq_len] * bs * num_outputs, dtype=torch.int).to(self.rank) + where_is_eos_vector = upperbound_vector.clone() + eos_vector = torch.tensor([eos_idx] * bs * num_outputs, dtype=torch.long).to(self.rank) + finished_flag_vector = torch.zeros(bs * num_outputs).type(torch.int) + + predicted_caption = torch.tensor([sos_idx] * (bs * num_outputs), dtype=torch.long).to(self.rank).unsqueeze(-1) + predicted_caption_prob = torch.zeros(bs * num_outputs).to(self.rank).unsqueeze(-1) + + dec_input_num_pads = [0]*(bs*num_outputs) + time_step = 0 + while (finished_flag_vector.sum() != bs * num_outputs) and time_step < max_seq_len: + dec_input = predicted_caption + log_probs = self.forward_dec(x, enc_input_num_pads, dec_input, dec_input_num_pads, apply_log_softmax=True) + + prob_dist = torch.distributions.Categorical(torch.exp(log_probs[:, time_step])) + sampled_word_indexes = prob_dist.sample() + + predicted_caption = torch.cat((predicted_caption, sampled_word_indexes.unsqueeze(-1)), dim=-1) + predicted_caption_prob = torch.cat((predicted_caption_prob, + log_probs[:, time_step].gather(index=sampled_word_indexes.unsqueeze(-1), dim=-1)), dim=-1) + time_step += 1 + + where_is_eos_vector = torch.min(where_is_eos_vector, + upperbound_vector.masked_fill(sampled_word_indexes == eos_vector, time_step)) + finished_flag_vector = torch.max(finished_flag_vector, + (sampled_word_indexes == eos_vector).type(torch.IntTensor)) + + res_predicted_caption = [] + for i in range(bs): + res_predicted_caption.append([]) + for j in range(num_outputs): + index = i*num_outputs + j + res_predicted_caption[i].append( + predicted_caption[index, :where_is_eos_vector[index].item()+1].tolist()) + + where_is_eos_vector = 
where_is_eos_vector.unsqueeze(-1).expand(-1, time_step+1) + arange_tensor = torch.arange(time_step+1).unsqueeze(0).expand(bs * num_outputs, -1).to(self.rank) + predicted_caption_prob.masked_fill_(arange_tensor > where_is_eos_vector, 0.0) + res_predicted_caption_prob = predicted_caption_prob.reshape(bs, num_outputs, -1) + + return res_predicted_caption, res_predicted_caption_prob diff --git a/models/ExpansionNet_v2.py b/models/ExpansionNet_v2.py new file mode 100644 index 0000000..0a9e9b0 --- /dev/null +++ b/models/ExpansionNet_v2.py @@ -0,0 +1,103 @@ +import torch +from models.layers import EmbeddingLayer, EncoderLayer, DecoderLayer +from utils.masking import create_pad_mask, create_no_peak_and_pad_mask +from models.captioning_model import CaptioningModel + +import torch.nn as nn + + +class ExpansionNet_v2(CaptioningModel): + def __init__(self, d_model, N_enc, N_dec, ff, num_heads, num_exp_enc_list, num_exp_dec, + output_word2idx, output_idx2word, max_seq_len, drop_args, img_feature_dim=2048, rank=0): + super().__init__() + self.output_word2idx = output_word2idx + self.output_idx2word = output_idx2word + self.max_seq_len = max_seq_len + + self.num_exp_dec = num_exp_dec + self.num_exp_enc_list = num_exp_enc_list + + self.N_enc = N_enc + self.N_dec = N_dec + self.d_model = d_model + + self.encoders = nn.ModuleList([EncoderLayer(d_model, ff, num_exp_enc_list, drop_args.enc) for _ in range(N_enc)]) + self.decoders = nn.ModuleList([DecoderLayer(d_model, num_heads, ff, num_exp_dec, drop_args.dec) for _ in range(N_dec)]) + + self.input_embedder_dropout = nn.Dropout(drop_args.enc_input) + self.input_linear = torch.nn.Linear(img_feature_dim, d_model) + self.vocab_linear = torch.nn.Linear(d_model, len(output_word2idx)) + self.log_softmax = nn.LogSoftmax(dim=-1) + + self.out_enc_dropout = nn.Dropout(drop_args.other) + self.out_dec_dropout = nn.Dropout(drop_args.other) + + self.out_embedder = EmbeddingLayer(len(output_word2idx), d_model, drop_args.dec_input) + self.pos_encoder = nn.Embedding(max_seq_len, d_model) + + self.enc_reduce_group = nn.Linear(d_model * self.N_enc, d_model) + self.enc_reduce_norm = nn.LayerNorm(d_model) + self.dec_reduce_group = nn.Linear(d_model * self.N_dec, d_model) + self.dec_reduce_norm = nn.LayerNorm(d_model) + + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + self.trained_steps = 0 + self.rank = rank + + def forward_enc(self, enc_input, enc_input_num_pads): + + x = self.input_embedder_dropout(self.input_linear(enc_input)) + + max_num_enc = sum(self.num_exp_enc_list) + pos_x = torch.arange(max_num_enc).unsqueeze(0).expand(enc_input.size(0), max_num_enc).to(self.rank) + pad_mask = create_pad_mask(mask_size=(enc_input.size(0), max_num_enc, enc_input.size(1)), + pad_along_row_input=[0] * enc_input.size(0), + pad_along_column_input=enc_input_num_pads, + rank=self.rank) + + x_list = [] + for i in range(self.N_enc): + x = self.encoders[i](x=x, n_indexes=pos_x, mask=pad_mask) + x_list.append(x) + x_list = torch.cat(x_list, dim=-1) + x = x + self.out_enc_dropout(self.enc_reduce_group(x_list)) + x = self.enc_reduce_norm(x) + return x + + def forward_dec(self, cross_input, enc_input_num_pads, dec_input, dec_input_num_pads, apply_log_softmax=False): + + no_peak_and_pad_mask = create_no_peak_and_pad_mask( + mask_size=(dec_input.size(0), dec_input.size(1), dec_input.size(1)), + num_pads=dec_input_num_pads, + rank=self.rank) + + pad_mask = create_pad_mask(mask_size=(dec_input.size(0), dec_input.size(1), cross_input.size(1)), + 
pad_along_row_input=dec_input_num_pads, + pad_along_column_input=enc_input_num_pads, + rank=self.rank) + + y = self.out_embedder(dec_input) + pos_x = torch.arange(self.num_exp_dec).unsqueeze(0).expand(dec_input.size(0), self.num_exp_dec).to(self.rank) + pos_y = torch.arange(dec_input.size(1)).unsqueeze(0).expand(dec_input.size(0), dec_input.size(1)).to(self.rank) + y = y + self.pos_encoder(pos_y) + y_list = [] + for i in range(self.N_dec): + y = self.decoders[i](x=y, + n_indexes=pos_x, + cross_connection_x=cross_input, + input_attention_mask=no_peak_and_pad_mask, + cross_attention_mask=pad_mask) + y_list.append(y) + y_list = torch.cat(y_list, dim=-1) + y = y + self.out_dec_dropout(self.dec_reduce_group(y_list)) + y = self.dec_reduce_norm(y) + + y = self.vocab_linear(y) + + if apply_log_softmax: + y = self.log_softmax(y) + + return y diff --git a/models/captioning_model.py b/models/captioning_model.py new file mode 100644 index 0000000..56345ac --- /dev/null +++ b/models/captioning_model.py @@ -0,0 +1,241 @@ + + +import torch +import torch.nn as nn + + +class CaptioningModel(nn.Module): + def __init__(self): + super(CaptioningModel, self).__init__() + # mandatory attributes + # rank: to enable multiprocessing + self.rank = None + + def check_required_attributes(self): + if self.rank is None: + raise NotImplementedError("Subclass must assign the rank integer according to the GPU group") + + def forward_enc(self, enc_input, enc_input_num_pads): + raise NotImplementedError + + def forward_dec(self, cross_input, enc_input_num_pads, dec_input, dec_input_num_pads, apply_log_softmax=False): + raise NotImplementedError + + def forward(self, enc_x, dec_x=None, + enc_x_num_pads=[0], dec_x_num_pads=[0], apply_log_softmax=False, + mode='forward', **kwargs): + if mode == 'forward': + x = self.forward_enc(enc_x, enc_x_num_pads) + y = self.forward_dec(x, enc_x_num_pads, dec_x, dec_x_num_pads, apply_log_softmax) + return y + else: + assert ('sos_idx' in kwargs.keys() or 'eos_idx' in kwargs.keys()), \ + 'sos and eos must be provided in case of batch sampling or beam search' + sos_idx = kwargs.get('sos_idx', -999) + eos_idx = kwargs.get('eos_idx', -999) + if mode == 'beam_search': + beam_size_arg = kwargs.get('beam_size', 5) + how_many_outputs_per_beam = kwargs.get('how_many_outputs', 1) + beam_max_seq_len = kwargs.get('beam_max_seq_len', 20) + sample_or_max = kwargs.get('sample_or_max', 'max') + out_classes, out_logprobs = self.beam_search( + enc_x, enc_x_num_pads, + beam_size=beam_size_arg, + sos_idx=sos_idx, + eos_idx=eos_idx, + how_many_outputs=how_many_outputs_per_beam, + max_seq_len=beam_max_seq_len, + sample_or_max=sample_or_max) + return out_classes, out_logprobs + if mode == 'sampling': + how_many_outputs = kwargs.get('how_many_outputs', 1) + sample_max_seq_len = kwargs.get('sample_max_seq_len', 20) + out_classes, out_logprobs = self.get_batch_multiple_sampled_prediction( + enc_x, enc_x_num_pads, num_outputs=how_many_outputs, + sos_idx=sos_idx, eos_idx=eos_idx, + max_seq_len=sample_max_seq_len) + return out_classes, out_logprobs + + def get_batch_multiple_sampled_prediction(self, enc_input, enc_input_num_pads, num_outputs, + sos_idx, eos_idx, max_seq_len): + bs, enc_seq_len, _ = enc_input.shape + + enc_input_num_pads = [enc_input_num_pads[i] for i in range(bs) for _ in range(num_outputs)] + + x = self.forward_enc(enc_input=enc_input, enc_input_num_pads=enc_input_num_pads) + x = x.unsqueeze(1).expand(-1, num_outputs, -1, -1).reshape(bs * num_outputs, enc_seq_len, x.shape[-1]) + + upperbound_vector = 
torch.tensor([max_seq_len] * bs * num_outputs, dtype=torch.int).to(self.rank) + where_is_eos_vector = upperbound_vector.clone() + eos_vector = torch.tensor([eos_idx] * bs * num_outputs, dtype=torch.long).to(self.rank) + finished_flag_vector = torch.zeros(bs * num_outputs).type(torch.int) + + predicted_caption = torch.tensor([sos_idx] * (bs * num_outputs), dtype=torch.long).to(self.rank).unsqueeze(-1) + predicted_caption_prob = torch.zeros(bs * num_outputs).to(self.rank).unsqueeze(-1) + + dec_input_num_pads = [0]*(bs*num_outputs) + time_step = 0 + while (finished_flag_vector.sum() != bs * num_outputs) and time_step < max_seq_len: + dec_input = predicted_caption + log_probs = self.forward_dec(x, enc_input_num_pads, dec_input, dec_input_num_pads, apply_log_softmax=True) + + prob_dist = torch.distributions.Categorical(torch.exp(log_probs[:, time_step])) + sampled_word_indexes = prob_dist.sample() + + predicted_caption = torch.cat((predicted_caption, sampled_word_indexes.unsqueeze(-1)), dim=-1) + predicted_caption_prob = torch.cat((predicted_caption_prob, + log_probs[:, time_step].gather(index=sampled_word_indexes.unsqueeze(-1), dim=-1)), dim=-1) + time_step += 1 + + where_is_eos_vector = torch.min(where_is_eos_vector, + upperbound_vector.masked_fill(sampled_word_indexes == eos_vector, time_step)) + finished_flag_vector = torch.max(finished_flag_vector, + (sampled_word_indexes == eos_vector).type(torch.IntTensor)) + + # remove the elements that come after the first eos from the sequence + res_predicted_caption = [] + for i in range(bs): + res_predicted_caption.append([]) + for j in range(num_outputs): + index = i*num_outputs + j + res_predicted_caption[i].append( + predicted_caption[index, :where_is_eos_vector[index].item()+1].tolist()) + + where_is_eos_vector = where_is_eos_vector.unsqueeze(-1).expand(-1, time_step+1) + arange_tensor = torch.arange(time_step+1).unsqueeze(0).expand(bs * num_outputs, -1).to(self.rank) + predicted_caption_prob.masked_fill_(arange_tensor > where_is_eos_vector, 0.0) + res_predicted_caption_prob = predicted_caption_prob.reshape(bs, num_outputs, -1) + + return res_predicted_caption, res_predicted_caption_prob + + def beam_search(self, enc_input, enc_input_num_pads, sos_idx, eos_idx, + beam_size=3, how_many_outputs=1, max_seq_len=20, sample_or_max='max',): + assert (how_many_outputs <= beam_size), "requested output per sequence must be lower than beam width" + assert (sample_or_max == 'max' or sample_or_max == 'sample'), "argument must be chosen between \'max\' and \'sample\'" + bs = enc_input.shape[0] + + cross_enc_output = self.forward_enc(enc_input, enc_input_num_pads) + + # init: ------------------------------------------------------------------ + init_dec_class = torch.tensor([sos_idx] * bs).unsqueeze(1).type(torch.long).to(self.rank) + init_dec_logprob = torch.tensor([0.0] * bs).unsqueeze(1).type(torch.float).to(self.rank) + log_probs = self.forward_dec(cross_input=cross_enc_output, enc_input_num_pads=enc_input_num_pads, + dec_input=init_dec_class, dec_input_num_pads=[0] * bs, + apply_log_softmax=True) + if sample_or_max == 'max': + _, topi = torch.topk(log_probs, k=beam_size, sorted=True) + else: # sample + topi = torch.exp(log_probs[:, 0, :]).multinomial(num_samples=beam_size, replacement=False) + topi = topi.unsqueeze(1) + + init_dec_class = init_dec_class.repeat(1, beam_size) + init_dec_class = init_dec_class.unsqueeze(-1) + top_beam_size_class = topi.transpose(-2, -1) + init_dec_class = torch.cat((init_dec_class, top_beam_size_class), dim=-1) + + 
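A minimal sketch (not part of the patch) of the beam-seeding step just above, with toy sizes; log_probs stands in for the output of the first forward_dec call, and all shapes are read off the surrounding code:

import torch

bs, vocab_size, beam_size, sos_idx = 2, 10, 3, 0
log_probs = torch.log_softmax(torch.randn(bs, 1, vocab_size), dim=-1)

# top-k over the vocabulary yields beam_size candidate first words per image
_, topi = torch.topk(log_probs, k=beam_size, sorted=True)            # (bs, 1, beam_size)

init_dec_class = torch.full((bs, 1), sos_idx, dtype=torch.long)      # (bs, 1)
init_dec_class = init_dec_class.repeat(1, beam_size).unsqueeze(-1)   # (bs, beam_size, 1)
init_dec_class = torch.cat((init_dec_class, topi.transpose(-2, -1)), dim=-1)
assert init_dec_class.shape == (bs, beam_size, 2)                    # each beam starts as [SOS, first_word]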
init_dec_logprob = init_dec_logprob.repeat(1, beam_size) + init_dec_logprob = init_dec_logprob.unsqueeze(-1) + top_beam_size_logprob = log_probs.gather(dim=-1, index=topi) + top_beam_size_logprob = top_beam_size_logprob.transpose(-2, -1) + init_dec_logprob = torch.cat((init_dec_logprob, top_beam_size_logprob), dim=-1) + + bs, enc_seq_len, d_model = cross_enc_output.shape + cross_enc_output = cross_enc_output.unsqueeze(1) + cross_enc_output = cross_enc_output.expand(-1, beam_size, -1, -1) + cross_enc_output = cross_enc_output.reshape(bs * beam_size, enc_seq_len, d_model).contiguous() + enc_input_num_pads = [enc_input_num_pads[i] for i in range(bs) for _ in range(beam_size)] + + # loop: ----------------------------------------------------------------- + loop_dec_classes = init_dec_class + loop_dec_logprobs = init_dec_logprob + loop_cumul_logprobs = loop_dec_logprobs.sum(dim=-1, keepdims=True) + + loop_num_elem_vector = torch.tensor([2] * (bs * beam_size)).to(self.rank) + + for time_step in range(2, max_seq_len): + loop_dec_classes = loop_dec_classes.reshape(bs * beam_size, time_step).contiguous() + + log_probs = self.forward_dec(cross_input=cross_enc_output, enc_input_num_pads=enc_input_num_pads, + dec_input=loop_dec_classes, + dec_input_num_pads=(time_step-loop_num_elem_vector).tolist(), + apply_log_softmax=True) + if sample_or_max == 'max': + _, topi = torch.topk(log_probs[:, time_step-1, :], k=beam_size, sorted=True) + else: # sample + topi = torch.exp(log_probs[:, time_step-1, :]).multinomial(num_samples=beam_size, + replacement=False) + + top_beam_size_word_classes = topi.reshape(bs, beam_size, beam_size) + + top_beam_size_word_logprobs = log_probs[:, time_step-1, :].gather(dim=-1, index=topi) + top_beam_size_word_logprobs = top_beam_size_word_logprobs.reshape(bs, beam_size, beam_size) + + # each sequence have now its best prediction, but some sequence may have already been terminated with EOS, + # in that case its candidates are simply ignored, and do not sum up in the "loop_dec_logprobs" their value + # are set to zero + there_is_eos_mask = (loop_dec_classes.view(bs, beam_size, time_step) == eos_idx). \ + sum(dim=-1, keepdims=True).type(torch.bool) + + # if we pad with -999 its candidates logprobabilities, also the sequence containing EOS would be + # straightforwardly discarded, instead we want to keep it in the exploration. 
Therefore we mask with 0.0 + # one arbitrary candidate word probability so the sequence probability is unchanged but it + # can still be discarded when a better candidate sequence is found + top_beam_size_word_logprobs[:, :, 0:1].masked_fill_(there_is_eos_mask, 0.0) + top_beam_size_word_logprobs[:, :, 1:].masked_fill_(there_is_eos_mask, -999.0) + + comparison_logprobs = loop_cumul_logprobs + top_beam_size_word_logprobs + + comparison_logprobs = comparison_logprobs.contiguous().view(bs, beam_size * beam_size) + _, topi = torch.topk(comparison_logprobs, k=beam_size, sorted=True) + which_sequence = topi // beam_size + which_word = topi % beam_size + + loop_dec_classes = loop_dec_classes.view(bs, beam_size, -1) + loop_dec_logprobs = loop_dec_logprobs.view(bs, beam_size, -1) + + bs_idxes = torch.arange(bs).unsqueeze(-1) + new_loop_dec_classes = loop_dec_classes[[bs_idxes, which_sequence]] + new_loop_dec_logprobs = loop_dec_logprobs[[bs_idxes, which_sequence]] + + which_sequence_top_beam_size_word_classes = top_beam_size_word_classes[[bs_idxes, which_sequence]] + which_sequence_top_beam_size_word_logprobs = top_beam_size_word_logprobs[ + [bs_idxes, which_sequence]] + which_word = which_word.unsqueeze(-1) + + lastword_top_beam_size_classes = which_sequence_top_beam_size_word_classes.gather(dim=-1, + index=which_word) + lastword_top_beam_size_logprobs = which_sequence_top_beam_size_word_logprobs.gather(dim=-1, index=which_word) + + new_loop_dec_classes = torch.cat((new_loop_dec_classes, lastword_top_beam_size_classes), dim=-1) + new_loop_dec_logprobs = torch.cat((new_loop_dec_logprobs, lastword_top_beam_size_logprobs), dim=-1) + loop_dec_classes = new_loop_dec_classes + loop_dec_logprobs = new_loop_dec_logprobs + + loop_cumul_logprobs = loop_dec_logprobs.sum(dim=-1, keepdims=True) + + # -----------------------update loop_num_elem_vector ---------------------------- + loop_num_elem_vector = loop_num_elem_vector.view(bs, beam_size)[[bs_idxes, which_sequence]].view(bs * beam_size) + there_was_eos_mask = (loop_dec_classes[:, :, :-1].view(bs, beam_size, time_step) == eos_idx). 
\ + sum(dim=-1).type(torch.bool).view(bs * beam_size) + loop_num_elem_vector = loop_num_elem_vector + (1 * (1 - there_was_eos_mask.type(torch.int))) + + if (loop_num_elem_vector != time_step + 1).sum() == (bs * beam_size): + break + + # sort out the best result + loop_cumul_logprobs /= loop_num_elem_vector.reshape(bs, beam_size, 1) + _, topi = torch.topk(loop_cumul_logprobs.squeeze(-1), k=beam_size) + res_caption_pred = [[] for _ in range(bs)] + res_caption_logprob = [[] for _ in range(bs)] + for i in range(bs): + for j in range(how_many_outputs): + idx = topi[i, j].item() + res_caption_pred[i].append( + loop_dec_classes[i, idx, :loop_num_elem_vector[i * beam_size + idx]].tolist()) + res_caption_logprob[i].append(loop_dec_logprobs[i, idx, :loop_num_elem_vector[i * beam_size + idx]]) + + flatted_res_caption_logprob = [logprobs for i in range(bs) for logprobs in res_caption_logprob[i]] + flatted_res_caption_logprob = torch.nn.utils.rnn.pad_sequence(flatted_res_caption_logprob, batch_first=True) + res_caption_logprob = flatted_res_caption_logprob.view(bs, how_many_outputs, -1) + + return res_caption_pred, res_caption_logprob diff --git a/models/ensemble_captioning_model.py b/models/ensemble_captioning_model.py new file mode 100644 index 0000000..3498111 --- /dev/null +++ b/models/ensemble_captioning_model.py @@ -0,0 +1,187 @@ + +import torch +import torch.nn as nn +from models.captioning_model import CaptioningModel + + +class EsembleCaptioningModel(CaptioningModel): + def __init__(self, models_list, rank): + super().__init__() + self.num_models = len(models_list) + self.models_list = models_list + self.rank = rank + + self.dummy_linear = nn.Linear(1, 1) + + for model in self.models_list: + model.eval() + + def forward(self, enc_x, dec_x=None, + enc_x_num_pads=[0], dec_x_num_pads=[0], apply_log_softmax=False, + mode='beam_search', **kwargs): + assert (mode == 'beam_search'), "this class supports only beam search." + sos_idx = kwargs.get('sos_idx', -999) + eos_idx = kwargs.get('eos_idx', -999) + if mode == 'beam_search': + beam_size_arg = kwargs.get('beam_size', 5) + how_many_outputs_per_beam = kwargs.get('how_many_outputs', 1) + beam_max_seq_len = kwargs.get('beam_max_seq_len', 20) + sample_or_max = kwargs.get('sample_or_max', 'max') + out_classes, out_logprobs = self.ensemble_beam_search( + enc_x, enc_x_num_pads, + beam_size=beam_size_arg, + sos_idx=sos_idx, + eos_idx=eos_idx, + how_many_outputs=how_many_outputs_per_beam, + max_seq_len=beam_max_seq_len, + sample_or_max=sample_or_max) + return out_classes, out_logprobs + + def forward_enc(self, enc_input, enc_input_num_pads): + x_outputs_list = [] + for i in range(self.num_models): + x_outputs = self.models_list[i].forward_enc(enc_input, enc_input_num_pads) + x_outputs_list.append(x_outputs) + return x_outputs_list + + def forward_dec(self, cross_input_list, enc_input_num_pads, dec_input, dec_input_num_pads, apply_log_softmax=False): + + import torch.nn.functional as F + y_outputs = [] + for i in range(self.num_models): + y_outputs.append( + F.softmax(self.models_list[i].forward_dec( + cross_input_list[i], enc_input_num_pads, + dec_input, dec_input_num_pads, False).unsqueeze(0), dim=-1)) + avg = torch.cat(y_outputs, dim=0).mean(dim=0).log() + + return avg + + # quite unclean coding, to be re-factored in the future... 
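The ensemble forward_dec above averages the per-model softmax distributions and only then takes the log, rather than averaging logits or log-probabilities; that is also why apply_log_softmax is passed as False to each member model. A small sketch of that choice (not part of the patch, toy numbers only):

import torch
import torch.nn.functional as F

logits_a = torch.tensor([[2.0, 0.0, -1.0]])   # model A, one decoding step, vocab of 3
logits_b = torch.tensor([[0.5, 1.0, -2.0]])   # model B

probs = torch.stack([F.softmax(logits_a, dim=-1),
                     F.softmax(logits_b, dim=-1)], dim=0)
avg_log_probs = probs.mean(dim=0).log()       # mixture of the two distributions
assert torch.allclose(avg_log_probs.exp().sum(dim=-1), torch.tensor(1.0))

Averaging in probability space keeps the combined output a proper distribution over the vocabulary, which the beam-search bookkeeping above relies on.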
+ # since it's a bit similar to the single model case + def ensemble_beam_search(self, enc_input, enc_input_num_pads, sos_idx, eos_idx, + beam_size=3, how_many_outputs=1, max_seq_len=20, sample_or_max='max',): + assert (how_many_outputs <= beam_size), "requested output per sequence must be lower than beam width" + assert (sample_or_max == 'max' or sample_or_max == 'sample'), "argument must be chosen between \'max\' and \'sample\'" + bs = enc_input.shape[0] + + # the cross_dec_input is computed once + cross_enc_output_list = self.forward_enc(enc_input, enc_input_num_pads) + + # init: ------------------------------------------------------------------ + init_dec_class = torch.tensor([sos_idx] * bs).unsqueeze(1).type(torch.long).to(self.rank) + init_dec_logprob = torch.tensor([0.0] * bs).unsqueeze(1).type(torch.float).to(self.rank) + log_probs = self.forward_dec(cross_input_list=cross_enc_output_list, enc_input_num_pads=enc_input_num_pads, + dec_input=init_dec_class, dec_input_num_pads=[0] * bs, + apply_log_softmax=True) + if sample_or_max == 'max': + _, topi = torch.topk(log_probs, k=beam_size, sorted=True) + else: # sample + topi = torch.exp(log_probs[:, 0, :]).multinomial(num_samples=beam_size, replacement=False) + topi = topi.unsqueeze(1) + + init_dec_class = init_dec_class.repeat(1, beam_size) + init_dec_class = init_dec_class.unsqueeze(-1) + top_beam_size_class = topi.transpose(-2, -1) + init_dec_class = torch.cat((init_dec_class, top_beam_size_class), dim=-1) + + init_dec_logprob = init_dec_logprob.repeat(1, beam_size) + init_dec_logprob = init_dec_logprob.unsqueeze(-1) + top_beam_size_logprob = log_probs.gather(dim=-1, index=topi) + top_beam_size_logprob = top_beam_size_logprob.transpose(-2, -1) + init_dec_logprob = torch.cat((init_dec_logprob, top_beam_size_logprob), dim=-1) + + tmp_cross_enc_output_list = [] + for cross_enc_output in cross_enc_output_list: + bs, enc_seq_len, d_model = cross_enc_output.shape + cross_enc_output = cross_enc_output.unsqueeze(1) + cross_enc_output = cross_enc_output.expand(-1, beam_size, -1, -1) + cross_enc_output = cross_enc_output.reshape(bs * beam_size, enc_seq_len, d_model).contiguous() + tmp_cross_enc_output_list.append(cross_enc_output) + cross_enc_output_list = tmp_cross_enc_output_list + enc_input_num_pads = [enc_input_num_pads[i] for i in range(bs) for _ in range(beam_size)] + + loop_dec_classes = init_dec_class + loop_dec_logprobs = init_dec_logprob + loop_cumul_logprobs = loop_dec_logprobs.sum(dim=-1, keepdims=True) + + loop_num_elem_vector = torch.tensor([2] * (bs * beam_size)).to(self.rank) + + for time_step in range(2, max_seq_len): + loop_dec_classes = loop_dec_classes.reshape(bs * beam_size, time_step).contiguous() + + log_probs = self.forward_dec(cross_input_list=cross_enc_output_list, enc_input_num_pads=enc_input_num_pads, + dec_input=loop_dec_classes, + dec_input_num_pads=(time_step-loop_num_elem_vector).tolist(), + apply_log_softmax=True) + if sample_or_max == 'max': + _, topi = torch.topk(log_probs[:, time_step-1, :], k=beam_size, sorted=True) + else: # sample + topi = torch.exp(log_probs[:, time_step-1, :]).multinomial(num_samples=beam_size, + replacement=False) + + top_beam_size_word_classes = topi.reshape(bs, beam_size, beam_size) + + top_beam_size_word_logprobs = log_probs[:, time_step-1, :].gather(dim=-1, index=topi) + top_beam_size_word_logprobs = top_beam_size_word_logprobs.reshape(bs, beam_size, beam_size) + + there_is_eos_mask = (loop_dec_classes.view(bs, beam_size, time_step) == eos_idx). 
\ + sum(dim=-1, keepdims=True).type(torch.bool) + + top_beam_size_word_logprobs[:, :, 0:1].masked_fill_(there_is_eos_mask, 0.0) + top_beam_size_word_logprobs[:, :, 1:].masked_fill_(there_is_eos_mask, -999.0) + + comparison_logprobs = loop_cumul_logprobs + top_beam_size_word_logprobs + + comparison_logprobs = comparison_logprobs.contiguous().view(bs, beam_size * beam_size) + _, topi = torch.topk(comparison_logprobs, k=beam_size, sorted=True) + which_sequence = topi // beam_size + which_word = topi % beam_size + + loop_dec_classes = loop_dec_classes.view(bs, beam_size, -1) + loop_dec_logprobs = loop_dec_logprobs.view(bs, beam_size, -1) + + bs_idxes = torch.arange(bs).unsqueeze(-1) + new_loop_dec_classes = loop_dec_classes[[bs_idxes, which_sequence]] + new_loop_dec_logprobs = loop_dec_logprobs[[bs_idxes, which_sequence]] + + which_sequence_top_beam_size_word_classes = top_beam_size_word_classes[[bs_idxes, which_sequence]] + which_sequence_top_beam_size_word_logprobs = top_beam_size_word_logprobs[ + [bs_idxes, which_sequence]] + which_word = which_word.unsqueeze(-1) + + lastword_top_beam_size_classes = which_sequence_top_beam_size_word_classes.gather(dim=-1, + index=which_word) + lastword_top_beam_size_logprobs = which_sequence_top_beam_size_word_logprobs.gather(dim=-1, index=which_word) + + new_loop_dec_classes = torch.cat((new_loop_dec_classes, lastword_top_beam_size_classes), dim=-1) + new_loop_dec_logprobs = torch.cat((new_loop_dec_logprobs, lastword_top_beam_size_logprobs), dim=-1) + loop_dec_classes = new_loop_dec_classes + loop_dec_logprobs = new_loop_dec_logprobs + + loop_cumul_logprobs = loop_dec_logprobs.sum(dim=-1, keepdims=True) + + loop_num_elem_vector = loop_num_elem_vector.view(bs, beam_size)[[bs_idxes, which_sequence]].view(bs * beam_size) + there_was_eos_mask = (loop_dec_classes[:, :, :-1].view(bs, beam_size, time_step) == eos_idx). 
\ + sum(dim=-1).type(torch.bool).view(bs * beam_size) + loop_num_elem_vector = loop_num_elem_vector + (1 * (1 - there_was_eos_mask.type(torch.int))) + + if (loop_num_elem_vector != time_step + 1).sum() == (bs * beam_size): + break + + loop_cumul_logprobs /= loop_num_elem_vector.reshape(bs, beam_size, 1) + _, topi = torch.topk(loop_cumul_logprobs.squeeze(-1), k=beam_size) + res_caption_pred = [[] for _ in range(bs)] + res_caption_logprob = [[] for _ in range(bs)] + for i in range(bs): + for j in range(how_many_outputs): + idx = topi[i, j].item() + res_caption_pred[i].append( + loop_dec_classes[i, idx, :loop_num_elem_vector[i * beam_size + idx]].tolist()) + res_caption_logprob[i].append(loop_dec_logprobs[i, idx, :loop_num_elem_vector[i * beam_size + idx]]) + + flatted_res_caption_logprob = [logprobs for i in range(bs) for logprobs in res_caption_logprob[i]] + flatted_res_caption_logprob = torch.nn.utils.rnn.pad_sequence(flatted_res_caption_logprob, batch_first=True) + res_caption_logprob = flatted_res_caption_logprob.view(bs, how_many_outputs, -1) + + return res_caption_pred, res_caption_logprob diff --git a/models/layers.py b/models/layers.py new file mode 100644 index 0000000..e126a58 --- /dev/null +++ b/models/layers.py @@ -0,0 +1,286 @@ +import torch +import torch.nn as nn +import math + +import numpy as np +import torch.nn.functional as F + +class EmbeddingLayer(nn.Module): + def __init__(self, vocab_size, d_model, dropout_perc): + super(EmbeddingLayer, self).__init__() + self.dropout = nn.Dropout(dropout_perc) + self.embed = nn.Embedding(vocab_size, d_model) + self.d_model = d_model + + def forward(self, x): + return self.dropout(self.embed(x)) * math.sqrt(float(self.d_model)) + + +class PositionalEncoder(nn.Module): + def __init__(self, d_model, max_seq_len, rank=0): + super().__init__() + assert d_model % 2 == 0, "d_model is not even, even number suggested" + self.d_model = d_model + self.pe = torch.zeros(max_seq_len, d_model).to(rank) + for pos in range(max_seq_len): + for i in range(0, d_model, 2): + self.pe.data[pos, i] = math.sin(pos / (10000.0 ** ((2.0 * i) / d_model))) + self.pe.data[pos, i + 1] = math.cos(pos / (10000.0 ** ((2.0 * i) / d_model))) + self.pe.data = self.pe.data.unsqueeze(0) + + def forward(self, x): + seq_len = x.shape[1] + return self.pe.data[0, :seq_len] + + + +class StaticExpansionBlock(nn.Module): + def __init__(self, d_model, num_enc_exp_list, dropout_perc, eps): + super().__init__() + self.d_model = d_model + self.num_enc_exp_list = num_enc_exp_list + + self.query_exp_vectors = nn.Embedding(sum(num_enc_exp_list), d_model) + self.bias_exp_vectors = nn.Embedding(sum(num_enc_exp_list), d_model) + + self.key_embed = nn.Linear(d_model, d_model) + self.class_a_embed = nn.Linear(d_model, d_model) + self.class_b_embed = nn.Linear(d_model, d_model) + + self.selector_embed = nn.Linear(d_model, d_model) + + self.dropout_class_a_fw = nn.Dropout(dropout_perc) + self.dropout_class_b_fw = nn.Dropout(dropout_perc) + + self.dropout_class_a_bw = nn.Dropout(dropout_perc) + self.dropout_class_b_bw = nn.Dropout(dropout_perc) + + self.Z_dropout = nn.Dropout(dropout_perc) + + self.eps = eps + + def forward(self, x, n_indexes, mask): + bs, enc_len, _ = x.shape + + query_exp = self.query_exp_vectors(n_indexes) + bias_exp = self.bias_exp_vectors(n_indexes) + x_key = self.key_embed(x) + + z = torch.matmul(query_exp, x_key.transpose(-1, -2)) / np.sqrt(self.d_model) + z = self.Z_dropout(z) + + class_a_fw = F.relu(z) + class_b_fw = F.relu(-z) + class_a_fw = class_a_fw.masked_fill(mask 
== 0, 0.0) + class_b_fw = class_b_fw.masked_fill(mask == 0, 0.0) + class_a_fw = class_a_fw / (class_a_fw.sum(dim=-1, keepdim=True) + self.eps) + class_b_fw = class_b_fw / (class_b_fw.sum(dim=-1, keepdim=True) + self.eps) + + class_a = torch.matmul(class_a_fw, self.class_a_embed(x)) + bias_exp + class_b = torch.matmul(class_b_fw, self.class_b_embed(x)) + bias_exp + class_a = self.dropout_class_a_fw(class_a) + class_b = self.dropout_class_b_fw(class_b) + + class_a_bw = F.relu(z.transpose(-2, -1)) + class_b_bw = F.relu(-z.transpose(-2, -1)) + + accum = 0 + class_a_bw_list = [] + class_b_bw_list = [] + for j in range(len(self.num_enc_exp_list)): + from_idx = accum + to_idx = accum + self.num_enc_exp_list[j] + accum += self.num_enc_exp_list[j] + class_a_bw_list.append(class_a_bw[:, :, from_idx:to_idx] / (class_a_bw[:, :, from_idx:to_idx].sum(dim=-1, keepdim=True) + self.eps)) + class_b_bw_list.append(class_b_bw[:, :, from_idx:to_idx] / (class_b_bw[:, :, from_idx:to_idx].sum(dim=-1, keepdim=True) + self.eps)) + class_a_bw = torch.cat(class_a_bw_list, dim=-1) + class_b_bw = torch.cat(class_b_bw_list, dim=-1) + + class_a = torch.matmul(class_a_bw, class_a) / len(self.num_enc_exp_list) + class_b = torch.matmul(class_b_bw, class_b) / len(self.num_enc_exp_list) + class_a = self.dropout_class_a_bw(class_a) + class_b = self.dropout_class_b_bw(class_b) + + selector = torch.sigmoid(self.selector_embed(x)) + x_result = selector * class_a + (1 - selector) * class_b + + return x_result + + +class EncoderLayer(nn.Module): + def __init__(self, d_model, d_ff, num_enc_exp_list, dropout_perc, eps=1e-9): + super().__init__() + self.norm_1 = nn.LayerNorm(d_model) + self.norm_2 = nn.LayerNorm(d_model) + self.dropout_1 = nn.Dropout(dropout_perc) + self.dropout_2 = nn.Dropout(dropout_perc) + + self.stc_exp = StaticExpansionBlock(d_model, num_enc_exp_list, dropout_perc, eps) + self.ff = FeedForward(d_model, d_ff, dropout_perc) + + def forward(self, x, n_indexes, mask): + x2 = self.norm_1(x) + x = x + self.dropout_1(self.stc_exp(x=x2, n_indexes=n_indexes, mask=mask)) + x2 = self.norm_2(x) + x = x + self.dropout_2(self.ff(x2)) + return x + + +class DynamicExpansionBlock(nn.Module): + def __init__(self, d_model, num_exp, dropout_perc, eps): + super().__init__() + self.d_model = d_model + + self.num_exp = num_exp + self.cond_embed = nn.Linear(d_model, d_model) + + self.query_exp_vectors = nn.Embedding(self.num_exp, d_model) + self.bias_exp_vectors = nn.Embedding(self.num_exp, d_model) + + self.key_linear = nn.Linear(d_model, d_model) + self.class_a_embed = nn.Linear(d_model, d_model) + self.class_b_embed = nn.Linear(d_model, d_model) + + self.selector_embed = nn.Linear(d_model, d_model) + + self.dropout_class_a_fw = nn.Dropout(dropout_perc) + self.dropout_class_b_fw = nn.Dropout(dropout_perc) + self.dropout_class_a_bw = nn.Dropout(dropout_perc) + self.dropout_class_b_bw = nn.Dropout(dropout_perc) + + self.Z_dropout = nn.Dropout(dropout_perc) + + self.eps = eps + + def forward(self, x, n_indexes, mask): + bs, dec_len, _ = x.shape + + cond = self.cond_embed(x).view(bs, dec_len, 1, self.d_model) + query_exp = self.query_exp_vectors(n_indexes).unsqueeze(1) + bias_exp = self.bias_exp_vectors(n_indexes).unsqueeze(1) + query_exp = (query_exp + cond).view(bs, dec_len * self.num_exp, self.d_model) + bias_exp = (bias_exp + cond).view(bs, dec_len * self.num_exp, self.d_model) + + x_key = self.key_linear(x) + z = torch.matmul(query_exp, x_key.transpose(-1, -2)) / np.sqrt(self.d_model) + z = self.Z_dropout(z) + + mod_mask_1 = 
mask.unsqueeze(2).expand(bs, dec_len, self.num_exp, dec_len).contiguous(). \ + view(bs, dec_len * self.num_exp, dec_len) + + class_a_fw = F.relu(z) + class_b_fw = F.relu(-z) + class_a_fw = class_a_fw.masked_fill(mod_mask_1 == 0, 0.0) + class_b_fw = class_b_fw.masked_fill(mod_mask_1 == 0, 0.0) + class_a_fw = class_a_fw / (class_a_fw.sum(dim=-1, keepdim=True) + self.eps) + class_b_fw = class_b_fw / (class_b_fw.sum(dim=-1, keepdim=True) + self.eps) + class_a = torch.matmul(class_a_fw, self.class_a_embed(x)) + class_b = torch.matmul(class_b_fw, self.class_b_embed(x)) + class_a = self.dropout_class_a_fw(class_a) + class_b = self.dropout_class_b_fw(class_b) + + mod_mask_2 = mask.unsqueeze(-1).expand(bs, dec_len, dec_len, self.num_exp).contiguous(). \ + view(bs, dec_len, dec_len * self.num_exp) + + class_a_bw = F.relu(z.transpose(-2, -1)) + class_b_bw = F.relu(-z.transpose(-2, -1)) + class_a_bw = class_a_bw.masked_fill(mod_mask_2 == 0, 0.0) + class_b_bw = class_b_bw.masked_fill(mod_mask_2 == 0, 0.0) + class_a_bw = class_a_bw / (class_a_bw.sum(dim=-1, keepdim=True) + self.eps) + class_b_bw = class_b_bw / (class_b_bw.sum(dim=-1, keepdim=True) + self.eps) + class_a = torch.matmul(class_a_bw, class_a + bias_exp) + class_b = torch.matmul(class_b_bw, class_b + bias_exp) + class_a = self.dropout_class_a_bw(class_a) + class_b = self.dropout_class_b_bw(class_b) + + selector = torch.sigmoid(self.selector_embed(x)) + x_result = selector * class_a + (1 - selector) * class_b + + return x_result + + +class DecoderLayer(nn.Module): + def __init__(self, d_model, num_heads, d_ff, num_exp, dropout_perc, eps=1e-9): + super().__init__() + self.norm_1 = nn.LayerNorm(d_model) + self.norm_2 = nn.LayerNorm(d_model) + self.norm_3 = nn.LayerNorm(d_model) + + self.dropout_1 = nn.Dropout(dropout_perc) + self.dropout_2 = nn.Dropout(dropout_perc) + self.dropout_3 = nn.Dropout(dropout_perc) + + self.mha = MultiHeadAttention(d_model, num_heads, dropout_perc) + self.dyn_exp = DynamicExpansionBlock(d_model, num_exp, dropout_perc, eps) + self.ff = FeedForward(d_model, d_ff, dropout_perc) + + def forward(self, x, n_indexes, cross_connection_x, input_attention_mask, cross_attention_mask): + + # Pre-LayerNorm + x2 = self.norm_1(x) + x = x + self.dropout_1(self.dyn_exp(x=x2, n_indexes=n_indexes, mask=input_attention_mask)) + + x2 = self.norm_2(x) + x = x + self.dropout_2(self.mha(q=x2, k=cross_connection_x, v=cross_connection_x, + mask=cross_attention_mask)) + + x2 = self.norm_3(x) + x = x + self.dropout_3(self.ff(x2)) + return x + + + +class MultiHeadAttention(nn.Module): + def __init__(self, d_model, num_heads, dropout_perc): + super(MultiHeadAttention, self).__init__() + assert d_model % num_heads == 0, "num heads must be multiple of d_model" + + self.d_model = d_model + self.d_k = int(d_model / num_heads) + self.num_heads = num_heads + + self.Wq = nn.Linear(d_model, self.d_k * num_heads) + self.Wk = nn.Linear(d_model, self.d_k * num_heads) + self.Wv = nn.Linear(d_model, self.d_k * num_heads) + + self.out_linear = nn.Linear(d_model, d_model) + + def forward(self, q, k, v, mask=None): + batch_size, q_seq_len, _ = q.shape + k_seq_len = k.size(1) + v_seq_len = v.size(1) + + k_proj = self.Wk(k).view(batch_size, k_seq_len, self.num_heads, self.d_k) + q_proj = self.Wq(q).view(batch_size, q_seq_len, self.num_heads, self.d_k) + v_proj = self.Wv(v).view(batch_size, v_seq_len, self.num_heads, self.d_k) + + k_proj = k_proj.transpose(2, 1) + q_proj = q_proj.transpose(2, 1) + v_proj = v_proj.transpose(2, 1) + + sim_scores = torch.matmul(q_proj, 
k_proj.transpose(3, 2)) + sim_scores = sim_scores / self.d_k ** 0.5 + + if mask is not None: + mask = mask.unsqueeze(1).repeat(1, self.num_heads, 1, 1) + sim_scores = sim_scores.masked_fill(mask == 0, value=-1e4) + sim_scores = F.softmax(input=sim_scores, dim=-1) + + attention_applied = torch.matmul(sim_scores, v_proj) + attention_applied_concatenated = attention_applied.permute(0, 2, 1, 3).contiguous()\ + .view(batch_size, q_seq_len, self.d_model) + + out = self.out_linear(attention_applied_concatenated) + return out + +class FeedForward(nn.Module): + def __init__(self, d_model, d_ff, dropout_perc): + super(FeedForward, self).__init__() + self.linear_1 = nn.Linear(d_model, d_ff) + self.dropout = nn.Dropout(dropout_perc) + self.linear_2 = nn.Linear(d_ff, d_model) + + def forward(self, x): + x = self.dropout(F.relu(self.linear_1(x))) + x = self.linear_2(x) + return x diff --git a/models/swin_transformer_mod.py b/models/swin_transformer_mod.py new file mode 100644 index 0000000..6b16abc --- /dev/null +++ b/models/swin_transformer_mod.py @@ -0,0 +1,655 @@ +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# -------------------------------------------------------- + +# --------------------------------- +# All credits due to Ze Liu: https://github.com/microsoft/Swin-Transformer +# and the additional sources: +# https://github.com/rwightman/pytorch-image-models/blob/b9bd960a032c75ca6b808ddeed76bee5f3ed4972/timm/models/layers/helpers.py +# https://github.com/yukimasano/PASS/blob/main/vision_transformer.py +# --------------------------------- + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint + + +class DropPath(nn.Module): + def __init__(self, drop_prob): + super().__init__() + self.drop_prob = drop_prob + + def forward(self, x): + if not self.training: + return x + keep_prob = 1 - self.drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() # binarize + output = x.div(keep_prob) * random_tensor + return output + + +import collections.abc +def to_2tuple(x): + if isinstance(x, collections.abc.Iterable): + return x + return (x, x) + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +import warnings +import math +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official repo master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + return (1. + math.erf(x / math.sqrt(2.))) / 2. + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. 
+ tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + # mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + r""" Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}' + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + return flops + + +class SwinTransformerBlock(nn.Module): + r""" Swin Transformer Block. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resulotion. + num_heads (int): Number of attention heads. 
+ window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + if min(self.input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if self.shift_size > 0: + # calculate attention mask for SW-MSA + H, W = self.input_resolution + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + + self.register_buffer("attn_mask", attn_mask) + + def forward(self, x): + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = 
window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class PatchMerging(nn.Module): + r""" Patch Merging Layer. + + Args: + input_resolution (tuple[int]): Resolution of input feature. + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + return flops + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
+ """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock(dim=dim, input_resolution=input_resolution, + num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + def flops(self): + flops = 0 + for blk in self.blocks: + flops += blk.flops() + if self.downsample is not None: + flops += self.downsample.flops() + return flops + + +class PatchEmbed(nn.Module): + r""" Image to Patch Embedding + + Args: + img_size (int): Image size. Default: 224. + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] + self.img_size = img_size + self.patch_size = patch_size + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C + if self.norm is not None: + x = self.norm(x) + return x + + def flops(self): + Ho, Wo = self.patches_resolution + flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) + if self.norm is not None: + flops += Ho * Wo * self.embed_dim + return flops + + +class SwinTransformer(nn.Module): + r""" Swin Transformer + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. 
Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, + embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], + window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False): + super().__init__() + + # self.num_classes = num_classes + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + patches_resolution = self.patch_embed.patches_resolution + self.patches_resolution = patches_resolution + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer), + input_resolution=(patches_resolution[0] // (2 ** i_layer), + patches_resolution[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + self.layers.append(layer) + + self.norm = norm_layer(self.num_features) + # self.avgpool = nn.AdaptiveAvgPool1d(1) + # self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + 
nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'absolute_pos_embed'} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'relative_position_bias_table'} + + def forward_features(self, x): + x = self.patch_embed(x) + if self.ape: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + + for layer in self.layers: + x = layer(x) + + x = self.norm(x) # B L C + # x = self.avgpool(x.transpose(1, 2)) # B C 1 + # x = torch.flatten(x, 1) + return x + + def forward(self, x): + x = self.forward_features(x) + #x = self.head(x) + return x + + def flops(self): + flops = 0 + flops += self.patch_embed.flops() + for i, layer in enumerate(self.layers): + flops += layer.flops() + #flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers) + #flops += self.num_features * self.num_classes + return flops
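
Note (not part of the patch): since the classification head and average pooling are commented out in the modified SwinTransformer above, forward() simply returns the patch-token features from forward_features(). The sketch below is a minimal shape check of that behavior under the default Swin-T configuration; it assumes the file is importable as models.swin_transformer_mod (per the paths in this diff), and everything else in it is illustrative rather than taken from the patch.

    import torch
    from models.swin_transformer_mod import SwinTransformer

    # Default arguments correspond to Swin-T: a 4x4 patch embedding followed by
    # four stages (depths 2/2/6/2) with PatchMerging between stages.
    backbone = SwinTransformer(img_size=224, patch_size=4, in_chans=3,
                               embed_dim=96, depths=[2, 2, 6, 2],
                               num_heads=[3, 6, 12, 24], window_size=7)

    images = torch.randn(2, 3, 224, 224)    # B, C, H, W
    features = backbone(images)             # B, L, C token features; no pooling or head

    # 224 / 4 (patch embedding) then three PatchMerging halvings: 56 -> 28 -> 14 -> 7
    # tokens per side, while embed_dim doubles at each merge: 96 * 2**3 = 768 channels.
    print(features.shape)                   # torch.Size([2, 49, 768])

Image features of this B x 49 x 768 form are the kind of input the DecoderLayer defined earlier would consume as cross_connection_x in its cross-attention; the actual wiring between the two files presumably lives in models/End_ExpansionNet_v2.py, which is not shown in this excerpt.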