From 91a56b34503f622c66c0d85cb7722143fced403f Mon Sep 17 00:00:00 2001
From: wxywb
Date: Wed, 8 Jun 2022 20:11:25 +0800
Subject: [PATCH] init the operator

Signed-off-by: wxywb
---
 README.md                              |   2 +-
 __init__.py                            |  19 +
 __pycache__/__init__.cpython-38.pyc    | Bin 0 -> 314 bytes
 __pycache__/blip.cpython-38.pyc        | Bin 0 -> 2719 bytes
 blip.py                                |  76 ++
 configs/med_config.json                |  22 +
 models/__pycache__/blip.cpython-38.pyc | Bin 0 -> 7054 bytes
 models/__pycache__/med.cpython-38.pyc  | Bin 0 -> 28160 bytes
 models/__pycache__/vit.cpython-38.pyc  | Bin 0 -> 12221 bytes
 models/blip.py                         | 240 +++
 models/med.py                          | 955 +++++++++++++++++++++++++
 models/vit.py                          | 305 ++++
 12 files changed, 1618 insertions(+), 1 deletion(-)
 create mode 100644 __init__.py
 create mode 100644 __pycache__/__init__.cpython-38.pyc
 create mode 100644 __pycache__/blip.cpython-38.pyc
 create mode 100644 blip.py
 create mode 100644 configs/med_config.json
 create mode 100644 models/__pycache__/blip.cpython-38.pyc
 create mode 100644 models/__pycache__/med.cpython-38.pyc
 create mode 100644 models/__pycache__/vit.cpython-38.pyc
 create mode 100644 models/blip.py
 create mode 100644 models/med.py
 create mode 100644 models/vit.py

diff --git a/README.md b/README.md
index cfb1ee8..33f8a32 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,2 @@
-# blip
+# BLIP
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..3a4024d
--- /dev/null
+++ b/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2021 Zilliz. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .blip import Blip
+
+
+def blip(model_name: str, modality: str):
+    return Blip(model_name, modality)
diff --git a/__pycache__/__init__.cpython-38.pyc b/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..602c1217c698bd13765be3c1c40fd776dbc7ece0
GIT binary patch
diff --git a/__pycache__/blip.cpython-38.pyc b/__pycache__/blip.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..920f8f17bf8906a4f40e721b3701952803235617
GIT binary patch
diff --git a/blip.py b/blip.py
new file mode 100644
index 0000000..ec5c904
--- /dev/null
+++ b/blip.py
@@ -0,0 +1,76 @@
+# Copyright 2021 Zilliz. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
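+#
+# This module wraps the BLIP feature extractor defined in models/blip.py as a
+# Towhee NNOperator: depending on the modality argument it embeds either an
+# image or a piece of text and returns the feature as a flat numpy array.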
+
+import sys
+from pathlib import Path
+from towhee import register
+from towhee.operator.base import NNOperator, OperatorFlag
+from towhee.types.arg import arg, to_image_color
+import torch
+from towhee.types.image_utils import from_pil, to_pil
+from torchvision import transforms
+from torchvision.transforms.functional import InterpolationMode
+
+@register(output_schema=['vec'])
+class Blip(NNOperator):
+    """
+    BLIP multi-modal embedding operator.
+    """
+    def __init__(self, model_name: str, modality: str):
+        super().__init__()
+        sys.path.append(str(Path(__file__).parent))
+        from models.blip import blip_feature_extractor
+        image_size = 224
+        model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base.pth'
+        self.model = blip_feature_extractor(pretrained=model_url, image_size=image_size, vit='base')
+
+        self._modality = modality
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        # Move the model to the same device the inputs are sent to and switch to
+        # inference mode; otherwise inputs on GPU would hit CPU-resident weights.
+        self.model.to(self.device)
+        self.model.eval()
+
+        self.tfms = transforms.Compose([
+            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
+            transforms.ToTensor(),
+            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
+        ])
+
+    def __call__(self, data):
+        if self._modality == 'image':
+            vec = self._inference_from_image(data)
+        elif self._modality == 'text':
+            vec = self._inference_from_text(data)
+        else:
+            raise ValueError("modality[{}] not implemented.".format(self._modality))
+        return vec.detach().cpu().numpy().flatten()
+
+    def _inference_from_text(self, text):
+        text_feature = self.model(None, text, mode='text', device=self.device)[0, 0]
+        return text_feature
+
+    @arg(1, to_image_color('RGB'))
+    def _inference_from_image(self, img):
+        img = self._preprocess(img)
+        caption = ''
+        image_feature = self.model(img, caption, mode='image', device=self.device)[0, 0]
+        return image_feature
+
+    def _preprocess(self, img):
+        img = to_pil(img)
+        processed_img = self.tfms(img).unsqueeze(0).to(self.device)
+        return processed_img
+
diff --git a/configs/med_config.json b/configs/med_config.json
new file mode 100644
index 0000000..d9031d2
--- /dev/null
+++ b/configs/med_config.json
@@ -0,0 +1,22 @@
+{
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "type_vocab_size": 2,
+  "vocab_size": 30524,
+  "encoder_width": 768,
+  "add_cross_attention": true
+}
+
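For reference, a minimal usage sketch of the operator above (illustration only; the import path "blip", the image filename, and the model_name value are assumptions). Note that model_name is currently not used by the constructor, which always loads the ViT-B model_base.pth checkpoint from the hard-coded URL. The accompanying configs/med_config.json is a bert-base layout whose vocab_size of 30524 leaves room for the two special tokens ([DEC], [ENC]) added by init_tokenizer in models/blip.py, with add_cross_attention enabled and encoder_width 768 matching the ViT-B feature width.

    from PIL import Image
    from towhee.types.image_utils import from_pil

    from blip import Blip  # assumed import path for this operator module

    # Text embedding: the raw string is tokenized inside the BLIP text encoder.
    text_op = Blip(model_name='blip_base', modality='text')
    text_vec = text_op('a dog chasing a ball on the beach')    # 1-D numpy array

    # Image embedding: convert a PIL image to a towhee image first.
    image_op = Blip(model_name='blip_base', modality='image')
    img = from_pil(Image.open('example.jpg').convert('RGB'))   # placeholder image file
    image_vec = image_op(img)                                  # 1-D numpy array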
diff --git a/models/__pycache__/blip.cpython-38.pyc b/models/__pycache__/blip.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c413bad2f0bb0634584e1efd1037fc50414fb845
GIT binary patch
diff --git a/models/__pycache__/med.cpython-38.pyc b/models/__pycache__/med.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e177a3b1485afe9c520c677bc21948e9e5a4ce06
GIT binary patch
diff --git a/models/__pycache__/vit.cpython-38.pyc b/models/__pycache__/vit.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14596182be1299d004b1f736f3e8e27992775b5f
GIT binary patch
diff --git a/models/blip.py b/models/blip.py
new file mode 100644
index 0000000..5d3619f
--- /dev/null
+++ b/models/blip.py
@@ -0,0 +1,240 @@
+'''
+ * Copyright (c) 2022, salesforce.com, inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li +''' +import warnings +warnings.filterwarnings("ignore") + +from models.vit import VisionTransformer, interpolate_pos_embed +from models.med import BertConfig, BertModel, BertLMHeadModel +from transformers import BertTokenizer + +import torch +from torch import nn +from pathlib import Path +import torch.nn.functional as F + +import os +from urllib.parse import urlparse +from timm.models.hub import download_cached_file + +class BLIP_Base(nn.Module): + def __init__(self, + med_config = 'configs/med_config.json', + image_size = 224, + vit = 'base', + vit_grad_ckpt = False, + vit_ckpt_layer = 0, + ): + """ + Args: + med_config (str): path for the mixture of encoder-decoder model's configuration file + image_size (int): input image size + vit (str): model size of vision transformer + """ + super().__init__() + dirpath = str(Path(__file__).parent.parent) + med_config = dirpath + '/' + med_config + self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) + self.tokenizer = init_tokenizer() + med_config = BertConfig.from_json_file(med_config) + med_config.encoder_width = vision_width + self.text_encoder = BertModel(config=med_config, add_pooling_layer=False) + + + def forward(self, image, caption, mode, device): + + assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal" + text = self.tokenizer(caption, return_tensors="pt").to(device) + + if mode=='image': + # return image features + image_embeds = self.visual_encoder(image) + return image_embeds + + elif mode=='text': + # return text features + text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask, + return_dict = True, mode = 'text') + return text_output.last_hidden_state + + elif mode=='multimodal': + # return multimodel features + image_embeds = self.visual_encoder(image) + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(device) + + text.input_ids[:,0] = self.tokenizer.enc_token_id + output = self.text_encoder(text.input_ids, + attention_mask = text.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True, + ) + return output.last_hidden_state + + + +class BLIP_Decoder(nn.Module): + def __init__(self, + med_config = 'configs/med_config.json', + image_size = 384, + vit = 'base', + vit_grad_ckpt = False, + vit_ckpt_layer = 0, + prompt = 'a picture of ', + ): + """ + Args: + med_config (str): path for the mixture of encoder-decoder model's configuration file + image_size (int): input image size + vit (str): model size of vision transformer + """ + super().__init__() + + self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) + self.tokenizer = init_tokenizer() + med_config = BertConfig.from_json_file(med_config) + med_config.encoder_width = vision_width + self.text_decoder = BertLMHeadModel(config=med_config) + + self.prompt = prompt + self.prompt_length = len(self.tokenizer(self.prompt).input_ids)-1 + + + def forward(self, image, caption): + + image_embeds = self.visual_encoder(image) + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + + text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device) + + text.input_ids[:,0] = self.tokenizer.bos_token_id + + 
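+        # The tokenized captions are next converted into language-modelling targets:
+        # padding positions are replaced with -100 (the default ignore_index of the
+        # cross-entropy loss) and the fixed prompt prefix is masked the same way, so
+        # the loss below is computed only on the caption continuation.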
decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100) + decoder_targets[:,:self.prompt_length] = -100 + + decoder_output = self.text_decoder(text.input_ids, + attention_mask = text.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + labels = decoder_targets, + return_dict = True, + ) + loss_lm = decoder_output.loss + + return loss_lm + + def generate(self, image, sample=False, num_beams=3, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0): + image_embeds = self.visual_encoder(image) + + if not sample: + image_embeds = image_embeds.repeat_interleave(num_beams,dim=0) + + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask":image_atts} + + prompt = [self.prompt] * image.size(0) + input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device) + input_ids[:,0] = self.tokenizer.bos_token_id + input_ids = input_ids[:, :-1] + + if sample: + #nucleus sampling + outputs = self.text_decoder.generate(input_ids=input_ids, + max_length=max_length, + min_length=min_length, + do_sample=True, + top_p=top_p, + num_return_sequences=1, + eos_token_id=self.tokenizer.sep_token_id, + pad_token_id=self.tokenizer.pad_token_id, + repetition_penalty=1.1, + **model_kwargs) + else: + #beam search + outputs = self.text_decoder.generate(input_ids=input_ids, + max_length=max_length, + min_length=min_length, + num_beams=num_beams, + eos_token_id=self.tokenizer.sep_token_id, + pad_token_id=self.tokenizer.pad_token_id, + repetition_penalty=repetition_penalty, + **model_kwargs) + + captions = [] + for output in outputs: + caption = self.tokenizer.decode(output, skip_special_tokens=True) + captions.append(caption[len(self.prompt):]) + return captions + + +def blip_decoder(pretrained='',**kwargs): + model = BLIP_Decoder(**kwargs) + if pretrained: + model,msg = load_checkpoint(model,pretrained) + assert(len(msg.missing_keys)==0) + return model + +def blip_feature_extractor(pretrained='',**kwargs): + model = BLIP_Base(**kwargs) + if pretrained: + model,msg = load_checkpoint(model,pretrained) + assert(len(msg.missing_keys)==0) + return model + +def init_tokenizer(): + tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + tokenizer.add_special_tokens({'bos_token':'[DEC]'}) + tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']}) + tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0] + return tokenizer + + +def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0): + + assert vit in ['base', 'large'], "vit parameter must be base or large" + if vit=='base': + vision_width = 768 + visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, + num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, + drop_path_rate=0 or drop_path_rate + ) + elif vit=='large': + vision_width = 1024 + visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, + num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, + drop_path_rate=0.1 or drop_path_rate + ) + return visual_encoder, vision_width + +def is_url(url_or_filename): + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https") + +def load_checkpoint(model,url_or_filename): + if is_url(url_or_filename): + cached_file = 
download_cached_file(url_or_filename, check_hash=False, progress=True) + checkpoint = torch.load(cached_file, map_location='cpu') + elif os.path.isfile(url_or_filename): + checkpoint = torch.load(url_or_filename, map_location='cpu') + else: + raise RuntimeError('checkpoint url or path is invalid') + + state_dict = checkpoint['model'] + + state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder) + if 'visual_encoder_m.pos_embed' in model.state_dict().keys(): + state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], + model.visual_encoder_m) + for key in model.state_dict().keys(): + if key in state_dict.keys(): + if state_dict[key].shape!=model.state_dict()[key].shape: + del state_dict[key] + + msg = model.load_state_dict(state_dict,strict=False) + print('load checkpoint from %s'%url_or_filename) + return model,msg + diff --git a/models/med.py b/models/med.py new file mode 100644 index 0000000..7b00a35 --- /dev/null +++ b/models/med.py @@ -0,0 +1,955 @@ +''' + * Copyright (c) 2022, salesforce.com, inc. + * All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li + * Based on huggingface code base + * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert +''' + +import math +import os +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +from torch import Tensor, device, dtype, nn +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss +import torch.nn.functional as F + +from transformers.activations import ACT2FN +from transformers.file_utils import ( + ModelOutput, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import ( + PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import logging +from transformers.models.bert.configuration_bert import BertConfig + + +logger = logging.get_logger(__name__) + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward( + self, input_ids=None, position_ids=None, inputs_embeds=None, 
past_key_values_length=0 + ): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + embeddings = inputs_embeds + + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads) + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
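+        # Three cases are handled below:
+        #   1) cross-attention: keys/values are projected from encoder_hidden_states
+        #      (in BLIP, the ViT image features; width config.encoder_width) and the
+        #      encoder's attention mask replaces the text mask;
+        #   2) cached decoding: freshly projected keys/values are concatenated onto
+        #      past_key_value along the sequence dimension;
+        #   3) plain self-attention: keys/values come from hidden_states itself.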
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
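+        # (The attention_mask added to the raw scores above is an additive bias with
+        # large negative values at masked positions, so those positions already
+        # receive ~zero probability after the softmax.)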
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = 
self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if self.config.add_cross_attention: + self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + mode=None, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + + if mode=='multimodal': + assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" + + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + mode='multimodal', + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warn( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + mode=mode, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + mode=mode, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = BertConfig + base_model_prefix = "bert" + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """ Initialize the weights """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +class BertModel(BertPreTrainedModel): + """ + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in `Attention is + all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an + input to the forward pass. + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + + def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
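+        # For a 2D padding mask the call below yields an additive float mask of shape
+        # [batch_size, 1, 1, seq_length] (or [batch_size, 1, seq_length, seq_length] once the causal
+        # mask is applied in decoder mode): 0.0 where attention is allowed, -10000.0 where it is masked.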
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, + device, is_decoder) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() + else: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + mode=mode, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + 
return_logits=False,
+        is_decoder=True,
+        reduction='mean',
+        mode='multimodal',
+    ):
+        r"""
+        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+            the model is configured as a decoder.
+        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
+            - 1 for tokens that are **not masked**,
+            - 0 for tokens that are **masked**.
+        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
+            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
+            ignored (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
+        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
+            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
+            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
+        use_cache (:obj:`bool`, `optional`):
+            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
+            decoding (see :obj:`past_key_values`).
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + if reduction=='none': + lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "past_key_values": past, + "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), + "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), + "is_decoder": True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past diff --git a/models/vit.py b/models/vit.py new file mode 100644 index 0000000..135c0d5 --- /dev/null +++ b/models/vit.py @@ -0,0 +1,305 @@ +''' + * Copyright (c) 2022, salesforce.com, inc. + * All rights reserved. 
+ * SPDX-License-Identifier: BSD-3-Clause + * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause + * By Junnan Li + * Based on timm code base + * https://github.com/rwightman/pytorch-image-models/tree/master/timm +''' + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.models.vision_transformer import _cfg, PatchEmbed +from timm.models.registry import register_model +from timm.models.layers import trunc_normal_, DropPath +from timm.models.helpers import named_apply, adapt_input_conv + +#from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.attn_gradients = None + self.attention_map = None + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def forward(self, x, register_hook=False): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + if register_hook: + self.save_attention_map(attn) + attn.register_hook(self.save_attn_gradients) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + #if use_grad_checkpointing: + # self.attn = checkpoint_wrapper(self.attn) + # self.mlp = checkpoint_wrapper(self.mlp) + + def forward(self, x, register_hook=False): + x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - + https://arxiv.org/abs/2010.11929 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, + use_grad_checkpointing=False, ckpt_layer=0): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer + """ + super().__init__() + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer) + ) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def forward(self, x, register_blk=-1): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + + x = x + self.pos_embed[:,:x.size(1),:] 
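+        # self.pos_embed holds num_patches + 1 positions (cls token + patches); slicing to x.size(1)
+        # keeps the addition valid if fewer tokens are passed than the embedding was sized for.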
+ x = self.pos_drop(x) + + for i,blk in enumerate(self.blocks): + x = blk(x, register_blk==i) + x = self.norm(x) + + return x + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) +# if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: +# model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) +# model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) +# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: +# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) +# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + 
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + + +def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): + # interpolate position embedding + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = visual_encoder.patch_embed.num_patches + num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + + if orig_size!=new_size: + # class_token and dist_token are kept unchanged + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2)) + + return new_pos_embed + else: + return pos_embed_checkpoint
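
A short usage note on the helper above: interpolate_pos_embed is meant to be applied to a pretrained checkpoint's position-embedding tensor before load_state_dict whenever the fine-tuning image size (and therefore the patch grid) differs from pretraining. The snippet below is a minimal sketch only; the checkpoint path, the 'model' key, and the 'visual_encoder.' key prefix are assumptions for illustration, not something this patch defines.

    # Sketch: resize a pretrained ViT position embedding to a new input resolution.
    # Assumes models/ is importable as a package and the checkpoint layout described above.
    import torch
    from models.vit import VisionTransformer, interpolate_pos_embed

    # Build the ViT at the target resolution (384x384 -> a 24x24 patch grid at patch_size=16).
    visual_encoder = VisionTransformer(img_size=384, patch_size=16, embed_dim=768,
                                       depth=12, num_heads=12, mlp_ratio=4)

    # Hypothetical checkpoint: weights under a 'model' key, ViT weights prefixed 'visual_encoder.'.
    state_dict = torch.load('blip_pretrained.pth', map_location='cpu')['model']

    # Interpolate the stored embedding (e.g. a 14x14 grid from 224px pretraining) to the 24x24 grid.
    state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(
        state_dict['visual_encoder.pos_embed'], visual_encoder)

    # Load only the visual-encoder weights, stripping the prefix; strict=False tolerates unmatched keys.
    vit_state = {k[len('visual_encoder.'):]: v for k, v in state_dict.items()
                 if k.startswith('visual_encoder.')}
    visual_encoder.load_state_dict(vit_state, strict=False)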