From cbef6be26ae102c03192a8a44dba2d30a024a8bb Mon Sep 17 00:00:00 2001 From: wxywb Date: Tue, 1 Nov 2022 19:46:28 +0800 Subject: [PATCH] init the operator. Signed-off-by: wxywb --- __init__.py | 18 + albef.py | 113 + models/__init__.py | 0 models/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 140 bytes .../model_retrieval.cpython-38.pyc | Bin 0 -> 6302 bytes .../tokenization_bert.cpython-38.pyc | Bin 0 -> 19556 bytes models/__pycache__/vit.cpython-38.pyc | Bin 0 -> 8249 bytes models/__pycache__/xbert.cpython-38.pyc | Bin 0 -> 55151 bytes models/model_nlvr.py | 128 ++ models/model_pretrain.py | 291 +++ models/model_pretrain_nlvr.py | 99 + models/model_retrieval.py | 217 ++ models/model_ve.py | 110 + models/model_vqa.py | 214 ++ models/tokenization_bert.py | 539 +++++ models/vit.py | 202 ++ models/xbert.py | 1916 +++++++++++++++++ requirements.txt | 0 18 files changed, 3847 insertions(+) create mode 100644 __init__.py create mode 100644 albef.py create mode 100644 models/__init__.py create mode 100644 models/__pycache__/__init__.cpython-38.pyc create mode 100644 models/__pycache__/model_retrieval.cpython-38.pyc create mode 100644 models/__pycache__/tokenization_bert.cpython-38.pyc create mode 100644 models/__pycache__/vit.cpython-38.pyc create mode 100644 models/__pycache__/xbert.cpython-38.pyc create mode 100644 models/model_nlvr.py create mode 100644 models/model_pretrain.py create mode 100644 models/model_pretrain_nlvr.py create mode 100644 models/model_retrieval.py create mode 100644 models/model_ve.py create mode 100644 models/model_vqa.py create mode 100644 models/tokenization_bert.py create mode 100644 models/vit.py create mode 100644 models/xbert.py create mode 100644 requirements.txt diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..feee145 --- /dev/null +++ b/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2021 Zilliz. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .albef import Albef + +def albef(model_name: str, modality: str): + return Albef(model_name, modality) diff --git a/albef.py b/albef.py new file mode 100644 index 0000000..7f60539 --- /dev/null +++ b/albef.py @@ -0,0 +1,113 @@ +# Copyright 2021 Zilliz. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +from pathlib import Path +from PIL import Image +import torch +import yaml +from torchvision import transforms + +from towhee.types.image_utils import to_pil +from towhee.operator.base import NNOperator, OperatorFlag +from towhee.types.arg import arg, to_image_color +from towhee import register + +@register(output_schema=['vec']) +class Albef(NNOperator): + """ + ALBEF multi-modal embedding operator + """ + def prepare_model(checkpoint_path, model): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + state_dict = checkpoint['model'] + pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder) + state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped + m_pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],model.visual_encoder_m) + state_dict['visual_encoder_m.pos_embed'] = m_pos_embed_reshaped + for key in list(state_dict.keys()): + if 'bert' in key: + encoder_key = key.replace('bert.','') + state_dict[encoder_key] = state_dict[key] + del state_dict[key] + msg = model.load_state_dict(state_dict,strict=False) + print('load checkpoint from ' + checkpoint_path) + return model + + def __init__(self, model_name: str, modality: str): + self.modality = modality + config = self._configs()[model_name] + + self.device = "cuda" if torch.cuda.is_available() else "cpu" + + normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) + tokenizer = BertTokenizer.from_pretrained(config) + model = ALBEF(config=config, text_encoder=config['text_encoder'], tokenizer=tokenizer) + cfg = yaml.load(open(config['cfg'], 'r'), Loader=yaml.Loader) + checkpoint_path = cfg['ckpt_path'] + + self.model = self.prepare_model(checkpoint_path, model) + + self.test_transform = transforms.Compose([ + transforms.Resize((cfg['image_res'],cfg['image_res']),interpolation=Image.BICUBIC), + transforms.ToTensor(), + normalize, + ]) + + + def 
inference_single_data(self, data): + if self.modality == 'image': + vec = self._inference_from_image(data) + elif self.modality == 'text': + vec = self._inference_from_text(data) + else: + raise ValueError("modality[{}] not implemented.".format(self._modality)) + return vec.detach().cpu().numpy().flatten() + + def __call__(self, data): + if not isinstance(data, list): + data = [data] + else: + data = data + results = [] + for single_data in data: + result = self.inference_single_data(single_data) + results.append(result) + if len(data) == 1: + return results[0] + else: + return results + + def _inference_from_text(self, text): + tokens = self.text_tokenizer(text, return_tensors='pt', padding=True)['input_ids'].to(self.device) + text_features = self.text_encoder(tokens).logits + return text_features + + @arg(1, to_image_color('RGB')) + def _inference_from_image(self, img): + image = to_pil(img) + image = self.processor(images=image, return_tensors="pt").to(self.device) + image_features = self.clip_model.get_image_features(**image) + return image_features + + def _configs(self): + config = {} + config['albef_4m'] = {} + config['albef_4m']['tokenizer'] = 'bert-base-uncased' + config['albef_4m']['text_encoder'] = 'bert-base-uncased' + config['albef_4m']['cfg_path'] = './configs/Retrieval_flickr.yaml' + config['albef_4m']['ckpt_path'] = '' + + + diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/__pycache__/__init__.cpython-38.pyc b/models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91c077913c8fd343afe5fdfdb3f3e377ee3ae46e GIT binary patch literal 140 zcmWIL<>g`kf-C0n$sqbMh(HF6K#l_t7qb9~6oz01O-8?!3`HPe1o6vIKeRZts93)$ zGbbmrNQSIcO19hAnQpsVf_AkgOIVC3?larH5Rc>3At&Q`04Um#-mlIKF zyzYK@{rdIm_w{9?VKMyu%kMayU*2Tw->7r`&p_uso>KkF%E(50&mIoEPQjApx6^(H=utZlu(%B;<^^3WABa6^C#KMIl{j=m6XlyqY;@I_n8tnc|keiBFB 
zpqEt%{(kKF;Xcr{DC!WA^MAB4`b&O<#%DJ7n5}vBpRg|XG*5qCwRO+%O3#^X_@#*n zwiL5qt6cP_Y*vcC<<%71)t@kf*%nxDWTlTk|K4X0_B+$nXvGSiIvyQQ^6#86$)2$Z zKZRgYEoF|D>Kv)$WLB<-yO5ETF9%MaR{wCHNCkL9rKNp^&9YL3ZO*=t>)i)mlUP7|0k#G7uMf+jnmBw99;q5*k6CW2X6Zdcs3M7 z``J{`BA_cht!IGCt&~l!Dygzb%Ui-uuB907ubgV98e~nQm)|(LjL~gxg>bu?w|WkL zO)mA{@mA1Y_tsAo9Z3I1e+RfXy$x^kgyeX0Hg;<^cFSYXh%5V9*$WaGgke_o2M50A zc)=j6KT`VaJPtg0Xjg-Q+w&daCokrqBLQj33(}Yuq()!#PX6=flVASB-`#^|>;5ol z<9-E$o|6Q}zHJR%**R3Or~*4HdHzryX4Pmka1MRfOYG_(96G|4L2OqZ9zAjn0ynX( z2-nC7-KW0DY6rMHPDkCO2e?>f4%!Q@=Q+bT4uhyypc>L23^VKD$RGJ?(X29v2Yw_+ zgBKbtU(5HR5Hd^pPo$H>)xdj_gD-FeSn$^`U%t$$IiaBx-*Rv+pALONv*_uNQ@fp& zk`czTisJ-PARQ;G%J|5SuyrBmVhOrcf|eF0nymUcZD}8y%bF1K$PFDo>cB7vLBh0~ zT@eqQeiBDcHwb-EqL1d(;86HdxIyH5SzR$c{cw?RYKMX6`BA=U^K+oNkX&(&LlO7E z8SOR)f8Yvt;7f2`lCkIk@#U(?9j+IH6$3Nb%LCVSU^*pxl(MLV%2dvp_?R* z^DX=D5BI)Ie39%O<2D4xd%ZyJx#5A|{a{a7%{{e?B!A&HND=st+;DgJR9pjPS|T|> z!%CKBa*J2+)40jB{Kx5C|DTG{wHyP z;>S(J(C$>}I;17jzxM&qP5T5-GLIP;7M5}t-jd{fEiFxSI1hNvuVKnDVtCq#HYugN zZz|(lCX+U)rX}F8ui<*~cNYf#lzZGWPPB0u=EX|2ehp7OH~uHaxRO@-jd4}x6q;%E zEflQOn9PA%>Q`T*ehJiD-)O>2=h92_B!_F**HXHe&d++6Qyr^qr%Sn9mJpew z`YKBsOKgRW;RMy}w@t>obhC0%PSBjE(5i8P1dnIQ>7Uyo#}ruBRI-%(EbkP3Ywf$Y&7&0irzc zYNb`uKm-gXbfJcD!yI49b>U~nz~B=ED$wdJC7IRYd|0~mwbq69dks*|b?mCbSbGab ztI$HDzXP4UmR_CQOs|1Dx$f2(d?x1}8*j^7>DDv;Df{Zy_#J%TPFv|aSQlD9xeZOe zo^DUx1^3tKZgKXF{_r_u_FlS;TSuC|4H`G_?9AwVSkSrp3Y{CEQ=QUDchDoboYDP9 zMfXNQ_lD~26ulkv=JE9n+_-$*n`(q8?99e)sj=;}G#hEF5i>PsBe&I=+)Qt!?ezBS zMBY_+SKf3#(s$o9I-~a#9!~HM(ZHjnTWFQd!h6@V(ygVL6`OuHvu3-pJlRX{ zq`Nq=y>v6(>uLykn)@%{++T`KSV-m0#TKA1G6WA#WLwXgu9OHF;A)(Kn;c~|*!q!( z5E*r3W{?*U>-eCJ6yV7BhX@43GWwa>9Z?q0HtlBNnjIBVCE`;|+n1+m?-HoVI5R!}QPA--{y_X5 z;k2nCcZd)!8jhqBcm*#DaC*`?&xp89RE$S~|2We-uFSX;Ycy2q!2S9}Te=7oH2+~% z>V`2e^kg*1%2Dj}gzIJV&NR|;Mnex<6MM8gxue`k{{}%HP($uIkB%td$jV9Fl>_%l zRtsZ8_->RfIi8;fVTcP5D&@f%|01i?(I8w!loUEDZ0Pw2G)AEeqIiI`EHeTG zgPG|LhkoQCPy_o%VwaZwkXCJU5L+Z5DFp%&$o4Kq?UITQ1v$^GVhLZaz0dt@wa; 
zHgkDkNvTQVV6fORiC%Rt%%4d_%~SUxbs8x|IK!CotB5~BJWgwQ$SIJTpj(`#MX_+` z9{6E0-M<`&`_PlT`It1OjO72ZE4+K?B!7d3A%eTE?eIE9e=kkMg*#fExA;6_z&heX zi|?Q}oxR0dT17>MTl|(r5n_dJv+@zQz!!Mp#!Ca#j~jVRS?r)gY0mVIG7}0hRY>^<0AS`B zoZ8(nmpnyKxW*>hDcmhp2MBtmVRrLRwXrrv&7iMO47ejWV$MADozlbW4^5SS8vD5c zCVTpzZHnK-JJXK?{f)xt5pRtB*LT|`B@}Ulo(QQKP(%5d7*cb8){qJcDj)qlo`kr~cXzy8 zqzkVY=nGXKN*QJw{9AM+&hr^Za1N&NF5#`?ZKg(Foj7VWRP~@Ss(~13c^OH?k(S(( zrN89BGBHXeHzW}$QiHU-0;sFqWob#xRo*yfs<|R{c;!1qgVtJRlx)!I9>0qf;MBET zliF6cfbsww7@1h7=gLFMDw9KZ=);o3*l%EOD#y*5A1BE7$j^T!L@dOg(9A5KRNk*$ zRq7>tn(9*Hp|Oi90-08nDwKs%Fno^p1TNuVR`kep6c1iKBO5e+fhVD`f$5YeRx}Hm zxTtS(`ZcxV_2SIl;QreB{{DWuMn#7RSv-mkmILSOjY5L!4$2sAn9r!pRP?b*9yNSP z5M2PJCxRHs8buD=@gXqh9Is@>gi@+9AW{&;uqv5y_Uq<%Do4n3%Xmza3&Oc@WpoUX z)u4BBAC0PI!bUu=NDA3BN<=9*h6cfo^fl-Z|0yg4LSKaU7(r5sAksu2tfdsVm5-lU zE&Tn!7gS|UT4zVxicyB48dB84KMuWxLdT=P^Su^gxg-|JuKM)$?c1#%yCQ;lZvpR09*i!ASbv?m41|Zqkb47R5IK0)?E!g-udrhc-!pjFL_dmf@==HiQ!DPZ8Ds zmG%%-V{+Y;=8HN#m zd3@tF4d1;R#vM0I?g5y7^8f*A#Z8_@9VZ^1#ksJuUx850O6}anvnJvX=W#4TFRvn5 zXOa;6_@u3izrf2ztw%|8db`ZrMdqCUInh>1BLMvqo`gz;%(QqN4sa3fO3+MuKC7yV z4(fM_Dv)Kl4NrC-1yYsG?0Q~t*?mFvL@z2<2CjVcICxpoW literal 0 HcmV?d00001 diff --git a/models/__pycache__/tokenization_bert.cpython-38.pyc b/models/__pycache__/tokenization_bert.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40946499a83bed9438f399ac40e4618cac1030ba GIT binary patch literal 19556 zcmeHvTZ|l6dR|raWqNupyoxtbREgAu&2V}sc{frfl$OIGDN{?0xSY{S8LiS&(^WIm zoWAi?H7WM?c%2oE^+VQ|wG9Wc01tgz$Pdm#5G2@_070IDAbCiD02Ld4a2~u!U>JxU z*vxvF@B2?xcU8{}hr6(YBuEeIRMn|d=knjqe=EK}F;O(|`;V_zs=v2v7=Of@(+Hk~Y z+~-~Ufw6SNeZf7A^U=2r_l*1E1H*mM8~rr1bPV?|xi91XW$!rdPvHI)_bl$u%Kfvh zwP2J=-BXLL4X;`2Izg?~w5xT;_dVZUZ7KV*r-JhLiLd!mCd}Tb`9WBCug!K&JDohsri*^s~M18ecBfelIx;|HhNvm*E?o zdQhtah#jZC7dYPe(RzF0%=X4jbsf~#e#s zb+4t|sfOck0A#yO?JIkCDwFyVx~%TqWkkz6bUL*PNCqA7+LV8%~#v*xes z*-hPjb+^UtMygNfGA4=LJKb&7_iA1M!FV?@Pq!0Pqgn1U#nQppv2p$r_rptXP!V-dbqopf 
zcIwI0R5L8Y2q)ZDrQW*dsS3MayjF#F*Z<{lHHu>nhkpx2U~HKWjZHJKdZPcinQv!) zesa_5Sz8&@XFf2#Xne)EWqfD^*z=#Oa$MhbY|#KZAldsibqc%I3|gpmYgN%>)cta3szXlD9--&6i{w^py$y3=d5VA`p# zc&o2X&)&Fv_1bi!<$860THc9twhAe(+`bsWI|26G79Nfd@vF!!4WaTM{VPN61+ z4WNrC)7an)dd8LksN?MKUo#eXFW3>dn%zscI!)h(x>2jz0|gFhqvtdMHQQ;rw%@MT zf`E5WC>^_9^QxZRTD1f3lb~F(!kj<{C$__VmJif%3@$Lx#dEj~4-JN>L`(MSc;Jg! zfu&{fS1{FS)Kzh$e$oat{tr-i28;@W2E|gw%P!@-{L+Y5SQ_<;OJnX!-!fKB8U+tV zm&Uybng$PyPtB!CT*E|=>wPp7pk|A)fb#(w3^*U8(SY+I8V)!ghPwTXcjUO?9o1)C z$-R60vy6MfefB|a>6rVG3R?mHwbx*m^qtyv0>bv)3yDy;4vu?>diIGlXl;>Ew zr!fl6fZWolZy8x*>3Oijxp3k#Xk<|<`k>Ko0X9V&zqwWu$mmo9;OX+!TZ?uKWmUE> zQ=KLXKY*TelSnMfJB#Zzf6xvLweG1}0Fyzr8upAB5Hrg^6)30a!-@b)_~jiegk_RX zb={_o08QPu8!bN&Dp~hz>h5;K3)Wk%uLl*bu>JK`r|#OyTlEyCUl)dkNUwnrj1p@E^D>| zwiO9W0gH8U#qCZ2n0n0^H<(*$GQzZdHu@WwklTs`LsB}~5MIQ{5D^hTP46zY2$2Hu zY_12O&Xu~C0?`os=lUX9$VG%{L`?S}HFdWbR?8vV?I^)g!pbe%?=|c9?R5tmMAPrm1N?XGlcb>US96TZ{IZtQ8n zB8np%4*hEXyzQ0O%64$CHK?NmNG+}3i3o_=YOSh;0w#f`kb2yE1VoOF;e2s|@Jas$ zVhoxLnaRE`C~)e2OV1eeLqx!35~K#z@WhUnSOgaaBC7@Kpi8e>bLtujk$fW>+StAI zs|n$=H!0nmy|OdBbzcHot_a-|vgc*t-X~6j(9}S8U=EN;Vrco~EVl;6P+l{z>t1s$ zSWnEfTK9jPna3p>wFXGf3kAiZ?`2 z8+tR;=7DgL+0?pTC9>&oc9b6r(Z9xRG9JjD(@rRx136V}Gzo$x`w6Tf4WnM$@UTNb zSh^nC*6u>BcZG4@aoP@jS@v1q)1uT~uK9i^4W^5+`vnU8dS?Zm%0|+2Ii1vwrsZ{i z`X^p{^ey=(S$lV$rXq-3;XwNwp0UPwNwn$bEJ z=O5){H^hlM2Tl}dI8j z8ksK@6(<@V*rrK@V+jo?9PMio;b?#74M+Q%LO9x&x?yo3NWvpAfu}hsoJdhm_q%gW z^OXQn3^CKTuo&AMWxib*yma>Ft*eW-X0Oj*y;6Diy(?F5ROV;jy;@pMS*IBQE{kIU zhkp#k8nE*5epvq|{R2I7)%r#2PIe(-I3iMEl`n)DOeDBi-y4>7P{`|06RN~0-l1vYxv;=mV@0x zZy>%Z@zEr#v|60P=GxlC>D&*&3Rz!5^7|T zpPPzJhDb#o{d?5=bdVTU_u2H2Df@2QxP7Mz-4FpshJXQ~zKvV>JTlO!G@5@B6+>f?etly?8phH9c$Z9zpGO@4WV$A>-110JP!E+n@j5A`r z-?rf6JHMHsyhVL>Bd^|oH!#=BJcJa#a}u0y+|F-7+CMa3M;~`iHpinHNNP$3NM38c zWD4PyMub%a-C+T8fzeJcJc#`e#;2&_F-wu5!@SdmYtL1eaRW{l%6Nef#>G^P+dhk` zVZ4p@St-O*pTIXjxOwwPX51XN4qM~fK-GUE1uBA#j#2>?1s|3NN1OnNXJGQ7`H;&r zyRXrUL!*!GPFWswlR%ZY>%s!8xiD6=niom)vR%q(v}b@QEw3!S&Yc!@m~mY8ucwN# zQgiY;n#LBg8{Eu8Ys-R% 
zRa0rUdohW=zC}R`Lwh!eOQ0`WH8DveSnA65Bh&6?>@#OVgAw}MFpVz-=5V@)_%wIKvBtBg-fe-kDX{OVBm0^vSW-#-jEr4d0A9r`$Jsy2b_Zb zmR7;N-@FWbrTS9~h*DpKMw##6-GyT;q zYxgP_d9hrcUaPl!A0egkdeEq!k!MpNb((M5geFq4ji^>z)j%P`gwRJD)R1tzuh)ZJ zu*t+;H{6wSnti!hs*h2G88i+vXfw>gws_8W3*B}B2bhCqiWgBt(k6q%?!ZzpPGk_qXuhG>N>O?)Gf%fbZc5;&L1SD z1BgJ?;DjDgC#^1W7Me-?W>4%qI{>kqr&aLGi>%zvM@nqmRtY?;>; zUYnu$Nx~Enovm=O?le|h=dIId{@>Bc=)RtoWi&w|&}og%uh40dH0N~>%Hv2fYF7PS zT!&dQeVA!DZFL>@u?^6de9Gk;qD6{@Br`g?%Yb9(*{2n69AKy#HY_aKBM4oW&F)K2 zHRbBCrbJmfhr|CW3aud}v?8rJm61&WPZ#u}%u)ESTx$)|aRDB%?X{?bB_*vgY(-tG zly&|rDJkb5>9LJDh|KyMT0)Wo(y^B|Q%5E61b?A~9cY2{$!bb8lpS;_M!6*T98+dr zSjrnR>aRZ(-|0n8{&*g`ZkTO){deF*)ha_mU@ntwbnSer z$v~!duS?Jw(R6U4R)8bTPofI#mXg2|wSEc~0{QF(Kj8}ge@xmxrL3eXM&)u@i|OEg z+Y?1|yACD|rr01-#OUgVgNr&f!ViS%TJ5ga?BkBOjq!Xo3F&!}1tJw-JuQR9L=0(B zFszTM6+U-Zh&+%&vAss{nEteihiN>I-rW zU8`rQ7omf}#%jWP4YF?bIAp{_WMDN9IIY=CSmH^+Q?zXSBdnKQ#r{#6)JS_|PN@&D5}kHs-cyVcD5gPe+mj&Nv{#v6VBXnrNF8 z_8n{E?hsnl8lJoZtf_Tebf?p}5*!R1=G*rg^qJZ4?sxqK=PoI8(3V8{_05@N4ZI$; zDpK*G2uj!=n>R#J2f-+>Y~Mt}75wehb+5Vs>W2TGB%bIGQiz*BctCa9&}3dv^t5QQ zo=~*0)!#t@-?9&?Q6KX9z*pa!o4s7QcKycH1#!nODDK-~-iLhhTpW3#uMmvQfZ5BpyNSt))$GKj@;m@N;<`Uw=Aqcw%=28~< zfDn1O%4spTl*b1}_{<13BmC3|xq$_KZiMqFKRLn~fkp_OXVjO_sq>!!F4>*t;+#DC zbJO7+&FK=7Rh{Ne*;Z#0pV`24h!Qc+>tr$5rSv8taVDsiBjqBkSfpYaKa$b)H;}WI zh>pfn`M)neOcWEaQaU;tuji@k*{1_-JkbBobND|r&!I~HSN!8OCFL9rpFve*MeyiBa zG4;R6#EVh3jB9#XsF4O#NA&R!aWaGquLOCddrVNd#7OyG9&$J1{3;gSHwa|8NPS3r z3<=9)IDOq-piE^}^l8aWhm1w6#Xl{UoldRuJ1g>Onf5J3N&4quYz$In`pK}-aGn}C zaMA+V#_!y@gd{w74v9d6^x;z9n}jX3zZSm5-8Ps z;J)4Ns5VlfktjCbx@Xsz5!gVk5sY|kNziSiHA|!wK-=WUp~wcj>(wRwY1?=+tN2u8 zX8Gp!4?@eacv>#?o$690S}K%Z*{cRnxTfgbC`}6ahPh4?x{UgVsKS?}^07fhJ{lm& zhex%MwN1#X(|D?xBqs?;!W`kj3ieKkgSF$_sxXf%P{e%WXT))()s$~mMKwkYnIx`W zU~!Ivjxm`j@+_&Zuv&E53B=gofsY@E79)jdG1MN~slL@I)4WS9Jo@4 z)epN&jsm}cH?rLq7x)PaR1lqg7JJ3hA9IA6`D<4~>)oc9U8P(&21Mce8P9bBCmd}u zr3Ig{cwvEYvNaf{>Lw5$~6Z@6zn|3d!R$03y7Ocx}mhx`yxWRT}(`MZ~ zY})LdbMA;+K+aj7*=9h%X!m82G(LE=|J^jj&agnzmha-V??1**%)6AW%RPn!!brG( 
z1chW!O>W|2m@SD(>-;ICO3-k|iH0EDudm^xR#^<;;ftvIr^EwEE{{E|`^6n$pJbt= zAqBwxX+T4VS0uqtjLLK(5`NTP^XU-Cjjvj!JO;2;U1>D{X^bxHji+kD1iXo|mt zx?krMh0Cn{!dKc}&~Y!=k5(4MLtC+E?Ls`s1?TpO={XEw8&j?3xUjt&4QT&VyGK!fjFApO= z7EftiHw8S>BM;%cpT$KFqG}_j_Ty}AnEp@WoWZ?~b2hrq;r;^d^U?hX?l0lK5Z#aB z{wD5=(ft_imvKKH-A~}Yjr+;yejn~1;C}yxsUElo5Pvubjyu%-+jI1$5ajw(o4K3x z<7_7AI2sB`-TMlq>z}}aVRkQ9B?JU z0a+l{@(X&e@e5jPs`8xz#3gt{UFP0{bQlx*8Mz?dhYD`CzYpcQmv4HCNfC^aA z{~YSVVO}_f%?Gh`^1_K>_zjelQg(4(yOiKd8dE*?zRBXJSbT%U>nx}oC~k9#0!+A1 z5z&fcC*~tF6Lo@qgRgQ@+!LWh94HowFBZm%M+y^#W8zh);z8<{!QCYsA{$eVjDlo6 z(~$w+Eu0fMcZBEu$zc$wh5t1KUx-oK$3ZyyZltODF0Y2LL^%Fe0)%KaAopR%Jj+?96JQ6n z3?~`?Xf5%*IUN4)qaaml6#)9vzX4^-#t#QKf@n0%Zf7=V$5^0N<2Gy&(D68&=twNz z%;VjWUS79^s_;F1wFw|^jv_t1fV$5>=TIkj_cPXUCvFW^= zH|^7%rn9=rPX|xq%hFms3bNTq+R&DvBR11tHO#Vpp23QTk1XP_={mOewRL+rElCpd z+97B*NG2e5G~#4_iBzX+BLC(^>~*a(XGBR{1ZSTHjVF{aVJK10CDWU7U3>h$gDVIw z@`W~leuY;wcN8`~DDT)SRvee#bwfc5i!gvA`zOrMs47-F_{_7j0t*$P1UU~gE@j`J zl5uBnVT{woUx0FkEO4kD{Np_pkb^j=gfJUi8%{h^otvZ>-ig^GGV_=Z{5$9!pXEPU z1`mXaT#2Zq6#U~@vS2D!F(;C-bW%(319+~ZD*p&q_{RT!n_&^k7wre9y1ev!^1Gkd zBo_LwAg)vQ6?ay>Cy7eTjcMs%>LuNdfxEEClmi`z(JQ*kw;6s=|A58Mu=te4DHek6 zGq{4awQ?JALfIi?6$y0ommC%@>PW|1`|ytc%P6uL0>1#;^q4uGDOv^i*q`F>PqL4* d$DXEL{s<-Z9)Ew5KT;SgjPC!2v4Z2!{{jMwp&kGL literal 0 HcmV?d00001 diff --git a/models/__pycache__/vit.cpython-38.pyc b/models/__pycache__/vit.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..337092ff9dc369d6bd48881dbb8f560d2fb88075 GIT binary patch literal 8249 zcma)BO_1Eib;j=ufSsBB)@#E|~;V z=xMxujeh<5{o|8r)lqQ$>MgGszoIDrP7jlpfrs~SCtsowiclk^QT(eNt)Zz!U+-v| zqADGuVKPLEN}Y0}%zZtwI(EbEI1PuNjcBG*X;ipxM%7NOQR~b$W>w{uB1)qCL=ojc zYtKE_8}ooz!Un`1Ko$UT#0(%a1IQvE6;TDGI)E&RnwWi}G)@WiBc(of1iF>Ax|)}J zzDz?us%yF7HCr3GbJtIsn;&-81A*b13^u|f4W!rUih!`Hsoaj6Ufh)(Kk{;?Ypj@xZ6n}+%R^35+-3cc0ZPWoU}kkATJQg#XI4{K)QY`Tp2`u3gkFQ zA9Uqb;<&ihzGiyRG)u!C{;5y2>bR3NG^z4fIZzLAa+%(n%hVk`(|Tv{ucg|7o~fC3 zsAY<0WX59!|0YAVOlN2bPvy)!R54-+(x>mPyGuiG1GPgW0jKkFPaxISY((Nz~ 
ze3|R5=H*=Xn<@3K9Nui$`IhMQ_?gfO_MPb_^Ock(8hOgT_8^-$db zYYx;=PaE~X+QH}$22quyh;>8G;!n;|LmQIw1l8mc{&Mx9{1(Bud5IupNW}wRiqm*V zhxV^4bCL?I|0tb@kq+IBkET?@7}f_4MxhIC%R3~lT~`XaqGj|pDQzbXW2WbJyxs9O179T2tEL|X zd@Pd0GuQ8J-Ioj0v;8!UJyN0E=*ez7x2Y`vH6GiLz6imTB(LJR;Evbvdv&W2EM0^h zS*Ledc&kTrP|qvYyg>A<@F>yZ5i%e6NDi4=xDO>EIVBk^sg_n#m(>dNr-G;c!su9D ztVN_rtVN^>;I!hwy$m$tsVY|i9jF5LC*+TnVKMRV2j29CiR^^6RqcO!Ou}<8n<`L; z7xn38YDkhZr9GrXejm>uhqij0goTYDJx0N0pbaT_X-vUM#1ls4|6s=@ym-hC>Yt!x zxXJN(lY@E=EhiEG|IB!sR(|U_%g+yXI9{EwhO2+;1SiILP11+qMan?>$|vzlbTYNA ze5UW{JI0PVw66znN|_-bF>O5qw5^|04$T7t4p*%7?0u*~qeiyqsqQjOXa!1^dpFaBL*|TRyxSv7VNQ~+= zn1M|n{`C*=l$YTj`H3~R3~*;V4RSlBaG}?Q<9mbAl5Q*Q_zxxP@l^s6GmaFd<%c}Qsbt_lz=LV_TF{xueL2D%E&?wOT74;>J z?0};gunQIK99l>1&yNLQ=%GycI;IQ^He2nwdb94x?*J-YYF?p+++0EOA5o7MlYdMN zxhVPj)I85Iy@ml7H@iu=^{S;8KS?7^+*23H99_vZG$Z!}+YgB{=ojd1y3H1x4??@b zWtHHn$Z4q)!JU(vH==HH>uY|WGWXulC6QNur_`L_5tz=mY3h7W@Pz`(=g;If-X~Yh8l`crW}?@(eh>Hs9YnAGBq4aSU*G@AHYDMkW`1rpviRt^j@1N19KU<@=QDy(ui3f>g+@W*(q{0TK5qp9o1)N?}G zX)CM-pQ6JNn4-R{o>TkPVhck}gId|g)rt;D{vMj#zD@D;8aO!){a&nB#KSwZ9E&jc zs3FNPp4BRnMw+;%uGzs;@&P(yy+W{eiuH(+IU6Bgk;Iunsa+ao0A9rPQ(+xYl$nCb=tM1RP$LQ9lZzGwDcPmR=U!(0e$U;DyL%Me2N%RP!KK~M3FEXf8mZx}vwNB__sKZT6-h1T&kHN7Wu-&? 
z1Dx0wD&{ZjD!b~gwyWmS6kXM_jERPr_yE6Riku& zA7uu9st%u^Jf|^whJ3E5KnAM*mte$*@z>=>a&`1XM-g^5yd>-g?s=G+3vRmIi-P)9 zC!*rQBjIAgp!gaJkH#zakTZ)CWK)7Xj=PcY&Z)|u#Y)2=&3@YLeNq{KbH z-t9&pH;Dc9C~()2sIWo;!3|qb)a~gb9Of|Y`Bv2RX`|iyC>4Yvz>Zt~c9gpJwp>ng zh^AqB|)eKO(Ln&_R~#RpZk8a4Ks9dS@xM=9wd0U6#}6c;u3UPBu2RW*0U3JB;xxpI3X=<-is_ApWHO{FUoNmKYt4wUoJ4Oqj^E986+ zltbm`+C2j~C54ETbBAfrL9tI;xgmk=WKS5Ekx zQGl>=4dI?2CGVg!oXny92{Os&QW%p^pw48zx}&Brv3sRGW6#lY`KV&Q=O~E5DlF zP(+5`qaKHc%q9*uHUZniolpQ)GPM7mh@v>20=vHX`Y)IM@z%ff|L)zVIOil4YxT1e zWsV9Nmtn=PSeVlx4oUu;)+Dy)Gle|x`R6kRTGPzY5Tbqg~*ruW{^9D(B8(W`m3S_E?iQC`oA}7pG_getSF;0}dhHvSkchBq{uiVzR2l z7Y2A1-f&`cgpIg_u(^I_uE`EomN7MyGLvNE`pvtjbULV;$4TnP&54Rxad@TTyySV{ zLi-WEK}focoO+UkZB%XT9FMI33Zs9Edr%*y+SanV0PfDI{qH=7ca!olrTauLNjNi* z(g$MKriMU9Uy~2;EbmjpO7_;MH`2L$Htu@GK0FaL{YSq5PElFW`mdZM!^P$Ku#R3fA-`74fC>D z`6;H#t>HJ?-S7u0y8rpO42lz{VcZYXnn4>(}Pv3kj?G?Q})lHUPO+K~HFxB;UU z$Xx>e@5Pe_P<-5piw0GFgR7(Hzmp|%Py&oE(v?eU{F1mOef3Nf1-nPFXRgu zavO#Na;CO*z)+OS^fL-c7vs>gaYhmPBGoQTK#{g^zl17CX-k*C$sk<=i8CN&n1RCm z0EVIU)PGj^;LNoLz_JDHWCObCg_cSqsuV+6AZvrM>{ zHB*!bc}9hbhQKXRin3#;RZ+=IQO3styme7&Y0ID)1wwq6$PkUN*{ZG;!uu|muz?%b zSML@=%?C~)jl6~?FRzEqZQM7J@c#_~4Ek7w0)B*J^O{{5q%}p&#%bXTXc5`cS2_6CUYEW%;=2?6 z5cSJ1fSa2=Nm+!`SQ`0QWI{o6tJeiQuiXz5d|~o}xQQLeMr}l0;o*po#vD1y;33kF z!e1v1yDP&YY0IrRc)+-g$`p!2>#=)J8dbs>t$$1{^HUSlkFmbQ$_aS_s{dH(oVKi0 z;2f8u+v$g6OED;@CVt0H<5Zt9Y`GYG?peY3!@|-N&0-%SeP8*z&c?k$5pqy zR0K0zS1T%V6a%ty@-`+|H1;gkHPD0)gYS@5C(CFoiXKq6og+V|FPvJqxUjVFAeIM9eeIIjTFc=&Jo8X0kFav_3D2XB|f;dQ!zz`yaqSO@C(=$~w(=&Yx zt9o#>dTdFcB+485qj+JDy;@wp2th32FD?7* ziH9ffb6$-_B4(r*F{7qYjTWQwuTeDQ-&iq*e`D2nEm2HFStedh)>6e(EnQ5@vr)|y zGe}QVGqr3nTgw%5wR|yO8!8Uz{9(yYBHv3(RYz*0#nIYWaZHyPuMHWIXr#8QI3X$N z>h9WPaZ>Ix)jhST;#6&Kac^y3abInJaewVV@qpxIt9R5677y0$EZ$iw6brSxig(ox z6%W;wDG!~XlSp~gd<-d% z>6BAQdEERUQa-3tP9x?^T>H#=gcAJqRyE&UqH?aI_Ej$d|2naXnq7aAJI7%kn>TUQ#4;f&PzJyL&*7< z&UxAVIC4I&bDl>|N#`t>W#p7~&PC)b>YOFhM2@L*=1t>bWZG&?T(~;lu$NW~Po92$ zzEEqJR&}=UHSXHE>8P7A%gypqwd^?4hMRtFqgiRx%T+fqzqwJhrsHmM-l{td+fAC* 
zrOJ}!Cd}s54a+s^_35acMkRP{^rYQzoYVED-PpK#4!3UV{KeC}!3UBjPR>7gW^Ov+ z=Faom&uun0HkGU8%1)?gaCkX4!5AQ}6AUT52{*(xuXJ z!@gX$O?R(tZB)xkR>`)Sn|3`=!*s@-ezNRXfo8l|X|7%?et zuV|xDMGO0szUyRp(mgyC7_nzOZHY@1*$<+o%Hqa_}|D1KD zc@ZtK>PyxI+cGOl&4#T5~&S_=2VtMcFo^PyM z_3mU$@d&;!ZebTH>Slc%)pLx!(Jd@j>Sk%f-mF{YMal6>(Lf=Uaczn8bcc7TO*ssug0)+Mlr%pv)e)MC;R(2~V!iEfRwN6hHnh?!qU3Fnl48sd|Ep*hhWGKa4jTSN69*Eu6nuBx9# zrQ8)MHzws;I%iyRejrf8g3j3`IsdKXOvpQDTH}0D`AoqKJ%}0Fy`DuY{>Y!BjM>xQ zkD{Nicyl({9=R4ZcdsY#R;0NH^-b=@Xl;$6?$Ny%7nW@ggA6o2W1!DdXo+Dz9BAVN zayb%){Y*Q)J;k}%%D0ZTht~GC^J{yV<20N5%)Lzc;fT49srYyQk0h=|KOIH8A4R+O zq1}f4kus-IhPx?J9vJ@$fQ+0L<6rCP1Dj;&Oh$I8`3 zYx%)rC(b>2`phwzIp^3Fg812ut8V7e6{~Jt*{~n~1iKN(C2kmRwT@IVsio#}NezYr zf}teuJX&omm8;I%+{N|E#sN*^=d)k>BQ=!pg--e!$Ny`bkjkIF z_10U(T*;~}S|(Sb(;B|ORcqOL@>*jB5Iu#mZI`clNx4F;;?&9j_8?gfoUK=yl`@9R z@)m4o8BE{l8a8O@`eqFymhG%XH+QzS(XgARZJRVVi6+Z-6P7$04fJTE++1~2xSNm_;J zaxsMh()mmUL(G1n`g*yBo+f2JFiL#1pXFN#Npe#HbTF1ld>)y_oU_!}u;kuxV`z~Z zwK}WbzMt>z8L(tOj!dVGOT;juX(JuYM~9>FsPR@j&ihd#kEb~9#*v8gtQ3E+yqz@a(=mC`BZ&>|M+jw*|I;m6meja7BQNzf-l{6;4m5z-gcLHUGqc&Ty z#6|@F68gb8f*UIW!ApcfmtyR5k!8>nX6)5;F>a>Kq?vj(T1CS?G3Xl#A86aPwxoQnxQ=sbM^i02M__qwUD{m>Fx1Z|_1{`g(MG!i-;!UN`XF-GHwNGkJ|)n{2c69+PM* zMfNzi0j?7Opj0uh+=#j{Yr}E#Uaye|uGv10etZo8<1KfOpW_dgjE9@XhHX@_f2OCf zPc}EO5Yr_@}iXTR;;Y5`JpW5A-zhJGIb7bM>0AT=Fcw4IDzQln@odUxTu-^)D`Ybh$~5nZ6(`DlmDjLF`(zPeXbo@!)`Mp zpE25rcB-A&gS%yxh_wwVu_jAAiEEr?*Fe9&9@{e7amj6Ci6+~iiMNouof0}e-Hx}@ zdq}HC*3xEt-LQWKd6_oo{;g~~)6TwE@<4rMkb+X%IenMG0#3}?CVC?4^M+xkc+WoD z%eYMEF~Fn`2tw41B=AHOQ*G2&RKXhaHh7VVY4Gk9UN&)=&e<*e;{psrpu@f34hNPH zo~3SgNb()ErW|*21M-&Ax^)$hy9!B03AkhhI4!5DQAi}~Sg+^|`M#U=H7y;y-n5O?E91T?s}Gf3irXQ{1S;7^RY% zEm79A39(1X&6i5xyQ?}URic~;i<3*zehn9QNUzu>t6KF|t+bb9k>D4*gu9={&l$xf z9gQ1#S;%;ZMBb9z9{BF9S%-VbgYN>{0o?)HV`j{ZLmV6@NW;Gg#e)@-X2#6oIc4U| zJpN5X$UI_>0taLS2V}8c#@#WF$3<*AUk(;xSP8Y``0-!o9uP_$=q+&50BjW)K8a1T z9;VHCDNP9;$*Gm(4*N@3pn2=uJkY8F zoCWU0!r^<(=|aUR)Emu0xgZiNNCyjzWkJ9oQ5Hdd;t3>~5>ggK`X=wG4a$w}=54DA 
z*5#7LP3DdVpRi;t`>Oo_iV0LWGriYd;e#w&!d-ivRqVpYO8#e*kk3u6mK~(oZuZ06 zxI*38XQgnsCb}L zFENT!3gQ=mK>@5$XX`6$&bSF|Q-%n%gT~xpr;Gvd>$?#X*d^G-9j`d0B}%Y6`L=9Q zAH2x;{0r3W%;G{!lZ@^IC1*slZ-MGdq)F)+%2%){r0-MyzU0d{~7BQ zVl3Lax3^7w-Uu4BkKxwd#^pv-wWq5o<6t5pXTOS@a=bzAKy7XOQ#Ti5%-tq;@RWDCC3rx|0q&z)9q zti3V1d*B##%7=z1hEH&cWUd~@jiO>a+66NQFuMlo1hj<$+Q#=sC}`M@lSd%j+b9L)-=}Jj3d-wQH z`1~VP3?(pSkmtnNceXzQ?kF2=-Phayey`1)SxWOI{kI?glID>?_&zLrkVhB@5K17c zKyqb3FlFpVk&40C1{nr^fMk_=j_K!IJF#WdFM-&@8}<+QrBOCiT0KLM3N8e^%U*~* zR}2p3>kzQ&)HqUeerm2>xLIwI4{iklSO{j94H4`n+Z3{qf-zGq8-C+T-QeS zpvI{PS~T7sUmJxeam>uV&L`alh%rA2G4D7;w)yt>_1M-f2r_qJnGMw+3brm(W(TvuCMli4SAH9%xO|72lWTj>m)PqNIYMwc+0HJ_7vGlvr-d;ECl=4z6D;={sNY+{a1O}!Ue#UYq>SNxLP%jR-sgS z9CtRK!H4_L_6-!8F4+Gy-}_l!{su4Pv=xmXTY{V@3Goid3;P)+3uP}``7$3r$qV|5h7~ zb^FJeK?cGm?*;)}0~*(r&Bmr<{~!{k_bawy$m4t=f`Vv=$j^s%&bpf+?E`R)+ua@+ z5q@LROYVAeILv^!V_Goi7jueZwaOLCRMb*Qm|puw(X?WQY>PY>b3&(h4?@qV2Y1x3 zNP{!`vwU~hXTrf5m8xCOkOz2FOL4+03BjySGZv?OLe9(9)fW#A%26FC9vGO9?i5El z%@rc6xTh;YsICpmEDmv|pqSR3-b1h+N>U)+O{oEOx?sJqMHzy2O0FW=Y$6?tLzFcN zVk#~FPDZB;Jd;a{O&An|9l(ES2)u?NQWH@ct`L@?6w8c)%z}6=o{fQ{Gbm=8Li&UO zl(DxLWq8b8lFZ%Q8L+RPM>Fj2e^@O#X|f)4=53f8UUDODGchBR=JyFs?f>b;7D8$l5D$9a**KMBW`j z|0MF___1E+NnAR7nwFhW$_D5U9wh;B1jG&^cS}L$#x#g1ph%2@+afoX6$;dtv&*On zqqc&7)L0SQ+JBuF;TjSUR6+DB0s@<|4fYs< zzlVD_uS5|_EFYL|X))Kqam}4~|O{Fo3shCXrM_97Y z8mR|IoI-Im;(qfxON5Kx`(yJH_?^Vh`7$m|ED-2hqmZPHvZ+lh6QFR4o_VFvA#p7Z zO?Gt4XeK2Mk~nsD&44Icr9j87Q_?ymVTgpjIG4*v36Bwn=7x8rp zI`D-&u8Hj-E?o>}f-RhmYf|Ajns5IcF4M#I)4V^xON$qu2DkqgKHkBLPyij||4*1E zOP)B^O>EX7wfAWBgvb%x1Wb6WD_!dy5^&Csnw}W)$_I=;k7q_PFsGSR+EM0CFnV!(XzSf++CqJkyl!-+k-IFk8yOfcuip_&?1IfYklhy~ynU3AF`Qt$!){0p9-8OCFtrBC6vSI4=u>FNLp*}E!XPBG#tK`?v=;{8e1Y8_J}1nzr5)H zfohI#7bN4|5@a`{m8Ia6hef*Vzb%`LL}mw&%1qdTtOAm0`(NX;{iva@a26+#sYKCf za8OxrNJ;!hj;JNeLMFjqqFutV5xnbT-U6!A0lb%wzA=m#RVE{T}Q6zj!%^iyIe{`#8;d?0>^lSr=Ts_P^rgX+#H!=nB36){R0;5>h#x;mH^SM*a7A zR~Ztl;w~RtG|6adqF3^^0XNuYe& zFOO7+7Dn5!&xVcq?}&OK(H;kW*@ZU!I8+N!fy{X&zPG;&ei6CSQo&$Q)%+T`N1gWh 
zsBq;F*_)|rF(?lTXhTZ=P1#>>reVPhB}+77rnXX_i+nD+Y>2f)dJ78vwcJ`Bmh-#Y zyFgWN!%vViy`J2fM7v=s;HA74eJye=1@*(E;HD^)0I)`f0%g+7?Zvu%Z)9r(2Sn2Nmqk z8Z0a;upU(g%L@?m!d+*!e>LjO*}_GuDR$9;I?uqc*IJmJo$c9|Tx7B@Cvd|-|D3Tb zZV#`fsfZMIJ_K~I9t9M0l{z$-AzXOPcn!1lqtISz0Ic`2n^ZA$&)C3e+SWWgs2*gzB>hnr;UW*C znq?3Az}7pJ{)ST7YC!Mtp`G#ZExjSg9)q7+(*t#|+h|(uNSNkuL~`S1eR|1pN4p3L z2eb|0X>pgh6?PeO{439_&071$S_WP2l-@$XgYpU1N-K}+jVkbPPF(lNixH(_0!^Pi%|H(SD}cP+u` ztO(oY&YYPS$T(Ou@`va}j{Tv`hN6)GQRJy}zHYNr2uk5u@F)L$JEAoamt25*DGW04 zRsWCoq%#C3AHhwBE`lIL%;~@%`>LGn9U1)0VZV>xu!R2!+3N zDKCPz7&Pordj{2p))D)sorbtb#AYM_lGacjjspsJxtR;_5vW-ZlL=C`zZbbuK6Uew0fF9+MG!(31ISw#b8;STKs0@0u5nLK+Nw zM(~Xv3apeNnzAy3aD!Dkiz;fVBGBpp>vJODD-Kxz*qdI6xC zGjP^^-s>1Ki_&8R+S7lkxY7ALiJvo!i@({8HsW3=9dy*5KqEXg=LjUa5*k6{2$zy7 zhE|{?S6LqR+w^}c)H?Ag3AXO0n|v3V@N-@rz#DO!Devsm&0W0Ecj^dNI7*_l4NjdR z9ZX2CfCog;d;>_AmITwuz#N9bzB@{pxUwd~e6ChD22Sb!;B`1WrMjKp)|Bq;OsSp6 zy$g|KxK(O0`_1Jqg_`BhrPr*R&gC9_@9%hZ$Xqhz9hpl3*Z|i+{O}8_uW&w{p1tT# z=wVD%a6&r(dz07k^}Uk}#SM z@_;p$5e7%}4QAtVoxZGCR^qAY-EV;RRDoaQqhvm-&aB+icN&JB>}}sgC=+=Jxio5N zM%N7dV#_d$v5#Q+fok9qq?iqwTXdq@~(lkBMnkt<22EY;z8E;%x&?@2^(7Ln_O*CpJ+ zSdHiW{?ECrh_xcdcx(zJFU=b#qpiC%BOpLPuZY>un{&N(i+=^xs7N|_C8txalHwo? 
zUOw!0UmRl?79BGQ^S@lV;@(MBjl(cMl@)Z|Dp9k^*Wk|%JBk&XzFvWi*4b?bknumW zFC^c=-zUh=$D>KZW=_vmiJ{aJ_&HTvJf*=4&^N+V%_zn#Y!@;q7PiZyh%-^l@EK2W zdCKaiggoU?IPk&970V^oh<)6K$@(1^;{4SX82h> zA{+0Kh3~;0QM?%9Ak1=x!qqIO9diPH?vF@4+`v`%6BQ0GG;~0%1xBl2JSxj9TwaA5 zaABcZUc_!&5Roe^Z!c9$3m(-A3*sq1i;x%P=De`BD1z3A`$XZ9#^Tx|3x|&%onDyk ze2gEQZy@4@`fS;e{Qx6v&v}ee9DKP7kK4`iilt&{5E@7`#d#afXcQbFP=+Fs3RoPP zIPyEi?;Q35Ccu&$gwwQCp+?4?Wy7uhUrT zJFR;yeGFBpR0wJ-AZ!_yD&|D*6Fu=eyw<%F*e=ffz=e0XQCEnOkquPE-dgbu?w!KB z3jK@8S`lIBbZGRtH|dVc+3P+WKc_VS;r8L*(<1qu5m~t7wM%d_Tt8)JcbeR3OJSqB zsrJ3bSDhdXx{DgK^AjCWy1zZYt;y`vck46R^Um#0V`mhmIbI=98Jq=Cs*FE3_w2x|`SL&+>Z3yfk;gtGgVh@vRI%#3HaFW^@jr-t-s0e2`=5N+ukD{n#0(*`# zgB017CHXEyOsfr3ZJugn(Ix^AGMcZU33vsXmI9ljT6fsl1iAp44HDKU1>aC0Rib)0 zbQ8VuOQMPFY(~BIXcNs6GN}ctxiqVQNpK7teAa0c%6ceoYt_3Il=feIe4$j5t~;BHGD@?B7Z4^WF6zyLj8uY)b zzeYL%6eAaUL%QZg7VRBkju-sHxG6+lAR9owC<6~l1>rKW>RIKYwTf|T*b2S+GQR*Z zrBDDs)j*2uq$sFC6BW=_tE#DmO0;yC%U#wa(8vF|XR1bA~y@d=g zFE2jA1=>xAcCG$diL3KKE$(;XmA{(4hhyh6B1ptuvD(ol&-PZ zgp>q>3nK?i&ER!0&)OO01kmB46&`avtRd>YGphcZvzFK^Ey!BE)prp5gUZ>`0-;A} z#7)H1f@(S3y@eN7f!fQUHPz^xJ>`6`@aXL9F~nzc9Ka*BizRagli}}Qt{orah&yC> zo&rF;my3{z3KDBw82CK62Ckp-rAEap0HC5THIBFcMJ#6QB#DruCKBzJiJC3Q%PPbR zk(#x{y#OS)u<+6YM+?VaURVh8FEfSX0%`SnXnF&3A!UF@|@yZVx z=1vb-vhQH~MI?S}XvC$B64{AiZ^9#f)19%yfM2v$h;qV1$r+GQ1vCNJf#OqEq5~xf zfG4BHQMm5WiLykTS%5CJVYsGxhCj3*`%T3I07y2l*akiUyht3hK$}i%1*>pkK{c>@ zE{^Z?(v$k7K@%cFs#xy6X^l!e&b5r}!k14y6H zr?UqL=|qyGc7xv$?q-DvW(UObDqx#n>kJx2|H}i$(97>0K~f=a7kYGv>1lu#+Y?PN zS(qc|Xu&TM<_QOYnK}XwR+bS?(Hkj68)5u8UPuiP#zdwF#Ds-@J8;I^PKxB?XK43! zGe|S&tzKB8HldDq6$TGC1IR}UA)Fy(bE;T`o@ErgZyHMO@0H@rkC;j<|$IHf+unn#iRf zWX%2Y5@LXYmh_o+kU9uAy+P8`u3YJ~TMG|-knY3mMHr2Qv=s=R{C9&rt z+vJ@<0AW*S(;yIW6$E8LO#EiEt&h)RnJp4`QrxR~iU6UgB`RX>YL_qTJ?n{VNb!?) 
zM^M#*vtcbEy6loLdL);cjg8qQaLS=S+OzUcqKbNgaS@A8iqSiRx@86jb+ghI!%XL{ z!}ZC^S$Q*W4OzqH(Ccw)#2U56ARr&Nc3C6l@D~#j_6?3UI1^(6VWn}(2yFCsi;eyy z1m#o8gG&ag{1}*anL`eT>^e<=gJA>jreKD$<}Vx;K1Z0FFsPnp!OMt(r+7q}-2qquw_K*R*0htNNgowVEEy&La2g2XctMn)+!YaZ#69%$S8SF|0 zuBzB&VAw2Y34v@iC3?b>;{f{0SGm>+ChKqn>+sfP`%2}KaMn=C9XtNs2Oge%;N9F2#M73&uDW(>-TzT*PoKtvE3T(Q&QwylvJhUP^erM};rtV*M@Q^FEF0xAf&2Sq+10Eoe< zA1TmdhiQre#qyT21X)eWD5c~U1qwhCPDiSsf01#|s}88h3?JusbKoM0Q5_pYuRv+JD)h$wpfKE% zz<6<1@50MB;>8i@B5{rjg5zs7GIaN_EBMx}={zdp)MlVZzAqrQUs081QNo=#(b{tMK z6Kw-aBDs~K^#KCz0!Jd&T3P}_=vtQHrV2|ZgpoizvzV8|uP|qwA#*cWM2svg_pn)j z6DywO-}Q+7FTMMAMx`MdS{C2qH`A=Qon3~>GI9}(4YrXPM0bPN8`?o9S-&UY9)i#8;cK)50u4v?vo!!F4YMj~o)puE6RhfHdDQxGTf6`unFMg(>utP*npb2jU* zaR?6u4F!a^{J2l|_F*#Tm6E%Q@K#NR{7I7)!Xsd-f*~PdS8X8AKpal(%F%*^FAm9r zAUKg0cIakSt6-;65^0)KPP=*uOvuaKYIVLaPg+CHY|xM-g!L3y@kf9!sI#&j@hDm) zd{>Es>Q}jhImD#>KHz`hdEpqkMpz+m^(8%`(r&2qF+RlEKC#1jJnD(F`8Axt1=2{& z9)Q_3@!%Dg8Jm^dfY?C-&1sM5sEQYN9D5shg0Mivnl_mbb>rnL6{mILMH?bxJ&a^) z)GUa^Mvd(!wxGNNi4*!Tl=S+rjA{G7W6cEsjuqU%|LY(AGHhXfo{4k3T*iffX9KQT z_AfGp&_+%Hu)oZ_I4LaquQBmaT)eO_Utxj|#_UH3?)G@Y)(HtK_hZtaKHEsoKY_WquPo*}y$coZ8xt=dm_DtRrjV zaBpWE1=eBM1*FmEm6n>x_HHweXb=Yw;xe^$067zF1l2_BhID&B!<8z()%GaZ(A?G? z?K{|~t%E4HTY7ESi}=p&_QC5U>!VwDwuf<2(I?vn+jn9#?=Z2JRCFhJyiPK;eaG4! 
z{HE~Ri{C!@zS4|xwEVJXzeUj7mxY1VQV>4wD1+;4@IFRJp6ElYHg=HBUG$8oNJECq zIeIKO2!N|V$Ugro2tap3)Uu*BVX+>jSnekP(HMN{kGiS8;1VN#YOK!GzvG zb;H|tVxVqG+`F?@iJj@IS@KY%Zok4Kv%q!eG%*`HF9sKtx{oov(diw)TN||75;7CX zd34G>1xc5Qn`0*|6*cbgHuzxIE5!=3-q#3$-ugMy%wh ztQFkZA_Vg+J0U(-)S9Q)SG!pakKT2fJ*F5wv?dQjPDoKamxtM?-{0hTi+xbVrWB{o^a z4`hqj#}OAy^!K6; zMtg=M_?K|2h*^pt?EipE0#XN&JV+=MIaokZomv_g@cQq>K}p!3;`>Xye43YQyu6AF zBzZ!R{}K~AAsXN0BjduWI1V^998w2fQoIQ3k+i>wLPg~VY{1?5HXDdx*q$Oy?;$*6FwaGHpcyK5!mtqc`88#P|S?VRA_ zXZaE79tYok0%^r@uj&qLFYfWuy0*R>EfpucJdZje$5y=4OC7-H6i0pOrJk$ei1&&T zHaT|3-{&5-?n}JL!lReZ&&J4};3je}&fIv*K*;Dgj`@i*I5b!$@4r!fC;uX7bOGU^ zCsAS?XSEp&1eIVUy$NGjViQathXJPZMm&W;)MTo>|Gt?{llk(WgLi+L9O5xUNz|kA z<9HMQy`ch3g*L==bRGd)LVt=0LsW@=cA2Z}^}9J)5ApIaFAU%2?&g8srL%JvUYNgF zI`!Pii}TN)oqI}nTbulwJ6@W9?%C6GXN#wwFP(Yrd6tn?2XoGo&&{1V`;?zbFkxdO zN8~^kSrBjlg7{l4ZVvb0_)#-x(PNy9M^P-Dh-Zd}B=&VW-x;DeKCJt%P(d9)mH$-`!D`EZ4gj`rc!gB(A%!6XwkK!h%Uui;eD1i|J zQJbMh0h1Zjqoe|Q6wZcy2QTz{b7Mibg9z_Nmq z_$g;&(UPo1OGZae@*J85B4x^3p?41oD-)m5kf2O#e+?QV3Bg4 z^3C-e7!FjanO14)t9j|Jhu2K`0VBKW)(gYU{)7CvDK~kctJbG zfM|pO%vvcgfyei>M~WpDCTWq@9wzP4p!>mr-u^o2QG=YmQl{_oBVoJZ#~*)OIgtd+ zcVIdu$1IC=kL(JXh#!^5Fmmv}Mi~LC(l5$t?8%gc&#DTKg{$ti&OdzrB6RdKR36W4 z)@cA^-XC@*X-3L&q&nQC-eXWF*0+ikZn7qFtepggjyROm%S58t>@d6!5#6ycNbS zsURlKU<6$X1(GtlR&cPprC;SoI!uxu6ahzy(;FDW1Rt?9ucc)~mm5|vyi|pSE^}0* zxXve#CXs8q$Q3z$;3A0=68Mq($!GO&L4TIa^XMKP=)-fZr@&3cp{|NV?i|8f>2y9# zZUtwG#QJh4U$VI$xdFDwL)a!KPnSDxIBGrMtycZTP=YSTZ2!&jz%){vP43o&Jmo4Yg% zF>LPDW_}~)q`3!gjS5pS2Bu=4E8%}su5sX87_>4H(Qc*cb8c+`N5|)&2>) zgqz5jrQNuCe|O-9s$pRteh~Ru#ha&!SF;aLK?+e`5QT0xD}kW#0cGF_2>96RBOv5= z%gKb3!cM4@_$KUjdk@wM)PA@kth~Ax8=m-ekosUXru4Vo+^Hna;zNt19;lVoA>7-j zVT*?bjG8H&SIG1D_HM(t`M+|Pi)`(O`V+P~>#+$~DaxDs&18EQj{TX!F+aQ7`zLs0 z%@ov~QK!}3Lq-PtzE*Z_9cWMKuiykh6Au5!b#m1NB`c=y%woPqUUG)qY=wzK^QX z<7>3>=X?~vFeK=dj(3Jf`LWI(8;-x>MIAnAz`kZMVcYE6&WnaF>p;usZ2#-BLE@f{v4I=KZ6eMjv~{z3#RN?{*$ zGeC%&qG+>!4tZbuA`iYM3yC14l)Sh})tJ6&^6|bBn7_+!bctc8!$%Cj&6=q1-RK!N 
zhk8poK95cNh`wLA;}+yK0mb_ZXtAoNpP_;b1CMnT6+Dg?Lv$b|)p6WJxw^4h=0Vx? z1=Z3mb=agxcPQ8?rnK0mxEKCr2sZ@(B%hw}!q3n2 zJxlny|3~`+vhB36ki(u}XBEYk@m2J7LtI%`Hwt%<#o=J8fMhnD?V!V2^MDD_~y@5V25kjOTs=QRwyK1f8o zvqZcw2ZsQ7-Qk>Bo@N1WLTz7E%nB7KMs7EvZhFxw*Gd&rpGGS-ABXXg4{(IIbZ~@M zq;Q2uwpNeaI(JrNLe;x-_GQO6#f!TASs)8k#nAnu_- zQ99x2rX4fw``F4=jE}vus8m4Svn>1?3)gt}3NNqoQ93M_Pw-Jj|0U%97)D+jdB7>b zxQXDj*VN+=hwIs0Hfw6V`)!L;`-o0?bP)eS;Y4p=bZX|rYdZgW+}v%Bnd4e~N}=g4 za{^w{9kJ;iB{qd5`#j9zEc4v?j@Z=x7&_QvK7gT9Tkm!xr1s048~V%*w!P>q!=UUl z(8BM8?DIUo8kBtozxSU)_IY8a2J9gFybIG|zYhbd9bg1FEpc0TfBSGVil2x+8FN~D zyukCt7kv)(SWZyfnelxeMj-eehPA^kITLI`gq;&WWV_qr_B<^Y0D~vtXF{8XPm9&W z9*X!vRufs@I${#SOP2i2CW0!9HwT2MuyrunqYyvJ9mJ0iU($1Bovx2L2tB7vSVutk zI0@n77)6i}K2ElWoNu&uL%g^PdIV4OxECjrvHq{OCy}#P1dn?mc+A7A1LvRNgv?!Q z!|-H@0~=(^zlb66iNwEjQH+vR43NR}7q;{WSp43Qt5QV!Y{*36oe{QZpF}hCxo=b0 z;u{OxT-w6$*I&W9R(7}4^Qhy{ukr3FOlp7x`W2>iY;F5d@}%4C1-?cjTp`|X#Fg5# z%v9g>R-e#i@2oKu``SgmNJ7Ilc@YH#VSS*oeH{wymU|=g`L7J>lEqIg^RmLrS6R*{ zzU_}PjnuWh%F91tu3$*H2!9RS655ZY6| zH}30A-jz@^-I8Ne?(uo?73U1OMs58!?CNzt3 zDlZRrfiFGO9mX195xm_H!u@&_PRs&LwhSEql4`>z4|Z7cKAw3%JJcGl$G4K|aF})y z}6mdA|d?H85NCFVhyQfWTO4T653n1 z1C978CUDt8@y;G9-ml6#4!5$)7B9~XNp*1rcH@XoRJA|Je2&a?s(aB?IhUV77HpW8 zmqCrZ1Gx5`7Nd_#pfE1l~`_f<&Ls zE5fq3uiiXN{{8#mXKdEidbSVYClYec3?U#TA7Fj!MbYWirRUL1{z-d1U3$;}*FubW zDEbgJ0@`xXoik_va3HnnNQ1#`R&?V$@j2tl0T-CPIhbNx)Rq_*e5SjH#^ZM6&pC=q zw-)?%ZEtzN$Svr;{|=`~u_4;__CAjEz{dO3pcs%UUGP#4?e_lYp1@~;C%=Ik9OsZ* zPud5YI7`bkn#OemhG^%uVVwzz27wUF7-)wh?W|BofGjv4oTqpP?Mg`-;Xns%SIYCk zK|yV&+C!*e7^lCEz><`;d>;-ii{R|o(e^M^s4$SG^{6OQVLdw1&N_cv&gsr0*n-%m zip}VVy!++$2&k7apyY|5g~f)J0q&TWmCWY0B0we z^Wdrx&!x7}gdF_P*?QqD%)$^aaHG;{m0{dzUxH60h{t6)@Pv+d46lJOR9=8aMs~0i6A7^P5Ny}3_|*D zLIr9wQuJjwKWg_>cpplsDpjj}su#sQZL5REjH;&yI~n|Tk16B*8_hnpmc6Eo-Y1TN ztDpk-v2F|G!!I3wdAe7uuSC{pn67fROQ25fg3Alh4gsLC!+A;uW8yyWa^cayV=aQ3#~?}hTTLO+*Hy$MwiRT24N6! 
zht5a{nHN=}8{24X^p)Iqg!VLv3()%G-DKORnZLw~?+<79A@1Fzmc)LQMajY!dxPWa zbKi>Wj?njmNc&$Vf_)M-MegB2wxEB$6_4W>TafEME#%$77&47mV2ZHkA-7!wy_ks& z1Vr9xfZk!XqzR|n%ClC3Yw zk_%980efmMn2cvtp^mI#*h8V|k0G8sU zmgPYBL`rbb+0KXjv~C2>W_7bF$HrS&ku@l;uhMoZrJmOThA)c4T}R(vD%DBt<*Y zCn*QtBuKa5n?zrvJZMMW#pMoBY2c%zV!mvn4)CrBFZRi167=Kn;|qcbSr48BaM(Dx&>U-M(>B& ztNYL?nCxf6uEl)>5Pyop1A(>vW;fOnr?hgR2G67lkBLGwY*4UW2wNhGnOa0lSl;lvVwmU(K5rOEIu>eE{Kz2Z<>Hw4;R2o={Gj#dn@pnLe3i$>yy>QGp>9r-N+zAJ* z4g)a}WTq};wx*%Q#LSD`G+Q{_a|BNJApzw@aDpA4R_$J8HS-JHz{2)*n7trIe3tn? z!wa>;w>*|l zS%Y*;o75q*n)r{>dCp8o+zk-w@WP=}ol*mI;%?;JeHwF$r*N3-6SI%OIF#zlAPyxu z@K`^%C@Hr%ggSp4?4Ou|Y+;rLnu7dePPs6^hy$92Bb;ztzQP8pa+mB15({cAV{ zVLRUjFMU0RAQbR8YmW!7qft z8wP_n;$frKpS%;OsG9`2J#Me>F1;m$W7mxvF9g zXRcEZ%n>UN`45!Rb{<6@I?xQ@x3~idh9Y}!&VbR>lY10s23D}6i0hk2Jl`H>?9dMT zw;xXFIpkAx#j6~>E~#OM9h0$rmayeDmK!KK{3MF}efIG%ilsA2GC2jP&S2i$sL$YJ?#npHlM$dO4un8)-6~xC z&?XEH4twH7`J{*#{peAeKI(^&7_UNyG{K=~Y!M1rl)UrIKcq+Egf|d3K}k=bHMfqEd>Lf;%nc%;(3u;ui@O**U)tzqUn%Qw zM>mNebq}mqJcdqC7etPC>4o3Z(ZOiTlm2q#_u%KFEym`%jR;7THj>pr-M5vl(BH+dqgWRXT;7bRg#! 
zBq|c?NuzF&L;{&HSXiu$n9R_;r#ttMIgX24Gb{}?lf_6%qlhVOz}9erk&?{xeu=9D z)@v4JRAi-&?cbz|tOVxkh{RT!6m*9JfuKM4Vk;>PdPZU^Wy^QP0_>OU5S*9d2(jiz zhw0Xs1w!`p`efcJvh%)DI4qb!qMr&6B)e2^Y{6bt=(J=56L^GOHwWX$5Wc(M_lSZ> zfTtQ53jzp!u%oI}ESOg3Yk$-(X#yYw&g@a+Lurx2us5o)p)Y!`NgyUUat!A-)S-C8 zN6Sk~jZGZ0*4rJy?tVRhOMl68LCE;snJ5&36jDc7N}EY|B3uJ7Qr=D~&WxK^+eo1l zH_K^M_W^omsGFwxENh`&(oH#7ffh{Pp2m|f;cilvY)Lsp5fTP!+H(t(lRMJ2vx`Ha z#by5r>+D#Fil$YpCWWZ_9NYF9FE8R!9MZ1E@P2^}5xfpqNlsH)q_Vk+0i)Z~6uMudwpJ&CC0Exq?(TXO^2~p{{TO2JQ7h zWnbL@)uax5r>at!P(mN9Daex21r296#0_^>~P-|{8K1y*s^nxmP zkQwg^Wk{=>*+r1Pvq~$bkNYk$$-WbBxcT54?k+kozz}1Rc;jlxvh9ZLX6aS|u&{#1 z&$A5=^CHWN2A;)35*7Bs^QY&ZKXG>M^r_PM=T4nIS32?h$*0eL_;l&q*^Be`$N8<~ zc{xJWmXp%HpOyS7FTcslKjY1/3: + idx = torch.multinomial(weights[b], 1).item() + if torch.rand(1)>0.5: + image_inputs[0].append(image_embeds[b]) + image_inputs[1].append(image_embeds[idx]) + labels.append(0) + else: + image_inputs[1].append(image_embeds[b]) + image_inputs[0].append(image_embeds[idx]) + labels.append(1) + else: + idx = torch.multinomial(weights[b], 2) + image_inputs[0].append(image_embeds[idx[0]]) + image_inputs[1].append(image_embeds[idx[1]]) + labels.append(2) + + image_inputs[0] = torch.stack(image_inputs[0],dim=0) + image_inputs[1] = torch.stack(image_inputs[1],dim=0) + labels = torch.LongTensor(labels).to(image.device) + + output = self.text_encoder(text.input_ids, + attention_mask = text.attention_mask, + encoder_hidden_states = image_inputs, + encoder_attention_mask = [image_atts,image_atts], + return_dict = True, + ) + + pred = self.ta_head(output.last_hidden_state[:,0,:]) + loss = F.cross_entropy(pred, labels) + + return loss + + + + def share_cross_attention(self, model): + + for i in range(6): + layer_num = 6+i*2 + modules_0 = model.layer[layer_num].crossattention.self._modules + modules_1 = model.layer[layer_num+1].crossattention.self._modules + + for name in modules_0.keys(): + if 'key' in name or 'value' in name: + module_0 = modules_0[name] + module_1 = 
modules_1[name] + if hasattr(module_0, "weight"): + module_0.weight = module_1.weight + if hasattr(module_0, "bias"): + module_0.bias = module_1.bias \ No newline at end of file diff --git a/models/model_retrieval.py b/models/model_retrieval.py new file mode 100644 index 0000000..f27f7e6 --- /dev/null +++ b/models/model_retrieval.py @@ -0,0 +1,217 @@ +from functools import partial +from models.vit import VisionTransformer +from models.xbert import BertConfig, BertModel + +import torch +from torch import nn +import torch.nn.functional as F + +class ALBEF(nn.Module): + def __init__(self, + text_encoder = None, + tokenizer = None, + config = None, + ): + super().__init__() + + self.tokenizer = tokenizer + self.distill = config['distill'] + embed_dim = config['embed_dim'] + vision_width = config['vision_width'] + self.visual_encoder = VisionTransformer( + img_size=config['image_res'], patch_size=16, embed_dim=768, depth=12, num_heads=12, + mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)) + + bert_config = BertConfig.from_json_file(config['bert_config']) + self.text_encoder = BertModel.from_pretrained(text_encoder, config=bert_config, add_pooling_layer=False) + + text_width = self.text_encoder.config.hidden_size + self.vision_proj = nn.Linear(vision_width, embed_dim) + self.text_proj = nn.Linear(text_width, embed_dim) + + self.temp = nn.Parameter(torch.ones([]) * config['temp']) + self.queue_size = config['queue_size'] + self.momentum = config['momentum'] + self.itm_head = nn.Linear(text_width, 2) + + # create momentum models + self.visual_encoder_m = VisionTransformer( + img_size=config['image_res'], patch_size=16, embed_dim=768, depth=12, num_heads=12, + mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)) + self.vision_proj_m = nn.Linear(vision_width, embed_dim) + self.text_encoder_m = BertModel.from_pretrained(text_encoder, config=bert_config, add_pooling_layer=False) + self.text_proj_m = nn.Linear(text_width, embed_dim) + + 
self.model_pairs = [[self.visual_encoder,self.visual_encoder_m], + [self.vision_proj,self.vision_proj_m], + [self.text_encoder,self.text_encoder_m], + [self.text_proj,self.text_proj_m], + ] + self.copy_params() + + # create the queue + self.register_buffer("image_queue", torch.randn(embed_dim, self.queue_size)) + self.register_buffer("text_queue", torch.randn(embed_dim, self.queue_size)) + self.register_buffer("idx_queue", torch.full((1,self.queue_size),-100)) + self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long)) + + self.image_queue = nn.functional.normalize(self.image_queue, dim=0) + self.text_queue = nn.functional.normalize(self.text_queue, dim=0) + + + def forward(self, image, text, alpha, idx): + + image_embeds = self.visual_encoder(image) + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + + image_feat = F.normalize(self.vision_proj(image_embeds[:,0,:]),dim=-1) + text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask, + return_dict = True, mode = 'text') + text_embeds = text_output.last_hidden_state + text_feat = F.normalize(self.text_proj(text_embeds[:,0,:]),dim=-1) + + idx = idx.view(-1,1) + idx_all = torch.cat([idx.t(), self.idx_queue.clone().detach()],dim=1) + pos_idx = torch.eq(idx, idx_all).float() + sim_targets = pos_idx / pos_idx.sum(1,keepdim=True) + + with torch.no_grad(): + self._momentum_update() + image_embeds_m = self.visual_encoder_m(image) + image_feat_m = F.normalize(self.vision_proj_m(image_embeds_m[:,0,:]),dim=-1) + image_feat_all = torch.cat([image_feat_m.t(),self.image_queue.clone().detach()],dim=1) + text_output_m = self.text_encoder_m(text.input_ids, attention_mask = text.attention_mask, + return_dict = True, mode = 'text') + text_feat_m = F.normalize(self.text_proj_m(text_output_m.last_hidden_state[:,0,:]),dim=-1) + text_feat_all = torch.cat([text_feat_m.t(),self.text_queue.clone().detach()],dim=1) + + if self.distill: + sim_i2t_m = image_feat_m @ 
text_feat_all / self.temp + sim_t2i_m = text_feat_m @ image_feat_all / self.temp + + sim_i2t_targets = alpha * F.softmax(sim_i2t_m, dim=1) + (1 - alpha) * sim_targets + sim_t2i_targets = alpha * F.softmax(sim_t2i_m, dim=1) + (1 - alpha) * sim_targets + + sim_i2t = image_feat @ text_feat_all / self.temp + sim_t2i = text_feat @ image_feat_all / self.temp + + if self.distill: + loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1)*sim_i2t_targets,dim=1).mean() + loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1)*sim_t2i_targets,dim=1).mean() + else: + loss_i2t = -torch.sum(F.log_softmax(sim_i2t, dim=1)*sim_targets,dim=1).mean() + loss_t2i = -torch.sum(F.log_softmax(sim_t2i, dim=1)*sim_targets,dim=1).mean() + + loss_ita = (loss_i2t+loss_t2i)/2 + + self._dequeue_and_enqueue(image_feat_m, text_feat_m, idx) + + ###=================================### + # forward the positve image-text pair + output_pos = self.text_encoder(encoder_embeds = text_embeds, + attention_mask = text.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True, + mode = 'fusion', + ) + with torch.no_grad(): + bs = image.size(0) + weights_i2t = F.softmax(sim_i2t[:,:bs]+1e-4,dim=1) + weights_t2i = F.softmax(sim_t2i[:,:bs]+1e-4,dim=1) + + mask = torch.eq(idx, idx.T) + weights_i2t.masked_fill_(mask, 0) + weights_t2i.masked_fill_(mask, 0) + + # select a negative image for each text + image_embeds_neg = [] + for b in range(bs): + neg_idx = torch.multinomial(weights_t2i[b], 1).item() + image_embeds_neg.append(image_embeds[neg_idx]) + image_embeds_neg = torch.stack(image_embeds_neg,dim=0) + + # select a negative text for each image + text_embeds_neg = [] + text_atts_neg = [] + for b in range(bs): + neg_idx = torch.multinomial(weights_i2t[b], 1).item() + text_embeds_neg.append(text_embeds[neg_idx]) + text_atts_neg.append(text.attention_mask[neg_idx]) + text_embeds_neg = torch.stack(text_embeds_neg,dim=0) + text_atts_neg = 
torch.stack(text_atts_neg,dim=0) + + text_embeds_all = torch.cat([text_embeds, text_embeds_neg],dim=0) + text_atts_all = torch.cat([text.attention_mask, text_atts_neg],dim=0) + + image_embeds_all = torch.cat([image_embeds_neg,image_embeds],dim=0) + image_atts_all = torch.cat([image_atts,image_atts],dim=0) + + output_neg = self.text_encoder(encoder_embeds = text_embeds_all, + attention_mask = text_atts_all, + encoder_hidden_states = image_embeds_all, + encoder_attention_mask = image_atts_all, + return_dict = True, + mode = 'fusion', + ) + + vl_embeddings = torch.cat([output_pos.last_hidden_state[:,0,:], output_neg.last_hidden_state[:,0,:]],dim=0) + vl_output = self.itm_head(vl_embeddings) + + itm_labels = torch.cat([torch.ones(bs,dtype=torch.long),torch.zeros(2*bs,dtype=torch.long)], + dim=0).to(image.device) + loss_itm = F.cross_entropy(vl_output, itm_labels) + + return loss_ita, loss_itm + + + + @torch.no_grad() + def copy_params(self): + for model_pair in self.model_pairs: + for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()): + param_m.data.copy_(param.data) # initialize + param_m.requires_grad = False # not update by gradient + + + @torch.no_grad() + def _momentum_update(self): + for model_pair in self.model_pairs: + for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()): + param_m.data = param_m.data * self.momentum + param.data * (1. 
- self.momentum) + + + @torch.no_grad() + def _dequeue_and_enqueue(self, image_feat, text_feat, idx): + # gather keys before updating queue + image_feats = concat_all_gather(image_feat) + text_feats = concat_all_gather(text_feat) + idxs = concat_all_gather(idx) + + batch_size = image_feats.shape[0] + + ptr = int(self.queue_ptr) + assert self.queue_size % batch_size == 0 # for simplicity + + # replace the keys at ptr (dequeue and enqueue) + self.image_queue[:, ptr:ptr + batch_size] = image_feats.T + self.text_queue[:, ptr:ptr + batch_size] = text_feats.T + self.idx_queue[:, ptr:ptr + batch_size] = idxs.T + ptr = (ptr + batch_size) % self.queue_size # move pointer + + self.queue_ptr[0] = ptr + + +@torch.no_grad() +def concat_all_gather(tensor): + """ + Performs all_gather operation on the provided tensors. + *** Warning ***: torch.distributed.all_gather has no gradient. + """ + tensors_gather = [torch.ones_like(tensor) + for _ in range(torch.distributed.get_world_size())] + torch.distributed.all_gather(tensors_gather, tensor, async_op=False) + + output = torch.cat(tensors_gather, dim=0) + return output + diff --git a/models/model_ve.py b/models/model_ve.py new file mode 100644 index 0000000..d659842 --- /dev/null +++ b/models/model_ve.py @@ -0,0 +1,110 @@ +from functools import partial +from models.vit import VisionTransformer +from models.xbert import BertConfig, BertModel + +import torch +from torch import nn +import torch.nn.functional as F + +class ALBEF(nn.Module): + def __init__(self, + text_encoder = None, + tokenizer = None, + config = None, + ): + super().__init__() + + self.tokenizer = tokenizer + self.distill = config['distill'] + + self.visual_encoder = VisionTransformer( + img_size=config['image_res'], patch_size=16, embed_dim=768, depth=12, num_heads=12, + mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)) + + bert_config = BertConfig.from_json_file(config['bert_config']) + + self.text_encoder = 
BertModel.from_pretrained(text_encoder, config=bert_config, add_pooling_layer=False) + + self.cls_head = nn.Sequential( + nn.Linear(self.text_encoder.config.hidden_size, self.text_encoder.config.hidden_size), + nn.ReLU(), + nn.Linear(self.text_encoder.config.hidden_size, 3) + ) + + if self.distill: + self.visual_encoder_m = VisionTransformer( + img_size=config['image_res'], patch_size=16, embed_dim=768, depth=12, num_heads=12, + mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)) + self.text_encoder_m = BertModel.from_pretrained(text_encoder, config=bert_config, add_pooling_layer=False) + self.cls_head_m = nn.Sequential( + nn.Linear(self.text_encoder.config.hidden_size, self.text_encoder.config.hidden_size), + nn.ReLU(), + nn.Linear(self.text_encoder.config.hidden_size, 3) + ) + + self.model_pairs = [[self.visual_encoder,self.visual_encoder_m], + [self.text_encoder,self.text_encoder_m], + [self.cls_head,self.cls_head_m], + ] + self.copy_params() + self.momentum = 0.995 + + + def forward(self, image, text, targets, alpha=0, train=True): + + image_embeds = self.visual_encoder(image) + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + + if train: + output = self.text_encoder(text.input_ids, + attention_mask = text.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True + ) + prediction = self.cls_head(output.last_hidden_state[:,0,:]) + if self.distill: + with torch.no_grad(): + self._momentum_update() + image_embeds_m = self.visual_encoder_m(image) + output_m = self.text_encoder_m(text.input_ids, + attention_mask = text.attention_mask, + encoder_hidden_states = image_embeds_m, + encoder_attention_mask = image_atts, + return_dict = True + ) + prediction_m = self.cls_head_m(output_m.last_hidden_state[:,0,:]) + + loss = (1-alpha)*F.cross_entropy(prediction, targets) - alpha*torch.sum( + F.log_softmax(prediction, dim=1)*F.softmax(prediction_m, 
dim=1),dim=1).mean() + else: + loss = F.cross_entropy(prediction, targets) + return loss + + else: + output = self.text_encoder(text.input_ids, + attention_mask = text.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True + ) + prediction = self.cls_head(output.last_hidden_state[:,0,:]) + return prediction + + + + @torch.no_grad() + def copy_params(self): + for model_pair in self.model_pairs: + for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()): + param_m.data.copy_(param.data) # initialize + param_m.requires_grad = False # not update by gradient + + + @torch.no_grad() + def _momentum_update(self): + for model_pair in self.model_pairs: + for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()): + param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum) + + diff --git a/models/model_vqa.py b/models/model_vqa.py new file mode 100644 index 0000000..c445a63 --- /dev/null +++ b/models/model_vqa.py @@ -0,0 +1,214 @@ +from functools import partial +from models.vit import VisionTransformer +from models.xbert import BertConfig, BertModel, BertLMHeadModel + +import torch +from torch import nn +import torch.nn.functional as F + +import numpy as np + +class ALBEF(nn.Module): + def __init__(self, + text_encoder = None, + text_decoder = None, + tokenizer = None, + config = None, + ): + super().__init__() + + self.tokenizer = tokenizer + self.distill = config['distill'] + + self.visual_encoder = VisionTransformer( + img_size=config['image_res'], patch_size=16, embed_dim=768, depth=12, num_heads=12, + mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)) + + config_encoder = BertConfig.from_json_file(config['bert_config']) + self.text_encoder = BertModel.from_pretrained(text_encoder, config=config_encoder, add_pooling_layer=False) + + config_decoder = BertConfig.from_json_file(config['bert_config']) + 
config_decoder.fusion_layer = 0 + config_decoder.num_hidden_layers = 6 + self.text_decoder = BertLMHeadModel.from_pretrained(text_decoder, config=config_decoder) + + if self.distill: + self.visual_encoder_m = VisionTransformer( + img_size=config['image_res'], patch_size=16, embed_dim=768, depth=12, num_heads=12, + mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)) + self.text_encoder_m = BertModel.from_pretrained(text_encoder, config=config_encoder, add_pooling_layer=False) + self.text_decoder_m = BertLMHeadModel.from_pretrained(text_decoder, config=config_decoder) + self.model_pairs = [[self.visual_encoder,self.visual_encoder_m], + [self.text_encoder,self.text_encoder_m], + [self.text_decoder,self.text_decoder_m], + ] + self.copy_params() + self.momentum = 0.995 + + + def forward(self, image, quesiton, answer=None, alpha=0, k=None, weights=None, train=True): + + image_embeds = self.visual_encoder(image) + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) + + if train: + ''' + k: number of answers for each question + weights: weight for each answer + ''' + answer_targets = answer.input_ids.masked_fill(answer.input_ids == self.tokenizer.pad_token_id, -100) + + question_output = self.text_encoder(quesiton.input_ids, + attention_mask = quesiton.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True) + + question_states = [] + question_atts = [] + for b, n in enumerate(k): + question_states += [question_output.last_hidden_state[b]]*n + question_atts += [quesiton.attention_mask[b]]*n + question_states = torch.stack(question_states,0) + question_atts = torch.stack(question_atts,0) + + if self.distill: + with torch.no_grad(): + self._momentum_update() + image_embeds_m = self.visual_encoder_m(image) + question_output_m = self.text_encoder_m(quesiton.input_ids, + attention_mask = quesiton.attention_mask, + encoder_hidden_states = image_embeds_m, + 
encoder_attention_mask = image_atts, + return_dict = True) + + question_states_m = [] + for b, n in enumerate(k): + question_states_m += [question_output_m.last_hidden_state[b]]*n + question_states_m = torch.stack(question_states_m,0) + + logits_m = self.text_decoder_m(answer.input_ids, + attention_mask = answer.attention_mask, + encoder_hidden_states = question_states_m, + encoder_attention_mask = question_atts, + return_logits = True, + ) + + answer_output = self.text_decoder(answer.input_ids, + attention_mask = answer.attention_mask, + encoder_hidden_states = question_states, + encoder_attention_mask = question_atts, + labels = answer_targets, + return_dict = True, + soft_labels = F.softmax(logits_m,dim=-1), + alpha = alpha, + reduction = 'none', + ) + else: + answer_output = self.text_decoder(answer.input_ids, + attention_mask = answer.attention_mask, + encoder_hidden_states = question_states, + encoder_attention_mask = question_atts, + labels = answer_targets, + return_dict = True, + reduction = 'none', + ) + loss = weights * answer_output.loss + loss = loss.sum()/image.size(0) + + return loss + + + else: + question_output = self.text_encoder(quesiton.input_ids, + attention_mask = quesiton.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True) + topk_ids, topk_probs = self.rank_answer(question_output.last_hidden_state, quesiton.attention_mask, + answer.input_ids, answer.attention_mask, k) + return topk_ids, topk_probs + + + + @torch.no_grad() + def copy_params(self): + for model_pair in self.model_pairs: + for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()): + param_m.data.copy_(param.data) # initialize + param_m.requires_grad = False # not update by gradient + + + @torch.no_grad() + def _momentum_update(self): + for model_pair in self.model_pairs: + for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()): + param_m.data = param_m.data * 
self.momentum + param.data * (1. - self.momentum) + + + def rank_answer(self, question_states, question_atts, answer_ids, answer_atts, k): + + num_ques = question_states.size(0) + start_ids = answer_ids[0,0].repeat(num_ques,1) # bos token + + start_output = self.text_decoder(start_ids, + encoder_hidden_states = question_states, + encoder_attention_mask = question_atts, + return_dict = True, + reduction = 'none') + logits = start_output.logits[:,0,:] # first token's logit + + # topk_probs: top-k probability + # topk_ids: [num_question, k] + answer_first_token = answer_ids[:,1] + prob_first_token = F.softmax(logits,dim=1).index_select(dim=1, index=answer_first_token) + topk_probs, topk_ids = prob_first_token.topk(k,dim=1) + + # answer input: [num_question*k, answer_len] + input_ids = [] + input_atts = [] + for b, topk_id in enumerate(topk_ids): + input_ids.append(answer_ids.index_select(dim=0, index=topk_id)) + input_atts.append(answer_atts.index_select(dim=0, index=topk_id)) + input_ids = torch.cat(input_ids,dim=0) + input_atts = torch.cat(input_atts,dim=0) + + targets_ids = input_ids.masked_fill(input_ids == self.tokenizer.pad_token_id, -100) + + # repeat encoder's output for top-k answers + question_states = tile(question_states, 0, k) + question_atts = tile(question_atts, 0, k) + + output = self.text_decoder(input_ids, + attention_mask = input_atts, + encoder_hidden_states = question_states, + encoder_attention_mask = question_atts, + labels = targets_ids, + return_dict = True, + reduction = 'none') + + answer_loss = output.loss + answer_loss = answer_loss.view(input_ids.size(0),-1) + + # topk_prob: first token probability + topk_probs = topk_probs.view(-1,1) + log_probs = torch.cat([topk_probs.log(), -answer_loss],dim=1) + + # re-calculate log probabilities for the answer sequences using chain rule + log_probs_sum = log_probs.sum(1) + log_probs_sum = log_probs_sum.view(num_ques,k) + + topk_probs = F.softmax(log_probs_sum, dim=-1) + # get top-k after re-ranking + 
topk_probs, rerank_id = topk_probs.topk(k,dim=1) + topk_ids = torch.gather(topk_ids, 1, rerank_id) + + return topk_ids, topk_probs + +def tile(x, dim, n_tile): + init_dim = x.size(dim) + repeat_idx = [1] * x.dim() + repeat_idx[dim] = n_tile + x = x.repeat(*(repeat_idx)) + order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])) + return torch.index_select(x, dim, order_index.to(x.device)) diff --git a/models/tokenization_bert.py b/models/tokenization_bert.py new file mode 100644 index 0000000..2017be7 --- /dev/null +++ b/models/tokenization_bert.py @@ -0,0 +1,539 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tokenization classes for Bert.""" + + +import collections +import os +import unicodedata +from typing import List, Optional, Tuple + +from transformers.tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt", + "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt", + "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt", + "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt", + "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt", + "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt", + "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt", + "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt", + "bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt", + "bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt", + "bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt", + "bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt", + "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt", + "bert-base-german-dbmdz-cased": 
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt", + "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt", + "TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt", + "TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt", + "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt", + } +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "bert-base-uncased": 512, + "bert-large-uncased": 512, + "bert-base-cased": 512, + "bert-large-cased": 512, + "bert-base-multilingual-uncased": 512, + "bert-base-multilingual-cased": 512, + "bert-base-chinese": 512, + "bert-base-german-cased": 512, + "bert-large-uncased-whole-word-masking": 512, + "bert-large-cased-whole-word-masking": 512, + "bert-large-uncased-whole-word-masking-finetuned-squad": 512, + "bert-large-cased-whole-word-masking-finetuned-squad": 512, + "bert-base-cased-finetuned-mrpc": 512, + "bert-base-german-dbmdz-cased": 512, + "bert-base-german-dbmdz-uncased": 512, + "TurkuNLP/bert-base-finnish-cased-v1": 512, + "TurkuNLP/bert-base-finnish-uncased-v1": 512, + "wietsedv/bert-base-dutch-cased": 512, +} + +PRETRAINED_INIT_CONFIGURATION = { + "bert-base-uncased": {"do_lower_case": True}, + "bert-large-uncased": {"do_lower_case": True}, + "bert-base-cased": {"do_lower_case": False}, + "bert-large-cased": {"do_lower_case": False}, + "bert-base-multilingual-uncased": {"do_lower_case": True}, + "bert-base-multilingual-cased": {"do_lower_case": False}, + "bert-base-chinese": {"do_lower_case": False}, + "bert-base-german-cased": {"do_lower_case": False}, + "bert-large-uncased-whole-word-masking": {"do_lower_case": True}, + "bert-large-cased-whole-word-masking": {"do_lower_case": False}, + "bert-large-uncased-whole-word-masking-finetuned-squad": 
{"do_lower_case": True}, + "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False}, + "bert-base-cased-finetuned-mrpc": {"do_lower_case": False}, + "bert-base-german-dbmdz-cased": {"do_lower_case": False}, + "bert-base-german-dbmdz-uncased": {"do_lower_case": True}, + "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False}, + "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True}, + "wietsedv/bert-base-dutch-cased": {"do_lower_case": False}, +} + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip("\n") + vocab[token] = index + return vocab + + +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class BertTokenizer(PreTrainedTokenizer): + r""" + Construct a BERT tokenizer. Based on WordPiece. + This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. + Users should refer to this superclass for more information regarding those methods. + Args: + vocab_file (:obj:`str`): + File containing the vocabulary. + do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not to lowercase the input when tokenizing. + do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not to do basic tokenization before WordPiece. + never_split (:obj:`Iterable`, `optional`): + Collection of tokens which will never be split during tokenization. Only has an effect when + :obj:`do_basic_tokenize=True` + unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. 
+ sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`): + Whether or not to tokenize Chinese characters. + This should likely be deactivated for Japanese (see this `issue + `__). + strip_accents: (:obj:`bool`, `optional`): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for :obj:`lowercase` (as in the original BERT). 
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + + def __init__( + self, + vocab_file, + do_lower_case=True, + do_basic_tokenize=True, + never_split=None, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs + ): + super().__init__( + do_lower_case=do_lower_case, + do_basic_tokenize=do_basic_tokenize, + never_split=never_split, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + if not os.path.isfile(vocab_file): + raise ValueError( + "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " + "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file) + ) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) + self.do_basic_tokenize = do_basic_tokenize + if do_basic_tokenize: + self.basic_tokenizer = BasicTokenizer( + do_lower_case=do_lower_case, + never_split=never_split, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + ) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) + + @property + def do_lower_case(self): + return self.basic_tokenizer.do_lower_case + + @property + def vocab_size(self): + return len(self.vocab) + + def get_vocab(self): + return dict(self.vocab, **self.added_tokens_encoder) + + def _tokenize(self, text): + split_tokens = [] + if self.do_basic_tokenize: + for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): + + # If the 
                # If the token is part of the never_split set, keep it whole;
                # everything else is further split into word pieces.
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab; unknown tokens map to the [UNK] id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        # "##" marks word-piece continuations; dropping " ##" re-joins the pieces.
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence
        classification tasks by concatenating and adding special tokens. The
        formats produced here are:
        - single sequence: ``[CLS] X``
          NOTE(review): unlike upstream BERT, no trailing ``[SEP]`` is appended
          for a single sequence. This looks deliberate for ALBEF and is mirrored
          by :meth:`get_special_tokens_mask` below — confirm before "fixing" it.
        - pair of sequences: ``[CLS] A [SEP] B [SEP]``
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of input IDs with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added.
        This method is called when adding special tokens using the tokenizer
        ``prepare_for_model`` method.
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the token list is already formatted with special
                tokens for the model.
        Returns:
            :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a
            special token, 0 for a sequence token. (The single-sequence mask
            ``[1] + 0s`` matches the ``[CLS] X`` layout produced by
            :meth:`build_inputs_with_special_tokens`.)
        """

        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a
        sequence-pair classification task. A BERT sequence pair mask has the
        following format::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence      | second sequence |

        If :obj:`token_ids_1` is :obj:`None`, this method only returns the
        first portion of the mask (0s).
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of token type IDs according to the given
            sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary, one token per line in id order, and return the written path."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            # When save_directory is not a directory it is treated as the target file path.
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    # Gaps in the id sequence usually mean a corrupted vocab; warn but keep writing.
                    logger.warning(
                        "Saving vocabulary to {}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!".format(vocab_file)
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)


class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation
    splitting, lower casing, etc.).
    Args:
        do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (:obj:`Iterable`, `optional`):
            Collection of tokens which will never be split during tokenization.
            Only has an effect when :obj:`do_basic_tokenize=True`
        tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to tokenize Chinese characters.
            This should likely be deactivated for Japanese (see
            https://github.com/huggingface/transformers/issues/328).
        strip_accents: (:obj:`bool`, `optional`):
            Whether or not to strip all accents. If this option is not
            specified, then it will be determined by the value for
            :obj:`lowercase` (as in the original BERT).
+ """ + + def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see + WordPieceTokenizer. + Args: + **never_split**: (`optional`) list of str + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + :func:`PreTrainedTokenizer.tokenize`) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). 
+ if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if never_split is not None and text in never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. 
The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class WordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token, max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """ + Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform + tokenization using the given vocabulary. + For example, :obj:`input = "unaffable"` wil return as output :obj:`["un", "##aff", "##able"]`. + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through `BasicTokenizer`. + Returns: + A list of wordpiece tokens. 
+ """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens diff --git a/models/vit.py b/models/vit.py new file mode 100644 index 0000000..d0f7f24 --- /dev/null +++ b/models/vit.py @@ -0,0 +1,202 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.models.vision_transformer import _cfg, PatchEmbed +from timm.models.registry import register_model +from timm.models.layers import trunc_normal_, DropPath + + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set 
        # NOTE(review): continuation of a comment split by the dump; full text reads:
        # "NOTE scale factor was wrong in my original version, can set
        #  manually to be compat with prev weights"
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        # Stashes filled only when forward(..., register_hook=True), for
        # attention-map / attention-gradient inspection.
        self.attn_gradients = None
        self.attention_map = None

    def save_attn_gradients(self, attn_gradients):
        # Backward-hook target: stores the gradient flowing into the attention map.
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def forward(self, x, register_hook=False):
        # x: (B, N, C) token sequence (established by the reshape below).
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        if register_hook:
            # Keep the attention map and register a backward hook so its
            # gradient can be retrieved later via get_attn_gradients().
            self.save_attention_map(attn)
            attn.register_hook(self.save_attn_gradients)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Block(nn.Module):
    """Pre-norm transformer encoder block: Attention and MLP, each with a residual path."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, register_hook=False):
        x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


class VisionTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
        https://arxiv.org/abs/2010.11929
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
                (NOTE(review): unused in this trimmed copy — the trunk below
                builds no classification head)
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation
                layer (pre-logits) to this value if set
                (NOTE(review): also unused in this trimmed copy)
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
        """
        super().__init__()
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)

        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for Linear weights; zeros/ones for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameters excluded from weight decay by optimizer setup.
        return {'pos_embed', 'cls_token'}

    def forward(self, x, register_blk=-1):
        # Returns the full token sequence (CLS + patch tokens) after the final norm.
        B = x.shape[0]
        x = self.patch_embed(x)

        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

        x = x + self.pos_embed[:, :x.size(1), :]
        x = self.pos_drop(x)

        for i, blk in enumerate(self.blocks):
            # Only the block whose index equals register_blk registers the attention hook.
            x = blk(x, register_blk == i)
        x = self.norm(x)

        return x



def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
    """Resize a checkpoint's position embedding to this encoder's patch grid (bicubic)."""
    # interpolate position embedding
    embedding_size = pos_embed_checkpoint.shape[-1]
    num_patches = visual_encoder.patch_embed.num_patches
    num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
    # height (== width) for the checkpoint position embedding
    orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
    # height (== width) for the new position embedding
    new_size = int(num_patches ** 0.5)

    if orig_size != new_size:
        # class_token
and dist_token are kept unchanged + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2)) + + return new_pos_embed + else: + return pos_embed_checkpoint diff --git a/models/xbert.py b/models/xbert.py new file mode 100644 index 0000000..77df5b1 --- /dev/null +++ b/models/xbert.py @@ -0,0 +1,1916 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch BERT model. 
""" + +import math +import os +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +from torch import Tensor, device, dtype, nn +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss, MSELoss +import torch.nn.functional as F + +from transformers.activations import ACT2FN +from transformers.file_utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + replace_return_docstrings, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import ( + PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import logging +from transformers.models.bert.configuration_bert import BertConfig + +import transformers +transformers.logging.set_verbosity_error() + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "BertConfig" +_TOKENIZER_FOR_DOC = "BertTokenizer" + +BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "bert-base-uncased", + "bert-large-uncased", + "bert-base-cased", + "bert-large-cased", + "bert-base-multilingual-uncased", + "bert-base-multilingual-cased", + "bert-base-chinese", + "bert-base-german-cased", + "bert-large-uncased-whole-word-masking", + "bert-large-cased-whole-word-masking", + "bert-large-uncased-whole-word-masking-finetuned-squad", + "bert-large-cased-whole-word-masking-finetuned-squad", + "bert-base-cased-finetuned-mrpc", + "bert-base-german-dbmdz-cased", + "bert-base-german-dbmdz-uncased", + "cl-tohoku/bert-base-japanese", + "cl-tohoku/bert-base-japanese-whole-word-masking", + 
"cl-tohoku/bert-base-japanese-char", + "cl-tohoku/bert-base-japanese-char-whole-word-masking", + "TurkuNLP/bert-base-finnish-cased-v1", + "TurkuNLP/bert-base-finnish-uncased-v1", + "wietsedv/bert-base-dutch-cased", + # See all BERT models at https://huggingface.co/models?filter=bert +] + + +def load_tf_weights_in_bert(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." + ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info("Loading TF weight {} with shape {}".format(name, shape)) + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split("/") + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any( + n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] + for n in name + ): + logger.info("Skipping {}".format("/".join(name))) + continue + pointer = model + for m_name in name: + if re.fullmatch(r"[A-Za-z]+_\d+", m_name): + scope_names = re.split(r"_(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "kernel" or scope_names[0] == "gamma": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "output_bias" or scope_names[0] == "beta": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "output_weights": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "squad": 
+ pointer = getattr(pointer, "classifier") + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info("Skipping {}".format("/".join(name))) + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if m_name[-11:] == "_embeddings": + pointer = getattr(pointer, "weight") + elif m_name == "kernel": + array = np.transpose(array) + try: + assert ( + pointer.shape == array.shape + ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info("Initialize PyTorch weight {}".format(name)) + pointer.data = torch.from_numpy(array) + return model + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward( + self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 + ): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = 
inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads) + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + if self.position_embedding_type == "relative_key" or self.position_embedding_type == 
"relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
+ is_cross_attention = encoder_hidden_states is not None + + + if is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = 
torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        # Scale by sqrt(d_k) before the softmax, as in the original Transformer.
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in BertModel's forward()).
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # NOTE(review): save_attention / save_attention_map / save_attn_gradients are
        # defined elsewhere in this class (not visible in this chunk) — presumably for
        # attention-map inspection; confirm before relying on them.
        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs_dropped = attention_probs_dropped * head_mask

        context_layer = torch.matmul(attention_probs_dropped, value_layer)

        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size) for the output projection.
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        # The (key, value) cache tuple is always appended last.
        outputs = outputs + (past_key_value,)
        return outputs


class BertSelfOutput(nn.Module):
    """Output projection of self-attention: dense -> dropout -> residual add -> LayerNorm (post-LN, BERT style)."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # `input_tensor` is the residual branch (the attention block's input).
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertAttention(nn.Module):
    """Attention block pairing a BertSelfAttention with its BertSelfOutput projection.

    With ``is_cross_attention=True`` the inner attention attends over encoder
    hidden states instead of `hidden_states` itself.
    """

    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.self = BertSelfAttention(config, is_cross_attention)
        self.output = BertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        # Nothing to do for an empty head set.
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # Project the attention context and add the residual.
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion (hidden_size -> intermediate_size) plus activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class BertOutput(nn.Module):
    """Feed-forward contraction (intermediate_size -> hidden_size): dense -> dropout -> residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class BertLayer(nn.Module):
    """One transformer layer.

    Layers with index >= config.fusion_layer additionally run cross-attention
    over `encoder_hidden_states` (the multimodal-fusion part of this model).
    """

    def __init__(self, config, layer_num):
        super().__init__()
        self.config = config
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = BertAttention(config)

        # Only the upper layers (from fusion_layer onward) get a cross-attention block.
        self.has_cross_attention = (layer_num >= config.fusion_layer)
        if self.has_cross_attention:
            self.layer_num = layer_num
            self.crossattention = BertAttention(config, is_cross_attention=True)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # Middle entries are attention maps (if requested); last is the kv cache.
        outputs = self_attention_outputs[1:-1]
        present_key_value = self_attention_outputs[-1]

        if self.has_cross_attention:
            assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"

            # A list of encoder states spreads feature groups across fusion layers
            # (each fusion layer picks its own entry, round-robin).
            if type(encoder_hidden_states) == list:
                cross_attention_outputs = self.crossattention(
attention_output,
                    attention_mask,
                    head_mask,
                    encoder_hidden_states[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
                    encoder_attention_mask[(self.layer_num-self.config.fusion_layer)%len(encoder_hidden_states)],
                    output_attentions=output_attentions,
                )
                attention_output = cross_attention_outputs[0]
                outputs = outputs + cross_attention_outputs[1:-1]

            else:
                cross_attention_outputs = self.crossattention(
                    attention_output,
                    attention_mask,
                    head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions=output_attentions,
                )
                attention_output = cross_attention_outputs[0]
                outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
        # Feed-forward, optionally chunked along the sequence dimension to save memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        """Apply the intermediate + output feed-forward pair to one chunk."""
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class BertEncoder(nn.Module):
    """Stack of BertLayers.

    `mode` selects which part of the stack runs:
    - 'text':        layers [0, fusion_layer)       — unimodal text encoding
    - 'fusion':      layers [fusion_layer, end)     — cross-modal fusion only
    - 'multi_modal': the full stack
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        mode='multi_modal',
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        # NOTE(review): an unrecognized `mode` falls through with start_layer /
        # output_layer unbound and raises NameError below — confirm callers only
        # ever pass 'text' / 'fusion' / 'multi_modal'.
        if mode=='text':
            start_layer = 0
            output_layer = self.config.fusion_layer

        elif mode=='fusion':
            start_layer = self.config.fusion_layer
            output_layer = self.config.num_hidden_layers

        elif mode=='multi_modal':
            start_layer = 0
            output_layer = self.config.num_hidden_layers

        for i in range(start_layer, output_layer):
            layer_module = self.layer[i]
            if output_hidden_states:
                # Record the *input* of each layer; the final state is appended after the loop.
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:

                # Checkpointing re-runs the forward in backward, which is incompatible with caching.
                # NOTE(review): logger.warn is deprecated in favor of logger.warning.
                if use_cache:
                    logger.warn(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    # Close over past_key_value / output_attentions, since
                    # torch.utils.checkpoint only forwards tensor arguments.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # Each layer appends its (key, value) cache as the last output.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class BertPooler(nn.Module):
    """Pools the sequence into a single vector via a dense+tanh over the first token."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class BertPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the LM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act may be a string key into ACT2FN or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class BertLMPredictionHead(nn.Module):
    """Language-modeling head: transform followed by a vocab-size decoder with its own bias."""

    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


class BertOnlyMLMHead(nn.Module):
    """Pretraining head containing only the masked-LM prediction head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)

    def forward(self, sequence_output):
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class BertOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: a binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        seq_relationship_score = self.seq_relationship(pooled_output)
        return seq_relationship_score


class BertPreTrainingHeads(nn.Module):
    """Joint pretraining heads: MLM scores over tokens plus NSP (binary) scores."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class BertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BertConfig
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Zero Linear biases (the normal_ above only touched the weight).
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


@dataclass
class BertForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BertForPreTraining`.
    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): + Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, + sequence_length, sequence_length)`. + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + prediction_logits: torch.FloatTensor = None + seq_relationship_logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +BERT_START_DOCSTRING = r""" + This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic + methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, + pruning heads etc.) + This model is also a PyTorch `torch.nn.Module `__ + subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to + general usage and behavior. + Parameters: + config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model + weights. +""" + +BERT_INPUTS_DOCSTRING = r""" + Args: + input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): + Indices of input sequence tokens in the vocabulary. + Indices can be obtained using :class:`~transformers.BertTokenizer`. See + :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for + details. + `What are input IDs? 
<../glossary.html#input-ids>`__ + attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): + Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + `What are attention masks? <../glossary.html#attention-mask>`__ + token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, + 1]``: + - 0 corresponds to a `sentence A` token, + - 1 corresponds to a `sentence B` token. + `What are token type IDs? <../glossary.html#token-type-ids>`_ + position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, + config.max_position_embeddings - 1]``. + `What are position IDs? <../glossary.html#position-ids>`_ + head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): + Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): + Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert :obj:`input_ids` indices into associated + vectors than the model's internal embedding lookup matrix. + output_attentions (:obj:`bool`, `optional`): + Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned + tensors for more detail. + output_hidden_states (:obj:`bool`, `optional`): + Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors for + more detail. + return_dict (:obj:`bool`, `optional`): + Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", + BERT_START_DOCSTRING, +) +class BertModel(BertPreTrainedModel): + """ + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in `Attention is + all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an + input to the forward pass. + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="bert-base-uncased", + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + + + def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones( + (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype + ), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multi_modal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. 
+ use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, + device, is_decoder) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() + else: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + 
encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            mode=mode,
        )
        sequence_output = encoder_outputs[0]
        # Pooling is optional: absent when constructed with add_pooling_layer=False.
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )


@add_start_docstrings(
    """
    Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
    sentence prediction (classification)` head.
    """,
    BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config)
        self.cls = BertPreTrainingHeads(config)

        self.init_weights()

    def get_output_embeddings(self):
        # The MLM decoder weight doubles as the output embedding matrix (weight tying).
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language
modeling loss. Indices should be in ``[-100, 0, ..., + config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored + (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` + next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair + (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``: + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): + Used to hide legacy arguments that have been deprecated. + Returns: + Example:: + >>> from transformers import BertTokenizer, BertForPreTraining + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> model = BertForPreTraining.from_pretrained('bert-base-uncased') + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.prediction_logits + >>> seq_relationship_logits = outputs.seq_relationship_logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output, pooled_output = outputs[:2] + prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) + + total_loss = None + if labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + next_sentence_loss = 
loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + total_loss = masked_lm_loss + next_sentence_loss + + if not return_dict: + output = (prediction_scores, seq_relationship_score) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return BertForPreTrainingOutput( + loss=total_loss, + prediction_logits=prediction_scores, + seq_relationship_logits=seq_relationship_score, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """Bert Model with a `language modeling` head on top for CLM fine-tuning. """, BERT_START_DOCSTRING +) +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=True, + reduction='mean', + mode='multi_modal', + soft_labels=None, + alpha=0, + return_logits=False, + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of 
hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are + ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(reduction=reduction) + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1) + + if soft_labels is not None: + loss_distill = -torch.sum(F.log_softmax(shifted_prediction_scores, dim=-1)*soft_labels,dim=-1) + loss_distill = (loss_distill * (labels!=-100)).sum(1) + lm_loss = (1-alpha)*lm_loss + alpha*loss_distill + + if not return_dict: + output = (prediction_scores,) + 
outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "past_key_values": past, + "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), + "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), + "is_decoder": True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past + + +@add_start_docstrings("""Bert Model with a `language modeling` head on top. 
""", BERT_START_DOCSTRING) +class BertForMaskedLM(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="bert-base-uncased", + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multi_modal', + soft_labels=None, + alpha=0, + return_logits=False, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ..., + config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored + (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_embeds=encoder_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if soft_labels is not None: + loss_distill = -torch.sum(F.log_softmax(prediction_scores, dim=-1)*soft_labels,dim=-1) + loss_distill = loss_distill[labels!=-100].mean() + masked_lm_loss = (1-alpha)*masked_lm_loss + alpha*loss_distill + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + effective_batch_size = input_shape[0] + + # add a dummy token + assert self.config.pad_token_id is not None, "The PAD token should be defined for generation" + attention_mask = 
torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) + dummy_token = torch.full( + (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device + ) + input_ids = torch.cat([input_ids, dummy_token], dim=1) + + return {"input_ids": input_ids, "attention_mask": attention_mask} + + +@add_start_docstrings( + """Bert Model with a `next sentence prediction (classification)` head on top. """, + BERT_START_DOCSTRING, +) +class BertForNextSentencePrediction(BertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config) + self.cls = BertOnlyNSPHead(config) + + self.init_weights() + + @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **kwargs + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair + (see ``input_ids`` docstring). Indices should be in ``[0, 1]``: + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + Returns: + Example:: + >>> from transformers import BertTokenizer, BertForNextSentencePrediction + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + >>> model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased') + >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." + >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt') + >>> outputs = model(**encoding, labels=torch.LongTensor([1])) + >>> logits = outputs.logits + >>> assert logits[0, 0] < logits[0, 1] # next sentence was random + """ + + if "next_sentence_label" in kwargs: + warnings.warn( + "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.", + FutureWarning, + ) + labels = kwargs.pop("next_sentence_label") + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + seq_relationship_scores = self.cls(pooled_output) + + next_sentence_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) + + if not return_dict: + output = (seq_relationship_scores,) + outputs[2:] + return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output + + return NextSentencePredictorOutput( + loss=next_sentence_loss, + logits=seq_relationship_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled + output) e.g. for GLUE tasks. 
+ """, + BERT_START_DOCSTRING, +) +class BertForSequenceClassification(BertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="bert-base-uncased", + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., + config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), + If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.num_labels == 1: + # We are doing regression + loss_fct = MSELoss() + loss = loss_fct(logits.view(-1), labels.view(-1)) + else: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. 
+ """, + BERT_START_DOCSTRING, +) +class BertForMultipleChoice(BertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, 1) + + self.init_weights() + + @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="bert-base-uncased", + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., + num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See + :obj:`input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. 
+ """, + BERT_START_DOCSTRING, +) +class BertForTokenClassification(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = BertModel(config, add_pooling_layer=False) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="bert-base-uncased", + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - + 1]``. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels) + active_labels = torch.where( + active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) + ) + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
+ """, + BERT_START_DOCSTRING, +) +class BertForQuestionAnswering(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.bert = BertModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="bert-base-uncased", + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + start_positions=None, + end_positions=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the + sequence are not taken into account for computing the loss. + end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the + sequence are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions.clamp_(0, ignored_index) + end_positions.clamp_(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..e69de29