from bertviz.transformers_neuron_view import BertModel, BertConfig
from transformers import BertTokenizer
from typing import NamedTuple
import numpy
import torch
from towhee.operator import Operator


class TorchBert(Operator):
    """
    Text to embedding using BERT.
    """
    def __init__(self, max_length: int = 256, framework: str = 'pytorch') -> None:
        super().__init__()
        config = BertConfig.from_pretrained("bert-base-cased", output_attentions=True,
                                            output_hidden_states=True, return_dict=True)
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
        config.max_position_embeddings = max_length
        self.max_length = max_length
        model = BertModel(config)
        self.model = model.eval()

    def __call__(self, text: str) -> NamedTuple('Outputs', [('embs', numpy.ndarray)]):
        inputs = self.tokenizer(text, truncation=True, padding=True,
                                max_length=self.max_length, return_tensors='pt')
        # Input representation: sum of word, position, and token-type embeddings
        # (the three terms must form a single expression, hence the parentheses).
        f1 = (torch.index_select(self.model.embeddings.word_embeddings.weight, 0,
                                 inputs['input_ids'][0])  # word embeddings
              + torch.index_select(self.model.embeddings.position_embeddings.weight, 0,
                                   torch.tensor(range(inputs['input_ids'][0].size(0))).long())  # position embeddings
              + torch.index_select(self.model.embeddings.token_type_embeddings.weight, 0,
                                   inputs['token_type_ids'][0]))  # token-type embeddings
        # Single-example normalization: apply LayerNorm by hand to the first token's embedding.
        ex1 = f1[0, :]
        ex1_mean = ex1.mean()
        ex1_var = (ex1 - ex1_mean).pow(2).mean()  # biased variance over the hidden dimension
        norm_embedding = (ex1 - ex1_mean) / torch.sqrt(ex1_var + 1e-12)
        norm_embedding_centered = self.model.embeddings.LayerNorm.weight * norm_embedding \
            + self.model.embeddings.LayerNorm.bias
        Outputs = NamedTuple('Outputs', [('embs', numpy.ndarray)])
        return Outputs(norm_embedding_centered.detach().numpy())
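

# A minimal usage sketch, not part of the original operator file: it assumes
# bertviz, transformers, torch, and towhee are installed, and simply calls the
# operator directly, which works because the class implements __call__.
if __name__ == '__main__':
    op = TorchBert(max_length=256)
    out = op("Hello, world!")
    # `embs` holds the normalized embedding of the first token,
    # e.g. shape (768,) for bert-base-cased.
    print(out.embs.shape)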