clip-caption-reward
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pathlib
import sys
from pathlib import Path

import torch
from torch import nn
from timm.models.vision_transformer import resize_pos_embed

from towhee.operator.base import NNOperator
from towhee.types.arg import arg, to_image_color
from towhee.types.image_utils import to_pil

class ClipCaptionReward(NNOperator):
    """
    CLIP-Caption-Reward image captioning operator: generates a text caption for an input image.
    """
    def __init__(self, model_name: str):
        super().__init__()
        sys.path.append(str(Path(__file__).parent))
        from utils import opts
        import clip
        # Assumption: TransformerModel comes from the CLIP-Caption-Reward captioning code
        # bundled with this operator and made importable by the sys.path line above.
        from captioning.models import TransformerModel

        path = pathlib.Path(__file__).parent
        # Assumption: `model_name` selects a YAML config shipped with the operator,
        # e.g. configs/<model_name>.yml next to this file.
        cfg = str(path / "configs" / "{}.yml".format(model_name))
        opt = opts.parse_opt(parse=False, cfg=cfg)

        with open("{}/data/cocotalk.json".format(path)) as f:
            dict_json = json.load(f)
        ix_to_word = dict_json["ix_to_word"]
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        clip_model, clip_transform = clip.load("RN50", jit=False, device=self.device)
        self.clip_model = clip_model
        self.clip_transform = clip_transform

        vocab_size = len(ix_to_word)
        seq_length = 1
        opt.vocab_size = vocab_size
        opt.seq_length = seq_length
        opt.batch_size = 1
        opt.vocab = ix_to_word

        # Resize CLIP's attention-pool positional embedding to 196 patches (+1 pooled token)
        # so the visual backbone matches the feature-grid size used for captioning.
        num_patches = 196  # 600 * 1000 // 32 // 32

        pos_embed = nn.Parameter(
            torch.zeros(
                1,
                num_patches + 1,
                clip_model.visual.attnpool.positional_embedding.shape[-1],
                device=self.device,
            ),
        )
        pos_embed.weight = resize_pos_embed(
            clip_model.visual.attnpool.positional_embedding.unsqueeze(0), pos_embed
        )
        self.clip_model.visual.attnpool.positional_embedding = pos_embed

        self.model = TransformerModel(opt)

        # CLIP RN50 normalization statistics.
        self.image_mean = (
            torch.Tensor([0.48145466, 0.4578275, 0.40821073])
            .to(self.device)
            .reshape(3, 1, 1)
        )
        self.image_std = (
            torch.Tensor([0.26862954, 0.26130258, 0.27577711])
            .to(self.device)
            .reshape(3, 1, 1)
        )

    @arg(1, to_image_color('RGB'))
    def inference_single_data(self, data):
        text = self._inference_from_image(data)
        return text

    @arg(1, to_image_color('RGB'))
    def _inference_from_image(self, img):
        img = to_pil(img)
        # `_preprocess` is assumed to apply `self.clip_transform` and return a tensor
        # on `self.device`; it is not shown in this excerpt.
        img = self._preprocess(img)
        img -= self.image_mean
        img /= self.image_std
        tmp_att, tmp_fc = self.clip_model.encode_image(img)
        tmp_att = tmp_att[0].permute(1, 2, 0)

        att_feat = tmp_att

        return att_feat

    def __call__(self, data):
        if not isinstance(data, list):
            data = [data]
        results = []
        for single_data in data:
            result = self.inference_single_data(single_data)
            results.append(result)
        if len(data) == 1:
            return results[0]
        return results
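
Example usage (a minimal sketch, not part of the original file): operators like this are normally pulled from the Towhee hub and run inside a pipeline. The operator name `image-captioning/clip-caption-reward` and the `model_name` value below are assumptions based on this repository's name; they may differ from the configs actually shipped with it.

from towhee import pipe, ops

# Build a small pipeline: decode an image file, then caption it with this operator.
caption_pipeline = (
    pipe.input('path')
        .map('path', 'img', ops.image_decode())
        .map('img', 'text', ops.image_captioning.clip_caption_reward(model_name='clipRN50_clips_grammar'))  # model_name is illustrative
        .output('text')
)

# Prints the generated caption for the given image file.
print(caption_pipeline('example.jpg').get())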