# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
from pathlib import Path

import torch
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from towhee import register
from towhee.operator.base import NNOperator, OperatorFlag
from towhee.types.arg import arg, to_image_color
from towhee.types.image_utils import from_pil, to_pil


@register(output_schema=['vec'])
class Blip(NNOperator):
    """
    BLIP image captioning operator.

    Generates a natural-language caption for an input image using a
    pretrained BLIP decoder.
    """
    def __init__(self, model_name: str):
        super().__init__()
        # Make the bundled BLIP model code importable.
        sys.path.append(str(Path(__file__).parent))
        from models.blip import blip_decoder

        image_size = 384
        model_url = self._configs()[model_name]['weights']
        self.model = blip_decoder(pretrained=model_url, image_size=image_size, vit='base')

        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.model.to(self.device)
        self.model.eval()

        # Preprocessing pipeline: resize to the model's input resolution and
        # normalize with the statistics the BLIP weights were trained with.
        self.tfms = transforms.Compose([
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
                                 (0.26862954, 0.26130258, 0.27577711))
        ])

    @arg(1, to_image_color('RGB'))
    def __call__(self, data):
        caption = self._inference_from_image(data)
        return caption

    @arg(1, to_image_color('RGB'))
    def _inference_from_image(self, img):
        img = self._preprocess(img)
        # Beam search (sample=False) tends to give more fluent captions
        # than stochastic sampling.
        caption = self.model.generate(img, sample=False, num_beams=3, max_length=20, min_length=5)
        return caption[0]

    def _preprocess(self, img):
        img = to_pil(img)
        processed_img = self.tfms(img).unsqueeze(0).to(self.device)
        return processed_img

    def _configs(self):
        # Map supported model names to their pretrained checkpoint URLs.
        config = {}
        config['blip_base'] = {}
        config['blip_base']['weights'] = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
        return config
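

# ---------------------------------------------------------------------------
# Minimal usage sketch (an assumption, not part of the operator itself):
# instantiating the operator directly and captioning a local image. The
# file name 'example.jpg' is hypothetical; in practice this operator is
# typically pulled from the Towhee hub and run inside a pipeline rather
# than constructed by hand.
if __name__ == '__main__':
    from PIL import Image

    op = Blip('blip_base')                     # 'blip_base' is the only configured model
    img = from_pil(Image.open('example.jpg'))  # hypothetical image path
    print(op(img))                             # prints the generated caption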