From 8acd17081770ae14fdde34208fc84b7985cbfcc0 Mon Sep 17 00:00:00 2001
From: wxywb
Date: Fri, 17 Jun 2022 11:40:58 +0800
Subject: [PATCH] fix for gpu inference

Signed-off-by: wxywb
---
 blip.py          | 3 ++-
 requirements.txt | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/blip.py b/blip.py
index e224790..ea6e062 100644
--- a/blip.py
+++ b/blip.py
@@ -39,6 +39,8 @@ class Blip(NNOperator):
         self._modality = modality
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.model.to(self.device)
+        self.model.eval()
 
         self.tfms = transforms.Compose([
             transforms.Resize((image_size,image_size),interpolation=InterpolationMode.BICUBIC),
             ])
@@ -47,7 +49,6 @@ class Blip(NNOperator):
             ])
 
     def __call__(self, data):
-        print('call')
         if self._modality == 'image':
             vec = self._inference_from_image(data)
         elif self._modality == 'text':
diff --git a/requirements.txt b/requirements.txt
index b601a2f..9c8c547 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,4 +3,4 @@ torchvision>=0.10.0
 Pillow
 towhee
 timm
-transformers
+transformers>=4.15.0
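
Note on the change: the model's weights are loaded on CPU by default, so they must be moved onto the detected device before any CUDA tensor is pushed through the forward pass, and eval() switches off dropout and batch-norm updates for inference. The sketch below illustrates the same device-placement pattern in isolation; ToyEncoder and run_inference are hypothetical names used only for the example and are not part of blip.py.

import torch
import torch.nn as nn

# Minimal sketch of the device-placement pattern applied by the patch.
# Only the to(device) / eval() / matching-input-device / no_grad steps
# mirror the fix; the module itself is a stand-in.
class ToyEncoder(nn.Module):
    def __init__(self, dim: int = 16):
        super().__init__()
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)

device = "cuda" if torch.cuda.is_available() else "cpu"
model = ToyEncoder()
model.to(device)   # parameters must live on the same device as the inputs
model.eval()       # disable dropout / batch-norm updates during inference

def run_inference(batch: torch.Tensor) -> torch.Tensor:
    with torch.no_grad():                 # no autograd graph needed at inference time
        return model(batch.to(device))    # move inputs to the model's device

print(run_inference(torch.randn(2, 16)).shape)  # torch.Size([2, 16])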