
Add SWAG

Signed-off-by: Jael Gu <mengjia.gu@zilliz.com>
main
Jael Gu 2 years ago
parent
commit
75e875544d
  1. README.md (+98)
  2. __init__.py (+19)
  3. requirements.txt (+5)
  4. result1.png (BIN)
  5. result2.png (BIN)
  6. swag.py (+181)

98
README.md

@@ -1,2 +1,98 @@
# swag
# Image Embedding with SWAG
*author: [Jael Gu](https://github.com/jaelgu)*
<br />
## Description
An image embedding operator generates a vector given an image.
This operator extracts features for an image with pretrained [SWAG](https://github.com/facebookresearch/SWAG) models from [Torch Hub](https://pytorch.org/docs/stable/hub.html).
SWAG implements models from the paper [Revisiting Weakly Supervised Pre-Training of Visual Perception Models](https://arxiv.org/abs/2201.08371).
To achieve higher accuracy in image classification, SWAG uses hashtags to perform weakly supervised learning instead of fully supervised pretraining with image class labels.
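Under the hood, the operator pulls the selected checkpoint from Torch Hub. Below is a minimal sketch of that step; the dummy input is for illustration only, and per the transforms in swag.py the vit_b16_in1k checkpoint expects 384x384 RGB inputs.
```python
import torch

# Load the pretrained SWAG backbone from Torch Hub
# (the same call this operator makes internally).
model = torch.hub.load("facebookresearch/swag", model="vit_b16_in1k")
model.eval()

# Illustration only: run a dummy 384x384 RGB batch through the model.
dummy = torch.ones(1, 3, 384, 384)
with torch.no_grad():
    out = model(dummy)
print(out.shape)
```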
<br />
## Code Example
Load an image from path './towhee.jpg'
and use the pretrained SWAG model 'vit_b16_in1k' to generate an image embedding.
*Write the pipeline in simplified style:*
```python
import towhee

(
    towhee.glob('./towhee.jpg')
          .image_decode()
          .image_embedding.swag(model_name='vit_b16_in1k')
          .show()
)
```
<img src="./result1.png" width="800px"/>
*Write the same pipeline with explicit input/output name specifications:*
```python
import towhee

(
    towhee.glob['path']('./towhee.jpg')
          .image_decode['path', 'img']()
          .image_embedding.swag['img', 'vec'](model_name='vit_b16_in1k')
          .select['img', 'vec']()
          .show()
)
```
<img src="./result2.png" width="800px"/>
<br />
## Factory Constructor
Create the operator via the following factory method
***image_embedding.swag(model_name='vit_b16_in1k', skip_preprocess=False)***
**Parameters:**
***model_name:*** *str*
The model name as a string. The default value is "vit_b16_in1k".
Supported model names:
- vit_b16_in1k
- vit_l16_in1k
- vit_h14_in1k
- regnety_16gf_in1k
- regnety_32gf_in1k
- regnety_128gf_in1k
***skip_preprocess:*** *bool*
The flag to control whether to skip image preprocessing.
The default value is False.
If set to True, it will skip image preprocessing steps (transforms).
In this case, input image data must be prepared in advance in order to properly fit the model.
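For example, a minimal sketch of creating the operator via this factory method:
```python
from towhee import ops

# Create the SWAG image embedding operator with its default settings.
op = ops.image_embedding.swag(model_name='vit_b16_in1k', skip_preprocess=False)
```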
<br />
## Interface
An image embedding operator takes a towhee image as input.
It uses the pre-trained model specified by the model name to generate an image embedding as a numpy.ndarray.
**Parameters:**
***img:*** *towhee.types.Image (a sub-class of numpy.ndarray)*
The decoded image data as a numpy.ndarray.
**Returns:** *numpy.ndarray*
The image embedding extracted by the model.
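A minimal sketch of calling the operator directly on a decoded image (using the same './towhee.jpg' example as above):
```python
from towhee import ops

# Decode an image into a towhee Image (a sub-class of numpy.ndarray).
decoder = ops.image_decode.cv2()
img = decoder('./towhee.jpg')

# Generate the embedding; the result is a flat numpy.ndarray.
op = ops.image_embedding.swag(model_name='vit_b16_in1k')
vec = op(img)
print(vec.shape)
```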

19
__init__.py

@@ -0,0 +1,19 @@
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .swag import Swag


def swag(**kwargs):
    return Swag(**kwargs)

5
requirements.txt

@@ -0,0 +1,5 @@
numpy
pillow
towhee>=0.6.1
torch>=1.8.0
torchvision>=0.9.0

BIN
result1.png

Binary file not shown. (Size: 3.9 KiB)

BIN
result2.png

Binary file not shown. (Size: 81 KiB)

181
swag.py

@@ -0,0 +1,181 @@
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy
import os
from pathlib import Path

import towhee
from towhee.operator.base import NNOperator, OperatorFlag
from towhee.types.arg import arg, to_image_color
from towhee import register

import torch
from torch import nn
from torchvision import transforms
from PIL import Image as PILImage

import warnings
warnings.filterwarnings('ignore')

log = logging.getLogger()


@register(output_schema=['vec'])
class Swag(NNOperator):
"""
Pytorch image embedding operator that uses the Pytorch Image Model (timm) collection.
Args:
model_name (`str`):
Which model to use for the embeddings.
skip_preprocess (`bool = False`):
Whether skip image transforms.
"""

    def __init__(self, model_name: str, skip_preprocess: bool = False) -> None:
        super().__init__()
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.skip_tfms = skip_preprocess
        self.tfms = self.get_transforms(model_name)
        self.model_name = model_name
        # Load the pretrained SWAG model from Torch Hub and put it in eval mode.
        self.model = torch.hub.load("facebookresearch/swag", model=model_name)
        self.model.to(self.device)
        self.model.eval()
        self.extract_features = FeatureExtractor(self.model)

    @arg(1, to_image_color('RGB'))
    def __call__(self, img: towhee._types.Image) -> numpy.ndarray:
        img = PILImage.fromarray(img.astype('uint8'), 'RGB')
        if not self.skip_tfms:
            img = self.tfms(img).unsqueeze(0)
        img = img.to(self.device)
        features, _ = self.extract_features(img)
        # 4-d (N, C, H, W) feature maps are global-average-pooled into a flat vector.
        if features.dim() == 4:
            global_pool = nn.AdaptiveAvgPool2d(1)
            features = global_pool(features)
        features = features.to('cpu')
        vec = features.flatten().detach().numpy()
        return vec

    def save_model(self, format: str = 'pytorch', path: str = 'default'):
        if path == 'default':
            path = str(Path(__file__).parent)
        name = self.model_name.replace('/', '-')
        path = os.path.join(path, name)
        inputs = torch.ones(1, 3, 224, 224)
        if format == 'pytorch':
            torch.save(self.model, path)
        elif format == 'torchscript':
            path = path + '.pt'
            try:
                try:
                    jit_model = torch.jit.script(self.model)
                except Exception:
                    jit_model = torch.jit.trace(self.model, inputs, strict=False)
                torch.jit.save(jit_model, path)
            except Exception as e:
                log.error(f'Fail to save as torchscript: {e}.')
                raise RuntimeError(f'Fail to save as torchscript: {e}.')
        elif format == 'onnx':
            pass  # todo
        else:
            log.error(f'Save model: unsupported format "{format}".')

    @staticmethod
    def supported_model_names(format: str = None):
        full_list = [
            'vit_h14_in1k',
            'vit_l16_in1k',
            'vit_b16_in1k',
            'regnety_16gf_in1k',
            'regnety_32gf_in1k',
            'regnety_128gf_in1k',
        ]
        full_list.sort()
        if format is None:
            model_list = full_list
        elif format == 'pytorch':
            to_remove = []
            assert set(to_remove).issubset(set(full_list))
            model_list = list(set(full_list) - set(to_remove))
        else:  # todo: format in {'torchscript', 'onnx', 'tensorrt'}
            log.error(f'Invalid format "{format}". Currently supported formats: "pytorch".')
            model_list = []
        return model_list

    @staticmethod
    def get_transforms(model_name):
        model_resolution = {
            'vit_h14_in1k': 518,
            'vit_l16_in1k': 512,
            'vit_b16_in1k': 384,
            'regnety_16gf_in1k': 384,
            'regnety_32gf_in1k': 384,
            'regnety_128gf_in1k': 384
        }
        if model_name not in model_resolution.keys():
            log.warning('No transforms specified for model "%s", using resolution 384.', model_name)
            resolution = 384
        else:
            resolution = model_resolution[model_name]
        transform = transforms.Compose([
            transforms.Resize(
                resolution,
                interpolation=transforms.InterpolationMode.BICUBIC,
            ),
            transforms.CenterCrop(resolution),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
            ),
        ])
        return transform


class FeatureExtractor(nn.Module):
    """Captures intermediate features from the wrapped model's 'trunk_output' child via a forward hook."""

    def __init__(self, model: nn.Module):
        super().__init__()
        self.model = model
        self.features = None
        for name, child in self.model.named_children():
            if name == 'trunk_output':
                self.handler = child.register_forward_hook(self.save_outputs_hook())

    def save_outputs_hook(self):
        def fn(_, __, output):
            self.features = output
        return fn

    def forward(self, x):
        outs = self.model(x)
        self.handler.remove()
        return self.features, outs


if __name__ == '__main__':
    from towhee import ops

    path = '/Users/mengjiagu/Desktop/models/data/image/animals10/bird.jpg'
    decoder = ops.image_decode.cv2()
    img = decoder(path)

    op = Swag('vit_b16_in1k')
    # op = Swag('regnety_16gf_in1k')
    out = op(img)
    print(out.shape)