From a1809f9ff8f0046d1606f33feebffd79cde43fa7 Mon Sep 17 00:00:00 2001
From: shiyu22
Date: Mon, 27 Mar 2023 15:48:47 +0800
Subject: [PATCH] Add image embedding

Signed-off-by: shiyu22

Add image embedding

Signed-off-by: shiyu22

Add image embedding

Signed-off-by: shiyu22

Update README

Signed-off-by: shiyu22
---
 README.md          | 67 +++++++++++++++++++++++++++++++++++++++++++++-
 image_embedding.py | 38 ++++++++++++++++++++++++++
 2 files changed, 104 insertions(+), 1 deletion(-)
 create mode 100644 image_embedding.py

diff --git a/README.md b/README.md
index d64e882..f329ad8 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,67 @@
-# image-embedding
+# Image Embedding
+
+## **Description**
+
+An image embedding pipeline generates a vector given an image. This pipeline extracts features from an image with 'ResNet50' models provided by [Timm](https://github.com/rwightman/pytorch-image-models). Timm is a deep-learning library developed by [Ross Wightman](https://twitter.com/wightmanr), who maintains SOTA deep-learning models and tools in computer vision.
+
+
+
+## Code Example
+
+- Create an image embedding pipeline with the default configuration.
+
+```python
+from towhee import AutoPipes
+
+p = AutoPipes.pipeline('image-embedding')
+res = p('https://github.com/towhee-io/towhee/raw/main/towhee_logo.png')
+res.get()
+```
+
+- Create an image embedding pipeline and set the configuration.
+
+> For more parameters, refer to the API Interface.
+
+```python
+from towhee import AutoPipes, AutoConfig
+
+conf = AutoConfig.load_config('image-embedding')
+conf.model_name = 'resnet34'
+
+p = AutoPipes.pipeline('image-embedding', conf)
+res = p('https://github.com/towhee-io/towhee/raw/main/towhee_logo.png')
+res.get()
+```
+
+
+
+## **Interface**
+
+**ImageEmbeddingConfig**
+
+> You can find some parameters in the [image_decode.cv2](https://towhee.io/image-decode/cv2) and [image_embedding.timm](https://towhee.io/image-embedding/timm) operators.
+
+***mode:*** str
+
+The mode for the image, 'BGR' or 'RGB', defaults to 'BGR'.
+
+***model_name:*** *str*
+
+The model name in string. The default value is "resnet50". Refer to [Timm Docs](https://fastai.github.io/timmdocs/#List-Models-with-Pretrained-Weights) to get a full list of supported models.
+
+***num_classes:*** *int*
+
+The number of classes. The default value is 1000. It is related to the model and dataset.
+
+***skip_preprocess:*** *bool*
+
+The flag to control whether to skip image pre-processing. The default value is False. If set to True, it will skip the image preprocessing steps (transforms). In this case, input image data must be prepared in advance in order to properly fit the model.
+
+***device***: int
+
+The ID of the GPU device, defaults to -1, which means using CPU.
+
+
+
+
diff --git a/image_embedding.py b/image_embedding.py
new file mode 100644
index 0000000..53f8061
--- /dev/null
+++ b/image_embedding.py
@@ -0,0 +1,38 @@
+from towhee import pipe, ops, AutoPipes, AutoConfig
+
+
+@AutoConfig.register
+class ImageEmbeddingConfig:
+    def __init__(self):
+        # config for image_decode.cv2
+        self.mode = 'BGR'
+
+        # config for image_embedding.timm
+        self.model_name = 'resnet50'
+        self.num_classes: int = 1000
+        self.skip_preprocess: bool = False
+
+        # config for triton
+        self.device = -1
+
+
+@AutoPipes.register
+def ImageEmbedding(config=None):
+    if not config:
+        config = ImageEmbeddingConfig()
+
+    if config.device >= 0:
+        op_config = AutoConfig.TritonGPUConfig(device_ids=[config.device], max_batch_size=128)
+    else:
+        op_config = AutoConfig.TritonCPUConfig()
+
+
+    return (
+        pipe.input('path')
+        .map('path', 'img', ops.image_decode(mode=config.mode))
+        .map('img', 'embedding', ops.image_embedding.timm(model_name=config.model_name,
+                                                          num_classes=config.num_classes,
+                                                          skip_preprocess=config.skip_preprocess),
+             config=op_config)
+        .output('embedding')
+    )