Update the README.
Signed-off-by: wxywb <xy.wang@zilliz.com>
wxywb committed 2 years ago on branch main

2 changed files with 10 additions and 18 deletions
- README.md
- clip_caption_reward.py
README.md
@@ -17,28 +17,19 @@ This operator generates the caption with [CLIPReward](https://arxiv.org/abs/2205
 Load an image from path './animals.jpg' to generate the caption.

-*Write the pipeline in simplified style*:
+*Write a pipeline with explicit inputs/outputs name specifications:*

 ```python
-import towhee
+from towhee.dc2 import pipe, ops, DataCollection

-towhee.glob('./animals.jpg') \
-      .image_decode() \
-      .image_captioning.clip_caption_reward(model_name='clipRN50_clips_grammar') \
-      .show()
-```
-<img src="./cap.png" alt="result1" style="height:20px;"/>
-
-*Write a same pipeline with explicit inputs/outputs name specifications:*
-
-```python
-import towhee
-
-towhee.glob['path']('./animals.jpg') \
-      .image_decode['path', 'img']() \
-      .image_captioning.clip_caption_reward['img', 'text'](model_name='clipRN50_clips_grammar') \
-      .select['img', 'text']() \
-      .show()
+p = (
+    pipe.input('url')
+        .map('url', 'img', ops.image_decode.cv2_rgb())
+        .map('img', 'text', ops.image_captioning.clip_caption_reward(model_name='clipRN50_clips_grammar'))
+        .output('img', 'text')
+)
+
+DataCollection(p('./animals.jpg')).show()
 ```
 <img src="./tabular.png" alt="result2" style="height:60px;"/>
clip_caption_reward.py
@@ -80,6 +80,7 @@ class ClipCaptionReward(NNOperator):
         self.model = TransformerModel(opt)
         self.model.load_state_dict(raw_state_dict)
+        self.model.to(self.device)

         self.image_mean = (
             torch.Tensor([0.48145466, 0.4578275, 0.40821073])
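The one-line fix above matters because `load_state_dict` only copies weights into the module's existing parameters; it does not move the module itself, so without an explicit `.to(device)` the parameters stay wherever they were created (typically CPU) and GPU inputs fail with a device-mismatch error. A minimal sketch of the pattern, using a placeholder `nn.Linear` in place of the operator's `TransformerModel`:

```python
import torch
from torch import nn

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Placeholder module standing in for TransformerModel(opt).
model = nn.Linear(512, 512)
raw_state_dict = model.state_dict()  # stands in for the real checkpoint

model.load_state_dict(raw_state_dict)  # copies weights; does NOT move the module
model.to(device)                       # the fix: place parameters on the target device

x = torch.randn(1, 512, device=device)
y = model(x)  # succeeds; without model.to(device), a CUDA input raises a RuntimeError
```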