@@ -29,11 +29,11 @@ import towhee
 
 towhee.dc(['./demo_video.mp4']) \
       .video_decode.ffmpeg(sample_type='uniform_temporal_subsample', args={'num_samples': 12}) \
       .runas_op(func=lambda x: [y for y in x]) \
-      .clip4clip(model_name='clip_vit_b32', modality='video', weight_path='./pytorch_model.bin.1') \
+      .clip4clip(model_name='clip_vit_b32', modality='video', weight_path='./pytorch_model.bin.1', device='cpu') \
       .show()
 
 towhee.dc(['kids feeding and playing with the horse']) \
-      .clip4clip(model_name='clip_vit_b32', modality='text', weight_path='./pytorch_model.bin.1') \
+      .clip4clip(model_name='clip_vit_b32', modality='text', weight_path='./pytorch_model.bin.1', device='cpu') \
       .show()
 ```
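Note: the `device='cpu'` argument added in this hunk pins the operator to CPU. If the goal is instead to use CPU only as a fallback, a minimal sketch of runtime device selection (assuming, without confirmation from this diff, that the operator also accepts `'cuda'` the way PyTorch does):

```
import torch
import towhee

# Hypothetical fallback: this diff only demonstrates device='cpu';
# accepting 'cuda' here is an assumption, not shown in the change.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

towhee.dc(['kids feeding and playing with the horse']) \
      .clip4clip(model_name='clip_vit_b32', modality='text',
                 weight_path='./pytorch_model.bin.1', device=device) \
      .show()
```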

![](vect_simplified_video.png)
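For intuition about the decode step above: `sample_type='uniform_temporal_subsample'` with `args={'num_samples': 12}` keeps 12 evenly spaced frames from the clip. An illustrative sketch of the index selection only (not the operator's actual implementation):

```
import numpy as np

def uniform_indices(total_frames: int, num_samples: int) -> np.ndarray:
    # Evenly spaced frame indices from the first frame to the last.
    return np.linspace(0, total_frames - 1, num_samples).round().astype(int)

print(uniform_indices(300, 12))
# [  0  27  54  82 109 136 163 190 217 245 272 299]
```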
@@ -47,11 +47,11 @@ import towhee
 towhee.dc['path'](['./demo_video.mp4']) \
       .video_decode.ffmpeg['path', 'frames'](sample_type='uniform_temporal_subsample', args={'num_samples': 12}) \
       .runas_op['frames', 'frames'](func=lambda x: [y for y in x]) \
-      .clip4clip['frames', 'vec'](model_name='clip_vit_b32', modality='video', weight_path='./pytorch_model.bin.1') \
+      .clip4clip['frames', 'vec'](model_name='clip_vit_b32', modality='video', weight_path='./pytorch_model.bin.1', device='cpu') \
       .show()
 
 towhee.dc['text'](["kids feeding and playing with the horse"]) \
-      .clip4clip['text','vec'](model_name='clip_vit_b32', modality='text', weight_path='./pytorch_model.bin.1') \
+      .clip4clip['text','vec'](model_name='clip_vit_b32', modality='text', weight_path='./pytorch_model.bin.1', device='cpu') \
       .select['text', 'vec']() \
       .show()
 ```
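Both modalities write their embedding into the `vec` column, so a text query can be scored against a video embedding directly. A self-contained sketch using cosine similarity (the random vectors are stand-ins for real `vec` values; the 512 dimensions match CLIP ViT-B/32 but are an assumption here):

```
import numpy as np

def cosine_similarity(video_vec: np.ndarray, text_vec: np.ndarray) -> float:
    # Normalize both embeddings, then take their dot product.
    v = video_vec / np.linalg.norm(video_vec)
    t = text_vec / np.linalg.norm(text_vec)
    return float(np.dot(v, t))

# Stand-in vectors; in practice these come from the pipelines above.
print(cosine_similarity(np.random.rand(512), np.random.rand(512)))
```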