From 1e4938675937571b8c83ca15598bf40c985ae7b5 Mon Sep 17 00:00:00 2001
From: ChengZi
Date: Mon, 30 May 2022 14:21:33 +0800
Subject: [PATCH] set default weights is None in readme.

---
 README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 38c0493..120fe66 100644
--- a/README.md
+++ b/README.md
@@ -29,11 +29,11 @@ import towhee
 towhee.dc(['./demo_video.mp4']) \
       .video_decode.ffmpeg(sample_type='uniform_temporal_subsample', args={'num_samples': 12}) \
       .runas_op(func=lambda x: [y for y in x]) \
-      .clip4clip(model_name='clip_vit_b32', modality='video', weight_path='./pytorch_model.bin.1', device='cpu') \
+      .clip4clip(model_name='clip_vit_b32', modality='video', device='cpu') \
       .show()

 towhee.dc(['kids feeding and playing with the horse']) \
-      .clip4clip(model_name='clip_vit_b32', modality='text', weight_path='./pytorch_model.bin.1', device='cpu') \
+      .clip4clip(model_name='clip_vit_b32', modality='text', device='cpu') \
       .show()
 ```
 ![](vect_simplified_video.png)
@@ -47,11 +47,11 @@ import towhee
 towhee.dc['path'](['./demo_video.mp4']) \
       .video_decode.ffmpeg['path', 'frames'](sample_type='uniform_temporal_subsample', args={'num_samples': 12}) \
       .runas_op['frames', 'frames'](func=lambda x: [y for y in x]) \
-      .clip4clip['frames', 'vec'](model_name='clip_vit_b32', modality='video', weight_path='./pytorch_model.bin.1', device='cpu') \
+      .clip4clip['frames', 'vec'](model_name='clip_vit_b32', modality='video', device='cpu') \
       .show()

 towhee.dc['text'](["kids feeding and playing with the horse"]) \
-      .clip4clip['text','vec'](model_name='clip_vit_b32', modality='text', weight_path='./pytorch_model.bin.1', device='cpu') \
+      .clip4clip['text','vec'](model_name='clip_vit_b32', modality='text', device='cpu') \
       .select['text', 'vec']() \
       .show()
 ```