diff --git a/README.md b/README.md
index e6bf2e5..15d3d25 100644
--- a/README.md
+++ b/README.md
@@ -15,8 +15,8 @@ and maps vectors with labels provided by datasets used for pre-training.
 
 ## Code Example
 
-Use the pretrained Movinet model to classify and generate a vector for the given video path './archery.mp4'
-([download](https://dl.fbaipublicfiles.com/pytorchvideo/projects/archery.mp4)).
+Use the pretrained Movinet model to classify and generate a vector for the given video path './jumpingjack.gif'
+([download](https://github.com/tensorflow/models/raw/f8af2291cced43fc9f1d9b41ddbf772ae7b0d7d2/official/projects/movinet/files/jumpingjack.gif)).
 
 *Write the pipeline in simplified style*:
 
@@ -25,7 +25,7 @@ Use the pretrained Movinet model to classify and generate a vector for the given
 import towhee
 
 (
-    towhee.glob('./archery.mp4')
+    towhee.glob('./jumpingjack.gif')
           .video_decode.ffmpeg()
           .action_classification.movinet(
               model_name='movineta0', topk=5)
@@ -40,7 +40,7 @@ import towhee
 import towhee
 
 (
-    towhee.glob['path']('./archery.mp4')
+    towhee.glob['path']('./jumpingjack.gif')
           .video_decode.ffmpeg['path', 'frames']()
           .action_classification.movinet['frames', ('labels', 'scores', 'features')](
               model_name='movineta0')
@@ -57,14 +57,14 @@ import towhee
 
 Create the operator via the following factory method
 
-***video_classification.omnivore(
+***video_classification.movinet(
 model_name='movineta0', skip_preprocess=False, classmap=None, topk=5)***
 
 **Parameters:**
 
 ​	***model_name***: *str*
 
-​	The name of pre-trained movinet model.
+​	The name of pre-trained MoViNet model.
 
 ​	Supported model names:
 -  movineta0
diff --git a/movinet.py b/movinet.py
index efaf6e5..3dbeb4e 100644
--- a/movinet.py
+++ b/movinet.py
@@ -71,7 +71,7 @@ class Movinet(NNOperator):
         self.transform_cfgs = get_configs(
             side_size=172,
             crop_size=172,
-            num_frames=30,
+            num_frames=13,
             mean=self.input_mean,
             std=self.input_std,
         )
@@ -104,6 +104,7 @@ class Movinet(NNOperator):
         )
         inputs = data.to(self.device)[None, ...]
 
+        self.model.clean_activation_buffers()
         feats = self.model.forward_features(inputs)
         features = feats.to('cpu').squeeze(0).detach().numpy()
 
diff --git a/result1.png b/result1.png
new file mode 100644
index 0000000..8730ba5
Binary files /dev/null and b/result1.png differ
diff --git a/result2.png b/result2.png
new file mode 100644
index 0000000..6dfadb7
Binary files /dev/null and b/result2.png differ
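
A note on the `movinet.py` change above: MoViNet's stream-buffer variants cache causal-convolution activations between forward passes, so two independent clips must not share state; calling `clean_activation_buffers()` before `forward_features` keeps each video's prediction and embedding self-contained. Below is a minimal sketch of that behavior, assuming the `movinets` PyPI package that this operator appears to wrap (the `MoViNet`, `_C`, and `clean_activation_buffers` names come from that package, not from this repo, and the clip shapes are illustrative):

```python
import torch
from movinets import MoViNet
from movinets.config import _C

# Causal (stream) MoViNet-A0, matching the operator's model_name='movineta0' default.
model = MoViNet(_C.MODEL.MoViNetA0, causal=True, pretrained=True)
model.eval()

# Two independent 13-frame clips, shaped (batch, channels, frames, height, width);
# 13 frames and 172x172 match the transform config in the diff above.
clip_a = torch.rand(1, 3, 13, 172, 172)
clip_b = torch.rand(1, 3, 13, 172, 172)

with torch.no_grad():
    out_a = model(clip_a)
    # Without this reset, activations buffered while processing clip_a
    # would leak into the prediction for clip_b.
    model.clean_activation_buffers()
    out_b = model(clip_b)
```

The streaming design is also why `num_frames` drops from 30 to 13: the causal model processes a fixed short window per call rather than a long clip, so the preprocessing window should match the frame count the pretrained stream weights expect.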