Generate embeddings for the audio "test.wav".

*Write the pipeline in simplified style*:

```python
import towhee

(
    towhee.glob('test.wav')
          .audio_decode.ffmpeg()
          .runas_op(func=lambda x: [y[0] for y in x])
          .audio_embedding.clmr()
          .show()
)
```

| [-2.1045141, 0.55381, 0.4537212, ...] shape=(6, 512) |

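`.show()` only renders the result table. To work with the embeddings directly, the same pipeline can end with `to_list()` instead; this is a minimal sketch, assuming the Towhee DataCollection API used above provides `to_list()` (the `vecs` variable name is illustrative):

```python
import towhee

# Sketch: collect the embeddings instead of displaying them.
vecs = (
    towhee.glob('test.wav')
          .audio_decode.ffmpeg()                       # decode the file into audio frames
          .runas_op(func=lambda x: [y[0] for y in x])  # keep only the first element of each decoded item
          .audio_embedding.clmr()                      # CLMR embeddings for the audio
          .to_list()
)
print(vecs[0].shape)  # (6, 512) for this file, matching the output shown above
```
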
*Write the same pipeline with explicit input/output name specifications:*

```python
import towhee

(
    towhee.glob['path']('test.wav')
          .audio_decode.ffmpeg['path', 'frames']()
          .runas_op['frames', 'frames'](func=lambda x: [y[0] for y in x])
          .audio_embedding.clmr['frames', 'vecs']()
          .select['path', 'vecs']()
          .show()
)
```

```
[array([[-2.1045141 ,  0.55381   ,  0.4537212 , ...,  0.18805158,
          0.3079657 , -1.216063  ],
        [-2.1045141 ,  0.55381036,  0.45372102, ...,  0.18805173,
          0.3079657 , -1.216063  ],
        [-2.0874703 ,  0.5511826 ,  0.46051833, ...,  0.18650496,
          0.33218473, -1.2182183 ],
        [-2.0874703 ,  0.55118287,  0.4605182 , ...,  0.18650502,
          0.3321851 , -1.2182183 ],
        [-2.0771544 ,  0.5641223 ,  0.43814823, ...,  0.18220925,
          0.33022994, -1.2070589 ],
        [-2.0771549 ,  0.5641221 ,  0.43814805, ...,  0.1822092 ,
          0.33022994, -1.2070588 ]], dtype=float32)]
```
<img src="./result.png" width="800px"/>
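
The explicit-schema pipeline extends naturally from a single file to a folder of audio, since `towhee.glob` accepts wildcard patterns. A minimal sketch, assuming several `.wav` files in the working directory and the same `to_list()` collection step as above (the pattern and the `results` name are illustrative):

```python
import towhee

# Illustrative: embed every .wav file in the current directory and keep
# only the file path plus its embedding matrix for each entry.
results = (
    towhee.glob['path']('*.wav')
          .audio_decode.ffmpeg['path', 'frames']()
          .runas_op['frames', 'frames'](func=lambda x: [y[0] for y in x])
          .audio_embedding.clmr['frames', 'vecs']()
          .select['path', 'vecs']()
          .to_list()
)
```
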
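
The embedding matrix returned for a track can then feed downstream tasks such as similarity search. Below is a minimal, numpy-only sketch of mean-pooling the per-clip vectors into one track-level vector and comparing two tracks by cosine similarity; the pooling choice and all names here are illustrative, not part of the operator:

```python
import numpy as np

def track_vector(clip_vecs: np.ndarray) -> np.ndarray:
    """Mean-pool a (n_clips, 512) CLMR embedding matrix into one track-level vector."""
    return clip_vecs.mean(axis=0)

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """Cosine similarity between two 1-D embedding vectors."""
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# Hypothetical usage: vecs_a and vecs_b would be the embedding matrices
# produced by the pipelines above for two different audio files.
# sim = cosine_similarity(track_vector(vecs_a), track_vector(vecs_b))
```
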
<br />
<br />