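# demo_vad_online.py
# Streaming (online) VAD demo for the ONNX FSMN VAD model from FunASR: the
# example 16 kHz wav is fed to the model in 100 ms chunks and detected speech
# segments are printed as they become available.
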
from funasr_onnx import Fsmn_vad_online
import soundfile
from pathlib import Path
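
# ModelScope model id and the example wav that ships with the model in the
# local ModelScope cache (~/.cache/modelscope/hub/...).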
model_dir = "damo/speech_fsmn_vad_zh-cn-16k-common-pytorch"
wav_path = "{}/.cache/modelscope/hub/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav".format(
    Path.home()
)
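
# Instantiate the streaming (online) variant of the FSMN VAD model.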
model = Fsmn_vad_online(model_dir)
# Online VAD: read the whole file, then feed it to the model chunk by chunk
# to emulate a live audio stream.
speech, sample_rate = soundfile.read(wav_path)
speech_length = speech.shape[0]

sample_offset = 0
step = 1600  # chunk size in samples (100 ms at 16 kHz)
param_dict = {"in_cache": []}  # model cache carried across chunks
for sample_offset in range(0, speech_length, min(step, speech_length - sample_offset)):
    # Shorten the final chunk and mark it with is_final so the model can
    # finalize any open speech segment.
    if sample_offset + step >= speech_length - 1:
        step = speech_length - sample_offset
        is_final = True
    else:
        is_final = False
    param_dict["is_final"] = is_final
    segments_result = model(
        audio_in=speech[sample_offset : sample_offset + step], param_dict=param_dict
    )
    # Print speech segments as soon as they are detected.
    if segments_result:
        print(segments_result)