From b2126521459e8be6a082f3f5c4bc46121c68cd90 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=B8=B8=E9=9B=81?=
Date: Wed, 31 Jan 2024 22:39:13 +0800
Subject: [PATCH] funasr1.0.5 audio samples input

---
 .../seaco_paraformer/demo.py       | 20 ++++++++++++++++++-
 funasr/auto/auto_model.py          |  2 +-
 funasr/models/transformer/model.py |  6 +++---
 3 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/examples/industrial_data_pretraining/seaco_paraformer/demo.py b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
index 065b698a3..e9e226d1c 100644
--- a/examples/industrial_data_pretraining/seaco_paraformer/demo.py
+++ b/examples/industrial_data_pretraining/seaco_paraformer/demo.py
@@ -15,8 +15,26 @@
                   # spk_model_revision="v2.0.2",
                   )
 
+
+# example1: wav input given by URL
 res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
             hotword='达摩院 魔搭',
             # sentence_timestamp=True,  # return sentence level information when spk_model is not given
             )
-print(res)
\ No newline at end of file
+print(res)
+
+# example2: audio samples input as a torch.Tensor
+import os
+import torchaudio
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
+input_tensor, sample_rate = torchaudio.load(wav_file)
+input_tensor = input_tensor.mean(0)  # downmix channels to mono, shape (samples,)
+res = model.generate(input=[input_tensor], batch_size_s=300, is_final=True)
+
+
+# example3: audio samples input as a numpy array
+import soundfile
+wav_file = os.path.join(model.model_path, "example/asr_example.wav")
+speech, sample_rate = soundfile.read(wav_file)
+res = model.generate(input=[speech], batch_size_s=300, is_final=True)
+
diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 3986a110c..d99fc5613 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -228,7 +228,7 @@ def inference(self, input, input_len=None, model=None, kwargs=None, key=None, **
             data_batch = data_list[beg_idx:end_idx]
             key_batch = key_list[beg_idx:end_idx]
             batch = {"data_in": data_batch, "key": key_batch}
-            if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor): # fbank
+            if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank": # fbank
                 batch["data_in"] = data_batch[0]
                 batch["data_lengths"] = input_len
 
diff --git a/funasr/models/transformer/model.py b/funasr/models/transformer/model.py
index 4ad466b4f..e813e2205 100644
--- a/funasr/models/transformer/model.py
+++ b/funasr/models/transformer/model.py
@@ -439,13 +439,13 @@ def inference(self,
             token = tokenizer.ids2tokens(token_int)
             text = tokenizer.tokens2text(token)
 
-            # text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
-            result_i = {"key": key[i], "token": token, "text": text}
+            text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
+            result_i = {"key": key[i], "token": token, "text": text_postprocessed}
             results.append(result_i)
 
             if ibest_writer is not None:
                 ibest_writer["token"][key[i]] = " ".join(token)
-                ibest_writer["text"][key[i]] = text
+                ibest_writer["text"][key[i]] = text_postprocessed
 
         return results, meta_data
 
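
Note on the auto_model.py hunk: before this patch, any single-item batch whose element was a torch.Tensor was routed down the fbank path, so raw waveform tensors (example2 above) were mistaken for precomputed features. Feature input is now opt-in through the data_type kwarg. A minimal sketch of the two call styles follows; it assumes data_type is forwarded from generate(**cfg) into inference() as the hunk reads, and the fbank shape and input_len usage are illustrative placeholders, not taken from the patch.

import torch
from funasr import AutoModel

model = AutoModel(model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")

# Raw waveform samples: a 1-D torch.Tensor is now treated as audio,
# matching example2 in demo.py (random samples here, just to show the call).
samples = torch.randn(16000)
res = model.generate(input=[samples], batch_size_s=300, is_final=True)

# Precomputed fbank features must now be requested explicitly via data_type.
# Shape (frames, feature_dim) and the input_len value are hypothetical.
fbank = torch.randn(100, 560)
res = model.generate(input=[fbank], input_len=torch.tensor([100]), data_type="fbank")

Selecting the fbank path by an explicit data_type flag rather than by isinstance() makes the dispatch unambiguous: tensors and numpy arrays can both carry raw samples, and the caller states the input kind instead of the loader guessing it.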