first commit
This commit is contained in:
192
examples/test_model_12hz_base.py
Normal file
192
examples/test_model_12hz_base.py
Normal file
@@ -0,0 +1,192 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2026 The Alibaba Qwen team.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os
|
||||
import time
|
||||
import torch
|
||||
import soundfile as sf
|
||||
|
||||
from qwen_tts import Qwen3TTSModel
|
||||
|
||||
|
||||
def ensure_dir(d: str):
    """Create directory *d* (including parents) if it does not already exist."""
    if not os.path.isdir(d):
        os.makedirs(d, exist_ok=True)
|
||||
|
||||
|
||||
def run_case(tts: Qwen3TTSModel, out_dir: str, case_name: str, call_fn):
    """Run one synthesis case, report its wall-clock time, and save the wavs.

    Args:
        tts: Loaded Qwen3TTSModel. ``call_fn`` closes over it and performs
            the actual generation; the parameter documents the dependency.
        out_dir: Directory the resulting ``.wav`` files are written to.
        case_name: Tag used in the log line and in the output file names.
        call_fn: Zero-argument callable returning ``(wavs, sr)``.
    """
    # Synchronize so the timing measures completed GPU work, not just kernel
    # launches. Guarded so the script does not crash on CPU-only machines.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    t0 = time.time()

    wavs, sr = call_fn()

    if torch.cuda.is_available():
        torch.cuda.synchronize()
    t1 = time.time()
    print(f"[{case_name}] time: {t1 - t0:.3f}s, n_wavs={len(wavs)}, sr={sr}")

    for i, w in enumerate(wavs):
        sf.write(os.path.join(out_dir, f"{case_name}_{i}.wav"), w, sr)
|
||||
|
||||
|
||||
def main():
    """Exercise Qwen3-TTS Base voice cloning across a small test matrix.

    For both ICL mode and x-vector-only mode, this covers:
      * single reference prompt   -> single synthesis target
      * single reference prompt   -> batched synthesis targets
      * batched reference prompts -> batched synthesis targets
    each both via a direct ``generate_voice_clone`` call and via a
    pre-built ``create_voice_clone_prompt``. Wavs are saved under OUT_DIR.
    """
    device = "cuda:0"
    MODEL_PATH = "Qwen/Qwen3-TTS-12Hz-1.7B-Base/"
    OUT_DIR = "qwen3_tts_test_voice_clone_output_wav"
    ensure_dir(OUT_DIR)

    tts = Qwen3TTSModel.from_pretrained(
        MODEL_PATH,
        device_map=device,
        dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
    )

    # Reference audio(s)
    ref_audio_path_1 = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-TTS-Repo/clone_2.wav"
    ref_audio_path_2 = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-TTS-Repo/clone_1.wav"

    ref_audio_single = ref_audio_path_1
    ref_audio_batch = [ref_audio_path_1, ref_audio_path_2]

    # Transcripts of the reference audios, aligned element-wise with
    # ref_audio_batch.
    ref_text_single = "Okay. Yeah. I resent you. I love you. I respect you. But you know what? You blew it! And thanks to you."
    ref_text_batch = [
        "Okay. Yeah. I resent you. I love you. I respect you. But you know what? You blew it! And thanks to you.",
        "甚至出现交易几乎停滞的情况。",
    ]

    # Synthesis targets
    syn_text_single = "Good one. Okay, fine, I'm just gonna leave this sock monkey here. Goodbye."
    syn_lang_single = "Auto"

    syn_text_batch = [
        "Good one. Okay, fine, I'm just gonna leave this sock monkey here. Goodbye.",
        "其实我真的有发现,我是一个特别善于观察别人情绪的人。",
    ]
    # Aligned element-wise with syn_text_batch: entry 0 is the English
    # sentence, entry 1 the Chinese one. (Fixed: the two labels were
    # swapped relative to the texts.)
    syn_lang_batch = ["English", "Chinese"]

    # Sampling settings shared by every case below.
    common_gen_kwargs = dict(
        max_new_tokens=2048,
        do_sample=True,
        top_k=50,
        top_p=1.0,
        temperature=0.9,
        repetition_penalty=1.05,
        subtalker_dosample=True,
        subtalker_top_k=50,
        subtalker_top_p=1.0,
        subtalker_temperature=0.9,
    )

    for xvec_only in [False, True]:
        mode_tag = "xvec_only" if xvec_only else "icl"

        # Case 1: prompt single + synth single, direct
        run_case(
            tts, OUT_DIR, f"case1_promptSingle_synSingle_direct_{mode_tag}",
            lambda: tts.generate_voice_clone(
                text=syn_text_single,
                language=syn_lang_single,
                ref_audio=ref_audio_single,
                ref_text=ref_text_single,
                x_vector_only_mode=xvec_only,
                **common_gen_kwargs,
            ),
        )

        # Case 1b: prompt single + synth single, via create_voice_clone_prompt
        def _case1b():
            prompt_items = tts.create_voice_clone_prompt(
                ref_audio=ref_audio_single,
                ref_text=ref_text_single,
                x_vector_only_mode=xvec_only,
            )
            return tts.generate_voice_clone(
                text=syn_text_single,
                language=syn_lang_single,
                voice_clone_prompt=prompt_items,
                **common_gen_kwargs,
            )

        run_case(
            tts, OUT_DIR, f"case1_promptSingle_synSingle_promptThenGen_{mode_tag}",
            _case1b,
        )

        # Case 2: prompt single + synth batch, direct
        run_case(
            tts, OUT_DIR, f"case2_promptSingle_synBatch_direct_{mode_tag}",
            lambda: tts.generate_voice_clone(
                text=syn_text_batch,
                language=syn_lang_batch,
                ref_audio=ref_audio_single,
                ref_text=ref_text_single,
                x_vector_only_mode=xvec_only,
                **common_gen_kwargs,
            ),
        )

        # Case 2b: prompt single + synth batch, via create_voice_clone_prompt
        def _case2b():
            prompt_items = tts.create_voice_clone_prompt(
                ref_audio=ref_audio_single,
                ref_text=ref_text_single,
                x_vector_only_mode=xvec_only,
            )
            return tts.generate_voice_clone(
                text=syn_text_batch,
                language=syn_lang_batch,
                voice_clone_prompt=prompt_items,
                **common_gen_kwargs,
            )

        run_case(
            tts, OUT_DIR, f"case2_promptSingle_synBatch_promptThenGen_{mode_tag}",
            _case2b,
        )

        # Case 3: prompt batch + synth batch, direct
        run_case(
            tts, OUT_DIR, f"case3_promptBatch_synBatch_direct_{mode_tag}",
            lambda: tts.generate_voice_clone(
                text=syn_text_batch,
                language=syn_lang_batch,
                ref_audio=ref_audio_batch,
                ref_text=ref_text_batch,
                x_vector_only_mode=[xvec_only, xvec_only],
                **common_gen_kwargs,
            ),
        )

        # Case 3b: prompt batch + synth batch, via create_voice_clone_prompt
        def _case3b():
            prompt_items = tts.create_voice_clone_prompt(
                ref_audio=ref_audio_batch,
                ref_text=ref_text_batch,
                x_vector_only_mode=[xvec_only, xvec_only],
            )
            return tts.generate_voice_clone(
                text=syn_text_batch,
                language=syn_lang_batch,
                voice_clone_prompt=prompt_items,
                **common_gen_kwargs,
            )

        run_case(
            tts, OUT_DIR, f"case3_promptBatch_synBatch_promptThenGen_{mode_tag}",
            _case3b,
        )
|
||||
|
||||
|
||||
# Script entry point: run the full voice-clone test matrix.
if __name__ == "__main__":
    main()
|
||||
77
examples/test_model_12hz_custom_voice.py
Normal file
77
examples/test_model_12hz_custom_voice.py
Normal file
@@ -0,0 +1,77 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2026 The Alibaba Qwen team.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import time
|
||||
import torch
|
||||
import soundfile as sf
|
||||
|
||||
from qwen_tts import Qwen3TTSModel
|
||||
|
||||
|
||||
def main():
    """Demo Qwen3-TTS custom-voice synthesis: one instructed utterance, then a batch."""
    tts = Qwen3TTSModel.from_pretrained(
        "Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice/",
        device_map="cuda:0",
        dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
    )

    # -------- Single (with instruct) --------
    torch.cuda.synchronize()
    start = time.time()

    audio, sample_rate = tts.generate_custom_voice(
        text="其实我真的有发现,我是一个特别善于观察别人情绪的人。",
        language="Chinese",
        speaker="Vivian",
        instruct="用特别愤怒的语气说",
    )

    torch.cuda.synchronize()
    elapsed = time.time() - start
    print(f"[CustomVoice Single] time: {elapsed:.3f}s")

    sf.write("qwen3_tts_test_custom_single.wav", audio[0], sample_rate)

    # -------- Batch (some empty instruct) --------
    batch_texts = ["其实我真的有发现,我是一个特别善于观察别人情绪的人。", "She said she would be here by noon."]
    batch_languages = ["Chinese", "English"]
    batch_speakers = ["Vivian", "Ryan"]
    batch_instructs = ["", "Very happy."]

    torch.cuda.synchronize()
    start = time.time()

    audio, sample_rate = tts.generate_custom_voice(
        text=batch_texts,
        language=batch_languages,
        speaker=batch_speakers,
        instruct=batch_instructs,
        max_new_tokens=2048,
    )

    torch.cuda.synchronize()
    elapsed = time.time() - start
    print(f"[CustomVoice Batch] time: {elapsed:.3f}s")

    for idx, clip in enumerate(audio):
        sf.write(f"qwen3_tts_test_custom_batch_{idx}.wav", clip, sample_rate)
|
||||
|
||||
|
||||
# Script entry point: run the custom-voice demo.
if __name__ == "__main__":
    main()
|
||||
80
examples/test_model_12hz_voice_design.py
Normal file
80
examples/test_model_12hz_voice_design.py
Normal file
@@ -0,0 +1,80 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2026 The Alibaba Qwen team.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import time
|
||||
import torch
|
||||
import soundfile as sf
|
||||
|
||||
from qwen_tts import Qwen3TTSModel
|
||||
|
||||
|
||||
def main():
    """Demo Qwen3-TTS voice design: one instructed utterance, then a batch."""
    tts = Qwen3TTSModel.from_pretrained(
        "Qwen/Qwen3-TTS-12Hz-1.7B-VoiceDesign/",
        device_map="cuda:0",
        dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
    )

    # -------- Single --------
    torch.cuda.synchronize()
    start = time.time()

    audio, sample_rate = tts.generate_voice_design(
        text="哥哥,你回来啦,人家等了你好久好久了,要抱抱!",
        language="Chinese",
        instruct="体现撒娇稚嫩的萝莉女声,音调偏高且起伏明显,营造出黏人、做作又刻意卖萌的听觉效果。",
    )

    torch.cuda.synchronize()
    elapsed = time.time() - start
    print(f"[VoiceDesign Single] time: {elapsed:.3f}s")

    sf.write("qwen3_tts_test_voice_design_single.wav", audio[0], sample_rate)

    # -------- Batch --------
    batch_texts = [
        "哥哥,你回来啦,人家等了你好久好久了,要抱抱!",
        "It's in the top drawer... wait, it's empty? No way, that's impossible! I'm sure I put it there!"
    ]
    batch_languages = ["Chinese", "English"]
    batch_instructs = [
        "体现撒娇稚嫩的萝莉女声,音调偏高且起伏明显,营造出黏人、做作又刻意卖萌的听觉效果。",
        "Speak in an incredulous tone, but with a hint of panic beginning to creep into your voice."
    ]

    torch.cuda.synchronize()
    start = time.time()

    audio, sample_rate = tts.generate_voice_design(
        text=batch_texts,
        language=batch_languages,
        instruct=batch_instructs,
        max_new_tokens=2048,
    )

    torch.cuda.synchronize()
    elapsed = time.time() - start
    print(f"[VoiceDesign Batch] time: {elapsed:.3f}s")

    for idx, clip in enumerate(audio):
        sf.write(f"qwen3_tts_test_voice_design_batch_{idx}.wav", clip, sample_rate)
|
||||
|
||||
|
||||
# Script entry point: run the voice-design demo.
if __name__ == "__main__":
    main()
|
||||
65
examples/test_tokenizer_12hz.py
Normal file
65
examples/test_tokenizer_12hz.py
Normal file
@@ -0,0 +1,65 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2026 The Alibaba Qwen team.
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import io
|
||||
import requests
|
||||
import soundfile as sf
|
||||
|
||||
from qwen_tts import Qwen3TTSTokenizer
|
||||
|
||||
# Remote demo clips exercised below in the various supported input forms.
audio_1 = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-TTS-Repo/tokenizer_demo_1.wav"
audio_2 = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-TTS-Repo/tokenizer_demo_2.wav"

# -------- Single input: wav path --------
tokenizer_12hz = Qwen3TTSTokenizer.from_pretrained(
    "Qwen/Qwen3-TTS-Tokenizer-12Hz",
    device_map="cuda:0",
)

single_enc = tokenizer_12hz.encode(audio_1)
single_wavs, single_sr = tokenizer_12hz.decode(single_enc)
sf.write("decoded_single_12hz.wav", single_wavs[0], single_sr)

# -------- Batch input: wav path list --------
batch_enc = tokenizer_12hz.encode([audio_1, audio_2])
batch_wavs, batch_sr = tokenizer_12hz.decode(batch_enc)
for idx, clip in enumerate(batch_wavs):
    sf.write(f"decoded_batch_12hz_{idx}.wav", clip, batch_sr)

# -------- Decode input as dict (12hz) --------
# The first sample's codes, wrapped in a plain dict.
dict_wavs, dict_sr = tokenizer_12hz.decode({"audio_codes": batch_enc.audio_codes[0]})
sf.write("decoded_dict_12hz.wav", dict_wavs[0], dict_sr)

# -------- Decode input as list[dict] (12hz) --------
as_dicts = [{"audio_codes": codes} for codes in batch_enc.audio_codes]
listdict_wavs, listdict_sr = tokenizer_12hz.decode(as_dicts)
for idx, clip in enumerate(listdict_wavs):
    sf.write(f"decoded_listdict_12hz_{idx}.wav", clip, listdict_sr)

# -------- Decode input as list[dict] with numpy (12hz) --------
# numpy codes simulate a "serialized" payload.
as_numpy_dicts = [{"audio_codes": codes.cpu().numpy()} for codes in batch_enc.audio_codes]
numpy_dict_wavs, numpy_dict_sr = tokenizer_12hz.decode(as_numpy_dicts)
for idx, clip in enumerate(numpy_dict_wavs):
    sf.write(f"decoded_listdict_numpy_12hz_{idx}.wav", clip, numpy_dict_sr)

# -------- Numpy input (must pass sr) --------
raw = requests.get(audio_2, timeout=30).content
samples, native_sr = sf.read(io.BytesIO(raw))
numpy_enc = tokenizer_12hz.encode(samples, sr=native_sr)
numpy_wavs, numpy_sr = tokenizer_12hz.decode(numpy_enc)
sf.write("decoded_numpy_12hz.wav", numpy_wavs[0], numpy_sr)
|
||||
Reference in New Issue
Block a user