Back to ChatTTS

Colab

examples/ipynb/colab.ipynb

0.2.55.2 KB
Original Source

Clone Repo

python
!cd /content
!rm -rf sample_data ChatTTS
!git clone https://github.com/2noise/ChatTTS.git

Install Requirements

python
# Install the project's Python dependencies.
!pip install -r /content/ChatTTS/requirements.txt
# Refresh the dynamic-linker cache so Colab's NVIDIA CUDA libraries are found.
!ldconfig /usr/lib64-nvidia

Import Packages

python
import torch

# Raise torch.compile's recompile budget — TTS input lengths vary a lot.
torch._dynamo.config.cache_size_limit = 64
# Fall back to eager execution instead of raising on dynamo compile errors.
torch._dynamo.config.suppress_errors = True
# Permit TF32 matmuls on Ampere+ GPUs (faster, slightly lower precision).
torch.set_float32_matmul_precision("high")

from ChatTTS import ChatTTS
from ChatTTS.tools.logger import get_logger
from ChatTTS.tools.normalizer import normalizer_en_nemo_text, normalizer_zh_tn
from IPython.display import Audio

Load Models

python
logger = get_logger("ChatTTS", format_root=True)
chat = ChatTTS.Chat(logger)

# Try to load the optional text normalizers. Inference works without them,
# so a missing optional package is downgraded to a warning.
try:
    chat.normalizer.register("en", normalizer_en_nemo_text())
except ValueError as e:
    # register() rejected the normalizer (e.g. already registered) — report it.
    logger.error(e)
except Exception:
    # Was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit propagate.
    logger.warning("Package nemo_text_processing not found!")
    logger.warning(
        "Run: conda install -c conda-forge pynini=2.1.5 && pip install nemo_text_processing",
    )
try:
    chat.normalizer.register("zh", normalizer_zh_tn())
except ValueError as e:
    logger.error(e)
except Exception:
    logger.warning("Package WeTextProcessing not found!")
    logger.warning(
        "Run: conda install -c conda-forge pynini=2.1.5 && pip install WeTextProcessing",
    )

Here are three ways to load the models:

1. Load models from Hugging Face (recommend)

python
# Download (or reuse cached) model weights from the Hugging Face Hub.
# use force_redownload=True if the weights have been updated.
chat.load(source="huggingface")

2. Load models from local directories 'asset' and 'config'

python
# Load models from the local 'asset' and 'config' directories.
chat.load()
# chat.load(source='local') same as above

3. Load models from a custom path

python
# Load models from a user-supplied directory.
# write the model path into custom_path
chat.load(source="custom", custom_path="YOUR CUSTOM PATH")

You can also unload the models to free memory

python
# Release the loaded model weights to reclaim memory.
chat.unload()

Inference

Batch inference

python
# Three copies of an English sample followed by three copies of a Chinese one,
# synthesized in a single batched call.
english_sample = "So we found being competitive and collaborative was a huge way of staying motivated towards our goals, so one person to call when you fall off, one person who gets you back on then one person to actually do the activity with."
chinese_sample = "我觉得像我们这些写程序的人,他,我觉得多多少少可能会对开源有一种情怀在吧我觉得开源是一个很好的形式。现在其实最先进的技术掌握在一些公司的手里的话,就他们并不会轻易的开放给所有的人用。"
texts = [english_sample] * 3 + [chinese_sample] * 3

wavs = chat.infer(texts)
python
# Play the first English result (ChatTTS outputs 24 kHz audio).
Audio(wavs[0], rate=24_000, autoplay=True)
python
# Play the first Chinese result (indices 3-5 hold the Chinese batch).
Audio(wavs[3], rate=24_000, autoplay=True)

Custom params

python
# Sampling parameters for the audio-code generation stage.
params_infer_code = ChatTTS.Chat.InferCodeParams(
    prompt="[speed_5]",  # speaking-rate control token — presumably a 0-9 scale; confirm in ChatTTS docs
    temperature=0.3,
)
# Parameters for the text-refinement stage; the prompt tokens control
# oral fillers, laughter and breaks — verify levels against ChatTTS docs.
params_refine_text = ChatTTS.Chat.RefineTextParams(
    prompt="[oral_2][laugh_0][break_6]",
)

wav = chat.infer(
    "四川美食可多了,有麻辣火锅、宫保鸡丁、麻婆豆腐、担担面、回锅肉、夫妻肺片等,每样都让人垂涎三尺。",
    params_refine_text=params_refine_text,
    params_infer_code=params_infer_code,
)
python
# Play the synthesized waveform.
Audio(wav[0], rate=24_000, autoplay=True)

Fix a random speaker

python
# Sample a random speaker embedding and reuse it so the timbre is reproducible.
rand_spk = chat.sample_random_speaker()
print(rand_spk)  # save it for later timbre recovery

params_infer_code = ChatTTS.Chat.InferCodeParams(
    spk_emb=rand_spk,  # pin the sampled voice for this inference
)

wav = chat.infer(
    "四川美食确实以辣闻名,但也有不辣的选择。比如甜水面、赖汤圆、蛋烘糕、叶儿粑等,这些小吃口味温和,甜而不腻,也很受欢迎。",
    params_infer_code=params_infer_code,
)
python
# Play the fixed-speaker result.
Audio(wav[0], rate=24_000, autoplay=True)

Zero shot (simulate speaker)

python
from ChatTTS.tools.audio import load_audio

# Derive a speaker representation from a reference recording (zero-shot cloning);
# the reference is resampled/loaded at 24000 Hz.
spk_smp = chat.sample_audio_speaker(load_audio("sample.mp3", 24000))
print(spk_smp)  # save it in order to load the speaker without sample audio next time

params_infer_code = ChatTTS.Chat.InferCodeParams(
    spk_smp=spk_smp,
    # txt_smp must be the exact transcript of sample.mp3.
    txt_smp="与sample.mp3内容完全一致的文本转写。",
)

wav = chat.infer(
    "四川美食确实以辣闻名,但也有不辣的选择。比如甜水面、赖汤圆、蛋烘糕、叶儿粑等,这些小吃口味温和,甜而不腻,也很受欢迎。",
    params_infer_code=params_infer_code,
)
python
# Play the cloned-speaker result.
Audio(wav[0], rate=24_000, autoplay=True)

Two stage control

python
# Stage 1: refine the raw text only (inserts control tokens), no synthesis yet.
text = "So we found being competitive and collaborative was a huge way of staying motivated towards our goals, so one person to call when you fall off, one person who gets you back on then one person to actually do the activity with."
refined_text = chat.infer(text, refine_text_only=True)
refined_text  # display the refined text in the notebook
python
# Stage 2: synthesize directly from the already-refined text.
wav = chat.infer(refined_text, skip_refine_text=True)
python
# Play the two-stage result.
Audio(wav[0], rate=24_000, autoplay=True)

LLM Call

python
from ChatTTS.tools.llm import ChatOpenAI

# NOTE(review): fill in a real key before running; an empty key will fail auth.
API_KEY = ""
client = ChatOpenAI(
    api_key=API_KEY, base_url="https://api.deepseek.com", model="deepseek-chat"
)
python
# The question for the LLM ("What tasty foods does Sichuan have?").
user_question = "四川有哪些好吃的美食呢?"
python
# Ask the LLM to answer the question using the "deepseek" prompt template.
text = client.call(user_question, prompt_version="deepseek")
text  # display the answer in the notebook
python
# Second pass: text normalization ("TN" template) of the answer for synthesis.
text = client.call(text, prompt_version="deepseek_TN")
text  # display the normalized text in the notebook
python
# Synthesize the normalized LLM answer.
wav = chat.infer(text)
python
# Play the LLM-driven result.
Audio(wav[0], rate=24_000, autoplay=True)