Mozilla TTS on CPU Real-Time Speech Synthesis
We use the Tacotron2 and MultiBand-Melgan models, trained on the LJSpeech dataset.
Tacotron2 is trained with Double Decoder Consistency (DDC) for only 130K steps (about 3 days) on a single GPU.
MultiBand-Melgan is trained for 1.45M steps on real spectrograms.
Note that the performance of both models can be improved with more training.
Download Models
In [ ]:
!gdown --id 1dntzjWFg7ufWaTaFy80nRz-Tu02xWZos -O data/tts_model.pth.tar
!gdown --id 18CQ6G6tBEOfvCHlPqP8EBI4xWbrr9dBc -O data/config.json
In [ ]:
!gdown --id 1X09hHAyAJOnrplCUMAdW_t341Kor4YR4 -O data/vocoder_model.pth.tar
!gdown --id "1qN7vQRIYkzvOX_DtiZtTajzoZ1eW1-Eg" -O data/config_vocoder.json
!gdown --id 11oY3Tv0kQtxK_JPgxrfesa99maVXHNxU -O data/scale_stats.npy
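Drive downloads via gdown can fail silently when a shared file's quota is exhausted, so it is worth confirming the files actually arrived before loading them. This check is a small sketch, not part of the original notebook; the paths match the -O targets above.
In [ ]:
import os

# confirm every artifact downloaded above exists and is non-empty
for path in ["data/tts_model.pth.tar", "data/config.json",
             "data/vocoder_model.pth.tar", "data/config_vocoder.json",
             "data/scale_stats.npy"]:
    assert os.path.isfile(path) and os.path.getsize(path) > 0, "missing: " + path
print("all model files in place")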
Define TTS function
In [ ]:
def tts(model, text, CONFIG, use_cuda, ap, use_gl, figures=True):
    t_1 = time.time()
    # run the Tacotron2 model; speaker_id is the global set in the model-loading cell below
    waveform, alignment, mel_spec, mel_postnet_spec, stop_tokens, inputs = synthesis(
        model, text, CONFIG, use_cuda, ap, speaker_id, style_wav=None,
        truncated=False, enable_eos_bos_chars=CONFIG.enable_eos_bos_chars)
    # mel_postnet_spec = ap._denormalize(mel_postnet_spec.T)
    if not use_gl:
        # run the neural vocoder on the predicted mel spectrogram instead of Griffin-Lim
        waveform = vocoder_model.inference(torch.FloatTensor(mel_postnet_spec.T).unsqueeze(0))
        waveform = waveform.flatten()
        if use_cuda:
            waveform = waveform.cpu()
        waveform = waveform.numpy()
    rtf = (time.time() - t_1) / (len(waveform) / ap.sample_rate)
    tps = (time.time() - t_1) / len(waveform)
    print(waveform.shape)
    print(" > Run-time: {}".format(time.time() - t_1))
    print(" > Real-time factor: {}".format(rtf))
    print(" > Time per step: {}".format(tps))
    IPython.display.display(IPython.display.Audio(waveform, rate=CONFIG.audio['sample_rate']))
    return alignment, mel_postnet_spec, stop_tokens, waveform
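The real-time factor (RTF) printed here is the wall-clock synthesis time divided by the duration of the generated audio, so an RTF below 1.0 means speech is synthesized faster than real time; time per step is the same wall-clock time divided by the number of output samples.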
Load Models
In [ ]:
import os
import time

import torch
import IPython

from TTS.tts.utils.generic_utils import setup_model
from TTS.utils.io import load_config
from TTS.tts.utils.text.symbols import symbols, phonemes
from TTS.utils.audio import AudioProcessor
from TTS.tts.utils.synthesis import synthesis
In [ ]:
# runtime settings
use_cuda = False
In [ ]:
# model paths
TTS_MODEL = "data/tts_model.pth.tar"
TTS_CONFIG = "data/config.json"
VOCODER_MODEL = "data/vocoder_model.pth.tar"
VOCODER_CONFIG = "data/config_vocoder.json"
In [ ]:
# load configs
TTS_CONFIG = load_config(TTS_CONFIG)
VOCODER_CONFIG = load_config(VOCODER_CONFIG)
In [ ]:
# load the audio processor
TTS_CONFIG.audio['stats_path'] = 'data/scale_stats.npy'
ap = AudioProcessor(**TTS_CONFIG.audio)
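scale_stats.npy holds the spectrogram normalization statistics computed over the training data. Assuming it was saved as a pickled dict of mean/std arrays (as the repo's statistics script does), you can peek at it like this; this cell is a sketch, not part of the original notebook.
In [ ]:
import numpy as np

# inspect the normalization statistics used by the AudioProcessor;
# assumes the file stores a pickled dict of arrays
stats = np.load('data/scale_stats.npy', allow_pickle=True).item()
print(list(stats.keys()))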
In [ ]:
# LOAD TTS MODEL
# multi speaker
speaker_id = None
speakers = []

# build the model
num_chars = len(phonemes) if TTS_CONFIG.use_phonemes else len(symbols)
model = setup_model(num_chars, len(speakers), TTS_CONFIG)

# load the checkpoint
cp = torch.load(TTS_MODEL, map_location=torch.device('cpu'))

# load the model weights
model.load_state_dict(cp['model'])
if use_cuda:
    model.cuda()
model.eval()

# set the model stepsize (decoder reduction factor) stored in the checkpoint
if 'r' in cp:
    model.decoder.set_r(cp['r'])
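As a quick sanity check that the checkpoint loaded (a minimal sketch, not in the original notebook), you can count the model's parameters:
In [ ]:
# count the parameters of the loaded Tacotron2 model
num_params = sum(p.numel() for p in model.parameters())
print(" > Model has {:.2f}M parameters".format(num_params / 1e6))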
In [ ]:
from TTS.vocoder.utils.generic_utils import setup_generator

# LOAD VOCODER MODEL
vocoder_model = setup_generator(VOCODER_CONFIG)
vocoder_model.load_state_dict(torch.load(VOCODER_MODEL, map_location="cpu")["model"])
vocoder_model.remove_weight_norm()
vocoder_model.inference_padding = 0

ap_vocoder = AudioProcessor(**VOCODER_CONFIG['audio'])
if use_cuda:
    vocoder_model.cuda()
vocoder_model.eval()
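Calling remove_weight_norm folds the weight-normalization reparameterization used during training back into the plain convolution weights; the output is unchanged, but the extra computation at every forward pass is removed, which matters for CPU inference speed.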
Run Inference
In [ ]:
sentence = "Bill got in the habit of asking himself “Is that thought true?” and if he wasn’t absolutely certain it was, he just let it go."
align, spec, stop_tokens, wav = tts(model, sentence, TTS_CONFIG, use_cuda, ap, use_gl=False, figures=True)
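To keep the result beyond the notebook session, the returned waveform can be written to disk with the audio processor's save_wav, which uses the sample rate the processor was configured with. This is a usage sketch, not part of the original notebook; the output path is arbitrary.
In [ ]:
# write the synthesized waveform to a wav file at the processor's sample rate
ap.save_wav(wav, "data/tts_example.wav")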