This notebook generates mel-spectrograms from a trained TTS model, to be used as training inputs for WaveRNN.
In [ ]:
TTS_PATH = "/home/erogol/projects/"
In [ ]:
import os
import sys
sys.path.append(TTS_PATH)
import torch
import importlib
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from TTS.models.tacotron2 import Tacotron2
from TTS.datasets.TTSDataset import MyDataset
from TTS.utils.audio import AudioProcessor
from TTS.utils.visual import plot_spectrogram
from TTS.utils.generic_utils import load_config
from TTS.datasets.preprocess import ljspeech
%matplotlib inline

os.environ['CUDA_VISIBLE_DEVICES'] = '0'
In [ ]:
def set_filename(wav_path, out_path):
    """Build the output paths for the quantized wav, the mel, and the GL wav."""
    wav_file = os.path.basename(wav_path)
    file_name = wav_file.split('.')[0]
    os.makedirs(os.path.join(out_path, "quant"), exist_ok=True)
    os.makedirs(os.path.join(out_path, "mel"), exist_ok=True)
    os.makedirs(os.path.join(out_path, "wav_gl"), exist_ok=True)
    wavq_path = os.path.join(out_path, "quant", file_name)
    mel_path = os.path.join(out_path, "mel", file_name)
    wav_path = os.path.join(out_path, "wav_gl", file_name)
    return file_name, wavq_path, mel_path, wav_path
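As a quick illustration of what the helper produces, here is a hypothetical call (the wav filename and output directory below are just examples; note that `np.save` later appends the `.npy` extension to these bare paths, and that the call itself creates the `quant`, `mel`, and `wav_gl` directories):

In [ ]:

# illustrative only: map one LJSpeech clip to its output locations
name, wavq_p, mel_p, wav_p = set_filename(
    "/home/erogol/Data/LJSpeech-1.1/wavs/LJ001-0001.wav", "/tmp/wavernn_out")
print(name)    # LJ001-0001
print(wavq_p)  # /tmp/wavernn_out/quant/LJ001-0001
print(mel_p)   # /tmp/wavernn_out/mel/LJ001-0001
print(wav_p)   # /tmp/wavernn_out/wav_gl/LJ001-0001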
In [ ]:
OUT_PATH = "/home/erogol/Data/LJSpeech-1.1/wavernn_4152/"
DATA_PATH = "/home/erogol/Data/LJSpeech-1.1/"
METADATA_FILE = "metadata_train.csv"
CONFIG_PATH = "/media/erogol/data_ssd/Data/models/ljspeech_models/4258_nancy/config.json"
MODEL_FILE = "/home/erogol/checkpoint_92000.pth.tar"
DRY_RUN = True  # if True, do not write output files; only compute loss and visuals.
BATCH_SIZE = 16

use_cuda = torch.cuda.is_available()

C = load_config(CONFIG_PATH)
ap = AudioProcessor(bits=9, **C.audio)
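The `bits=9` argument configures the waveform quantization used for the WaveRNN targets: samples in [-1, 1] are mapped to 2**9 = 512 discrete levels. For intuition only, here is a minimal sketch of plain linear quantization; `ap.quantize` is the actual implementation used below and may differ in detail (e.g. mu-law companding):

In [ ]:

import numpy as np

def linear_quantize(wav, bits=9):
    """Map float samples in [-1, 1] to integer levels in [0, 2**bits - 1]."""
    levels = 2 ** bits
    wav = np.clip(wav, -1.0, 1.0)
    return ((wav + 1.0) * (levels - 1) / 2).astype(np.int64)

x = np.array([-1.0, 0.0, 1.0])
print(linear_quantize(x))  # [  0 255 511]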
In [ ]:
dataset = MyDataset(DATA_PATH,
                    METADATA_FILE,
                    C.r,
                    C.text_cleaner,
                    ap,
                    ljspeech,
                    use_phonemes=C.use_phonemes,
                    phoneme_cache_path=C.phoneme_cache_path)
loader = DataLoader(dataset,
                    batch_size=BATCH_SIZE,
                    num_workers=4,
                    collate_fn=dataset.collate_fn,
                    shuffle=False,
                    drop_last=False)
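Before running the full extraction, it can help to peek at one batch and confirm the tensor shapes the collate function produces. The field order here is an assumption taken from the extraction loop further below:

In [ ]:

# peek at a single batch; field order matches the extraction loop below
data = next(iter(loader))
text_input, text_lengths, linear_input, mel_input, mel_lengths = data[:5]
print("text:", text_input.shape)   # expected (batch, max_text_len)
print("mel:", mel_input.shape)     # expected (batch, max_mel_len, num_mels)
print("mel lengths:", mel_lengths[:4])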
In [ ]:
from TTS.utils.text.symbols import symbols, phonemes
from TTS.utils.generic_utils import sequence_mask
from TTS.layers.losses import L1LossMasked

# load the model
MyModel = importlib.import_module('TTS.models.' + C.model.lower())
MyModel = getattr(MyModel, C.model)
num_chars = len(phonemes) if C.use_phonemes else len(symbols)
model = MyModel(num_chars, C.r, attn_win=False)

# restore the checkpoint
checkpoint = torch.load(MODEL_FILE)
model.load_state_dict(checkpoint['model'])
print(checkpoint['step'])
model.eval()
if use_cuda:
    model = model.cuda()
Generate model outputs
In [ ]:
import pickle

file_idxs = []
losses = []
postnet_losses = []
criterion = L1LossMasked()

for data in tqdm(loader):
    # setup input data
    text_input = data[0]
    text_lengths = data[1]
    linear_input = data[2]
    mel_input = data[3]
    mel_lengths = data[4]
    stop_targets = data[5]
    item_idx = data[6]

    # dispatch data to GPU
    if use_cuda:
        text_input = text_input.cuda()
        text_lengths = text_lengths.cuda()
        mel_input = mel_input.cuda()
        mel_lengths = mel_lengths.cuda()
        # linear_input = linear_input.cuda()
        stop_targets = stop_targets.cuda()

    mask = sequence_mask(text_lengths)
    mel_outputs, mel_postnet_outputs, alignments, stop_tokens = model.forward(
        text_input, text_lengths, mel_input, mask)

    loss = criterion(mel_outputs, mel_input, mel_lengths)
    loss_postnet = criterion(mel_postnet_outputs, mel_input, mel_lengths)
    losses.append(loss.item())
    postnet_losses.append(loss_postnet.item())

    if not DRY_RUN:
        for idx in range(text_input.shape[0]):
            wav_file_path = item_idx[idx]
            wav = ap.load_wav(wav_file_path)
            file_name, wavq_path, mel_path, wav_path = set_filename(wav_file_path, OUT_PATH)
            file_idxs.append(file_name)

            # quantize and save wav
            wavq = ap.quantize(wav)
            np.save(wavq_path, wavq)

            # save TTS mel
            mel = mel_postnet_outputs[idx]
            mel = mel.data.cpu().numpy()
            mel_length = mel_lengths[idx]
            mel = mel[:mel_length, :].T
            np.save(mel_path, mel)

            # save GL voice
            # wav_gen = ap.inv_mel_spectrogram(mel.T)  # mel to wav
            # wav_gen = ap.quantize(wav_gen)
            # np.save(wav_path, wav_gen)

if not DRY_RUN:
    pickle.dump(file_idxs, open(os.path.join(OUT_PATH, "dataset_ids.pkl"), "wb"))

print(np.mean(losses))
print(np.mean(postnet_losses))
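After a non-dry run, a quick consistency check can confirm that each saved mel lines up with its quantized waveform: frames times hop length should roughly equal the sample count. This is a sketch, assuming the audio config exposes the hop size under a `hop_length` key:

In [ ]:

# sanity-check one saved pair (only meaningful when DRY_RUN is False)
if not DRY_RUN and file_idxs:
    name = file_idxs[0]
    mel = np.load(os.path.join(OUT_PATH, "mel", name + ".npy"))
    wavq = np.load(os.path.join(OUT_PATH, "quant", name + ".npy"))
    hop = C.audio["hop_length"]  # assumption: hop size is stored under this key
    print(mel.shape, wavq.shape)
    print("frames * hop =", mel.shape[1] * hop, "vs. samples =", len(wavq))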
Check model performance
In [ ]:
idx = 1
mel_example = mel_postnet_outputs[idx].data.cpu().numpy()
plot_spectrogram(mel_example[:mel_lengths[idx], :], ap);
print(mel_example[:mel_lengths[idx], :].shape)
In [ ]:
wav = ap.load_wav(item_idx[idx])
melt = ap.melspectrogram(wav)
print(melt.shape)
plot_spectrogram(melt.T, ap);
In [ ]:
from matplotlib import pylab as plt

# visualize the difference between decoder and postnet outputs
mel_diff = mel_outputs[idx] - mel_postnet_outputs[idx]
plt.figure(figsize=(16, 10))
plt.imshow(abs(mel_diff.detach().cpu().numpy()[:mel_lengths[idx], :]).T, aspect="auto", origin="lower");
plt.colorbar()
plt.tight_layout()
In [ ]:
from matplotlib import pylab as plt

# visualize the difference between the ground-truth mel and the model output
# mel = mel_outputs[idx].detach().cpu().numpy()
mel = mel_postnet_outputs[idx].detach().cpu().numpy()
mel_diff2 = melt.T - mel[:melt.shape[1]]
plt.figure(figsize=(16, 10))
plt.imshow(abs(mel_diff2).T, aspect="auto", origin="lower");
plt.colorbar()
plt.tight_layout()
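To complement the heat map, a one-line numeric summary of the mismatch, reusing `mel_diff2` from the cell above:

In [ ]:

# mean absolute error between ground-truth and predicted mel frames
print("MAE (GT vs. postnet):", np.mean(np.abs(mel_diff2)))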