coqui-tts/notebooks/ExtractTTSpectrogram.ipynb


This notebook generates mel-spectrograms from a trained TTS model, to be used for WaveRNN training.

In [ ]:
TTS_PATH = "/home/erogol/projects/"
In [ ]:
%load_ext autoreload
%autoreload 2
import os
import sys
sys.path.append(TTS_PATH)
import torch
import importlib
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from TTS.models.tacotron2 import Tacotron2
from TTS.datasets.TTSDataset import MyDataset
from TTS.utils.audio import AudioProcessor
from TTS.utils.visual import plot_spectrogram
from TTS.utils.generic_utils import load_config, setup_model
from TTS.datasets.preprocess import ljspeech
%matplotlib inline

os.environ['CUDA_VISIBLE_DEVICES'] = '1'
In [ ]:
def set_filename(wav_path, out_path):
    wav_file = os.path.basename(wav_path)
    file_name = wav_file.split('.')[0]
    os.makedirs(os.path.join(out_path, "quant"), exist_ok=True)
    os.makedirs(os.path.join(out_path, "mel"), exist_ok=True)
    os.makedirs(os.path.join(out_path, "wav_gl"), exist_ok=True)
    wavq_path = os.path.join(out_path, "quant", file_name)
    mel_path = os.path.join(out_path, "mel", file_name)
    wav_path = os.path.join(out_path, "wav_gl", file_name)
    return file_name, wavq_path, mel_path, wav_path
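
As a quick sanity check, set_filename can be exercised on a made-up path (both the wav path and the output directory below are hypothetical):

In [ ]:
# hypothetical example: exercise set_filename on a made-up wav path
file_name, wavq_path, mel_path, wav_path = set_filename("/tmp/wavs/sample-0001.wav", "/tmp/out")
print(file_name)   # sample-0001
print(wavq_path)   # /tmp/out/quant/sample-0001
print(mel_path)    # /tmp/out/mel/sample-0001
print(wav_path)    # /tmp/out/wav_gl/sample-0001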
In [ ]:
OUT_PATH = "/home/erogol/Data/Mozilla/wavernn/4841/"
DATA_PATH = "/home/erogol/Data/Mozilla/"
DATASET = "mozilla"
METADATA_FILE = "metadata.txt"
CONFIG_PATH = "/media/erogol/data_ssd/Data/models/mozilla_models/4841/config.json"
MODEL_FILE = "/media/erogol/data_ssd/Data/models/mozilla_models/4841/best_model.pth.tar"
DRY_RUN = False   # if True, do not generate output files; only compute losses and visuals.
BATCH_SIZE = 32

use_cuda = torch.cuda.is_available()
print(" > CUDA enabled: ", use_cuda)

C = load_config(CONFIG_PATH)
ap = AudioProcessor(bits=9, **C.audio)
C.prenet_dropout = False
C.separate_stopnet = True
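
WaveRNN must be trained with the same audio settings used here, so it helps to print the key values from the loaded config (an optional check; the key names assume the usual TTS config.json layout):

In [ ]:
# optional: print the audio settings the WaveRNN side must match
# (key names assume the usual TTS config.json layout)
print(" > sample_rate:", C.audio['sample_rate'])
print(" > num_mels:", C.audio['num_mels'])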
In [ ]:
preprocessor = importlib.import_module('TTS.datasets.preprocess')
preprocessor = getattr(preprocessor, DATASET.lower())

dataset = MyDataset(DATA_PATH, METADATA_FILE, C.r, C.text_cleaner, ap, preprocessor,
                    use_phonemes=C.use_phonemes, phoneme_cache_path=C.phoneme_cache_path)
loader = DataLoader(dataset, batch_size=BATCH_SIZE, num_workers=4, collate_fn=dataset.collate_fn, shuffle=False, drop_last=False)
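
Before running the full extraction, it can help to pull a single batch and verify the tensor shapes; the layout below matches the indexing used in the extraction loop:

In [ ]:
# optional: fetch one batch and check shapes (same layout as the loop below)
data = next(iter(loader))
print(" > text_input:", data[0].shape)   # [B, T_text]
print(" > mel_input:", data[3].shape)    # [B, T_mel, num_mels]
print(" > mel_lengths:", data[4][:4])
print(" > first item:", data[6][0])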
In [ ]:
from TTS.utils.text.symbols import symbols, phonemes
from TTS.layers.losses import L1LossMasked

# load the model
num_chars = len(phonemes) if C.use_phonemes else len(symbols)
model = setup_model(num_chars, C)
checkpoint = torch.load(MODEL_FILE)
model.load_state_dict(checkpoint['model'])
print(checkpoint['step'])
model.eval()
if use_cuda:
    model = model.cuda()
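
A quick way to confirm the checkpoint loaded correctly is the restored step printed above plus the parameter count:

In [ ]:
# optional: report the model size as a sanity check
num_params = sum(p.numel() for p in model.parameters())
print(" > Model has {:,} parameters".format(num_params))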

Generate model outputs

In [ ]:
import pickle

file_idxs = []
losses = []
postnet_losses = []
criterion = L1LossMasked()
for data in tqdm(loader):
    # setup input data
    text_input = data[0]
    text_lengths = data[1]
    linear_input = data[2]
    mel_input = data[3]
    mel_lengths = data[4]
    stop_targets = data[5]
    item_idx = data[6]
    
    # dispatch data to GPU
    if use_cuda:
        text_input = text_input.cuda()
        text_lengths = text_lengths.cuda()
        mel_input = mel_input.cuda()
        mel_lengths = mel_lengths.cuda()
#         linear_input = linear_input.cuda()
        stop_targets = stop_targets.cuda()
    
    mel_outputs, postnet_outputs, alignments, stop_tokens = model.forward(text_input, text_lengths, mel_input)

    # if the model is Tacotron, convert its linear postnet output to mel specs
    if C.model == "Tacotron":
        mel_specs = []
        postnet_outputs = postnet_outputs.data.cpu().numpy()
        for b in range(postnet_outputs.shape[0]):
            postnet_output = postnet_outputs[b]
            mel_specs.append(torch.FloatTensor(ap.out_linear_to_mel(postnet_output.T).T).to(mel_input.device))
        postnet_outputs = torch.stack(mel_specs)
    
    loss = criterion(mel_outputs, mel_input, mel_lengths)
    loss_postnet = criterion(postnet_outputs, mel_input, mel_lengths)
    losses.append(loss.item())
    postnet_losses.append(loss_postnet.item())
    if not DRY_RUN:
        for idx in range(text_input.shape[0]):
            wav_file_path = item_idx[idx]
            wav = ap.load_wav(wav_file_path)
            file_name, wavq_path, mel_path, wav_path = set_filename(wav_file_path, OUT_PATH)
            file_idxs.append(file_name)

#             # quantize and save wav
#             wavq = ap.quantize(wav)
#             np.save(wavq_path, wavq)

            # save TTS mel
            mel = postnet_outputs[idx]
            mel = mel.data.cpu().numpy()
            mel_length = mel_lengths[idx]
            mel = mel[:mel_length, :].T
            np.save(mel_path, mel)

            # save GL voice (optional)
            # wav_gen = ap.inv_mel_spectrogram(mel.T)  # mel to wav
            # wav_gen = ap.quantize(wav_gen)
            # np.save(wav_path, wav_gen)

if not DRY_RUN:
    with open(os.path.join(OUT_PATH, "dataset_ids.pkl"), "wb") as f:
        pickle.dump(file_idxs, f)

print(np.mean(losses))
print(np.mean(postnet_losses))
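
If the averages look suspicious, the per-batch spread can point to outlier batches (optional):

In [ ]:
# optional: per-batch loss spread, useful for spotting outlier batches
print(" > decoder loss: min {:.4f} / max {:.4f}".format(np.min(losses), np.max(losses)))
print(" > postnet loss: min {:.4f} / max {:.4f}".format(np.min(postnet_losses), np.max(postnet_losses)))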

Check model performance

In [ ]:
idx = 1
mel_example = postnet_outputs[idx].data.cpu().numpy()
plot_spectrogram(mel_example[:mel_lengths[idx], :], ap);
print(mel_example[:mel_lengths[idx], :].shape)
In [ ]:
mel_example = mel_outputs[idx].data.cpu().numpy()
plot_spectrogram(mel_example[:mel_lengths[idx], :], ap);
print(mel_example[:mel_lengths[idx], :].shape)
In [ ]:
wav = ap.load_wav(item_idx[idx])
melt = ap.melspectrogram(wav)
print(melt.shape)
plot_spectrogram(melt.T, ap);
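
The ground-truth frame count should roughly match the model's mel length for the same item (optional check):

In [ ]:
# optional: compare ground-truth frame count with the model's mel length
print(" > ground-truth frames:", melt.shape[1])
print(" > model mel length:", mel_lengths[idx].item())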
In [ ]:
# difference between decoder and postnet outputs
import matplotlib.pyplot as plt
mel_diff = mel_outputs[idx] - postnet_outputs[idx]
plt.figure(figsize=(16, 10))
plt.imshow(abs(mel_diff.detach().cpu().numpy()[:mel_lengths[idx],:]).T,aspect="auto", origin="lower");
plt.colorbar()
plt.tight_layout()
In [ ]:
import matplotlib.pyplot as plt
# mel = mel_outputs[idx].detach().cpu().numpy()
mel = postnet_outputs[idx].detach().cpu().numpy()
mel_diff2 = melt.T - mel[:melt.shape[1]]
plt.figure(figsize=(16, 10))
plt.imshow(abs(mel_diff2).T,aspect="auto", origin="lower");
plt.colorbar()
plt.tight_layout()
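
The same ground-truth vs. generated difference can be summarized numerically, which is easier to track across checkpoints than the heatmap:

In [ ]:
# optional: numeric summary of the ground-truth vs. generated mel difference
print(" > mean abs diff: {:.4f}".format(np.mean(np.abs(mel_diff2))))
print(" > max abs diff: {:.4f}".format(np.max(np.abs(mel_diff2))))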