coqui-tts/notebooks/TacotronPlayGround.ipynb

In [1]:
%load_ext autoreload
%autoreload 2
import os
import sys
import io
import torch 
import time
import numpy as np
from collections import OrderedDict
from matplotlib import pylab as plt

%pylab inline
rcParams["figure.figsize"] = (16,5)
sys.path.append('/home/erogol/projects/')

import librosa
import librosa.display

from TTS.models.tacotron import Tacotron 
from TTS.layers import *
from TTS.utils.data import *
from TTS.utils.audio import AudioProcessor
from TTS.utils.generic_utils import load_config
from TTS.utils.text import text_to_sequence

import IPython
from IPython.display import Audio
from utils import *
Populating the interactive namespace from numpy and matplotlib
/home/erogol/miniconda3/envs/pytorch4/lib/python3.6/site-packages/IPython/core/magics/pylab.py:160: UserWarning: pylab import has clobbered these variables: ['plt']
`%matplotlib` prevents importing * from pylab and numpy
  "\n`%matplotlib` prevents importing * from pylab and numpy"
In [2]:
ls /data/shared/erogol_models/May-18-2018_02:26AM-los_sen_attn-debug 
checkpoint_33464.pth.tar  checkpoint_36096.pth.tar
checkpoint_33840.pth.tar  checkpoint_36472.pth.tar
checkpoint_34216.pth.tar  checkpoint_36848.pth.tar
checkpoint_34592.pth.tar  checkpoint_37224.pth.tar
checkpoint_34968.pth.tar  checkpoints/
checkpoint_35344.pth.tar  config.json
checkpoint_35720.pth.tar  events.out.tfevents.1526635608.mlc1
In [3]:
def tts(model, text, CONFIG, use_cuda, ap, figures=True):
    t_1 = time.time()
    waveform, alignment, spectrogram, stop_tokens = create_speech(model, text, CONFIG, use_cuda, ap) 
    print(" >  Run-time: {}".format(time.time() - t_1))
    if figures:                                                                                                         
        visualize(alignment, spectrogram, stop_tokens, CONFIG)                                                                       
    IPython.display.display(Audio(waveform, rate=CONFIG.sample_rate))  
    return alignment, spectrogram, stop_tokens
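The create_speech and visualize helpers are imported from the utils module next to this notebook. For orientation, the cell below is a rough, hypothetical sketch of the synthesis path that create_speech wraps: character IDs in, Griffin-Lim waveform out. The exact signatures of model.forward, text_to_sequence, and ap.inv_spectrogram may differ in your checkout, so treat it as a reference, not a drop-in replacement.
In [ ]:
# Hypothetical sketch of the synthesis path wrapped by create_speech().
# Assumes CONFIG.text_cleaner names a valid cleaner and that model.forward
# returns (mel, linear, alignments, stop_tokens); adjust to your version.
def create_speech_sketch(model, text, CONFIG, use_cuda, ap):
    seq = np.asarray(text_to_sequence(text, [CONFIG.text_cleaner]), dtype=np.int64)
    chars = torch.from_numpy(seq).unsqueeze(0)            # add batch dimension
    if use_cuda:
        chars = chars.cuda()
    with torch.no_grad():                                 # inference only
        mel_spec, linear_spec, alignments, stop_tokens = model.forward(chars)
    linear_spec = linear_spec[0].cpu().numpy()
    waveform = ap.inv_spectrogram(linear_spec.T)          # Griffin-Lim inversion
    return waveform, alignments, linear_spec, stop_tokens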
In [5]:
# Set constants
ROOT_PATH = '/data/shared/erogol_models/May-18-2018_02:26AM-los_sen_attn-debug/'
MODEL_PATH = os.path.join(ROOT_PATH, 'checkpoint_37224.pth.tar')
CONFIG_PATH = os.path.join(ROOT_PATH, 'config.json')
OUT_FOLDER = os.path.join(ROOT_PATH, 'test/')
CONFIG = load_config(CONFIG_PATH)
use_cuda = True
In [6]:
# instantiate the model
model = Tacotron(CONFIG.embedding_size, CONFIG.num_freq, CONFIG.num_mels, CONFIG.r)

# load the audio processor
ap = AudioProcessor(CONFIG.sample_rate, CONFIG.num_mels, CONFIG.min_level_db,
                    CONFIG.frame_shift_ms, CONFIG.frame_length_ms, CONFIG.preemphasis,
                    CONFIG.ref_level_db, CONFIG.num_freq, CONFIG.power, griffin_lim_iters=30)

# load the checkpoint
if use_cuda:
    cp = torch.load(MODEL_PATH)
else:
    cp = torch.load(MODEL_PATH, map_location=lambda storage, loc: storage)

# restore the model weights and switch to inference mode
model.load_state_dict(cp['model'])
if use_cuda:
    model.cuda()
model.eval()
 | > Number of characters : 149
Out[6]:
Tacotron(
  (embedding): Embedding(149, 256)
  (encoder): Encoder(
    (prenet): Prenet(
      (layers): ModuleList(
        (0): Linear(in_features=256, out_features=256, bias=True)
        (1): Linear(in_features=256, out_features=128, bias=True)
      )
      (relu): ReLU()
      (dropout): Dropout(p=0.5)
    )
    (cbhg): CBHG(
      (relu): ReLU()
      (conv1d_banks): ModuleList(
        (0): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(1,), stride=(1,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (1): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(2,), stride=(1,), padding=(1,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (2): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (3): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(4,), stride=(1,), padding=(2,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (4): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (5): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(6,), stride=(1,), padding=(3,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (6): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (7): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(8,), stride=(1,), padding=(4,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (8): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(9,), stride=(1,), padding=(4,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (9): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(10,), stride=(1,), padding=(5,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (10): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(11,), stride=(1,), padding=(5,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (11): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(12,), stride=(1,), padding=(6,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (12): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(13,), stride=(1,), padding=(6,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (13): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(14,), stride=(1,), padding=(7,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (14): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(15,), stride=(1,), padding=(7,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (15): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(16,), stride=(1,), padding=(8,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
      )
      (max_pool1d): MaxPool1d(kernel_size=2, stride=1, padding=1, dilation=1, ceil_mode=False)
      (conv1d_projections): ModuleList(
        (0): BatchNormConv1d(
          (conv1d): Conv1d(2048, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
          (activation): ReLU()
        )
        (1): BatchNormConv1d(
          (conv1d): Conv1d(128, 128, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
          (bn): BatchNorm1d(128, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
        )
      )
      (pre_highway): Linear(in_features=128, out_features=128, bias=False)
      (highways): ModuleList(
        (0): Highway(
          (H): Linear(in_features=128, out_features=128, bias=True)
          (T): Linear(in_features=128, out_features=128, bias=True)
          (relu): ReLU()
          (sigmoid): Sigmoid()
        )
        (1): Highway(
          (H): Linear(in_features=128, out_features=128, bias=True)
          (T): Linear(in_features=128, out_features=128, bias=True)
          (relu): ReLU()
          (sigmoid): Sigmoid()
        )
        (2): Highway(
          (H): Linear(in_features=128, out_features=128, bias=True)
          (T): Linear(in_features=128, out_features=128, bias=True)
          (relu): ReLU()
          (sigmoid): Sigmoid()
        )
        (3): Highway(
          (H): Linear(in_features=128, out_features=128, bias=True)
          (T): Linear(in_features=128, out_features=128, bias=True)
          (relu): ReLU()
          (sigmoid): Sigmoid()
        )
      )
      (gru): GRU(128, 128, batch_first=True, bidirectional=True)
    )
  )
  (decoder): Decoder(
    (prenet): Prenet(
      (layers): ModuleList(
        (0): Linear(in_features=400, out_features=256, bias=True)
        (1): Linear(in_features=256, out_features=128, bias=True)
      )
      (relu): ReLU()
      (dropout): Dropout(p=0.5)
    )
    (attention_rnn): AttentionRNN(
      (rnn_cell): GRUCell(384, 256)
      (alignment_model): LocationSensitiveAttention(
        (loc_conv): Conv1d(2, 32, kernel_size=(31,), stride=(1,), padding=(15,), bias=False)
        (loc_linear): Linear(in_features=32, out_features=256, bias=True)
        (query_layer): Linear(in_features=256, out_features=256, bias=True)
        (annot_layer): Linear(in_features=256, out_features=256, bias=True)
        (v): Linear(in_features=256, out_features=1, bias=False)
      )
    )
    (project_to_decoder_in): Linear(in_features=512, out_features=256, bias=True)
    (decoder_rnns): ModuleList(
      (0): GRUCell(256, 256)
      (1): GRUCell(256, 256)
    )
    (proj_to_mel): Linear(in_features=256, out_features=400, bias=True)
    (stopnet): StopNet(
      (rnn): GRUCell(400, 400)
      (relu): ReLU()
      (linear): Linear(in_features=400, out_features=1, bias=True)
      (sigmoid): Sigmoid()
    )
  )
  (postnet): CBHG(
    (relu): ReLU()
    (conv1d_banks): ModuleList(
      (0): BatchNormConv1d(
        (conv1d): Conv1d(80, 80, kernel_size=(1,), stride=(1,), bias=False)
        (bn): BatchNorm1d(80, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
        (activation): ReLU()
      )
      (1): BatchNormConv1d(
        (conv1d): Conv1d(80, 80, kernel_size=(2,), stride=(1,), padding=(1,), bias=False)
        (bn): BatchNorm1d(80, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
        (activation): ReLU()
      )
      (2): BatchNormConv1d(
        (conv1d): Conv1d(80, 80, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
        (bn): BatchNorm1d(80, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
        (activation): ReLU()
      )
      (3): BatchNormConv1d(
        (conv1d): Conv1d(80, 80, kernel_size=(4,), stride=(1,), padding=(2,), bias=False)
        (bn): BatchNorm1d(80, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
        (activation): ReLU()
      )
      (4): BatchNormConv1d(
        (conv1d): Conv1d(80, 80, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
        (bn): BatchNorm1d(80, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
        (activation): ReLU()
      )
      (5): BatchNormConv1d(
        (conv1d): Conv1d(80, 80, kernel_size=(6,), stride=(1,), padding=(3,), bias=False)
        (bn): BatchNorm1d(80, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
        (activation): ReLU()
      )
      (6): BatchNormConv1d(
        (conv1d): Conv1d(80, 80, kernel_size=(7,), stride=(1,), padding=(3,), bias=False)
        (bn): BatchNorm1d(80, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
        (activation): ReLU()
      )
      (7): BatchNormConv1d(
        (conv1d): Conv1d(80, 80, kernel_size=(8,), stride=(1,), padding=(4,), bias=False)
        (bn): BatchNorm1d(80, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
        (activation): ReLU()
      )
    )
    (max_pool1d): MaxPool1d(kernel_size=2, stride=1, padding=1, dilation=1, ceil_mode=False)
    (conv1d_projections): ModuleList(
      (0): BatchNormConv1d(
        (conv1d): Conv1d(640, 256, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
        (bn): BatchNorm1d(256, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
        (activation): ReLU()
      )
      (1): BatchNormConv1d(
        (conv1d): Conv1d(256, 80, kernel_size=(3,), stride=(1,), padding=(1,), bias=False)
        (bn): BatchNorm1d(80, eps=0.001, momentum=0.99, affine=True, track_running_stats=True)
      )
    )
    (pre_highway): Linear(in_features=80, out_features=80, bias=False)
    (highways): ModuleList(
      (0): Highway(
        (H): Linear(in_features=80, out_features=80, bias=True)
        (T): Linear(in_features=80, out_features=80, bias=True)
        (relu): ReLU()
        (sigmoid): Sigmoid()
      )
      (1): Highway(
        (H): Linear(in_features=80, out_features=80, bias=True)
        (T): Linear(in_features=80, out_features=80, bias=True)
        (relu): ReLU()
        (sigmoid): Sigmoid()
      )
      (2): Highway(
        (H): Linear(in_features=80, out_features=80, bias=True)
        (T): Linear(in_features=80, out_features=80, bias=True)
        (relu): ReLU()
        (sigmoid): Sigmoid()
      )
      (3): Highway(
        (H): Linear(in_features=80, out_features=80, bias=True)
        (T): Linear(in_features=80, out_features=80, bias=True)
        (relu): ReLU()
        (sigmoid): Sigmoid()
      )
    )
    (gru): GRU(80, 80, batch_first=True, bidirectional=True)
  )
  (last_linear): Linear(in_features=160, out_features=1025, bias=True)
)
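Optionally, a quick sanity check on the restored checkpoint: count the trainable parameters directly from the loaded model.
In [ ]:
# Quick sanity check: total number of trainable parameters in the model.
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(" > Model has {:.2f}M trainable parameters".format(num_params / 1e6))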

EXAMPLES FROM TRAINING SET

In [8]:
import pandas as pd
df = pd.read_csv('/data/shared/KeithIto/LJSpeech-1.0/metadata_val.csv', delimiter='|')
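A quick look at the validation split before sampling from it; the second column holds the transcript, assuming the standard LJSpeech id|text|normalized-text layout.
In [ ]:
# Preview the validation metadata; column 1 is the transcript text.
print(len(df), "validation rows")
print(df.iloc[:3, 1].tolist())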
In [9]:
sentence = df.iloc[175, 1]
print(sentence)
model.decoder.max_decoder_steps = 250
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap)
Hosty took the necessary steps to have the Dallas office of the FBI, rather than the New Orleans office, reestablished as the office with principal responsibility.
 >  Run-time: 9.547307014465332
[Audio output]
[Figure: attention alignment, spectrogram, and stop tokens]
In [10]:
sentence =  "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent."
model.decoder.max_decoder_steps = 250
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap, figures=True)
 >  Run-time: 5.965982913970947
[Audio output]
[Figure: attention alignment, spectrogram, and stop tokens]
In [11]:
sentence = "Be a voice,not an echo."  # 'echo' is not in training set. 
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap)
 >  Run-time: 1.3593213558197021
[Audio output]
[Figure: attention alignment, spectrogram, and stop tokens]
In [12]:
sentence = "The human voice is the most perfect instrument of all."
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap)
 >  Run-time: 3.3218443393707275
[Audio output]
[Figure: attention alignment, spectrogram, and stop tokens]
In [13]:
sentence = "I'm sorry Dave. I'm afraid I can't do that."
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap)
 >  Run-time: 3.3077054023742676
[Audio output]
[Figure: attention alignment, spectrogram, and stop tokens]
In [ ]:
sentence = "This cake is great. It's so delicious and moist."
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap)
 >  Run-time: 3.859889507293701
[Audio output]
In [ ]:
sentence = "Generative adversarial network or variational auto-encoder."
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap)
In [ ]:
sentence = "Scientists at the CERN laboratory say they have discovered a new particle."
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap)
In [ ]:
sentence = "heres a way to measure the acute emotional intelligence that has never gone out of style."
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap)
In [ ]:
sentence = "President Trump met with other leaders at the Group of 20 conference."
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap)
In [ ]:
sentence = "The buses aren't the problem, they actually provide a solution."
align, spec, stop_tokens = tts(model, sentence, CONFIG, use_cuda, ap)
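OUT_FOLDER is defined above but not used so far. A minimal sketch for writing a synthesized utterance to disk, assuming create_speech returns the raw float waveform as used inside the tts helper; scipy handles the 16-bit PCM write.
In [ ]:
# Hypothetical example: save one synthesized utterance under OUT_FOLDER.
import os
import numpy as np
from scipy.io import wavfile

os.makedirs(OUT_FOLDER, exist_ok=True)
waveform, alignment, spectrogram, stop_tokens = create_speech(model, sentence, CONFIG, use_cuda, ap)
wav_norm = np.clip(waveform, -1.0, 1.0)                      # keep samples in [-1, 1]
wavfile.write(os.path.join(OUT_FOLDER, 'sample.wav'),
              CONFIG.sample_rate,
              (wav_norm * 32767).astype(np.int16))           # 16-bit PCM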