Merge pull request #533 from Edresson/dev

add Speaker Conditional GST support
This commit is contained in:
Eren Gölge 2020-09-30 02:56:37 +02:00 committed by GitHub
commit 592bb668fd
11 changed files with 192 additions and 26 deletions

View File

@ -10,6 +10,8 @@ import traceback
import numpy as np
import torch
from random import randrange
from torch.utils.data import DataLoader
from TTS.tts.datasets.preprocess import load_meta_data
from TTS.tts.datasets.TTSDataset import MyDataset
@ -39,7 +41,6 @@ from TTS.utils.training import (NoamLR, adam_weight_decay, check_update,
use_cuda, num_gpus = setup_torch_training_env(True, False)
def setup_loader(ap, r, is_val=False, verbose=False, speaker_mapping=None):
if is_val and not c.run_eval:
loader = None
@ -432,6 +433,14 @@ def evaluate(model, criterion, ap, global_step, epoch, speaker_mapping=None):
test_figures = {}
print(" | > Synthesizing test sentences")
speaker_id = 0 if c.use_speaker_embedding else None
speaker_embedding = None
if c.use_external_speaker_embedding_file and c.use_speaker_embedding:
    # pick a random speaker's embedding for the test sentences
    speaker_embedding = speaker_mapping[list(speaker_mapping.keys())[randrange(len(speaker_mapping))]]['embedding']
style_wav = c.get("gst_style_input")
if style_wav is None and c.use_gst:
# initialize GST with a zero-weight style-token dict
style_wav = {}
print("WARNING: No GST style wav provided; using zero style-token weights instead!")
for i in range(c.gst['gst_style_tokens']):
style_wav[str(i)] = 0
for idx, test_sentence in enumerate(test_sentences):
try:
@ -442,6 +451,7 @@ def evaluate(model, criterion, ap, global_step, epoch, speaker_mapping=None):
use_cuda,
ap,
speaker_id=speaker_id,
speaker_embedding=speaker_embedding,
style_wav=style_wav,
truncated=False,
enable_eos_bos_chars=c.enable_eos_bos_chars, #pylint: disable=unused-argument
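The fallback above weights every style token at zero, so evaluation runs deterministically even when no reference wav is configured. A minimal sketch of that logic, assuming the usual config object `c` exposing `use_gst` and `c.gst['gst_style_tokens']`:

def default_style_input(c):
    """Return the all-zeros token-weight dict used when no style wav is set."""
    if not c.use_gst:
        return None
    # one entry per style token: key is the token index, value is its weight
    return {str(i): 0 for i in range(c.gst['gst_style_tokens'])}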

View File

@ -8,14 +8,17 @@ class GST(nn.Module):
See https://arxiv.org/pdf/1803.09017"""
def __init__(self, num_mel, num_heads, num_style_tokens, embedding_dim):
def __init__(self, num_mel, num_heads, num_style_tokens, gst_embedding_dim, speaker_embedding_dim=None):
super().__init__()
self.encoder = ReferenceEncoder(num_mel, embedding_dim)
self.encoder = ReferenceEncoder(num_mel, gst_embedding_dim)
self.style_token_layer = StyleTokenLayer(num_heads, num_style_tokens,
embedding_dim)
gst_embedding_dim, speaker_embedding_dim)
def forward(self, inputs):
def forward(self, inputs, speaker_embedding=None):
enc_out = self.encoder(inputs)
# concatenate the speaker embedding to the reference-encoder output
if speaker_embedding is not None:
enc_out = torch.cat([enc_out, speaker_embedding], dim=-1)
style_embed = self.style_token_layer(enc_out)
return style_embed
@ -72,7 +75,7 @@ class ReferenceEncoder(nn.Module):
# x: 3D tensor [batch_size, post_conv_width,
# num_channels*post_conv_height]
self.recurrence.flatten_parameters()
_, out = self.recurrence(x)
memory, out = self.recurrence(x)
# out: 3D tensor [seq_len==1, batch_size, encoding_size=gst_embedding_dim//2]
return out.squeeze(0)
@ -90,9 +93,14 @@ class StyleTokenLayer(nn.Module):
"""NN Module attending to style tokens based on prosody encodings."""
def __init__(self, num_heads, num_style_tokens,
embedding_dim):
embedding_dim, speaker_embedding_dim=None):
super().__init__()
self.query_dim = embedding_dim // 2
if speaker_embedding_dim:
self.query_dim += speaker_embedding_dim
self.key_dim = embedding_dim // num_heads
self.style_tokens = nn.Parameter(
torch.FloatTensor(num_style_tokens, self.key_dim))
@ -115,7 +123,6 @@ class StyleTokenLayer(nn.Module):
return style_embed
class MultiHeadAttention(nn.Module):
'''
input:
@ -166,4 +173,4 @@ class MultiHeadAttention(nn.Module):
torch.split(out, 1, dim=0),
dim=3).squeeze(0) # [N, T_q, num_units]
return out
return out
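A hedged usage sketch of the speaker-conditional GST above (tensor sizes are illustrative, and the import path is assumed from this repo's layout):

import torch
from TTS.tts.layers.gst_layers import GST  # path assumed

gst = GST(num_mel=80, num_heads=4, num_style_tokens=10,
          gst_embedding_dim=512, speaker_embedding_dim=55)
mel = torch.rand(8, 120, 80)   # B x T x num_mel reference spectrogram
spk = torch.rand(8, 55)        # external speaker embedding, one per sample
style = gst(mel, speaker_embedding=spk)  # attention query grows from 256 to 256 + 55 dims
print(style.shape)             # expected: torch.Size([8, 1, 512])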

View File

@ -35,7 +35,8 @@ class Tacotron(TacotronAbstract):
gst_embedding_dim=256,
gst_num_heads=4,
gst_style_tokens=10,
memory_size=5):
memory_size=5,
gst_use_speaker_embedding=False):
super(Tacotron,
self).__init__(num_chars, num_speakers, r, postnet_output_dim,
decoder_output_dim, attn_type, attn_win,
@ -45,7 +46,7 @@ class Tacotron(TacotronAbstract):
bidirectional_decoder, double_decoder_consistency,
ddc_r, encoder_in_features, decoder_in_features,
speaker_embedding_dim, gst, gst_embedding_dim,
gst_num_heads, gst_style_tokens)
gst_num_heads, gst_style_tokens, gst_use_speaker_embedding)
# speaker embedding layers
if self.num_speakers > 1:
@ -78,7 +79,8 @@ class Tacotron(TacotronAbstract):
self.gst_layer = GST(num_mel=80,
num_heads=gst_num_heads,
num_style_tokens=gst_style_tokens,
embedding_dim=gst_embedding_dim)
gst_embedding_dim=self.gst_embedding_dim,
speaker_embedding_dim=speaker_embedding_dim if self.embeddings_per_sample and self.gst_use_speaker_embedding else None)
# backward pass decoder
if self.bidirectional_decoder:
self._init_backward_decoder()
@ -108,7 +110,9 @@ class Tacotron(TacotronAbstract):
# global style token
if self.gst:
# B x gst_dim
encoder_outputs = self.compute_gst(encoder_outputs, mel_specs)
encoder_outputs = self.compute_gst(encoder_outputs,
mel_specs,
speaker_embeddings if self.gst_use_speaker_embedding else None)
# speaker embedding
if self.num_speakers > 1:
if not self.embeddings_per_sample:
@ -149,7 +153,9 @@ class Tacotron(TacotronAbstract):
encoder_outputs = self.encoder(inputs)
if self.gst:
# B x gst_dim
encoder_outputs = self.compute_gst(encoder_outputs, style_mel)
encoder_outputs = self.compute_gst(encoder_outputs,
style_mel,
speaker_embeddings if self.gst_use_speaker_embedding else None)
if self.num_speakers > 1:
if not self.embeddings_per_sample:
# B x 1 x speaker_embed_dim

View File

@ -33,7 +33,8 @@ class Tacotron2(TacotronAbstract):
gst=False,
gst_embedding_dim=512,
gst_num_heads=4,
gst_style_tokens=10):
gst_style_tokens=10,
gst_use_speaker_embedding=False):
super(Tacotron2,
self).__init__(num_chars, num_speakers, r, postnet_output_dim,
decoder_output_dim, attn_type, attn_win,
@ -43,7 +44,7 @@ class Tacotron2(TacotronAbstract):
bidirectional_decoder, double_decoder_consistency,
ddc_r, encoder_in_features, decoder_in_features,
speaker_embedding_dim, gst, gst_embedding_dim,
gst_num_heads, gst_style_tokens)
gst_num_heads, gst_style_tokens, gst_use_speaker_embedding)
# speaker embedding layer
if self.num_speakers > 1:
@ -72,7 +73,8 @@ class Tacotron2(TacotronAbstract):
self.gst_layer = GST(num_mel=80,
num_heads=self.gst_num_heads,
num_style_tokens=self.gst_style_tokens,
embedding_dim=self.gst_embedding_dim)
gst_embedding_dim=self.gst_embedding_dim,
speaker_embedding_dim=speaker_embedding_dim if self.embeddings_per_sample and self.gst_use_speaker_embedding else None)
# backward pass decoder
if self.bidirectional_decoder:
self._init_backward_decoder()
@ -98,11 +100,11 @@ class Tacotron2(TacotronAbstract):
embedded_inputs = self.embedding(text).transpose(1, 2)
# B x T_in_max x D_en
encoder_outputs = self.encoder(embedded_inputs, text_lengths)
if self.gst:
# B x gst_dim
encoder_outputs = self.compute_gst(encoder_outputs, mel_specs)
encoder_outputs = self.compute_gst(encoder_outputs,
mel_specs,
speaker_embeddings if self.gst_use_speaker_embedding else None)
if self.num_speakers > 1:
if not self.embeddings_per_sample:
# B x 1 x speaker_embed_dim
@ -144,8 +146,9 @@ class Tacotron2(TacotronAbstract):
if self.gst:
# B x gst_dim
encoder_outputs = self.compute_gst(encoder_outputs, style_mel)
encoder_outputs = self.compute_gst(encoder_outputs,
style_mel,
speaker_embeddings if self.gst_use_speaker_embedding else None)
if self.num_speakers > 1:
if not self.embeddings_per_sample:
speaker_embeddings = self.speaker_embedding(speaker_ids)[:, None]
@ -168,7 +171,9 @@ class Tacotron2(TacotronAbstract):
if self.gst:
# B x gst_dim
encoder_outputs = self.compute_gst(encoder_outputs, style_mel)
encoder_outputs = self.compute_gst(encoder_outputs,
style_mel,
speaker_embeddings if self.gst_use_speaker_embedding else None)
if self.num_speakers > 1:
if not self.embeddings_per_sample:
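A hedged inference sketch for the new flag (keyword names assumed from the signatures shown above); with gst_use_speaker_embedding=True the speaker embedding now conditions the GST attention query as well as the decoder input:

import torch

# `model` is assumed to be a Tacotron2 built with gst=True and
# gst_use_speaker_embedding=True, as in the tests further below
model.eval()
text = torch.randint(0, 24, (1, 50)).long()
spk = torch.rand(1, 55)             # external speaker embedding
style = {"0": 0.3, "5": -0.15}      # weight two of the ten style tokens
with torch.no_grad():
    decoder_out, postnet_out, alignments, stop_tokens = model.inference(
        text, style_mel=style, speaker_embeddings=spk)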

View File

@ -34,7 +34,8 @@ class TacotronAbstract(ABC, nn.Module):
gst=False,
gst_embedding_dim=512,
gst_num_heads=4,
gst_style_tokens=10):
gst_style_tokens=10,
gst_use_speaker_embedding=False):
""" Abstract Tacotron class """
super().__init__()
self.num_chars = num_chars
@ -45,6 +46,7 @@ class TacotronAbstract(ABC, nn.Module):
self.gst_embedding_dim = gst_embedding_dim
self.gst_num_heads = gst_num_heads
self.gst_style_tokens = gst_style_tokens
self.gst_use_speaker_embedding = gst_use_speaker_embedding
self.num_speakers = num_speakers
self.bidirectional_decoder = bidirectional_decoder
self.double_decoder_consistency = double_decoder_consistency
@ -179,11 +181,14 @@ class TacotronAbstract(ABC, nn.Module):
self.speaker_embeddings_projected = self.speaker_project_mel(
self.speaker_embeddings).squeeze(1)
def compute_gst(self, inputs, style_input):
def compute_gst(self, inputs, style_input, speaker_embedding=None):
""" Compute global style token """
device = inputs.device
if isinstance(style_input, dict):
query = torch.zeros(1, 1, self.gst_embedding_dim//2).to(device)
if speaker_embedding is not None:
query = torch.cat([query, speaker_embedding.reshape(1, 1, -1)], dim=-1)
_GST = torch.tanh(self.gst_layer.style_token_layer.style_tokens)
gst_outputs = torch.zeros(1, 1, self.gst_embedding_dim).to(device)
for k_token, v_amplifier in style_input.items():
@ -193,7 +198,7 @@ class TacotronAbstract(ABC, nn.Module):
elif style_input is None:
gst_outputs = torch.zeros(1, 1, self.gst_embedding_dim).to(device)
else:
gst_outputs = self.gst_layer(style_input) # pylint: disable=not-callable
gst_outputs = self.gst_layer(style_input, speaker_embedding) # pylint: disable=not-callable
inputs = self._concat_speaker_embedding(inputs, gst_outputs)
return inputs
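For reference, a hedged, self-contained restatement of the dict branch above (not the repo's code; `attention.num_units` is assumed to equal gst_embedding_dim): each (token, weight) pair attends over that single tanh-squashed token, and the weighted results are summed into the style vector.

import torch

def style_from_token_weights(style_tokens, attention, style_input, query):
    """style_tokens: [num_tokens, key_dim]; attention: the GST multi-head
    attention layer; style_input: {token_index: weight}; query: [1, 1, query_dim]."""
    tokens = torch.tanh(style_tokens)
    gst_outputs = torch.zeros(1, 1, attention.num_units)
    for k_token, v_amplifier in style_input.items():
        key = tokens[int(k_token)].unsqueeze(0).expand(1, -1, -1)  # [1, 1, key_dim]
        gst_outputs = gst_outputs + attention(query, key) * v_amplifier
    return gst_outputs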

View File

@ -59,6 +59,7 @@ def setup_model(num_chars, num_speakers, c, speaker_embedding_dim=None):
gst_embedding_dim=c.gst['gst_embedding_dim'],
gst_num_heads=c.gst['gst_num_heads'],
gst_style_tokens=c.gst['gst_style_tokens'],
gst_use_speaker_embedding=c.gst['gst_use_speaker_embedding'],
memory_size=c.memory_size,
attn_type=c.attention_type,
attn_win=c.windowing,
@ -85,6 +86,7 @@ def setup_model(num_chars, num_speakers, c, speaker_embedding_dim=None):
gst_embedding_dim=c.gst['gst_embedding_dim'],
gst_num_heads=c.gst['gst_num_heads'],
gst_style_tokens=c.gst['gst_style_tokens'],
gst_use_speaker_embedding=c.gst['gst_use_speaker_embedding'],
attn_type=c.attention_type,
attn_win=c.windowing,
attn_norm=c.attention_norm,
@ -244,6 +246,7 @@ def check_config_tts(c):
check_argument('gst', c, restricted=True, val_type=dict)
check_argument('gst_style_input', c['gst'], restricted=True, val_type=[str, dict])
check_argument('gst_embedding_dim', c['gst'], restricted=True, val_type=int, min_val=0, max_val=1000)
check_argument('gst_use_speaker_embedding', c['gst'], restricted=True, val_type=bool)
check_argument('gst_num_heads', c['gst'], restricted=True, val_type=int, min_val=2, max_val=10)
check_argument('gst_style_tokens', c['gst'], restricted=True, val_type=int, min_val=1, max_val=1000)
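An illustrative `gst` config block that satisfies the checks above (restricted=True means every key must be present; values are examples only):

gst_config = {
    "gst_style_input": {"0": 0.15, "1": 0.15, "5": -0.15},  # str path or dict
    "gst_embedding_dim": 512,            # int in [0, 1000]
    "gst_use_speaker_embedding": True,   # the new flag, must be a bool
    "gst_num_heads": 4,                  # int in [2, 10]
    "gst_style_tokens": 10,              # int in [1, 1000]
}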

View File

@ -61,6 +61,7 @@
// -> wave file [path to wave] or
// -> dictionary mapping style-token ids to weights, e.g. {"0": 0.15, "1": 0.15, "5": -0.15},
// with len(dict) <= gst_style_tokens.
"gst_use_speaker_embedding": true, // if true, condition the GST attention on the speaker embedding.
"gst_embedding_dim": 512,
"gst_num_heads": 4,
"gst_style_tokens": 10

View File

@ -140,6 +140,7 @@
// -> wave file [path to wave] or
// -> dictionary mapping style-token ids to weights, e.g. {"0": 0.15, "1": 0.15, "5": -0.15},
// with len(dict) <= gst_style_tokens.
"gst_use_speaker_embedding": true, // if true, condition the GST attention on the speaker embedding.
"gst_embedding_dim": 512,
"gst_num_heads": 4,
"gst_style_tokens": 10

View File

@ -93,6 +93,7 @@
// -> wave file [path to wave] or
// -> dictionary mapping style-token ids to weights, e.g. {"0": 0.15, "1": 0.15, "5": -0.15},
// with len(dict) <= gst_style_tokens.
"gst_use_speaker_embedding": true, // if true, condition the GST attention on the speaker embedding.
"gst_embedding_dim": 512,
"gst_num_heads": 4,
"gst_style_tokens": 10

View File

@ -238,3 +238,58 @@ class TacotronGSTTrainTest(unittest.TestCase):
), "param {} {} with shape {} not updated!! \n{}\n{}".format(
name, count, param.shape, param, param_ref)
count += 1
class SCGSTMultiSpeakerTacotronTrainTest(unittest.TestCase):
@staticmethod
def test_train_step():
input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
input_lengths = torch.randint(100, 128, (8, )).long().to(device)
input_lengths = torch.sort(input_lengths, descending=True)[0]
mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
mel_lengths[0] = 30
stop_targets = torch.zeros(8, 30, 1).float().to(device)
speaker_embeddings = torch.rand(8, 55).to(device)
for idx in mel_lengths:
stop_targets[:, int(idx.item()):, 0] = 1.0
stop_targets = stop_targets.view(input_dummy.shape[0],
stop_targets.size(1) // c.r, -1)
stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
criterion = MSELossMasked(seq_len_norm=False).to(device)
criterion_st = nn.BCEWithLogitsLoss().to(device)
model = Tacotron2(num_chars=24,
                  r=c.r,
                  num_speakers=5,
                  speaker_embedding_dim=55,
                  gst=True,
                  gst_embedding_dim=c.gst['gst_embedding_dim'],
                  gst_num_heads=c.gst['gst_num_heads'],
                  gst_style_tokens=c.gst['gst_style_tokens'],
                  gst_use_speaker_embedding=c.gst['gst_use_speaker_embedding']).to(device)
model.train()
model_ref = copy.deepcopy(model)
count = 0
for param, param_ref in zip(model.parameters(),
model_ref.parameters()):
assert (param - param_ref).sum() == 0, param
count += 1
optimizer = optim.Adam(model.parameters(), lr=c.lr)
for _ in range(5):
mel_out, mel_postnet_out, align, stop_tokens = model.forward(
input_dummy, input_lengths, mel_spec, mel_lengths, speaker_embeddings=speaker_embeddings)
assert torch.sigmoid(stop_tokens).data.max() <= 1.0
assert torch.sigmoid(stop_tokens).data.min() >= 0.0
optimizer.zero_grad()
loss = criterion(mel_out, mel_spec, mel_lengths)
stop_loss = criterion_st(stop_tokens, stop_targets)
loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
loss.backward()
optimizer.step()
# check parameter changes
count = 0
for name_param, param_ref in zip(model.named_parameters(),
model_ref.parameters()):
# ignore the pre-highway layer since it is applied conditionally
# if count not in [145, 59]:
name, param = name_param
if name == 'gst_layer.encoder.recurrence.weight_hh_l0':
continue
assert (param != param_ref).any(
), "param {} with shape {} not updated!! \n{}\n{}".format(
count, param.shape, param, param_ref)
count += 1

View File

@ -284,3 +284,75 @@ class TacotronGSTTrainTest(unittest.TestCase):
), "param {} with shape {} not updated!! \n{}\n{}".format(
count, param.shape, param, param_ref)
count += 1
class SCGSTMultiSpeakerTacotronTrainTest(unittest.TestCase):
@staticmethod
def test_train_step():
input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
input_lengths = torch.randint(100, 129, (8, )).long().to(device)
input_lengths[-1] = 128
mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
linear_spec = torch.rand(8, 30, c.audio['fft_size']).to(device)
mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
stop_targets = torch.zeros(8, 30, 1).float().to(device)
speaker_embeddings = torch.rand(8, 55).to(device)
for idx in mel_lengths:
stop_targets[:, int(idx.item()):, 0] = 1.0
stop_targets = stop_targets.view(input_dummy.shape[0],
stop_targets.size(1) // c.r, -1)
stop_targets = (stop_targets.sum(2) >
0.0).unsqueeze(2).float().squeeze()
criterion = L1LossMasked(seq_len_norm=False).to(device)
criterion_st = nn.BCEWithLogitsLoss().to(device)
model = Tacotron(
num_chars=32,
num_speakers=5,
postnet_output_dim=c.audio['fft_size'],
decoder_output_dim=c.audio['num_mels'],
gst=True,
gst_embedding_dim=c.gst['gst_embedding_dim'],
gst_num_heads=c.gst['gst_num_heads'],
gst_style_tokens=c.gst['gst_style_tokens'],
gst_use_speaker_embedding=c.gst['gst_use_speaker_embedding'],
r=c.r,
memory_size=c.memory_size,
speaker_embedding_dim=55,
).to(device)
model.train()
print(" > Num parameters for Tacotron model: %s" %
(count_parameters(model)))
model_ref = copy.deepcopy(model)
count = 0
for param, param_ref in zip(model.parameters(),
model_ref.parameters()):
assert (param - param_ref).sum() == 0, param
count += 1
optimizer = optim.Adam(model.parameters(), lr=c.lr)
for _ in range(5):
mel_out, linear_out, align, stop_tokens = model.forward(
input_dummy, input_lengths, mel_spec, mel_lengths,
speaker_embeddings=speaker_embeddings)
optimizer.zero_grad()
loss = criterion(mel_out, mel_spec, mel_lengths)
stop_loss = criterion_st(stop_tokens, stop_targets)
loss = loss + criterion(linear_out, linear_spec,
mel_lengths) + stop_loss
loss.backward()
optimizer.step()
# check parameter changes
count = 0
for name_param, param_ref in zip(model.named_parameters(),
model_ref.parameters()):
# ignore the pre-highway layer since it is applied conditionally
# if count not in [145, 59]:
name, param = name_param
if name == 'gst_layer.encoder.recurrence.weight_hh_l0':
continue
assert (param != param_ref).any(
), "param {} with shape {} not updated!! \n{}\n{}".format(
count, param.shape, param, param_ref)
count += 1