mirror of https://github.com/coqui-ai/TTS.git
add mk annealing (mk attn loss contribution)
parent e7350346bf
commit c55900b4ad
config.json

@@ -16,6 +16,7 @@
     "batch_size": 32,
     "eval_batch_size":32,
     "r": 5,
+    "mk": 1,
 
     "griffin_lim_iters": 60,
     "power": 1.2,
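The single functional change above is the new "mk" key: the initial weight of the guided-attention loss term introduced in train.py below. A minimal sketch of how it is consumed, assuming the repo's load_config (imported in train.py) returns an attribute-style config object; the config path is illustrative:

    from utils.generic_utils import load_config

    c = load_config('config.json')  # illustrative path
    print(c.mk)      # 1 -> starting weight of the attention loss
    print(c.epochs)  # total epochs; the weight anneals to 0 over this span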
train.py (19 changed lines)
@@ -19,7 +19,8 @@ from tensorboardX import SummaryWriter
 from utils.generic_utils import (Progbar, remove_experiment_folder,
                                  create_experiment_folder, save_checkpoint,
                                  save_best_model, load_config, lr_decay,
-                                 count_parameters, check_update, get_commit_hash)
+                                 count_parameters, check_update, get_commit_hash,
+                                 create_attn_mask)
 from utils.model import get_param_size
 from utils.visual import plot_alignment, plot_spectrogram
 from models.tacotron import Tacotron
@@ -91,6 +92,9 @@ def train(model, criterion, data_loader, optimizer, epoch):
 
         optimizer.zero_grad()
 
+        # setup mk (annealed attention-loss weight)
+        mk = mk_decay(c.mk, c.epochs, epoch)
+
         # convert inputs to variables
         text_input_var = Variable(text_input)
         mel_spec_var = Variable(mel_input)
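mk_decay, added to utils/generic_utils.py below, anneals this weight linearly: full strength at epoch 0, zero by the final epoch. A quick sketch of the schedule, assuming c.mk = 1 and a hypothetical 1000-epoch run:

    def mk_decay(init_mk, max_epoch, n_epoch):
        # linear annealing, as defined in utils/generic_utils.py below
        return init_mk * ((max_epoch - n_epoch) / max_epoch)

    for epoch in (0, 250, 500, 999):
        print(epoch, mk_decay(1, 1000, epoch))
    # -> 1.0, 0.75, 0.5, 0.001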
@@ -105,18 +109,9 @@ def train(model, criterion, data_loader, optimizer, epoch):
             linear_spec_var = linear_spec_var.cuda()
 
         # create attention mask
-        # TODO: vectorize
         N = text_input_var.shape[1]
         T = mel_spec_var.shape[1] // c.r
-        M = np.zeros([N, T])
-        for t in range(T):
-            for n in range(N):
-                val = 20 * np.exp(-pow((n/N)-(t/T), 2.0)/0.05)
-                M[n, t] = val
-        e_x = np.exp(M - np.max(M))
-        M = e_x / e_x.sum(axis=0)  # only difference
-        M = Variable(torch.FloatTensor(M).t()).cuda()
-        M = torch.stack([M]*32)
+        M = create_attn_mask(N, T, g)
 
         # forward pass
         mel_output, linear_output, alignments =\
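The inlined mask construction removed here (and the create_attn_mask helper that replaces it, added below) builds the soft diagonal prior with a double Python loop; the deleted "# TODO: vectorize" points at the obvious improvement. A sketch of an equivalent construction via NumPy broadcasting (the function name is mine, not part of the commit; the softmax normalization matches the loop version):

    import numpy as np

    def create_attn_mask_vectorized(N, T, g=0.05):
        # soft diagonal prior: peaks where n/N == t/T, width set by g
        n = np.arange(N)[:, None] / N   # (N, 1)
        t = np.arange(T)[None, :] / T   # (1, T)
        M = 20 * np.exp(-((n - t) ** 2) / g)
        e_x = np.exp(M - M.max())       # softmax over the text axis
        return e_x / e_x.sum(axis=0)    # (N, T), columns sum to 1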
@@ -129,7 +124,7 @@ def train(model, criterion, data_loader, optimizer, epoch):
                                   linear_spec_var[:, :, :n_priority_freq],
                                   mel_lengths_var)
         attention_loss = criterion(alignments, M, mel_lengths_var)
-        loss = mel_loss + linear_loss + attention_loss
+        loss = mel_loss + linear_loss + mk * attention_loss
 
         # backpass and check the grad norm
         loss.backward()
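With this change the guided-attention term dominates early in training, pulling the alignment toward the diagonal while it is still forming, then fades to nothing by the last epoch. A standalone toy version of the weighted objective (plain L1 stands in for the repo's sequence-masked criterion, which this hunk does not show in full; all tensors are dummies):

    import torch
    import torch.nn.functional as F

    B, T, N = 4, 50, 40
    alignments = torch.softmax(torch.randn(B, T, N), dim=-1)  # dummy attention
    M = torch.rand(B, T, N)    # stands in for the guided-attention mask
    mel_loss = torch.rand(())  # dummy spectrogram losses
    linear_loss = torch.rand(())

    mk = 0.5  # e.g. halfway through training with c.mk = 1
    attention_loss = F.l1_loss(alignments, M)
    loss = mel_loss + linear_loss + mk * attention_loss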
utils/generic_utils.py

@@ -131,6 +131,24 @@ def lr_decay(init_lr, global_step, warmup_steps):
     return lr
 
 
+def create_attn_mask(N, T, g=0.05):
+    r"""Create the soft diagonal attention mask for guided attention."""
+    M = np.zeros([N, T])
+    for t in range(T):
+        for n in range(N):
+            val = 20 * np.exp(-pow((n/N)-(t/T), 2.0)/g)
+            M[n, t] = val
+    e_x = np.exp(M - np.max(M))
+    M = e_x / e_x.sum(axis=0)  # softmax over the text axis
+    M = Variable(torch.FloatTensor(M).t()).cuda()
+    M = torch.stack([M]*32)
+    return M
+
+
+def mk_decay(init_mk, max_epoch, n_epoch):
+    return init_mk * ((max_epoch - n_epoch) / max_epoch)
+
+
 def count_parameters(model):
     r"""Count number of trainable parameters in a network"""
     return sum(p.numel() for p in model.parameters() if p.requires_grad)
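One caveat in create_attn_mask: torch.stack([M]*32) hard-codes the batch dimension to 32, which happens to match "batch_size": 32 in the config but will produce a shape mismatch for any other batch size, including a smaller final batch of an epoch. A sketch of a shape-safe variant (the batch_size parameter and the use of expand are my additions; Variable wrappers are omitted since they are no-ops on PyTorch >= 0.4):

    import numpy as np
    import torch

    def create_attn_mask_batched(N, T, batch_size, g=0.05):
        n = np.arange(N)[:, None] / N
        t = np.arange(T)[None, :] / T
        M = 20 * np.exp(-((n - t) ** 2) / g)
        e_x = np.exp(M - M.max())
        # .cuda() kept to mirror the helper; drop it on CPU
        M = torch.FloatTensor(e_x / e_x.sum(axis=0)).t().cuda()  # (T, N)
        # expand tiles the mask without copying, unlike torch.stack([M]*32)
        return M.unsqueeze(0).expand(batch_size, T, N)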