Merge branch 'weight-decay' into attn-smoothing-bgs-sigmoid-wd

Eren 2018-09-26 16:49:39 +02:00
commit c89a3098dd
2 changed files with 18 additions and 10 deletions

View File

@@ -1,6 +1,6 @@
 {
-    "model_name": "TTS-master",
-    "model_description": "'Same' padding as in TF",
+    "model_name": "TTS-weight-decay",
+    "model_description": "Weight decay as in FastAI",
     "audio_processor": "audio",
     "num_mels": 80,
     "num_freq": 1025,
@@ -23,6 +23,7 @@
     "batch_size": 32,
     "eval_batch_size":-1,
     "r": 5,
+    "wd": 0.0001,
     "griffin_lim_iters": 60,
     "power": 1.5,

View File

@@ -22,7 +22,6 @@ from models.tacotron import Tacotron
from layers.losses import L1LossMasked
from utils.audio import AudioProcessor
torch.manual_seed(1)
# torch.set_num_threads(4)
use_cuda = torch.cuda.is_available()
@@ -90,6 +89,9 @@ def train(model, criterion, criterion_st, data_loader, optimizer, optimizer_st,
         # backpass and check the grad norm for spec losses
         loss.backward(retain_graph=True)
+        for group in optimizer.param_groups:
+            for param in group['params']:
+                param.data = param.data.add(-c.wd * group['lr'], param.data)
         grad_norm, skip_flag = check_update(model, 1)
         if skip_flag:
             optimizer.zero_grad()
@@ -99,6 +101,9 @@ def train(model, criterion, criterion_st, data_loader, optimizer, optimizer_st,
         # backpass and check the grad norm for stop loss
         stop_loss.backward()
+        for group in optimizer_st.param_groups:
+            for param in group['params']:
+                param.data = param.data.add(-c.wd * group['lr'], param.data)
         grad_norm_st, skip_flag = check_update(model.decoder.stopnet, 0.5)
         if skip_flag:
             optimizer_st.zero_grad()
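
The two loops added above are the heart of this change: right after backprop, and before gradient clipping and the optimizer step, every parameter is shifted by -lr * wd * param, i.e. weight decay decoupled from Adam's gradient-based update (the FastAI-style formulation named in the config). The tensor.add(scalar, other) call is the old PyTorch 0.4 two-argument form; a minimal equivalent sketch in the later keyword style, purely illustrative and not part of the commit:

    # decoupled weight decay, applied by hand after loss.backward()
    for group in optimizer.param_groups:
        for param in group['params']:
            # param <- param - lr * wd * param; Adam's moment estimates never see this term
            param.data.add_(param.data, alpha=-c.wd * group['lr'])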
@@ -277,7 +282,7 @@ def evaluate(model, criterion, criterion_st, data_loader, ap, current_step):
             tb.add_figure('ValVisual/Reconstruction', const_spec, current_step)
             tb.add_figure('ValVisual/GroundTruth', gt_spec, current_step)
             tb.add_figure('ValVisual/ValidationAlignment', align_img,
-                            current_step)
+                          current_step)
             # Sample audio
             audio_signal = linear_output[idx].data.cpu().numpy()
@@ -317,7 +322,8 @@ def evaluate(model, criterion, criterion_st, data_loader, ap, current_step):
             file_path = os.path.join(AUDIO_PATH, str(current_step))
             os.makedirs(file_path, exist_ok=True)
-            file_path = os.path.join(file_path, "TestSentence_{}.wav".format(idx))
+            file_path = os.path.join(file_path,
+                                     "TestSentence_{}.wav".format(idx))
             ap.save_wav(wav, file_path)
             wav_name = 'TestSentences/{}'.format(idx)
@@ -326,10 +332,10 @@ def evaluate(model, criterion, criterion_st, data_loader, ap, current_step):
             align_img = alignments[0].data.cpu().numpy()
             linear_spec = plot_spectrogram(linear_spec, ap)
             align_img = plot_alignment(align_img)
-            tb.add_figure('TestSentences/{}_Spectrogram'.format(idx), linear_spec,
-                          current_step)
+            tb.add_figure('TestSentences/{}_Spectrogram'.format(idx),
+                          linear_spec, current_step)
             tb.add_figure('TestSentences/{}_Alignment'.format(idx), align_img,
-                          current_step)
+                           current_step)
         except:
             print(" !! Error as creating Test Sentence -", idx)
             pass
@@ -390,8 +396,9 @@ def main(args):
     model = Tacotron(c.embedding_size, ap.num_freq, c.num_mels, c.r)
     print(" | > Num output units : {}".format(ap.num_freq), flush=True)
-    optimizer = optim.Adam(model.parameters(), lr=c.lr)
-    optimizer_st = optim.Adam(model.decoder.stopnet.parameters(), lr=c.lr)
+    optimizer = optim.Adam(model.parameters(), lr=c.lr, weight_decay=0)
+    optimizer_st = optim.Adam(
+        model.decoder.stopnet.parameters(), lr=c.lr, weight_decay=0)
     criterion = L1LossMasked()
     criterion_st = nn.BCELoss()
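
Passing weight_decay=0 here makes the intent explicit: Adam's built-in L2 penalty stays off, and the only decay is the manual per-step update in the training loop above. For comparison, a hedged sketch of the closest built-in alternative in later PyTorch releases, where AdamW applies the same lr-scaled decoupled decay internally (this is not what the commit uses, only the same idea):

    import torch.optim as optim
    # what this commit does: plain Adam, decay handled manually each step
    optimizer = optim.Adam(model.parameters(), lr=c.lr, weight_decay=0)
    # roughly equivalent one-liner once optim.AdamW became available
    # optimizer = optim.AdamW(model.parameters(), lr=c.lr, weight_decay=c.wd)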