loss functions updates

Eren Golge 2019-03-23 17:33:47 +01:00
parent d8908692c5
commit 786510cd6a
2 changed files with 3 additions and 1 deletion

@@ -29,7 +29,7 @@
         "url": "tcp:\/\/localhost:54321"
     },
-    "reinit_layers": ["model.decoder.attention_layer"],
+    "reinit_layers": ["model.decoder.attention_layer"], // set which layers to be reinitialized in finetuning. Only used if --restore_model is provided.
     "model": "Tacotron2", // one of the models in models/
     "grad_clip": 0.02, // upper limit for gradients for clipping.

@@ -23,6 +23,7 @@ class L1LossMasked(nn.Module):
             loss: An average loss value masked by the length.
         """
         # mask: (batch, max_len, 1)
+        target.requires_grad = False
         mask = sequence_mask(
             sequence_length=length, max_len=target.size(1)).unsqueeze(2).float()
         mask = mask.expand_as(input)
@@ -51,6 +52,7 @@ class MSELossMasked(nn.Module):
             loss: An average loss value masked by the length.
         """
         # mask: (batch, max_len, 1)
+        target.requires_grad = False
         mask = sequence_mask(
             sequence_length=length, max_len=target.size(1)).unsqueeze(2).float()
         mask = mask.expand_as(input)
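
Both loss classes gain the same line: target.requires_grad = False declares the ground-truth tensor a constant before the length mask is built, so no gradient is tracked for it. For context, here is a minimal self-contained sketch of such a masked L1 loss; the names follow the diff, but everything outside the shown lines (the sequence_mask helper and the final reduction) is a reconstruction, not the commit's code.

import torch
from torch import nn
from torch.nn import functional

def sequence_mask(sequence_length, max_len=None):
    # Boolean mask of shape (batch, max_len): True inside each sequence.
    if max_len is None:
        max_len = int(sequence_length.max())
    seq_range = torch.arange(max_len, device=sequence_length.device)
    return seq_range.unsqueeze(0) < sequence_length.unsqueeze(1)

class L1LossMasked(nn.Module):
    def forward(self, input, target, length):
        # Targets are fixed labels, so no gradient should flow into them.
        target.requires_grad = False
        # mask: (batch, max_len, 1), broadcast over the feature axis.
        mask = sequence_mask(
            sequence_length=length, max_len=target.size(1)).unsqueeze(2).float()
        mask = mask.expand_as(input)
        # Sum the L1 loss over valid positions only, then normalize by the
        # number of unmasked entries for a length-independent average.
        loss = functional.l1_loss(input * mask, target * mask, reduction="sum")
        return loss / mask.sum()

# Usage: (batch, max_len, n_feats) predictions against padded targets.
criterion = L1LossMasked()
pred = torch.randn(2, 5, 80)
gt = torch.randn(2, 5, 80)
lengths = torch.tensor([5, 3])  # true (unpadded) length of each sequence
print(criterion(pred, gt, lengths))

MSELossMasked follows the same pattern, with functional.mse_loss in place of functional.l1_loss.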