mirror of https://github.com/coqui-ai/TTS.git
loss functions updates
parent d8908692c5
commit 786510cd6a
@@ -29,7 +29,7 @@
         "url": "tcp:\/\/localhost:54321"
     },
 
-    "reinit_layers": ["model.decoder.attention_layer"],
+    "reinit_layers": ["model.decoder.attention_layer"],  // set which layers to be reinitialized when fine-tuning. Only used if --restore_model is provided.
 
     "model": "Tacotron2",  // one of the models in models/
     "grad_clip": 0.02,  // upper limit for gradient clipping.
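The added comment documents the `reinit_layers` option: when a checkpoint is restored via `--restore_model`, parameters whose names match the listed prefixes keep their fresh initialization instead of being loaded, so those layers are retrained from scratch during fine-tuning. A minimal sketch of how such an option can be applied; the helper name `apply_reinit` and the prefix-matching policy are assumptions for illustration, not the repo's actual loader:

from torch import nn

def apply_reinit(model: nn.Module, checkpoint_state: dict, reinit_layers: list):
    # Hypothetical helper: keep every checkpoint tensor except those whose
    # name starts with one of the prefixes in reinit_layers; the skipped
    # parameters retain the random initialization they received when the
    # model object was constructed.
    filtered = {
        name: tensor
        for name, tensor in checkpoint_state.items()
        if not any(name.startswith(prefix) for prefix in reinit_layers)
    }
    # strict=False because the filtered dict deliberately omits the
    # re-initialized layers.
    model.load_state_dict(filtered, strict=False)

With the config above, the `model.decoder.attention_layer.*` weights would be skipped while everything else is restored from the checkpoint.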
@@ -23,6 +23,7 @@ class L1LossMasked(nn.Module):
             loss: An average loss value masked by the length.
         """
+        # mask: (batch, max_len, 1)
         target.requires_grad = False
         mask = sequence_mask(
             sequence_length=length, max_len=target.size(1)).unsqueeze(2).float()
         mask = mask.expand_as(input)
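For context, here is a self-contained sketch of `L1LossMasked` around the added comment. Only the lines shown in the diff are verbatim; the `sequence_mask` body, the `l1_loss` call, and the final normalization are reconstructions under the assumption that the loss is summed over unmasked elements and divided by the mask sum:

import torch
from torch import nn
from torch.nn import functional

def sequence_mask(sequence_length, max_len=None):
    # (batch, max_len) boolean mask, True for valid (non-padded) steps.
    if max_len is None:
        max_len = sequence_length.max().item()
    seq_range = torch.arange(max_len, device=sequence_length.device)
    return seq_range.unsqueeze(0) < sequence_length.unsqueeze(1)

class L1LossMasked(nn.Module):
    def forward(self, input, target, length):
        target.requires_grad = False
        # mask: (batch, max_len, 1)
        mask = sequence_mask(
            sequence_length=length, max_len=target.size(1)).unsqueeze(2).float()
        mask = mask.expand_as(input)
        loss = functional.l1_loss(input * mask, target * mask, reduction='sum')
        # Average over the unmasked elements only, so padding length does
        # not dilute the loss.
        return loss / mask.sum()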
@@ -51,6 +52,7 @@ class MSELossMasked(nn.Module):
             loss: An average loss value masked by the length.
         """
+        # mask: (batch, max_len, 1)
         target.requires_grad = False
         mask = sequence_mask(
             sequence_length=length, max_len=target.size(1)).unsqueeze(2).float()
         mask = mask.expand_as(input)
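`MSELossMasked` follows the same pattern with `functional.mse_loss` in place of `l1_loss`. A quick usage check with dummy tensors, continuing from the sketch above (shapes chosen arbitrarily for illustration):

batch, max_len, dim = 2, 5, 3
pred = torch.zeros(batch, max_len, dim)
tgt = torch.ones(batch, max_len, dim)
lengths = torch.tensor([5, 2])  # second sequence has 3 padded steps

criterion = L1LossMasked()
# Every unmasked element differs by exactly 1, so the masked average is 1.0.
print(criterion(pred, tgt, lengths))  # tensor(1.)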