mirror of https://github.com/coqui-ai/TTS.git
Bug fix for Tacotron, adapting it to the new common layers.
This commit is contained in:
parent 0b5a00d29e
commit 013ec2f168
@@ -454,7 +454,7 @@ class Decoder(nn.Module):
             if t > 0:
                 new_memory = outputs[-1]
                 self._update_memory_queue(new_memory)
-            output, stop_token, attention = self.decode(inputs, t, None)
+            output, stop_token, attention = self.decode(inputs, None)
             stop_token = torch.sigmoid(stop_token.data)
             outputs += [output]
             attentions += [attention]
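Note on this hunk: the inference loop no longer passes the timestep `t` into `self.decode`, so the call now matches a decode signature that takes only the decoder inputs plus one optional argument. The sketch below is purely illustrative and is not the repository's implementation; the class name `TinyDecoderStep`, its dimensions, the placeholder attention, and the assumption that the second argument is a mask are all hypothetical.

import torch
import torch.nn as nn

class TinyDecoderStep(nn.Module):
    """Toy decoder step whose decode() is call-compatible with
    `self.decode(inputs, None)` above (illustrative only)."""

    def __init__(self, in_dim=128, out_dim=80):
        super().__init__()
        self.rnn = nn.GRUCell(in_dim, in_dim)
        self.proj = nn.Linear(in_dim, out_dim)   # predicted frame
        self.stop = nn.Linear(in_dim, 1)         # stop-token logit
        self.hidden = None

    def decode(self, inputs, mask=None):
        # inputs: (batch, in_dim) context for the current step; mask is unused here.
        if self.hidden is None:
            self.hidden = inputs.new_zeros(inputs.size(0), self.rnn.hidden_size)
        self.hidden = self.rnn(inputs, self.hidden)
        output = self.proj(self.hidden)
        stop_token = self.stop(self.hidden)
        attention = torch.softmax(inputs, dim=-1)  # placeholder attention weights
        return output, stop_token, attention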
@@ -113,7 +113,7 @@ class Decoder(nn.Module):
 
         self.prenet = Prenet(self.mel_channels * r, prenet_type,
                              prenet_dropout,
-                             [self.prenet_dim, self.prenet_dim])
+                             [self.prenet_dim, self.prenet_dim], bias=False)
 
         self.attention_rnn = nn.LSTMCell(self.prenet_dim + in_features,
                                          self.attention_rnn_dim)
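Note on this hunk: `bias=False` is now forwarded to `Prenet`, so its linear layers are built without bias terms. As a rough illustration under stated assumptions (the real `Prenet` in the common layers may take different arguments), a Prenet-style module that honors such a flag could look like:

import torch
import torch.nn as nn

class PrenetSketch(nn.Module):
    """Prenet-style stack of linear layers; `bias` toggles bias terms.
    Hypothetical sketch, not the repository's Prenet."""

    def __init__(self, in_dim, prenet_type="original", prenet_dropout=True,
                 out_dims=(256, 256), bias=True):
        super().__init__()
        dims = [in_dim] + list(out_dims)
        self.layers = nn.ModuleList(
            [nn.Linear(d_in, d_out, bias=bias)
             for d_in, d_out in zip(dims, dims[1:])])
        self.prenet_type = prenet_type
        self.prenet_dropout = prenet_dropout

    def forward(self, x):
        for linear in self.layers:
            x = torch.relu(linear(x))
            if self.prenet_dropout:
                # Tacotron 2 setups commonly keep prenet dropout active even at inference.
                x = nn.functional.dropout(x, p=0.5, training=True)
        return x

# Call shape mirroring the diff (values are placeholders):
# prenet = PrenetSketch(80 * 2, "original", True, [256, 256], bias=False)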