From ad943120ae1e2ea330347e2ad4a649ceef7c401a Mon Sep 17 00:00:00 2001
From: Eren Golge
Date: Fri, 25 May 2018 15:01:16 -0700
Subject: [PATCH] Do not avg cummulative attention weight

---
 layers/tacotron.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/layers/tacotron.py b/layers/tacotron.py
index 5256d215..b8210b78 100644
--- a/layers/tacotron.py
+++ b/layers/tacotron.py
@@ -286,7 +286,7 @@ class Decoder(nn.Module):
             processed_memory = self.prenet(memory_input)
             # Attention RNN
             attention_cat = torch.cat((attention.unsqueeze(1),
-                                       attention_cum.unsqueeze(1) / (t + 1)),
+                                       attention_cum.unsqueeze(1)),
                                       dim=1)
             attention_rnn_hidden, current_context_vec, attention = self.attention_rnn(
                 processed_memory, current_context_vec, attention_rnn_hidden, inputs, attention_cat)