From 38213dffe901f7ac4a65304b11f8bb95e39c565a Mon Sep 17 00:00:00 2001
From: Eren Golge
Date: Thu, 18 Apr 2019 18:55:37 +0200
Subject: [PATCH] bug fix #2

---
 layers/tacotron2.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/layers/tacotron2.py b/layers/tacotron2.py
index d7df0c9a..df05e5ad 100644
--- a/layers/tacotron2.py
+++ b/layers/tacotron2.py
@@ -183,7 +183,7 @@ class Attention(nn.Module):
     def apply_forward_attention(self, inputs, alignment, processed_query):
         # forward attention
         prev_alpha = F.pad(self.alpha[:, :-1].clone(), (1, 0, 0, 0)).to(inputs.device)
-        alpha = (((1-self.u) * self.alpha.clone().to(inputs.device) + self.u * prev_alpha)) * alignment
+        alpha = (((1-self.u) * self.alpha.clone().to(inputs.device) + self.u * prev_alpha) + 1e-8) * alignment
         self.alpha = alpha / alpha.sum(dim=1).unsqueeze(1)
         # compute context
         context = torch.bmm(self.alpha.unsqueeze(1), inputs)
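
Note (not part of the patch): a minimal PyTorch sketch, with toy tensors assumed for illustration, of why the added + 1e-8 matters. If the blended forward-attention weights are zero everywhere the current alignment is non-zero, alpha.sum(dim=1) is 0 and the normalisation produces NaNs; the epsilon keeps the sum strictly positive.

    import torch
    import torch.nn.functional as F

    # Toy values (hypothetical, not from the repository):
    alpha = torch.tensor([[0.0, 0.0, 1.0]])          # previous attention weights
    prev_alpha = F.pad(alpha[:, :-1], (1, 0, 0, 0))  # weights shifted right by one step
    u = torch.tensor(0.5)                            # transition probability
    alignment = torch.tensor([[1.0, 0.0, 0.0]])      # current alignment, no overlap with alpha

    # Without the epsilon the product is all zeros, so normalising divides 0 by 0.
    alpha_no_eps = ((1 - u) * alpha + u * prev_alpha) * alignment
    print(alpha_no_eps / alpha_no_eps.sum(dim=1).unsqueeze(1))   # tensor([[nan, nan, nan]])

    # With the epsilon the sum stays strictly positive and the result is finite.
    alpha_eps = (((1 - u) * alpha + u * prev_alpha) + 1e-8) * alignment
    print(alpha_eps / alpha_eps.sum(dim=1).unsqueeze(1))         # tensor([[1., 0., 0.]])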