From 07f71b1761e4f328acc3abe29b676c2e3bae42d8 Mon Sep 17 00:00:00 2001
From: Eren Golge
Date: Wed, 25 Apr 2018 08:00:48 -0700
Subject: [PATCH] Remove variables

---
 layers/tacotron.py | 17 ++++++++---------
 models/tacotron.py |  1 -
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/layers/tacotron.py b/layers/tacotron.py
index 13663a6e..81328585 100644
--- a/layers/tacotron.py
+++ b/layers/tacotron.py
@@ -1,6 +1,5 @@
 # coding: utf-8
 import torch
-from torch.autograd import Variable
 from torch import nn

 from .attention import AttentionRNN
@@ -263,16 +262,16 @@ class Decoder(nn.Module):
                                       self.memory_dim, self.r)
         T_decoder = memory.size(1)
         # go frame - 0 frames starting the sequence
-        initial_memory = Variable(
-            inputs.data.new(B, self.memory_dim * self.r).zero_())
+        initial_memory = \
+            inputs.data.new(B, self.memory_dim * self.r).zero_().requires_grad_()
         # Init decoder states
-        attention_rnn_hidden = Variable(
-            inputs.data.new(B, 256).zero_())
-        decoder_rnn_hiddens = [Variable(
-            inputs.data.new(B, 256).zero_())
+        attention_rnn_hidden = \
+            inputs.data.new(B, 256).zero_().requires_grad_()
+        decoder_rnn_hiddens = [
+            inputs.data.new(B, 256).zero_().requires_grad_()
             for _ in range(len(self.decoder_rnns))]
-        current_context_vec = Variable(
-            inputs.data.new(B, 256).zero_())
+        current_context_vec = \
+            inputs.data.new(B, 256).zero_().requires_grad_()
         # Time first (T_decoder, B, memory_dim)
         if memory is not None:
             memory = memory.transpose(0, 1)
diff --git a/models/tacotron.py b/models/tacotron.py
index d5d69b1d..c27656da 100644
--- a/models/tacotron.py
+++ b/models/tacotron.py
@@ -1,6 +1,5 @@
 # coding: utf-8
 import torch
-from torch.autograd import Variable
 from torch import nn
 from TTS.utils.text.symbols import symbols
 from TTS.layers.tacotron import Prenet, Encoder, Decoder, CBHG
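
Note: this patch drops the deprecated torch.autograd.Variable wrapper in
favor of plain tensors with requires_grad_(), following the Tensor/Variable
merge in PyTorch 0.4. A minimal standalone sketch of the idiom the patch
adopts (the names B, inputs, and hidden below are illustrative, not taken
from the patch):

    import torch

    B = 4                          # illustrative batch size
    inputs = torch.randn(B, 256)   # stand-in for the decoder inputs

    # Old (PyTorch < 0.4): wrap a fresh zero tensor in a Variable.
    # from torch.autograd import Variable
    # hidden = Variable(inputs.data.new(B, 256).zero_())

    # New (PyTorch >= 0.4): create the tensor directly and flag it in
    # place; requires_grad_() mutates and returns the same tensor.
    hidden = inputs.data.new(B, 256).zero_().requires_grad_()

    # Equivalent and a bit more idiomatic on >= 0.4: new_zeros keeps the
    # dtype/device of `inputs` without going through .data.
    hidden = inputs.new_zeros(B, 256).requires_grad_()

On PyTorch >= 0.4 Variable(x) is a no-op that simply returns a tensor, so
removing the wrapper changes no behavior; it only removes dead API usage.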