mirror of https://github.com/coqui-ai/TTS.git
remove requires_grad_()
parent 0482a66a98
commit 7bfdc32b7b
@@ -263,15 +263,15 @@ class Decoder(nn.Module):
         T_decoder = memory.size(1)
         # go frame - 0 frames tarting the sequence
         initial_memory = \
-            inputs.data.new(B, self.memory_dim * self.r).zero_().requires_grad_()
+            inputs.data.new(B, self.memory_dim * self.r).zero_()
         # Init decoder states
         attention_rnn_hidden = \
-            inputs.data.new(B, 256).zero_().requires_grad_()
+            inputs.data.new(B, 256).zero_()
         decoder_rnn_hiddens = [
-            inputs.data.new(B, 256).zero_().requires_grad_()
+            inputs.data.new(B, 256).zero_()
             for _ in range(len(self.decoder_rnns))]
         current_context_vec = \
-            inputs.data.new(B, 256).zero_().requires_grad_()
+            inputs.data.new(B, 256).zero_()
         # Time first (T_decoder, B, memory_dim)
         if memory is not None:
             memory = memory.transpose(0, 1)
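The hunk above drops requires_grad_() from the zero-initialized decoder states, presumably because no gradients ever need to flow into these constant initial values. A minimal sketch (the shapes below are illustrative placeholders, not taken from the model config) showing that tensors created with new(...).zero_() already default to requires_grad=False:

# Minimal sketch, not part of this commit: fresh tensors created with
# new(...).zero_() do not track gradients by default, which is what the
# decoder-state initialization relies on after this change.
# B, memory_dim and r are placeholder values, not the project's settings.
import torch

B, memory_dim, r = 4, 80, 5
inputs = torch.randn(B, 128)

initial_memory = inputs.data.new(B, memory_dim * r).zero_()
attention_rnn_hidden = inputs.data.new(B, 256).zero_()

print(initial_memory.requires_grad)        # False
print(attention_rnn_hidden.requires_grad)  # False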
train.py (6 changed lines)

@@ -93,12 +93,6 @@ def train(model, criterion, data_loader, optimizer, epoch):
 
         optimizer.zero_grad()
 
-        # convert inputs to variables
-        text_input.requires_grad_()
-        mel_spec.requires_grad_()
-        # mel_lengths.requires_grad_()
-        # linear_spec.requires_grad_()
-
         # dispatch data to GPU
         if use_cuda:
             text_input = text_input.cuda()
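The train.py hunk removes the calls that marked the input batches themselves as requiring gradients. A minimal sketch with a hypothetical stand-in model (nn.Linear here, not the project's Tacotron model) showing that parameter gradients are still computed when the inputs keep their default requires_grad=False, so the removed calls only added autograd bookkeeping:

# Minimal sketch, illustrative stand-ins only: gradients flow into the
# model parameters even though the input batch never tracks gradients.
import torch
import torch.nn as nn

model = nn.Linear(80, 80)              # hypothetical stand-in for the TTS model
criterion = nn.L1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

mel_spec = torch.randn(4, 80)          # input batch, requires_grad=False by default
target = torch.randn(4, 80)

optimizer.zero_grad()
loss = criterion(model(mel_spec), target)
loss.backward()                        # parameter gradients are still computed
optimizer.step()

print(mel_spec.requires_grad)          # False
print(model.weight.grad is not None)   # True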