mirror of https://github.com/coqui-ai/TTS.git
test fixes
This commit is contained in:
parent 0a61ea3b2d
commit 2c364c0df8
@@ -13,6 +13,7 @@ nosetests tests -x &&\
 ./tests/test_vocoder_wavernn_train.sh && \
 ./tests/test_vocoder_wavegrad_train.sh && \
 ./tests/test_speedy_speech_train.sh && \
+./tests/test_align_tts_train.sh && \
 ./tests/test_compute_statistics.sh && \

 # linter check
@@ -59,7 +59,7 @@ class GlowTTSTrainTest(unittest.TestCase):
             use_encoder_prenet=True,
             num_flow_blocks_dec=12,
             kernel_size_dec=5,
-            dilation_rate=5,
+            dilation_rate=1,
             num_block_layers=4,
             dropout_p_dec=0.,
             num_speakers=0,
@@ -88,7 +88,7 @@ class GlowTTSTrainTest(unittest.TestCase):
             use_encoder_prenet=True,
             num_flow_blocks_dec=12,
             kernel_size_dec=5,
-            dilation_rate=5,
+            dilation_rate=1,
             num_block_layers=4,
             dropout_p_dec=0.,
             num_speakers=0,
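In both of the hunks above the decoder's dilation_rate drops from 5 to 1 while kernel_size_dec stays 5. Assuming the usual WaveNet-style convention where layer i of a coupling block uses dilation = dilation_rate ** i, the old setting makes the receptive field of a 4-layer block far wider than the short dummy spectrograms used in the test; a minimal sketch of that arithmetic (plain Python for illustration, not code from the repo):

# Hypothetical helper, for illustration only: receptive field of a stack of
# dilated 1D convolutions where layer i uses dilation = dilation_rate ** i.
def receptive_field(kernel_size, dilation_rate, num_layers):
    rf = 1
    for i in range(num_layers):
        rf += (kernel_size - 1) * dilation_rate ** i
    return rf

# With kernel_size_dec=5 and num_block_layers=4:
print(receptive_field(5, 5, 4))  # 625 frames when dilation_rate=5
print(receptive_field(5, 1, 4))  # 17 frames when dilation_rate=1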
@@ -111,11 +111,11 @@ class GlowTTSTrainTest(unittest.TestCase):
             assert (param - param_ref).sum() == 0, param
             count += 1

-        optimizer = optim.Adam(model.parameters(), lr=c.lr)
+        optimizer = optim.Adam(model.parameters(), lr=0.001)
         for _ in range(5):
+            optimizer.zero_grad()
             z, logdet, y_mean, y_log_scale, alignments, o_dur_log, o_total_dur = model.forward(
                 input_dummy, input_lengths, mel_spec, mel_lengths, None)
-            optimizer.zero_grad()
             loss_dict = criterion(z, y_mean, y_log_scale, logdet, mel_lengths,
                                   o_dur_log, o_total_dur, input_lengths)
             loss = loss_dict['loss']
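The last hunk pins the optimizer's learning rate to a literal 0.001 and moves optimizer.zero_grad() from after the forward pass to the top of the loop, which is the conventional PyTorch step ordering. A minimal self-contained sketch of that ordering, using a stand-in nn.Linear model and MSELoss criterion rather than the Glow-TTS test fixtures:

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(8, 1)       # stand-in for the model under test
criterion = nn.MSELoss()      # stand-in for the Glow-TTS criterion
optimizer = optim.Adam(model.parameters(), lr=0.001)  # literal lr, as in the hunk

x = torch.randn(4, 8)
target = torch.randn(4, 1)

for _ in range(5):
    optimizer.zero_grad()               # clear gradients before the forward pass
    loss = criterion(model(x), target)  # forward pass and loss
    loss.backward()                     # backpropagate
    optimizer.step()                    # update parameters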