mirror of https://github.com/coqui-ai/TTS.git
Remove useless tests for speedup the aux_tests
parent 247da8ef12
commit a436fe40a3
@@ -24,20 +24,21 @@ output_path = os.path.join(get_tests_output_path(), "train_outputs")
 config = SpeakerEncoderConfig(
     batch_size=4,
-    num_classes_in_batch=2,
+    num_classes_in_batch=4,
     num_utter_per_class=2,
-    eval_num_classes_in_batch=2,
+    eval_num_classes_in_batch=4,
     eval_num_utter_per_class=2,
     num_loader_workers=1,
-    epochs=2,
+    epochs=1,
     print_step=1,
-    save_step=1,
+    save_step=2,
     print_eval=True,
     run_eval=True,
     audio=BaseAudioConfig(num_mels=80),
 )
 config.audio.do_trim_silence = True
 config.audio.trim_db = 60
 config.loss = "ge2e"
 config.save_json(config_path)
 
 print(config)
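Note: the speaker-encoder sampler composes each batch from num_classes_in_batch speakers with num_utter_per_class utterances each, so these two values set the effective batch composition of the test. A minimal sketch of that arithmetic (illustration only, not part of the diff; the helper name is made up):

# Illustration only: effective batch composition for the speaker-encoder test.
# `utterances_per_batch` is a hypothetical helper, not a Coqui TTS API.
def utterances_per_batch(num_classes_in_batch: int, num_utter_per_class: int) -> int:
    # each batch holds num_classes_in_batch speakers x num_utter_per_class utterances
    return num_classes_in_batch * num_utter_per_class

print(utterances_per_batch(2, 2))  # 4 utterances per batch before the change
print(utterances_per_batch(4, 2))  # 8 utterances per batch after the change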
@@ -72,14 +73,14 @@ run_cli(command_train)
 shutil.rmtree(continue_path)
 
 # test model with ge2e loss function
-config.loss = "ge2e"
-config.save_json(config_path)
-run_test_train()
+# config.loss = "ge2e"
+# config.save_json(config_path)
+# run_test_train()
 
 # test model with angleproto loss function
-config.loss = "angleproto"
-config.save_json(config_path)
-run_test_train()
+# config.loss = "angleproto"
+# config.save_json(config_path)
+# run_test_train()
 
 # test model with softmaxproto loss function
 config.loss = "softmaxproto"
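Note: after this hunk only the softmaxproto run stays active; the ge2e and angleproto runs remain as comments. A hedged sketch (not the repository's code) of how the same cycle could be driven from a list, so re-enabling a loss is a one-element edit; it reuses config, config_path, and run_test_train() from this test file:

# Illustration only, not the repository's code.
losses_to_test = ["softmaxproto"]  # previously also "ge2e" and "angleproto"
for loss_name in losses_to_test:
    config.loss = loss_name
    config.save_json(config_path)
    run_test_train()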
@@ -66,8 +66,8 @@
     "use_mas": false, // use Monotonic Alignment Search if true. Otherwise use pre-computed attention alignments.
 
     // TRAINING
-    "batch_size": 2, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
-    "eval_batch_size":1,
+    "batch_size": 8, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+    "eval_batch_size": 8,
     "r": 1, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "loss_masking": true, // enable / disable loss masking against the sequence padding.
     "data_dep_init_iter": 1,
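Note: with the handful of utterances shipped as test inputs, the number of optimizer steps per epoch is roughly ceil(num_samples / batch_size), so raising batch_size (and eval_batch_size) shrinks each test epoch to fewer steps. A rough sketch of that arithmetic, using a made-up sample count:

# Illustration only: the sample count below is invented, not the actual test-set size.
import math

num_samples = 8
print(math.ceil(num_samples / 2))  # batch_size 2 -> 4 steps per epoch
print(math.ceil(num_samples / 8))  # batch_size 8 -> 1 step per epoch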
@@ -61,8 +61,8 @@
     "reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
 
     // TRAINING
-    "batch_size": 1, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
-    "eval_batch_size":1,
+    "batch_size": 8, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+    "eval_batch_size": 8,
     "r": 7, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "gradual_training": [[0, 7, 4], [1, 5, 2]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
     "loss_masking": true, // enable / disable loss masking against the sequence padding.
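Note: each gradual_training entry is [first_step, r, batch_size]; training uses the last entry whose first_step has been reached, so [[0, 7, 4], [1, 5, 2]] starts at r=7, batch_size=4 and switches to r=5, batch_size=2 from step 1 onward. A minimal sketch of that lookup (illustration only, not Coqui TTS's scheduler):

# Illustration only: not Coqui TTS's scheduler implementation.
def lookup_gradual_training(schedule, global_step):
    # return (r, batch_size) from the last entry whose first_step <= global_step
    r, batch_size = schedule[0][1], schedule[0][2]
    for first_step, new_r, new_batch_size in schedule:
        if global_step >= first_step:
            r, batch_size = new_r, new_batch_size
    return r, batch_size

print(lookup_gradual_training([[0, 7, 4], [1, 5, 2]], 0))   # (7, 4)
print(lookup_gradual_training([[0, 7, 4], [1, 5, 2]], 10))  # (5, 2)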
@@ -61,8 +61,8 @@
     "reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
 
     // TRAINING
-    "batch_size": 1, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
-    "eval_batch_size":1,
+    "batch_size": 8, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+    "eval_batch_size": 8,
     "r": 7, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "gradual_training": [[0, 7, 4], [1, 5, 2]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
     "loss_masking": true, // enable / disable loss masking against the sequence padding.