Remove useless tests to speed up the aux_tests

Edresson Casanova 2022-03-08 17:50:19 -03:00
parent 247da8ef12
commit a436fe40a3
4 changed files with 17 additions and 16 deletions

View File

@@ -24,20 +24,21 @@ output_path = os.path.join(get_tests_output_path(), "train_outputs")
 config = SpeakerEncoderConfig(
     batch_size=4,
-    num_classes_in_batch=2,
+    num_classes_in_batch=4,
     num_utter_per_class=2,
-    eval_num_classes_in_batch=2,
+    eval_num_classes_in_batch=4,
     eval_num_utter_per_class=2,
     num_loader_workers=1,
-    epochs=2,
+    epochs=1,
     print_step=1,
-    save_step=1,
+    save_step=2,
     print_eval=True,
     run_eval=True,
     audio=BaseAudioConfig(num_mels=80),
 )
 config.audio.do_trim_silence = True
 config.audio.trim_db = 60
+config.loss = "ge2e"
 config.save_json(config_path)
 print(config)
@@ -72,14 +73,14 @@ run_cli(command_train)
 shutil.rmtree(continue_path)
 
 # test model with ge2e loss function
-config.loss = "ge2e"
-config.save_json(config_path)
-run_test_train()
+# config.loss = "ge2e"
+# config.save_json(config_path)
+# run_test_train()
 
 # test model with angleproto loss function
-config.loss = "angleproto"
-config.save_json(config_path)
-run_test_train()
+# config.loss = "angleproto"
+# config.save_json(config_path)
+# run_test_train()
 
 # test model with softmaxproto loss function
 config.loss = "softmaxproto"

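The net effect on the speaker-encoder test: the main train/restore run now uses "ge2e" (set in the config above), and only "softmaxproto" is retrained afterwards, instead of three separate per-loss retrains. A minimal self-contained sketch of the consolidated pattern; train_once is a hypothetical stand-in for the test file's run_test_train() helper:

# Sketch: train with "ge2e" once, retrain only "softmaxproto".
def train_once(loss: str) -> None:
    # stand-in for run_test_train(); the real helper launches a training run
    print(f"training speaker encoder with loss={loss}")

train_once("ge2e")          # covered by the main train/restore test
# train_once("angleproto")  # skipped: adds runtime without extra coverage
train_once("softmaxproto")  # still exercised, as in the diff above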
View File

@@ -66,8 +66,8 @@
     "use_mas": false,  // use Monotonic Alignment Search if true. Otherwise use pre-computed attention alignments.
 
     // TRAINING
-    "batch_size": 2,  // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
-    "eval_batch_size":1,
+    "batch_size": 8,  // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+    "eval_batch_size": 8,
     "r": 1,  // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "loss_masking": true,  // enable / disable loss masking against the sequence padding.
     "data_dep_init_iter": 1,

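These test configs are JSON with // line comments, which the stock json module rejects, so they have to be stripped before parsing. A rough, self-contained sketch of that idea; the function name and regex are illustrative, not the library's actual implementation:

import json
import re

def load_commented_json(path: str) -> dict:
    # Parse a JSON file that allows // line comments (illustrative only).
    with open(path, encoding="utf-8") as f:
        text = f.read()
    # drop everything from // to end of line; good enough for these configs,
    # where no string value contains "//"
    text = re.sub(r"//.*$", "", text, flags=re.MULTILINE)
    return json.loads(text)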
View File

@@ -61,8 +61,8 @@
     "reinit_layers": [],  // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
 
     // TRAINING
-    "batch_size": 1,  // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
-    "eval_batch_size":1,
+    "batch_size": 8,  // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+    "eval_batch_size": 8,
     "r": 7,  // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "gradual_training": [[0, 7, 4], [1, 5, 2]],  //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
     "loss_masking": true,  // enable / disable loss masking against the sequence padding.

View File

@@ -61,8 +61,8 @@
     "reinit_layers": [],  // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
 
     // TRAINING
-    "batch_size": 1,  // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
-    "eval_batch_size":1,
+    "batch_size": 8,  // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+    "eval_batch_size": 8,
     "r": 7,  // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "gradual_training": [[0, 7, 4], [1, 5, 2]],  //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
     "loss_masking": true,  // enable / disable loss masking against the sequence padding.