config updates

Eren Golge 2019-05-14 22:35:04 +02:00
parent ba08e98bd9
commit bdcd306222
1 changed file with 6 additions and 6 deletions


@@ -1,6 +1,6 @@
 {
-    "run_name": "mozilla-no-loc-fattn-stopnet",
-    "run_description": "using forward attention, with original prenet, merged stopnet. Compare this with ",
+    "run_name": "mozilla-no-loc-fattn-stopnet-sigmoid",
+    "run_description": "using forward attention, with original prenet, merged stopnet, sigmoid. Compare this with 4780 ",
     "audio":{
         // Audio processing parameters
@@ -39,7 +39,7 @@
     "warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr"
     "windowing": false, // Enables attention windowing. Used only in eval mode.
     "memory_size": 5, // ONLY TACOTRON - memory queue size used to queue network predictions and feed the autoregressive connection. Useful if r < 5.
-    "attention_norm": "softmax", // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron.
+    "attention_norm": "sigmoid", // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron.
     "prenet_type": "original", // ONLY TACOTRON2 - "original" or "bn".
     "prenet_dropout": true, // ONLY TACOTRON2 - enable/disable dropout at prenet.
     "use_forward_attn": true, // ONLY TACOTRON2 - enable/disable forward attention. In general, it aligns faster.
@@ -47,9 +47,10 @@
     "location_attn": false, // ONLY TACOTRON2 - enable/disable location-sensitive attention. It is enabled for TACOTRON by default.
     "loss_masking": false, // enable/disable loss masking against the sequence padding.
     "enable_eos_bos_chars": false, // enable/disable beginning-of-sentence and end-of-sentence chars.
-    "stopnet": true, // Train stopnet predicting the end of synthesis.
+    "stopnet": false, // Train stopnet predicting the end of synthesis.
     "separate_stopnet": false, // Train stopnet separately if 'stopnet==true'. It prevents the stopnet loss from influencing the rest of the model. It yields a better model, but it trains SLOWER.
+    "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
     "batch_size": 32, // Batch size for training. Lower values than 32 might cause hard-to-learn attention.
     "eval_batch_size": 16,
     "r": 1, // Number of frames to predict per step.
@@ -57,7 +58,6 @@
     "checkpoint": true, // If true, it saves checkpoints per "save_step".
     "save_step": 1000, // Number of training steps between saving training stats and checkpoints.
     "print_step": 10, // Number of steps between logging training on console.
-    "tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
     "batch_group_size": 0, // Number of batches to shuffle after bucketing.
     "run_eval": true,