config update for phonemizer

This commit is contained in:
Eren Golge 2019-01-08 17:08:27 +01:00
parent df49e93684
commit e7a119a427
1 changed file with 8 additions and 7 deletions

View File

@ -1,6 +1,6 @@
{ {
"model_name": "TTS-phoneme", "model_name": "TTS-phoneme",
"model_description": "Training with phonemes.", "model_description": "Training with phonemes created by phonemizer.",
"audio":{ "audio":{
"audio_processor": "audio", // to use dictate different audio processors, if available. "audio_processor": "audio", // to use dictate different audio processors, if available.
@ -25,12 +25,13 @@
"do_trim_silence": true // enable trimming of slience of audio as you load it. LJspeech (false), TWEB (false), Nancy (true) "do_trim_silence": true // enable trimming of slience of audio as you load it. LJspeech (false), TWEB (false), Nancy (true)
}, },
"embedding_size": 256, // Character embedding vector length. You don't need to change it in general. "embedding_size": 256,
"text_cleaner": "english_cleaners", "text_cleaner": "phoneme_cleaners",
"epochs": 1000, // total number of epochs to train. "epochs": 1000,
"lr": 0.001, // Initial learning rate. If Noam decay is active, maximum learning rate.
"lr_decay": false, // if true, Noam learning rate decaying is applied through training. "lr": 0.0001,
"warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr" "lr_decay": false,
"warmup_steps": 4000,
"batch_size": 32, // Batch size for training. Lower values than 32 might cause hard to learn attention. "batch_size": 32, // Batch size for training. Lower values than 32 might cause hard to learn attention.
"eval_batch_size":32, "eval_batch_size":32,