config update

Eren Golge 2019-05-21 22:54:39 +02:00
parent 1654a725a7
commit c6cca89991
2 changed files with 78 additions and 76 deletions

@@ -9,6 +9,7 @@ pip3 install https://download.pytorch.org/whl/cu100/torch-1.0.1.post2-cp36-cp36m
 wget https://www.dropbox.com/s/wqn5v3wkktw9lmo/install.sh?dl=0 -O install.sh
 sudo sh install.sh
 python3 setup.py develop
+cp ${USER_DIR}/Mozilla_22050
 python3 distribute.py --config_path config_cluster.json --data_path ${USER_DIR}/MozillaAll2/Mozilla/
 # python3 distribute.py --config_path config_cluster.json --data_path ${SHARED_DIR}/data/mozilla/Judy/
 # while true; do sleep 1000000; done
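For context, a distribute.py-style launcher typically starts one training process per GPU and pins each worker to a single device. The sketch below is illustrative only: "train.py", the "--rank" flag, and the fixed GPU count are assumptions for the example, not the repository's actual interface.

# Illustrative multi-GPU launcher sketch; names are hypothetical.
import os
import subprocess
import sys

NUM_GPUS = 2  # assumption; a real launcher would query the CUDA driver

procs = []
for rank in range(NUM_GPUS):
    cmd = [sys.executable, "train.py",            # hypothetical entry point
           "--config_path", "config_cluster.json",
           "--rank", str(rank)]
    env = {**os.environ, "CUDA_VISIBLE_DEVICES": str(rank)}  # pin worker to one GPU
    procs.append(subprocess.Popen(cmd, env=env))

for p in procs:
    p.wait()  # block until all workers exit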

config_cluster.json

@@ -1,6 +1,6 @@
 {
-    "run_name": "mozilla-no-loc-fattn-stopnet-sigmoid",
-    "run_description": "using forward attention, with original prenet, merged stopnet, sigmoid. Compare this with 4780 ",
+    "run_name": "mozilla-no-loc-fattn-stopnet-sigmoid-loss_masking",
+    "run_description": "using forward attention, with original prenet, loss masking,separate stopnet, sigmoid. Compare this with 4817 ",
     "audio":{
         // Audio processing parameters
@@ -34,7 +34,7 @@
     "model": "Tacotron2", // one of the model in models/
     "grad_clip": 1, // upper limit for gradients for clipping.
     "epochs": 1000, // total number of epochs to train.
-    "lr": 0.0002, // Initial learning rate. If Noam decay is active, maximum learning rate.
+    "lr": 0.0001, // Initial learning rate. If Noam decay is active, maximum learning rate.
     "lr_decay": false, // if true, Noam learning rate decaying is applied through training.
     "warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr"
     "windowing": false, // Enables attention windowing. Used only in eval mode.
@@ -45,10 +45,10 @@
     "use_forward_attn": true, // ONLY TACOTRON2 - if it uses forward attention. In general, it aligns faster.
     "transition_agent": false, // ONLY TACOTRON2 - enable/disable transition agent of forward attention.
     "location_attn": false, // ONLY TACOTRON2 - enable_disable location sensitive attention. It is enabled for TACOTRON by default.
-    "loss_masking": false, // enable / disable loss masking against the sequence padding.
+    "loss_masking": true, // enable / disable loss masking against the sequence padding.
     "enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars.
-    "stopnet": false, // Train stopnet predicting the end of synthesis.
-    "separate_stopnet": false, // Train stopnet seperately if 'stopnet==true'. It prevents stopnet loss to influence the rest of the model. It causes a better model, but it trains SLOWER.
+    "stopnet": true, // Train stopnet predicting the end of synthesis.
+    "separate_stopnet": true, // Train stopnet seperately if 'stopnet==true'. It prevents stopnet loss to influence the rest of the model. It causes a better model, but it trains SLOWER.
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
     "batch_size": 32, // Batch size for training. Lower values than 32 might cause hard to learn attention.
@@ -61,18 +61,19 @@
     "batch_group_size": 0, //Number of batches to shuffle after bucketing.
     "run_eval": true,
-    "test_delay_epochs": 1, //Until attention is aligned, testing only wastes computation time.
+    "test_delay_epochs": 5, //Until attention is aligned, testing only wastes computation time.
     "data_path": "/media/erogol/data_ssd/Data/LJSpeech-1.1", // DATASET-RELATED: can overwritten from command argument
-    "meta_file_train": "metadata_train.txt", // DATASET-RELATED: metafile for training dataloader.
-    "meta_file_val": "metadata_val.txt", // DATASET-RELATED: metafile for evaluation dataloader.
-    "dataset": "mozilla", // DATASET-RELATED: one of TTS.dataset.preprocessors depending on your target dataset. Use "tts_cache" for pre-computed dataset by extract_features.py
+    "meta_file_train": "metadata_train.csv", // DATASET-RELATED: metafile for training dataloader.
+    "meta_file_val": "metadata_val.csv", // DATASET-RELATED: metafile for evaluation dataloader.
+    "dataset": "ljspeech", // DATASET-RELATED: one of TTS.dataset.preprocessors depending on your target dataset. Use "tts_cache" for pre-computed dataset by extract_features.py
     "min_seq_len": 0, // DATASET-RELATED: minimum text length to use in training
     "max_seq_len": 150, // DATASET-RELATED: maximum text length
     "output_path": "../keep/", // DATASET-RELATED: output path for all training outputs.
     "num_loader_workers": 4, // number of training data loader processes. Don't set it too big. 4-8 are good values.
     "num_val_loader_workers": 4, // number of evaluation data loader processes.
     "phoneme_cache_path": "mozilla_us_phonemes", // phoneme computation is slow, therefore, it caches results in the given folder.
-    "use_phonemes": true, // use phonemes instead of raw characters. It is suggested for better pronounciation.
+    "use_phonemes": false, // use phonemes instead of raw characters. It is suggested for better pronounciation.
     "phoneme_language": "en-us", // depending on your target language, pick one from https://github.com/bootphon/phonemizer#languages
     "text_cleaner": "phoneme_cleaners"
 }
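Note that this config is JSON annotated with //-comments, which a stock json.loads rejects. A minimal loader sketch follows, assuming comments can simply be stripped line-wise; the regex is naive (it would also eat a "//" inside a string value) and is not the project's real loader.

# Minimal sketch: strip //-style comments, then parse as JSON.
import json
import re

def load_config(path):
    with open(path) as f:
        text = f.read()
    text = re.sub(r"//.*", "", text)  # drop everything after // on each line
    return json.loads(text)

config = load_config("config_cluster.json")
print(config["lr"], config["separate_stopnet"])  # expected: 0.0001 True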