update default hifigan config

Eren Gölge 2021-04-09 11:40:06 +02:00
parent 105e0b4d62
commit 2b529f60c8
1 changed file with 27 additions and 22 deletions


@@ -11,10 +11,10 @@
"frame_shift_ms": null, // stft window hop-lengh in ms. If null, 'hop_length' is used.
// Audio processing parameters
"sample_rate": 16000, // DATASET-RELATED: wav sample-rate. If different than the original data, it is resampled.
"sample_rate": 22050, // DATASET-RELATED: wav sample-rate. If different than the original data, it is resampled.
"preemphasis": 0.0, // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no -pre-emphasis.
"ref_level_db": 20, // reference level db, theoretically 20db is the sound of air.
"log_func": "np.log",
"log_func": "np.log10",
"do_sound_norm": true,
// Silence trimming
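
A side note on the sample-rate change above: 22050 Hz is LJSpeech's native rate, matching the dataset switch later in this diff. Below is a minimal sketch (not part of the commit) of resampling existing 16 kHz wavs to the new rate; it assumes librosa and soundfile are installed, and "example.wav" is a hypothetical file name:

    import librosa
    import soundfile as sf

    # librosa resamples on load when an explicit sr is given
    wav, sr = librosa.load("example.wav", sr=22050)
    sf.write("example_22050.wav", wav, sr)
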
@@ -23,17 +23,17 @@
// MelSpectrogram parameters
"num_mels": 80, // size of the mel spec frame.
"mel_fmin": 0.0, // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!!
"mel_fmax": 8000.0, // maximum freq level for mel-spec. Tune for dataset!!
"mel_fmin": 50.0, // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!!
"mel_fmax": 7600.0, // maximum freq level for mel-spec. Tune for dataset!!
"spec_gain": 1.0, // scaler value appplied after log transform of spectrogram.
// Normalization parameters
"signal_norm": false, // normalize spec values. Mean-Var normalization if 'stats_path' is defined otherwise range normalization defined by the other params.
"signal_norm": true, // normalize spec values. Mean-Var normalization if 'stats_path' is defined otherwise range normalization defined by the other params.
"min_level_db": -100, // lower bound for normalization
"symmetric_norm": true, // move normalization to range [-1, 1]
"max_norm": 4.0, // scale normalization to range [-max_norm, max_norm] or [0, max_norm]
"clip_norm": true, // clip normalized values into the range.
"stats_path": null // DO NOT USE WITH MULTI_SPEAKER MODEL. scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std based notmalization is used and other normalization params are ignored
"stats_path": "/home/erogol/.local/share/tts/tts_models--en--ljspeech--speedy-speech-wn/scale_stats.npy"
},
// DISTRIBUTED TRAINING
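
The normalization changes above turn signal_norm on and point stats_path at a precomputed scale_stats.npy, so mean-variance normalization takes over and the range-normalization parameters (min_level_db, max_norm, clip_norm) are ignored, as the inline comment notes. A hedged sketch of inspecting such a stats file, assuming the compute_statistics.py output is a pickled dict of per-feature means and scales:

    import numpy as np

    stats = np.load("scale_stats.npy", allow_pickle=True).item()
    print(sorted(stats.keys()))     # expected keys like 'mel_mean', 'mel_std'
    print(stats["mel_mean"].shape)  # one value per mel band, so (80,) here
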
@@ -70,7 +70,7 @@
"l1_spec_loss_params": {
"use_mel": true,
"sample_rate": 16000,
"sample_rate": 22050,
"n_fft": 1024,
"hop_length": 256,
"win_length": 1024,
@@ -104,7 +104,7 @@
},
// DATASET
"data_path": "/home/erogol/gdrive/Datasets/non-binary-voice-files/vo_voice_quality_transformation/",
"data_path": "/home/erogol/gdrive/Datasets/LJSpeech-1.1/wavs/",
"feature_path": null,
// "feature_path": "/home/erogol/gdrive/Datasets/non-binary-voice-files/tacotron-DCA/",
"seq_len": 8192,
@@ -127,25 +127,30 @@
"wd": 0.0, // Weight decay weight.
"gen_clip_grad": -1, // Generator gradient clipping threshold. Apply gradient clipping if > 0
"disc_clip_grad": -1, // Discriminator gradient clipping threshold.
// "lr_scheduler_gen": "ExponentialLR", // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
// "lr_scheduler_gen_params": {
// "gamma": 0.999,
// "last_epoch": -1
// },
// "lr_scheduler_disc": "ExponentialLR", // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
// "lr_scheduler_disc_params": {
// "gamma": 0.999,
// "last_epoch": -1
// },
"lr_gen": 0.00001, // Initial learning rate. If Noam decay is active, maximum learning rate.
"lr_disc": 0.00001,
"lr_gen": 0.0002, // Initial learning rate. If Noam decay is active, maximum learning rate.
"lr_disc": 0.0002,
"optimizer": "AdamW",
"optimizer_params":{
"betas": [0.8, 0.99],
"weight_decay": 0.0
},
"lr_scheduler_gen": "ExponentialLR", // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
"lr_scheduler_gen_params": {
"gamma": 0.999,
"last_epoch": -1
},
"lr_scheduler_disc": "ExponentialLR", // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
"lr_scheduler_disc_params": {
"gamma": 0.999,
"last_epoch": -1
},
// TENSORBOARD and LOGGING
"print_step": 25, // Number of steps to log traning on console.
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
"tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING
"num_loader_workers": 8, // number of training data loader processes. Don't set it too big. 4-8 are good values.
@@ -153,7 +158,7 @@
"eval_split_size": 10,
// PATHS
"output_path": "/home/erogol/gdrive/Trainings/sam/"
"output_path": "/home/erogol/gdrive/Trainings/LJSpeech/"
}