mirror of https://github.com/coqui-ai/TTS.git
graves attention config update
This commit is contained in:
parent 71af8da293 · commit f2b6d00c45
config.json | 17
@@ -1,7 +1,7 @@
 {
     "model": "Tacotron2", // one of the model in models/
-    "run_name": "ljspeech",
-    "run_description": "tacotron2 without bidirectional decoder",
+    "run_name": "ljspeech-graves",
+    "run_description": "tacotron2 with graves attention",
 
     // AUDIO PARAMETERS
     "audio":{
@@ -38,7 +38,7 @@
     "batch_size": 32,     // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
     "eval_batch_size":16,
     "r": 7,               // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
-    "gradual_training": [[0, 7, 64], [1, 5, 64], [50000, 3, 32], [130000, 2, 16], [290000, 1, 8]], // ONLY TACOTRON - set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled.
+    "gradual_training": [[0, 7, 64], [1, 5, 64], [50000, 3, 32], [130000, 2, 32], [290000, 1, 32]], // ONLY TACOTRON - set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled.
     "loss_masking": true, // enable / disable loss masking against the sequence padding.
 
     // VALIDATION
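For readers of the changed line above: "gradual_training" is a list of [first_step, r, batch_size] triplets, and the trainer applies the last entry whose first_step has been reached, so this commit keeps the batch size at 32 for the late r=2 and r=1 phases instead of dropping it to 16 and 8. A minimal sketch of that lookup, assuming a schedule sorted by first_step (the helper name is illustrative, not the repo's actual function):

def get_gradual_params(global_step, schedule):
    # schedule: list of [first_step, r, batch_size] triplets, sorted by
    # first_step, as in config.json's "gradual_training".
    r, batch_size = schedule[0][1], schedule[0][2]
    for first_step, new_r, new_bs in schedule:
        if global_step >= first_step:
            r, batch_size = new_r, new_bs
    return r, batch_size

schedule = [[0, 7, 64], [1, 5, 64], [50000, 3, 32], [130000, 2, 32], [290000, 1, 32]]
assert get_gradual_params(0, schedule) == (7, 64)
assert get_gradual_params(60000, schedule) == (3, 32)
assert get_gradual_params(300000, schedule) == (1, 32)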
@@ -47,6 +47,7 @@
     "test_sentences_file": null, // set a file to load sentences to be used for testing. If it is null then we use default english sentences.
 
     // OPTIMIZER
+    "noam_schedule": false,
     "grad_clip": 1,      // upper limit for gradients for clipping.
     "epochs": 1000,      // total number of epochs to train.
     "lr": 0.0001,        // Initial learning rate. If Noam decay is active, maximum learning rate.
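The added "noam_schedule" flag toggles the Noam learning-rate decay that the "lr" comment refers to; when the schedule is active, "lr" is the peak rate reached at the end of warmup. For reference, a sketch of the standard Noam formula from "Attention Is All You Need" (the warmup_steps default here is an assumption, not a value from this config):

def noam_lr(step, peak_lr=0.0001, warmup_steps=4000):
    # Linear warmup to peak_lr at warmup_steps, then 1/sqrt(step) decay.
    step = max(step, 1)
    return peak_lr * warmup_steps ** 0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)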
@@ -60,7 +61,7 @@
     "prenet_dropout": true,      // enable/disable dropout at prenet.
 
     // ATTENTION
-    "attention_type": "original", // 'original' or 'graves'
+    "attention_type": "graves",  // 'original' or 'graves'
     "attention_heads": 5,        // number of attention heads (only for 'graves')
     "attention_norm": "sigmoid", // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron.
     "windowing": false,          // Enables attention windowing. Used only in eval mode.
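Unchanged here but relevant to the attention block: "windowing" restricts inference-time attention to a band around the previous alignment peak, which suppresses skips and repeats. A rough sketch of the idea, assuming scores of shape [B, T_enc] (the window widths and names are illustrative, not the repo's implementation):

import torch

def apply_windowing(scores, prev_peak, back=3, ahead=7):
    # scores: [B, T_enc] raw attention scores for one decoder step.
    # prev_peak: [B, 1] index of the previous alignment maximum.
    # Mask positions outside [prev_peak - back, prev_peak + ahead]
    # before the softmax, so the alignment can only advance locally.
    t = torch.arange(scores.size(1), device=scores.device)
    mask = (t < prev_peak - back) | (t > prev_peak + ahead)
    return scores.masked_fill(mask, float("-inf"))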
@@ -90,8 +91,8 @@
     "max_seq_len": 150, // DATASET-RELATED: maximum text length
 
     // PATHS
-    // "output_path": "../keep/", // DATASET-RELATED: output path for all training outputs.
-    "output_path": "/media/erogol/data_ssd/Models/runs/",
+    "output_path": "/data5/rw/pit/keep/", // DATASET-RELATED: output path for all training outputs.
+    // "output_path": "/media/erogol/data_ssd/Models/runs/",
 
     // PHONEMES
     "phoneme_cache_path": "mozilla_us_phonemes", // phoneme computation is slow, therefore, it caches results in the given folder.
@@ -108,8 +109,8 @@
     [
         {
             "name": "ljspeech",
-            // "path": "/data/ro/shared/data/keithito/LJSpeech-1.1/",
-            "path": "/home/erogol/Data/LJSpeech-1.1",
+            "path": "/data5/ro/shared/data/keithito/LJSpeech-1.1/",
+            // "path": "/home/erogol/Data/LJSpeech-1.1",
             "meta_file_train": "metadata_train.csv",
             "meta_file_val": "metadata_val.csv"
         }
@@ -159,7 +159,7 @@ class GravesAttention(nn.Module):
         k_t = gbk_t[:, 2, :]
 
         # attention GMM parameters
-        sig_t = torch.nn.functional.softplus(b_t)+self.eps
+        sig_t = torch.nn.functional.softplus(b_t) + self.eps
 
         mu_t = self.mu_prev + torch.nn.functional.softplus(k_t)
         g_t = torch.softmax(g_t, dim=-1) / sig_t + self.eps
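For context on the hunk above (a whitespace-only fix): GravesAttention models the alignment at each decoder step as a mixture of K Gaussians over encoder positions, where g_t are the mixture weights, sig_t the widths, and mu_t means that can only move forward because softplus keeps each increment positive; eps guards the division. A self-contained sketch of how these parameters become attention weights (shapes and the Gaussian form are inferred from this snippet, not the repo's verbatim forward):

import torch

B, K, T = 2, 5, 50   # batch, mixture components ("attention_heads"), encoder length
eps = 1e-5

# Stand-ins for the network outputs g_t, b_t, k_t split out of gbk_t above.
g_t, b_t, k_t = torch.randn(3, B, K)
mu_prev = torch.zeros(B, K)

# Same parameterization as the snippet: positive widths, monotone means,
# mixture weights scaled by 1/sigma as in a normalized Gaussian.
sig_t = torch.nn.functional.softplus(b_t) + eps
mu_t = mu_prev + torch.nn.functional.softplus(k_t)
g_t = torch.softmax(g_t, dim=-1) / sig_t + eps

# Evaluate each Gaussian over encoder positions j and sum the mixture.
j = torch.arange(T).float()                                        # [T]
phi = g_t.unsqueeze(-1) * torch.exp(
    -0.5 * ((mu_t.unsqueeze(-1) - j) / sig_t.unsqueeze(-1)) ** 2)  # [B, K, T]
alpha = phi.sum(dim=1)                                             # [B, T] alignment
context = torch.bmm(alpha.unsqueeze(1), torch.randn(B, T, 128)).squeeze(1)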
@@ -303,9 +303,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.2"
+   "version": "3.7.4"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
 }