commit d0454461de
Author: Eren Gölge
Date:   2021-02-17 13:40:45 +00:00

    Merge branch 'pr/gerazov/650-2' into dev

26 changed files with 728 additions and 600 deletions

View File

@@ -500,6 +500,7 @@ def main(args): # pylint: disable=redefined-outer-name
     criterion = GlowTTSLoss()

     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)} ...")
         checkpoint = torch.load(args.restore_path, map_location='cpu')
         try:
             # TODO: fix optimizer init, model.cuda() needs to be called before
@@ -517,7 +518,7 @@ def main(args): # pylint: disable=redefined-outer-name
             for group in optimizer.param_groups:
                 group['initial_lr'] = c.lr
-        print(" > Model restored from step %d" % checkpoint['step'],
+        print(f" > Model restored from step {checkpoint['step']:d}",
               flush=True)
         args.restore_step = checkpoint['step']
     else:
@@ -541,8 +542,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_params = count_parameters(model)
     print("\n > Model has {} parameters".format(num_params), flush=True)

-    if 'best_loss' not in locals():
+    if args.restore_step == 0 or not args.best_path:
         best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with loaded last best loss {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False

     # define dataloaders
     train_loader = setup_loader(ap, 1, is_val=False, verbose=True)
@@ -552,7 +562,8 @@ def main(args): # pylint: disable=redefined-outer-name
     model = data_depended_init(train_loader, model)
     for epoch in range(0, c.epochs):
         c_logger.print_epoch_start(epoch, c.epochs)
-        train_avg_loss_dict, global_step = train(train_loader, model, criterion, optimizer,
+        train_avg_loss_dict, global_step = train(train_loader, model,
+                                                 criterion, optimizer,
                                                  scheduler, ap, global_step,
                                                  epoch)
         eval_avg_loss_dict = evaluate(eval_loader, model, criterion, ap,
@@ -561,8 +572,9 @@ def main(args): # pylint: disable=redefined-outer-name
         target_loss = train_avg_loss_dict['avg_loss']
         if c.run_eval:
             target_loss = eval_avg_loss_dict['avg_loss']
-        best_loss = save_best_model(target_loss, best_loss, model, optimizer, global_step, epoch, c.r,
-                                    OUT_PATH, model_characters)
+        best_loss = save_best_model(target_loss, best_loss, model, optimizer,
+                                    global_step, epoch, c.r, OUT_PATH, model_characters,
+                                    keep_all_best=keep_all_best, keep_after=keep_after)

 if __name__ == '__main__':
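Taken together, these hunks replace the fragile `'best_loss' not in locals()` check with an explicit decision driven by `args.restore_step` and the new `--best_path` argument (added by the `parse_arguments` diff at the end of this commit). Below is a minimal consolidated sketch of the logic that all six training scripts in this commit now share; the helper name `init_best_loss` is hypothetical (the scripts inline this), while the body is lifted from the hunks above:

```python
import os
import torch

def init_best_loss(args, c):  # hypothetical helper; the scripts inline this logic
    if args.restore_step == 0 or not args.best_path:
        # Fresh run, or no best-model file given: start from +inf.
        best_loss = float('inf')
        print(" > Starting with inf best loss.")
    else:
        # Resuming: read the previously achieved best loss back from the
        # saved best model ('model_loss' key, presumably written by save_best_model).
        print(f" > Restoring best loss from {os.path.basename(args.best_path)} ...")
        best_loss = torch.load(args.best_path, map_location='cpu')['model_loss']
        print(f" > Starting with loaded last best loss {best_loss}.")
    # New config keys, read with defaults so existing configs keep working.
    keep_all_best = c.get('keep_all_best', False)
    keep_after = c.get('keep_after', 10000)  # unused if keep_all_best is False
    return best_loss, keep_all_best, keep_after
```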

View File

@@ -464,6 +464,7 @@ def main(args): # pylint: disable=redefined-outer-name
     criterion = SpeedySpeechLoss(c)

     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)} ...")
         checkpoint = torch.load(args.restore_path, map_location='cpu')
         try:
             # TODO: fix optimizer init, model.cuda() needs to be called before
@@ -505,8 +506,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_params = count_parameters(model)
     print("\n > Model has {} parameters".format(num_params), flush=True)

-    if 'best_loss' not in locals():
+    if args.restore_step == 0 or not args.best_path:
         best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with loaded last best loss {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False

     # define dataloaders
     train_loader = setup_loader(ap, 1, is_val=False, verbose=True)
@@ -525,8 +535,8 @@ def main(args): # pylint: disable=redefined-outer-name
         if c.run_eval:
             target_loss = eval_avg_loss_dict['avg_loss']
         best_loss = save_best_model(target_loss, best_loss, model, optimizer,
-                                    global_step, epoch, c.r,
-                                    OUT_PATH, model_characters)
+                                    global_step, epoch, c.r, OUT_PATH, model_characters,
+                                    keep_all_best=keep_all_best, keep_after=keep_after)

 if __name__ == '__main__':

View File

@@ -538,12 +538,13 @@ def main(args): # pylint: disable=redefined-outer-name
     # setup criterion
     criterion = TacotronLoss(c, stopnet_pos_weight=c.stopnet_pos_weight, ga_sigma=0.4)

     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)}...")
         checkpoint = torch.load(args.restore_path, map_location='cpu')
         try:
-            print(" > Restoring Model.")
+            print(" > Restoring Model...")
             model.load_state_dict(checkpoint['model'])
             # optimizer restore
-            print(" > Restoring Optimizer.")
+            print(" > Restoring Optimizer...")
             optimizer.load_state_dict(checkpoint['optimizer'])
             if "scaler" in checkpoint and c.mixed_precision:
                 print(" > Restoring AMP Scaler...")
@@ -551,7 +552,7 @@ def main(args): # pylint: disable=redefined-outer-name
             if c.reinit_layers:
                 raise RuntimeError
         except (KeyError, RuntimeError):
-            print(" > Partial model initialization.")
+            print(" > Partial model initialization...")
             model_dict = model.state_dict()
             model_dict = set_init_dict(model_dict, checkpoint['model'], c)
             # torch.save(model_dict, os.path.join(OUT_PATH, 'state_dict.pt'))
@@ -585,8 +586,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_params = count_parameters(model)
     print("\n > Model has {} parameters".format(num_params), flush=True)

-    if 'best_loss' not in locals():
+    if args.restore_step == 0 or not args.best_path:
         best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with loaded last best loss {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False

     # define data loaders
     train_loader = setup_loader(ap,
@@ -639,6 +649,8 @@ def main(args): # pylint: disable=redefined-outer-name
             c.r,
             OUT_PATH,
             model_characters,
+            keep_all_best=keep_all_best,
+            keep_after=keep_after,
             scaler=scaler.state_dict() if c.mixed_precision else None
         )
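The `save_best_model` implementation itself is not part of this diff, so the following is a hypothetical sketch of semantics consistent with the new keyword arguments, the config comments ("keeps all best_models after keep_after steps"), the `best_model_([0-9]+)` file naming that `get_last_checkpoint` globs for at the end of this commit, and the `model_loss` key the restore code reads back. Treat every detail not visible in the hunks (payload keys, tie-breaking, messages) as an assumption:

```python
import glob
import os
import torch

def save_best_model(target_loss, best_loss, model, optimizer, current_step,
                    epoch, r, output_path, characters,
                    keep_all_best=False, keep_after=10000, **kwargs):
    """Hypothetical sketch: save a best model when target_loss improves."""
    if target_loss >= best_loss:
        return best_loss
    checkpoint = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict() if optimizer is not None else None,
        'step': current_step,
        'epoch': epoch,
        'r': r,
        'characters': characters,
        'model_loss': target_loss,  # read back on restore via --best_path
        **kwargs,                   # e.g. model_losses=..., scaler=...
    }
    best_path = os.path.join(output_path, f'best_model_{current_step}.pth.tar')
    torch.save(checkpoint, best_path)
    # Default behaviour: keep only the newest best model. With keep_all_best
    # enabled, best models saved past `keep_after` steps are all retained.
    if not keep_all_best or current_step < keep_after:
        for old in glob.glob(os.path.join(output_path, 'best_model_*.pth.tar')):
            if old != best_path:
                os.remove(old)
    print(f" > BEST MODEL : {best_path}")
    return target_loss
```

The positional arguments mirror the Tacotron-style call sites above; the vocoder scripts pass a slightly different argument set (no `c.r` or `model_characters`), which the real implementation presumably accommodates.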

View File

@@ -485,6 +485,7 @@ def main(args): # pylint: disable=redefined-outer-name
     criterion_disc = DiscriminatorLoss(c)

     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)}...")
         checkpoint = torch.load(args.restore_path, map_location='cpu')
         try:
             print(" > Restoring Generator Model...")
@@ -523,7 +524,7 @@ def main(args): # pylint: disable=redefined-outer-name
             for group in optimizer_disc.param_groups:
                 group['lr'] = c.lr_disc
-        print(" > Model restored from step %d" % checkpoint['step'],
+        print(f" > Model restored from step {checkpoint['step']:d}",
               flush=True)
         args.restore_step = checkpoint['step']
     else:
@@ -545,8 +546,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_params = count_parameters(model_disc)
     print(" > Discriminator has {} parameters".format(num_params), flush=True)

-    if 'best_loss' not in locals():
+    if args.restore_step == 0 or not args.best_path:
         best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with best loss of {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False

     global_step = args.restore_step
     for epoch in range(0, c.epochs):
@@ -571,7 +581,10 @@ def main(args): # pylint: disable=redefined-outer-name
             global_step,
             epoch,
             OUT_PATH,
-            model_losses=eval_avg_loss_dict)
+            keep_all_best=keep_all_best,
+            keep_after=keep_after,
+            model_losses=eval_avg_loss_dict,
+        )

 if __name__ == '__main__':

View File

@@ -354,6 +354,7 @@ def main(args): # pylint: disable=redefined-outer-name
         criterion.cuda()

     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)}...")
         checkpoint = torch.load(args.restore_path, map_location='cpu')
         try:
             print(" > Restoring Model...")
@@ -393,8 +394,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_params = count_parameters(model)
     print(" > WaveGrad has {} parameters".format(num_params), flush=True)

-    if 'best_loss' not in locals():
+    if args.restore_step == 0 or not args.best_path:
         best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with loaded last best loss {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False

     global_step = args.restore_step
     for epoch in range(0, c.epochs):
@@ -416,6 +426,8 @@ def main(args): # pylint: disable=redefined-outer-name
             global_step,
             epoch,
             OUT_PATH,
+            keep_all_best=keep_all_best,
+            keep_after=keep_after,
             model_losses=eval_avg_loss_dict,
             scaler=scaler.state_dict() if c.mixed_precision else None
         )
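The vocoder scripts also thread the AMP `GradScaler` state through the same checkpoint dict (`scaler=scaler.state_dict() if c.mixed_precision else None` above, restored by the `if "scaler" in checkpoint and c.mixed_precision:` guard in the Tacotron hunk). A small sketch of that round trip, assuming `torch.cuda.amp.GradScaler` as in stock PyTorch; the `mixed_precision` flag stands in for `c.mixed_precision`:

```python
import torch

scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())

# Saving: serialize the scaler only when mixed precision is on, else store None.
checkpoint = {'scaler': scaler.state_dict()}

# Restoring: guard on both the checkpoint contents and the current config,
# mirroring the `if "scaler" in checkpoint and c.mixed_precision:` hunk.
mixed_precision = True  # stand-in for c.mixed_precision
if 'scaler' in checkpoint and checkpoint['scaler'] and mixed_precision:
    print(" > Restoring AMP Scaler...")
    scaler.load_state_dict(checkpoint['scaler'])
```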

View File

@@ -383,6 +383,7 @@ def main(args): # pylint: disable=redefined-outer-name
     # restore any checkpoint
     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)}...")
         checkpoint = torch.load(args.restore_path, map_location="cpu")
         try:
             print(" > Restoring Model...")
@@ -416,8 +417,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_parameters = count_parameters(model_wavernn)
     print(" > Model has {} parameters".format(num_parameters), flush=True)

-    if "best_loss" not in locals():
-        best_loss = float("inf")
+    if args.restore_step == 0 or not args.best_path:
+        best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with loaded last best loss {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False

     global_step = args.restore_step
     for epoch in range(0, c.epochs):
@@ -440,6 +450,8 @@ def main(args): # pylint: disable=redefined-outer-name
             global_step,
             epoch,
             OUT_PATH,
+            keep_all_best=keep_all_best,
+            keep_after=keep_after,
             model_losses=eval_avg_loss_dict,
             scaler=scaler.state_dict() if c.mixed_precision else None
         )

View File

@@ -1,172 +1,174 @@
 {
     "model": "Tacotron2",
     "run_name": "ljspeech-ddc",
     "run_description": "tacotron2 with DDC and differential spectral loss.",

     // AUDIO PARAMETERS
     "audio":{
         // stft parameters
         "fft_size": 1024,        // number of stft frequency levels. Size of the linear spectogram frame.
         "win_length": 1024,      // stft window length in ms.
         "hop_length": 256,       // stft window hop-lengh in ms.
         "frame_length_ms": null, // stft window length in ms.If null, 'win_length' is used.
         "frame_shift_ms": null,  // stft window hop-lengh in ms. If null, 'hop_length' is used.

         // Audio processing parameters
         "sample_rate": 22050,    // DATASET-RELATED: wav sample-rate.
         "preemphasis": 0.0,      // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no -pre-emphasis.
         "ref_level_db": 20,      // reference level db, theoretically 20db is the sound of air.

         // Silence trimming
         "do_trim_silence": true, // enable trimming of slience of audio as you load it. LJspeech (true), TWEB (false), Nancy (true)
         "trim_db": 60,           // threshold for timming silence. Set this according to your dataset.

         // Griffin-Lim
         "power": 1.5,            // value to sharpen wav signals after GL algorithm.
         "griffin_lim_iters": 60, // #griffin-lim iterations. 30-60 is a good range. Larger the value, slower the generation.

         // MelSpectrogram parameters
         "num_mels": 80,          // size of the mel spec frame.
         "mel_fmin": 50.0,        // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!!
         "mel_fmax": 7600.0,      // maximum freq level for mel-spec. Tune for dataset!!
         "spec_gain": 1,

         // Normalization parameters
         "signal_norm": true,     // normalize spec values. Mean-Var normalization if 'stats_path' is defined otherwise range normalization defined by the other params.
         "min_level_db": -100,    // lower bound for normalization
         "symmetric_norm": true,  // move normalization to range [-1, 1]
         "max_norm": 4.0,         // scale normalization to range [-max_norm, max_norm] or [0, max_norm]
         "clip_norm": true,       // clip normalized values into the range.
         "stats_path": "/home/erogol/Data/LJSpeech-1.1/scale_stats.npy" // DO NOT USE WITH MULTI_SPEAKER MODEL. scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std based notmalization is used and other normalization params are ignored
     },

     // VOCABULARY PARAMETERS
     // if custom character set is not defined,
     // default set in symbols.py is used
     // "characters":{
     //     "pad": "_",
     //     "eos": "~",
     //     "bos": "^",
     //     "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!'(),-.:;? ",
     //     "punctuations":"!'(),-.:;? ",
     //     "phonemes":"iyɨʉɯuɪʏʊeøɘəɵɤoɛœɜɞʌɔæɐaɶɑɒᵻʘɓǀɗǃʄǂɠǁʛpbtdʈɖcɟkɡʔɴŋɲɳnɱmʙrʀⱱɾɽɸβfvθðszʃʒʂʐçʝxɣχʁħʕhɦɬɮʋɹɻjɰlɭʎʟˈˌːˑʍwɥʜʢʡɕʑɺɧɚ˞ɫ"
     // },

     // DISTRIBUTED TRAINING
     "distributed":{
         "backend": "nccl",
         "url": "tcp:\/\/localhost:54321"
     },

     "reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.

     // TRAINING
     "batch_size": 32,    // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
     "eval_batch_size":16,
     "r": 7,              // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "gradual_training": [[0, 7, 64], [1, 5, 64], [50000, 3, 32], [130000, 2, 32], [290000, 1, 32]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
     "mixed_precision": true, // level of optimization with NVIDIA's apex feature for automatic mixed FP16/FP32 precision (AMP), NOTE: currently only O1 is supported, and use "O1" to activate.

     // LOSS SETTINGS
     "loss_masking": true,            // enable / disable loss masking against the sequence padding.
     "decoder_loss_alpha": 0.5,       // original decoder loss weight. If > 0, it is enabled
     "postnet_loss_alpha": 0.25,      // original postnet loss weight. If > 0, it is enabled
     "postnet_diff_spec_alpha": 0.25, // differential spectral loss weight. If > 0, it is enabled
     "decoder_diff_spec_alpha": 0.25, // differential spectral loss weight. If > 0, it is enabled
     "decoder_ssim_alpha": 0.5,       // decoder ssim loss weight. If > 0, it is enabled
     "postnet_ssim_alpha": 0.25,      // postnet ssim loss weight. If > 0, it is enabled
     "ga_alpha": 5.0,                 // weight for guided attention loss. If > 0, guided attention is enabled.
     "stopnet_pos_weight": 15.0,      // pos class weight for stopnet loss since there are way more negative samples than positive samples.

     // VALIDATION
     "run_eval": true,
     "test_delay_epochs": 10,     //Until attention is aligned, testing only wastes computation time.
     "test_sentences_file": null, // set a file to load sentences to be used for testing. If it is null then we use default english sentences.

     // OPTIMIZER
     "noam_schedule": false, // use noam warmup and lr schedule.
     "grad_clip": 1.0,       // upper limit for gradients for clipping.
     "epochs": 1000,         // total number of epochs to train.
     "lr": 0.0001,           // Initial learning rate. If Noam decay is active, maximum learning rate.
     "wd": 0.000001,         // Weight decay weight.
     "warmup_steps": 4000,   // Noam decay steps to increase the learning rate from 0 to "lr"
     "seq_len_norm": false,  // Normalize eash sample loss with its length to alleviate imbalanced datasets. Use it if your dataset is small or has skewed distribution of sequence lengths.

     // TACOTRON PRENET
     "memory_size": -1,         // ONLY TACOTRON - size of the memory queue used fro storing last decoder predictions for auto-regression. If < 0, memory queue is disabled and decoder only uses the last prediction frame.
     "prenet_type": "original", // "original" or "bn".
     "prenet_dropout": false,   // enable/disable dropout at prenet.

     // TACOTRON ATTENTION
     "attention_type": "original", // 'original' , 'graves', 'dynamic_convolution'
     "attention_heads": 4,         // number of attention heads (only for 'graves')
     "attention_norm": "sigmoid",  // softmax or sigmoid.
     "windowing": false,           // Enables attention windowing. Used only in eval mode.
     "use_forward_attn": false,    // if it uses forward attention. In general, it aligns faster.
     "forward_attn_mask": false,   // Additional masking forcing monotonicity only in eval mode.
     "transition_agent": false,    // enable/disable transition agent of forward attention.
     "location_attn": true,        // enable_disable location sensitive attention. It is enabled for TACOTRON by default.
     "bidirectional_decoder": false, // use https://arxiv.org/abs/1907.09006. Use it, if attention does not work well with your dataset.
     "double_decoder_consistency": true, // use DDC explained here https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency-draft/
     "ddc_r": 7,                   // reduction rate for coarse decoder.

     // STOPNET
     "stopnet": true,          // Train stopnet predicting the end of synthesis.
     "separate_stopnet": true, // Train stopnet seperately if 'stopnet==true'. It prevents stopnet loss to influence the rest of the model. It causes a better model, but it trains SLOWER.

     // TENSORBOARD and LOGGING
     "print_step": 25,       // Number of steps to log training on console.
     "tb_plot_step": 100,    // Number of steps to plot TB training figures.
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 10000,     // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.

     // DATA LOADING
     "text_cleaner": "phoneme_cleaners",
     "enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars.
     "num_loader_workers": 4,       // number of training data loader processes. Don't set it too big. 4-8 are good values.
     "num_val_loader_workers": 4,   // number of evaluation data loader processes.
     "batch_group_size": 4,         //Number of batches to shuffle after bucketing.
     "min_seq_len": 6,              // DATASET-RELATED: minimum text length to use in training
     "max_seq_len": 153,            // DATASET-RELATED: maximum text length
     "compute_input_seq_cache": false, // if true, text sequences are computed before starting training. If phonemes are enabled, they are also computed at this stage.
     "use_noise_augment": true,

     // PATHS
     "output_path": "/home/erogol/Models/LJSpeech/",

     // PHONEMES
     "phoneme_cache_path": "/home/erogol/Models/phoneme_cache/", // phoneme computation is slow, therefore, it caches results in the given folder.
     "use_phonemes": true,        // use phonemes instead of raw characters. It is suggested for better pronounciation.
     "phoneme_language": "en-us", // depending on your target language, pick one from https://github.com/bootphon/phonemizer#languages

     // MULTI-SPEAKER and GST
     "use_speaker_embedding": false, // use speaker embedding to enable multi-speaker learning.
     "use_gst": false,               // use global style tokens
     "use_external_speaker_embedding_file": false, // if true, forces the model to use external embedding per sample instead of nn.embeddings, that is, it supports external embeddings such as those used at: https://arxiv.org/abs/1806.04558
     "external_speaker_embedding_file": "../../speakers-vctk-en.json", // if not null and use_external_speaker_embedding_file is true, it is used to load a specific embedding file and thus uses these embeddings instead of nn.embeddings, that is, it supports external embeddings such as those used at: https://arxiv.org/abs/1806.04558
     "gst": { // gst parameter if gst is enabled
         "gst_style_input": null, // Condition the style input either on a
         // -> wave file [path to wave] or
         // -> dictionary using the style tokens {'token1': 'value', 'token2': 'value'} example {"0": 0.15, "1": 0.15, "5": -0.15}
         // with the dictionary being len(dict) <= len(gst_style_tokens).
         "gst_embedding_dim": 512,
         "gst_num_heads": 4,
         "gst_style_tokens": 10,
         "gst_use_speaker_embedding": false
     },

     // DATASETS
     "datasets": // List of datasets. They all merged and they get different speaker_ids.
         [
             {
                 "name": "ljspeech",
                 "path": "/home/erogol/Data/LJSpeech-1.1/",
                 "meta_file_train": "metadata.csv", // for vtck if list, ignore speakers id in list for train, its useful for test cloning with new speakers
                 "meta_file_val": null
             }
         ]
 }
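Only the two `keep_all_best` / `keep_after` lines are new in this file; everything else is context re-rendered by the diff viewer. Because the training scripts read both keys via `c.get(...)` with defaults (see the script hunks above), configs that predate this commit keep working unchanged. A tiny illustration, with a plain dict standing in for the loaded config object:

```python
old_config = {"checkpoint": True, "save_step": 10000}  # pre-existing config, no new keys
new_config = {**old_config, "keep_all_best": True, "keep_after": 10000}

for c in (old_config, new_config):
    keep_all_best = c.get('keep_all_best', False)  # falls back to False for old configs
    keep_after = c.get('keep_after', 10000)        # ignored when keep_all_best is False
    print(keep_all_best, keep_after)               # -> "False 10000", then "True 10000"
```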

View File

@@ -93,6 +93,8 @@
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000,      // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
     "apex_amp_level": null,

View File

@@ -105,6 +105,8 @@
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000,      // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.

     // DATA LOADING

View File

@@ -1,171 +1,173 @@
 {
     "model": "Tacotron2",
     "run_name": "ljspeech-dcattn",
     "run_description": "tacotron2 with dynamic convolution attention.",

     // AUDIO PARAMETERS
     "audio":{
         // stft parameters
         "fft_size": 1024,        // number of stft frequency levels. Size of the linear spectogram frame.
         "win_length": 1024,      // stft window length in ms.
         "hop_length": 256,       // stft window hop-lengh in ms.
         "frame_length_ms": null, // stft window length in ms.If null, 'win_length' is used.
         "frame_shift_ms": null,  // stft window hop-lengh in ms. If null, 'hop_length' is used.

         // Audio processing parameters
         "sample_rate": 22050,    // DATASET-RELATED: wav sample-rate.
         "preemphasis": 0.0,      // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no -pre-emphasis.
         "ref_level_db": 20,      // reference level db, theoretically 20db is the sound of air.

         // Silence trimming
         "do_trim_silence": true, // enable trimming of slience of audio as you load it. LJspeech (true), TWEB (false), Nancy (true)
         "trim_db": 60,           // threshold for timming silence. Set this according to your dataset.

         // Griffin-Lim
         "power": 1.5,            // value to sharpen wav signals after GL algorithm.
         "griffin_lim_iters": 60, // #griffin-lim iterations. 30-60 is a good range. Larger the value, slower the generation.

         // MelSpectrogram parameters
         "num_mels": 80,          // size of the mel spec frame.
         "mel_fmin": 50.0,        // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!!
         "mel_fmax": 7600.0,      // maximum freq level for mel-spec. Tune for dataset!!
         "spec_gain": 1,

         // Normalization parameters
         "signal_norm": true,     // normalize spec values. Mean-Var normalization if 'stats_path' is defined otherwise range normalization defined by the other params.
         "min_level_db": -100,    // lower bound for normalization
         "symmetric_norm": true,  // move normalization to range [-1, 1]
         "max_norm": 4.0,         // scale normalization to range [-max_norm, max_norm] or [0, max_norm]
         "clip_norm": true,       // clip normalized values into the range.
         "stats_path": "/home/erogol/Data/LJSpeech-1.1/scale_stats.npy" // DO NOT USE WITH MULTI_SPEAKER MODEL. scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std based notmalization is used and other normalization params are ignored
     },

     // VOCABULARY PARAMETERS
     // if custom character set is not defined,
     // default set in symbols.py is used
     // "characters":{
     //     "pad": "_",
     //     "eos": "~",
     //     "bos": "^",
     //     "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!'(),-.:;? ",
     //     "punctuations":"!'(),-.:;? ",
     //     "phonemes":"iyɨʉɯuɪʏʊeøɘəɵɤoɛœɜɞʌɔæɐaɶɑɒᵻʘɓǀɗǃʄǂɠǁʛpbtdʈɖcɟkɡʔɴŋɲɳnɱmʙrʀⱱɾɽɸβfvθðszʃʒʂʐçʝxɣχʁħʕhɦɬɮʋɹɻjɰlɭʎʟˈˌːˑʍwɥʜʢʡɕʑɺɧɚ˞ɫ"
     // },

     // DISTRIBUTED TRAINING
     "distributed":{
         "backend": "nccl",
         "url": "tcp:\/\/localhost:54321"
     },

     "reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.

     // TRAINING
     "batch_size": 32,    // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
     "eval_batch_size":16,
     "r": 7,              // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "gradual_training": [[0, 7, 64], [1, 5, 64], [50000, 3, 32], [130000, 2, 32], [290000, 1, 32]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
     "mixed_precision": true, // level of optimization with NVIDIA's apex feature for automatic mixed FP16/FP32 precision (AMP), NOTE: currently only O1 is supported, and use "O1" to activate.

     // LOSS SETTINGS
     "loss_masking": true,            // enable / disable loss masking against the sequence padding.
     "decoder_loss_alpha": 0.5,       // original decoder loss weight. If > 0, it is enabled
     "postnet_loss_alpha": 0.25,      // original postnet loss weight. If > 0, it is enabled
     "postnet_diff_spec_alpha": 0.25, // differential spectral loss weight. If > 0, it is enabled
     "decoder_diff_spec_alpha": 0.25, // differential spectral loss weight. If > 0, it is enabled
     "decoder_ssim_alpha": 0.5,       // decoder ssim loss weight. If > 0, it is enabled
     "postnet_ssim_alpha": 0.25,      // postnet ssim loss weight. If > 0, it is enabled
     "ga_alpha": 0.0,                 // weight for guided attention loss. If > 0, guided attention is enabled.
     "stopnet_pos_weight": 15.0,      // pos class weight for stopnet loss since there are way more negative samples than positive samples.

     // VALIDATION
     "run_eval": true,
     "test_delay_epochs": 10,     //Until attention is aligned, testing only wastes computation time.
     "test_sentences_file": null, // set a file to load sentences to be used for testing. If it is null then we use default english sentences.

     // OPTIMIZER
     "noam_schedule": false, // use noam warmup and lr schedule.
     "grad_clip": 1.0,       // upper limit for gradients for clipping.
     "epochs": 1000,         // total number of epochs to train.
     "lr": 0.0001,           // Initial learning rate. If Noam decay is active, maximum learning rate.
     "wd": 0.000001,         // Weight decay weight.
     "warmup_steps": 4000,   // Noam decay steps to increase the learning rate from 0 to "lr"
     "seq_len_norm": false,  // Normalize eash sample loss with its length to alleviate imbalanced datasets. Use it if your dataset is small or has skewed distribution of sequence lengths.

     // TACOTRON PRENET
     "memory_size": -1,         // ONLY TACOTRON - size of the memory queue used fro storing last decoder predictions for auto-regression. If < 0, memory queue is disabled and decoder only uses the last prediction frame.
     "prenet_type": "original", // "original" or "bn".
     "prenet_dropout": false,   // enable/disable dropout at prenet.

     // TACOTRON ATTENTION
     "attention_type": "dynamic_convolution", // 'original' , 'graves', 'dynamic_convolution'
     "attention_heads": 4,        // number of attention heads (only for 'graves')
     "attention_norm": "softmax", // softmax or sigmoid.
     "windowing": false,          // Enables attention windowing. Used only in eval mode.
     "use_forward_attn": false,   // if it uses forward attention. In general, it aligns faster.
     "forward_attn_mask": false,  // Additional masking forcing monotonicity only in eval mode.
     "transition_agent": false,   // enable/disable transition agent of forward attention.
     "location_attn": true,       // enable_disable location sensitive attention. It is enabled for TACOTRON by default.
     "bidirectional_decoder": false, // use https://arxiv.org/abs/1907.09006. Use it, if attention does not work well with your dataset.
     "double_decoder_consistency": false, // use DDC explained here https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency-draft/
     "ddc_r": 7,                  // reduction rate for coarse decoder.

     // STOPNET
     "stopnet": true,          // Train stopnet predicting the end of synthesis.
     "separate_stopnet": true, // Train stopnet seperately if 'stopnet==true'. It prevents stopnet loss to influence the rest of the model. It causes a better model, but it trains SLOWER.

     // TENSORBOARD and LOGGING
     "print_step": 25,       // Number of steps to log training on console.
     "tb_plot_step": 100,    // Number of steps to plot TB training figures.
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 10000,     // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.

     // DATA LOADING
     "text_cleaner": "phoneme_cleaners",
     "enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars.
     "num_loader_workers": 4,       // number of training data loader processes. Don't set it too big. 4-8 are good values.
     "num_val_loader_workers": 4,   // number of evaluation data loader processes.
     "batch_group_size": 4,         //Number of batches to shuffle after bucketing.
     "min_seq_len": 6,              // DATASET-RELATED: minimum text length to use in training
     "max_seq_len": 153,            // DATASET-RELATED: maximum text length
     "compute_input_seq_cache": false, // if true, text sequences are computed before starting training. If phonemes are enabled, they are also computed at this stage.

     // PATHS
     "output_path": "/home/erogol/Models/LJSpeech/",

     // PHONEMES
     "phoneme_cache_path": "/home/erogol/Models/phoneme_cache/", // phoneme computation is slow, therefore, it caches results in the given folder.
     "use_phonemes": true,        // use phonemes instead of raw characters. It is suggested for better pronounciation.
     "phoneme_language": "en-us", // depending on your target language, pick one from https://github.com/bootphon/phonemizer#languages

     // MULTI-SPEAKER and GST
     "use_speaker_embedding": false, // use speaker embedding to enable multi-speaker learning.
     "use_gst": false,               // use global style tokens
     "use_external_speaker_embedding_file": false, // if true, forces the model to use external embedding per sample instead of nn.embeddings, that is, it supports external embeddings such as those used at: https://arxiv.org/abs/1806.04558
     "external_speaker_embedding_file": "../../speakers-vctk-en.json", // if not null and use_external_speaker_embedding_file is true, it is used to load a specific embedding file and thus uses these embeddings instead of nn.embeddings, that is, it supports external embeddings such as those used at: https://arxiv.org/abs/1806.04558
     "gst": { // gst parameter if gst is enabled
         "gst_style_input": null, // Condition the style input either on a
         // -> wave file [path to wave] or
         // -> dictionary using the style tokens {'token1': 'value', 'token2': 'value'} example {"0": 0.15, "1": 0.15, "5": -0.15}
         // with the dictionary being len(dict) <= len(gst_style_tokens).
         "gst_embedding_dim": 512,
         "gst_num_heads": 4,
         "gst_style_tokens": 10,
         "gst_use_speaker_embedding": false
     },

     // DATASETS
     "datasets": // List of datasets. They all merged and they get different speaker_ids.
         [
             {
                 "name": "ljspeech",
                 "path": "/home/erogol/Data/LJSpeech-1.1/",
                 "meta_file_train": "metadata.csv", // for vtck if list, ignore speakers id in list for train, its useful for test cloning with new speakers
                 "meta_file_val": null
             }
         ]
 }

View File

@ -109,6 +109,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation. "print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints. "save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.:set n "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.:set n
"mixed_precision": false, "mixed_precision": false,

View File

@ -18,16 +18,11 @@ from TTS.utils.tensorboard_logger import TensorboardLogger
def parse_arguments(argv): def parse_arguments(argv):
"""Parse command line arguments of training scripts. """Parse command line arguments of training scripts.
Parameters Args:
---------- argv (list): This is a list of input arguments as given by sys.argv
argv : list
This is a list of input arguments as given by sys.argv
Returns
-------
argparse.Namespace
Parsed arguments.
Returns:
argparse.Namespace: Parsed arguments.
""" """
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
@ -42,6 +37,12 @@ def parse_arguments(argv):
type=str, type=str,
help="Model file to be restored. Use to finetune a model.", help="Model file to be restored. Use to finetune a model.",
default="") default="")
parser.add_argument(
"--best_path",
type=str,
help=("Best model file to be used for extracting best loss."
"If not specified, the latest best model in continue path is used"),
default="")
parser.add_argument( parser.add_argument(
"--config_path", "--config_path",
type=str, type=str,
@ -67,43 +68,51 @@ def parse_arguments(argv):
def get_last_checkpoint(path): def get_last_checkpoint(path):
"""Get latest checkpoint from a list of filenames. """Get latest checkpoint or/and best model in path.
It is based on globbing for `*.pth.tar` and the RegEx It is based on globbing for `*.pth.tar` and the RegEx
`checkpoint_([0-9]+)`. `(checkpoint|best_model)_([0-9]+)`.
Parameters Args:
---------- path (str): Path to the directory to search for model files.
path : list
Path to files to be compared.
Raises Raises:
------ ValueError: If no checkpoint or best_model files are found.
ValueError
If no checkpoint files are found.
Returns
-------
last_checkpoint : str
Last checkpoint filename.
Returns:
last_models (tuple): Last checkpoint and best model filenames, in that order.
""" """
last_checkpoint_num = 0 file_names = glob.glob(os.path.join(path, "*.pth.tar"))
last_checkpoint = None last_models = {}
filenames = glob.glob( last_model_nums = {}
os.path.join(path, "/*.pth.tar")) for key in ['checkpoint', 'best_model']:
for filename in filenames: last_model_num = 0
try: last_model = None
checkpoint_num = int( for file_name in file_names:
re.search(r"checkpoint_([0-9]+)", filename).groups()[0]) try:
if checkpoint_num > last_checkpoint_num: model_num = int(re.search(
last_checkpoint_num = checkpoint_num f"{key}_([0-9]+)", file_name).groups()[0])
last_checkpoint = filename if model_num > last_model_num:
except AttributeError: # if there's no match in the filename last_model_num = model_num
pass last_model = file_name
if last_checkpoint is None: except AttributeError: # if there's no match in the filename
raise ValueError(f"No checkpoints in {path}!") continue
return last_checkpoint last_models[key] = last_model
last_model_nums[key] = last_model_num
# check what models were found
if not last_models:
raise ValueError(f"No models found in continue path {path}!")
elif 'checkpoint' not in last_models: # no checkpoint just best model
last_models['checkpoint'] = last_models['best_model']
elif 'best_model' not in last_models: # no best model
# this shouldn't happen, but let's handle it just in case
last_models['best_model'] = None
# finally check if last best model is more recent than checkpoint
elif last_model_nums['best_model'] > last_model_nums['checkpoint']:
last_models['checkpoint'] = last_models['best_model']
return last_models['checkpoint'], last_models['best_model']
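A usage sketch of the reworked helper with a hypothetical run folder (file names invented for illustration):

    # Folder contains: checkpoint_40000.pth.tar, checkpoint_50000.pth.tar,
    #                  best_model_45000.pth.tar
    last_checkpoint, last_best = get_last_checkpoint("/runs/ljspeech-March-01")
    # -> (".../checkpoint_50000.pth.tar", ".../best_model_45000.pth.tar")
    # If the newest best_model outnumbered every checkpoint, it would be
    # returned as the checkpoint as well (see the final elif above).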
def process_args(args, model_type): def process_args(args, model_type):
@ -111,8 +120,8 @@ def process_args(args, model_type):
Args: Args:
args (argparse.Namespace or dict like): Parsed input arguments. args (argparse.Namespace or dict like): Parsed input arguments.
model_type (str): Model type used to check config parameters and setup the TensorBoard model_type (str): Model type used to check config parameters and setup
logger. One of: the TensorBoard logger. One of:
- tacotron - tacotron
- glow_tts - glow_tts
- speedy_speech - speedy_speech
@ -121,26 +130,23 @@ def process_args(args, model_type):
- wavernn - wavernn
Raises: Raises:
ValueError ValueError: If `model_type` is not one of implemented choices.
If `model_type` is not one of implemented choices.
Returns: Returns:
c (TTS.utils.io.AttrDict): Config parameters. c (TTS.utils.io.AttrDict): Config parameters.
out_path (str): Path to save models and logging. out_path (str): Path to save models and logging.
audio_path (str): Path to save generated test audios. audio_path (str): Path to save generated test audios.
c_logger (TTS.utils.console_logger.ConsoleLogger): Class that does logging to the console. c_logger (TTS.utils.console_logger.ConsoleLogger): Class that does
tb_logger (TTS.utils.tensorboard.TensorboardLogger): Class that does the TensorBoard logging. logging to the console.
tb_logger (TTS.utils.tensorboard.TensorboardLogger): Class that does
the TensorBoard logging.
""" """
if args.continue_path != "": if args.continue_path:
args.output_path = args.continue_path args.output_path = args.continue_path
args.config_path = os.path.join(args.continue_path, "config.json") args.config_path = os.path.join(args.continue_path, "config.json")
list_of_files = glob.glob( args.restore_path, best_model = get_last_checkpoint(args.continue_path)
os.path.join(args.continue_path, "*.pth.tar") if not args.best_path:
) # * means all if need specific format then *.csv args.best_path = best_model
args.restore_path = max(list_of_files, key=os.path.getctime)
# checkpoint number based continuing
# args.restore_path = get_last_checkpoint(args.continue_path)
print(f" > Training continues for {args.restore_path}")
# setup output paths and read configs # setup output paths and read configs
c = load_config(args.config_path) c = load_config(args.config_path)
@ -154,8 +160,7 @@ def process_args(args, model_type):
if model_class == "TTS": if model_class == "TTS":
check_config_tts(c) check_config_tts(c)
elif model_class == "VOCODER": elif model_class == "VOCODER":
print("Vocoder config checker not implemented, " print("Vocoder config checker not implemented, skipping ...")
"skipping ...")
else: else:
raise ValueError(f"model type {model_type} not recognized!") raise ValueError(f"model type {model_type} not recognized!")
@ -165,7 +170,7 @@ def process_args(args, model_type):
print(" > Mixed precision mode is ON") print(" > Mixed precision mode is ON")
out_path = args.continue_path out_path = args.continue_path
if args.continue_path == "": if not out_path:
out_path = create_experiment_folder(c.output_path, c.run_name, out_path = create_experiment_folder(c.output_path, c.run_name,
args.debug) args.debug)
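In effect, continuing a run now resolves every path from the run folder itself; a hedged sketch of the resulting fields (folder name invented):

    # args.continue_path = "/runs/ljspeech-March-01"
    # after process_args (assuming the folder holds saved models):
    #   args.output_path  == "/runs/ljspeech-March-01"
    #   args.config_path  == "/runs/ljspeech-March-01/config.json"
    #   args.restore_path == newest checkpoint_*.pth.tar (or best_model_*.pth.tar)
    #   args.best_path    == newest best_model_*.pth.tar, unless set explicitly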

View File

@ -138,6 +138,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run. "print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints. "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING // DATA LOADING

View File

@ -128,6 +128,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run. "print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints. "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING // DATA LOADING

View File

@ -141,6 +141,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run. "print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints. "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING // DATA LOADING

View File

@ -130,6 +130,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run. "print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints. "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING // DATA LOADING

View File

@ -124,6 +124,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run. "print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints. "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING // DATA LOADING

View File

@ -103,6 +103,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run. "print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 5000, // Number of training steps expected to plot training stats on TB and save model checkpoints. "save_step": 5000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING // DATA LOADING

View File

@ -89,6 +89,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run. "print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints. "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING // DATA LOADING

View File

@ -1,4 +1,5 @@
import os import os
import glob
import torch import torch
import datetime import datetime
import pickle as pickle_tts import pickle as pickle_tts
@ -61,12 +62,13 @@ def save_checkpoint(model, optimizer, scheduler, model_disc, optimizer_disc,
scheduler_disc, current_step, epoch, checkpoint_path, **kwargs) scheduler_disc, current_step, epoch, checkpoint_path, **kwargs)
def save_best_model(target_loss, best_loss, model, optimizer, scheduler, def save_best_model(current_loss, best_loss, model, optimizer, scheduler,
model_disc, optimizer_disc, scheduler_disc, current_step, model_disc, optimizer_disc, scheduler_disc, current_step,
epoch, output_folder, **kwargs): epoch, out_path, keep_all_best=False, keep_after=10000,
if target_loss < best_loss: **kwargs):
file_name = 'best_model.pth.tar' if current_loss < best_loss:
checkpoint_path = os.path.join(output_folder, file_name) best_model_name = f'best_model_{current_step}.pth.tar'
checkpoint_path = os.path.join(out_path, best_model_name)
print(" > BEST MODEL : {}".format(checkpoint_path)) print(" > BEST MODEL : {}".format(checkpoint_path))
save_model(model, save_model(model,
optimizer, optimizer,
@ -77,7 +79,21 @@ def save_best_model(target_loss, best_loss, model, optimizer, scheduler,
current_step, current_step,
epoch, epoch,
checkpoint_path, checkpoint_path,
model_loss=target_loss, model_loss=current_loss,
**kwargs) **kwargs)
best_loss = target_loss # only delete previous if current is saved successfully
if not keep_all_best or (current_step < keep_after):
model_names = glob.glob(
os.path.join(out_path, 'best_model*.pth.tar'))
for model_name in model_names:
if os.path.basename(model_name) == best_model_name:
continue
os.remove(model_name)
# create symlink to best model for convenience
link_name = 'best_model.pth.tar'
link_path = os.path.join(out_path, link_name)
if os.path.islink(link_path) or os.path.isfile(link_path):
os.remove(link_path)
os.symlink(best_model_name, os.path.join(out_path, link_name))
best_loss = current_loss
return best_loss return best_loss
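A worked example of what the new save_best_model leaves in out_path over a hypothetical run with keep_all_best=True and keep_after=10000 (steps and losses invented):

    # step  5000: new best -> best_model_5000.pth.tar saved; older best models pruned (5000 < keep_after)
    # step 12000: new best -> best_model_12000.pth.tar saved; pruning now disabled (12000 >= keep_after)
    # step 20000: new best -> best_model_20000.pth.tar saved; nothing pruned
    # Resulting folder keeps best_model_5000/12000/20000, and the symlink
    # best_model.pth.tar always points at the newest one (best_model_20000.pth.tar).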

View File

@ -106,6 +106,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation. "print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints. "save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
"apex_amp_level": null, "apex_amp_level": null,

View File

@ -111,6 +111,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation. "print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints. "save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.:set n "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.:set n
"mixed_precision": false, "mixed_precision": false,

View File

@ -1,175 +1,177 @@
{ {
"model": "Tacotron2", "model": "Tacotron2",
"run_name": "test_sample_dataset_run", "run_name": "test_sample_dataset_run",
"run_description": "sample dataset test run", "run_description": "sample dataset test run",
// AUDIO PARAMETERS // AUDIO PARAMETERS
"audio":{ "audio":{
// stft parameters // stft parameters
"fft_size": 1024, // number of stft frequency levels. Size of the linear spectogram frame. "fft_size": 1024, // number of stft frequency levels. Size of the linear spectogram frame.
"win_length": 1024, // stft window length in ms. "win_length": 1024, // stft window length in ms.
"hop_length": 256, // stft window hop-lengh in ms. "hop_length": 256, // stft window hop-lengh in ms.
"frame_length_ms": null, // stft window length in ms.If null, 'win_length' is used. "frame_length_ms": null, // stft window length in ms.If null, 'win_length' is used.
"frame_shift_ms": null, // stft window hop-lengh in ms. If null, 'hop_length' is used. "frame_shift_ms": null, // stft window hop-lengh in ms. If null, 'hop_length' is used.
// Audio processing parameters // Audio processing parameters
"sample_rate": 22050, // DATASET-RELATED: wav sample-rate. "sample_rate": 22050, // DATASET-RELATED: wav sample-rate.
"preemphasis": 0.0, // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no -pre-emphasis. "preemphasis": 0.0, // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no -pre-emphasis.
"ref_level_db": 20, // reference level db, theoretically 20db is the sound of air. "ref_level_db": 20, // reference level db, theoretically 20db is the sound of air.
// Silence trimming // Silence trimming
"do_trim_silence": true,// enable trimming of slience of audio as you load it. LJspeech (true), TWEB (false), Nancy (true) "do_trim_silence": true,// enable trimming of slience of audio as you load it. LJspeech (true), TWEB (false), Nancy (true)
"trim_db": 60, // threshold for timming silence. Set this according to your dataset. "trim_db": 60, // threshold for timming silence. Set this according to your dataset.
// Griffin-Lim // Griffin-Lim
"power": 1.5, // value to sharpen wav signals after GL algorithm. "power": 1.5, // value to sharpen wav signals after GL algorithm.
"griffin_lim_iters": 60,// #griffin-lim iterations. 30-60 is a good range. Larger the value, slower the generation. "griffin_lim_iters": 60,// #griffin-lim iterations. 30-60 is a good range. Larger the value, slower the generation.
// MelSpectrogram parameters // MelSpectrogram parameters
"num_mels": 80, // size of the mel spec frame. "num_mels": 80, // size of the mel spec frame.
"mel_fmin": 0.0, // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!! "mel_fmin": 0.0, // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!!
"mel_fmax": 8000.0, // maximum freq level for mel-spec. Tune for dataset!! "mel_fmax": 8000.0, // maximum freq level for mel-spec. Tune for dataset!!
"spec_gain": 20.0, "spec_gain": 20.0,
// Normalization parameters // Normalization parameters
"signal_norm": true, // normalize spec values. Mean-Var normalization if 'stats_path' is defined otherwise range normalization defined by the other params. "signal_norm": true, // normalize spec values. Mean-Var normalization if 'stats_path' is defined otherwise range normalization defined by the other params.
"min_level_db": -100, // lower bound for normalization "min_level_db": -100, // lower bound for normalization
"symmetric_norm": true, // move normalization to range [-1, 1] "symmetric_norm": true, // move normalization to range [-1, 1]
"max_norm": 4.0, // scale normalization to range [-max_norm, max_norm] or [0, max_norm] "max_norm": 4.0, // scale normalization to range [-max_norm, max_norm] or [0, max_norm]
"clip_norm": true, // clip normalized values into the range. "clip_norm": true, // clip normalized values into the range.
"stats_path": null // DO NOT USE WITH MULTI_SPEAKER MODEL. scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std based notmalization is used and other normalization params are ignored "stats_path": null // DO NOT USE WITH MULTI_SPEAKER MODEL. scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std based notmalization is used and other normalization params are ignored
}, },
// VOCABULARY PARAMETERS // VOCABULARY PARAMETERS
// if custom character set is not defined, // if custom character set is not defined,
// default set in symbols.py is used // default set in symbols.py is used
// "characters":{ // "characters":{
// "pad": "_", // "pad": "_",
// "eos": "~", // "eos": "~",
// "bos": "^", // "bos": "^",
// "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!'(),-.:;? ", // "characters": "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!'(),-.:;? ",
// "punctuations":"!'(),-.:;? ", // "punctuations":"!'(),-.:;? ",
// "phonemes":"iyɨʉɯuɪʏʊeøɘəɵɤoɛœɜɞʌɔæɐaɶɑɒᵻʘɓǀɗǃʄǂɠǁʛpbtdʈɖcɟkɡʔɴŋɲɳnɱmʙrʀⱱɾɽɸβfvθðszʃʒʂʐçʝxɣχʁħʕhɦɬɮʋɹɻjɰlɭʎʟˈˌːˑʍwɥʜʢʡɕʑɺɧɚ˞ɫ" // "phonemes":"iyɨʉɯuɪʏʊeøɘəɵɤoɛœɜɞʌɔæɐaɶɑɒᵻʘɓǀɗǃʄǂɠǁʛpbtdʈɖcɟkɡʔɴŋɲɳnɱmʙrʀⱱɾɽɸβfvθðszʃʒʂʐçʝxɣχʁħʕhɦɬɮʋɹɻjɰlɭʎʟˈˌːˑʍwɥʜʢʡɕʑɺɧɚ˞ɫ"
// }, // },
// DISTRIBUTED TRAINING // DISTRIBUTED TRAINING
"distributed":{ "distributed":{
"backend": "nccl", "backend": "nccl",
"url": "tcp:\/\/localhost:54321" "url": "tcp:\/\/localhost:54321"
}, },
"reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers. "reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
// TRAINING // TRAINING
"batch_size": 1, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'. "batch_size": 1, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
"eval_batch_size":1, "eval_batch_size":1,
"r": 7, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled. "r": 7, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
"gradual_training": [[0, 7, 4]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed. "gradual_training": [[0, 7, 4]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
"loss_masking": true, // enable / disable loss masking against the sequence padding. "loss_masking": true, // enable / disable loss masking against the sequence padding.
"ga_alpha": 10.0, // weight for guided attention loss. If > 0, guided attention is enabled. "ga_alpha": 10.0, // weight for guided attention loss. If > 0, guided attention is enabled.
"mixed_precision": false, "mixed_precision": false,
// VALIDATION // VALIDATION
"run_eval": true, "run_eval": true,
"test_delay_epochs": 0, //Until attention is aligned, testing only wastes computation time. "test_delay_epochs": 0, //Until attention is aligned, testing only wastes computation time.
"test_sentences_file": null, // set a file to load sentences to be used for testing. If it is null then we use default english sentences. "test_sentences_file": null, // set a file to load sentences to be used for testing. If it is null then we use default english sentences.
// LOSS SETTINGS // LOSS SETTINGS
"loss_masking": true, // enable / disable loss masking against the sequence padding. "loss_masking": true, // enable / disable loss masking against the sequence padding.
"decoder_loss_alpha": 0.5, // original decoder loss weight. If > 0, it is enabled "decoder_loss_alpha": 0.5, // original decoder loss weight. If > 0, it is enabled
"postnet_loss_alpha": 0.25, // original postnet loss weight. If > 0, it is enabled "postnet_loss_alpha": 0.25, // original postnet loss weight. If > 0, it is enabled
"postnet_diff_spec_alpha": 0.25, // differential spectral loss weight. If > 0, it is enabled "postnet_diff_spec_alpha": 0.25, // differential spectral loss weight. If > 0, it is enabled
"decoder_diff_spec_alpha": 0.25, // differential spectral loss weight. If > 0, it is enabled "decoder_diff_spec_alpha": 0.25, // differential spectral loss weight. If > 0, it is enabled
"decoder_ssim_alpha": 0.5, // decoder ssim loss weight. If > 0, it is enabled "decoder_ssim_alpha": 0.5, // decoder ssim loss weight. If > 0, it is enabled
"postnet_ssim_alpha": 0.25, // postnet ssim loss weight. If > 0, it is enabled "postnet_ssim_alpha": 0.25, // postnet ssim loss weight. If > 0, it is enabled
"ga_alpha": 5.0, // weight for guided attention loss. If > 0, guided attention is enabled. "ga_alpha": 5.0, // weight for guided attention loss. If > 0, guided attention is enabled.
"stopnet_pos_weight": 15.0, // pos class weight for stopnet loss since there are way more negative samples than positive samples. "stopnet_pos_weight": 15.0, // pos class weight for stopnet loss since there are way more negative samples than positive samples.
// OPTIMIZER // OPTIMIZER
"noam_schedule": false, // use noam warmup and lr schedule. "noam_schedule": false, // use noam warmup and lr schedule.
"grad_clip": 1.0, // upper limit for gradients for clipping. "grad_clip": 1.0, // upper limit for gradients for clipping.
"epochs": 1, // total number of epochs to train. "epochs": 1, // total number of epochs to train.
"lr": 0.0001, // Initial learning rate. If Noam decay is active, maximum learning rate. "lr": 0.0001, // Initial learning rate. If Noam decay is active, maximum learning rate.
"wd": 0.000001, // Weight decay weight. "wd": 0.000001, // Weight decay weight.
"warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr" "warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr"
"seq_len_norm": false, // Normalize eash sample loss with its length to alleviate imbalanced datasets. Use it if your dataset is small or has skewed distribution of sequence lengths. "seq_len_norm": false, // Normalize eash sample loss with its length to alleviate imbalanced datasets. Use it if your dataset is small or has skewed distribution of sequence lengths.
// TACOTRON PRENET // TACOTRON PRENET
"memory_size": -1, // ONLY TACOTRON - size of the memory queue used fro storing last decoder predictions for auto-regression. If < 0, memory queue is disabled and decoder only uses the last prediction frame. "memory_size": -1, // ONLY TACOTRON - size of the memory queue used fro storing last decoder predictions for auto-regression. If < 0, memory queue is disabled and decoder only uses the last prediction frame.
"prenet_type": "bn", // "original" or "bn". "prenet_type": "bn", // "original" or "bn".
"prenet_dropout": false, // enable/disable dropout at prenet. "prenet_dropout": false, // enable/disable dropout at prenet.
// TACOTRON ATTENTION // TACOTRON ATTENTION
"attention_type": "original", // 'original' , 'graves', 'dynamic_convolution' "attention_type": "original", // 'original' , 'graves', 'dynamic_convolution'
"attention_heads": 4, // number of attention heads (only for 'graves') "attention_heads": 4, // number of attention heads (only for 'graves')
"attention_norm": "sigmoid", // softmax or sigmoid. "attention_norm": "sigmoid", // softmax or sigmoid.
"windowing": false, // Enables attention windowing. Used only in eval mode. "windowing": false, // Enables attention windowing. Used only in eval mode.
"use_forward_attn": false, // if it uses forward attention. In general, it aligns faster. "use_forward_attn": false, // if it uses forward attention. In general, it aligns faster.
"forward_attn_mask": false, // Additional masking forcing monotonicity only in eval mode. "forward_attn_mask": false, // Additional masking forcing monotonicity only in eval mode.
"transition_agent": false, // enable/disable transition agent of forward attention. "transition_agent": false, // enable/disable transition agent of forward attention.
"location_attn": true, // enable_disable location sensitive attention. It is enabled for TACOTRON by default. "location_attn": true, // enable_disable location sensitive attention. It is enabled for TACOTRON by default.
"bidirectional_decoder": false, // use https://arxiv.org/abs/1907.09006. Use it, if attention does not work well with your dataset. "bidirectional_decoder": false, // use https://arxiv.org/abs/1907.09006. Use it, if attention does not work well with your dataset.
"double_decoder_consistency": true, // use DDC explained here https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency-draft/ "double_decoder_consistency": true, // use DDC explained here https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency-draft/
"ddc_r": 7, // reduction rate for coarse decoder. "ddc_r": 7, // reduction rate for coarse decoder.
// STOPNET // STOPNET
"stopnet": true, // Train stopnet predicting the end of synthesis. "stopnet": true, // Train stopnet predicting the end of synthesis.
"separate_stopnet": true, // Train stopnet seperately if 'stopnet==true'. It prevents stopnet loss to influence the rest of the model. It causes a better model, but it trains SLOWER. "separate_stopnet": true, // Train stopnet seperately if 'stopnet==true'. It prevents stopnet loss to influence the rest of the model. It causes a better model, but it trains SLOWER.
// TENSORBOARD and LOGGING // TENSORBOARD and LOGGING
"print_step": 1, // Number of steps to log training on console. "print_step": 1, // Number of steps to log training on console.
"tb_plot_step": 100, // Number of steps to plot TB training figures. "tb_plot_step": 100, // Number of steps to plot TB training figures.
"print_eval": false, // If True, it prints intermediate loss values in evalulation. "print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 10000, // Number of training steps expected to save traninpg stats and checkpoints. "save_step": 10000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
// DATA LOADING "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
"text_cleaner": "phoneme_cleaners",
"enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars. // DATA LOADING
"num_loader_workers": 0, // number of training data loader processes. Don't set it too big. 4-8 are good values. "text_cleaner": "phoneme_cleaners",
"num_val_loader_workers": 0, // number of evaluation data loader processes. "enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars.
"batch_group_size": 0, //Number of batches to shuffle after bucketing. "num_loader_workers": 0, // number of training data loader processes. Don't set it too big. 4-8 are good values.
"min_seq_len": 6, // DATASET-RELATED: minimum text length to use in training "num_val_loader_workers": 0, // number of evaluation data loader processes.
"max_seq_len": 153, // DATASET-RELATED: maximum text length "batch_group_size": 0, //Number of batches to shuffle after bucketing.
"compute_input_seq_cache": true, "min_seq_len": 6, // DATASET-RELATED: minimum text length to use in training
"max_seq_len": 153, // DATASET-RELATED: maximum text length
// PATHS "compute_input_seq_cache": true,
"output_path": "tests/train_outputs/",
// PATHS
// PHONEMES "output_path": "tests/train_outputs/",
"phoneme_cache_path": "tests/train_outputs/phoneme_cache/", // phoneme computation is slow, therefore, it caches results in the given folder.
"use_phonemes": true, // use phonemes instead of raw characters. It is suggested for better pronounciation. // PHONEMES
"phoneme_language": "en-us", // depending on your target language, pick one from https://github.com/bootphon/phonemizer#languages "phoneme_cache_path": "tests/train_outputs/phoneme_cache/", // phoneme computation is slow, therefore, it caches results in the given folder.
"use_phonemes": true, // use phonemes instead of raw characters. It is suggested for better pronounciation.
// MULTI-SPEAKER and GST "phoneme_language": "en-us", // depending on your target language, pick one from https://github.com/bootphon/phonemizer#languages
"use_external_speaker_embedding_file": false,
"external_speaker_embedding_file": null, // MULTI-SPEAKER and GST
"use_speaker_embedding": false, // use speaker embedding to enable multi-speaker learning. "use_external_speaker_embedding_file": false,
"use_gst": true, // use global style tokens "external_speaker_embedding_file": null,
"gst": { // gst parameter if gst is enabled "use_speaker_embedding": false, // use speaker embedding to enable multi-speaker learning.
"gst_style_input": null, // Condition the style input either on a "use_gst": true, // use global style tokens
// -> wave file [path to wave] or "gst": { // gst parameter if gst is enabled
// -> dictionary using the style tokens {'token1': 'value', 'token2': 'value'} example {"0": 0.15, "1": 0.15, "5": -0.15} "gst_style_input": null, // Condition the style input either on a
// with the dictionary being len(dict) == len(gst_style_tokens). // -> wave file [path to wave] or
"gst_use_speaker_embedding": true, // if true pass speaker embedding in attention input GST. // -> dictionary using the style tokens {'token1': 'value', 'token2': 'value'} example {"0": 0.15, "1": 0.15, "5": -0.15}
"gst_embedding_dim": 512, // with the dictionary being len(dict) == len(gst_style_tokens).
"gst_num_heads": 4, "gst_use_speaker_embedding": true, // if true pass speaker embedding in attention input GST.
"gst_style_tokens": 10 "gst_embedding_dim": 512,
}, "gst_num_heads": 4,
"gst_style_tokens": 10
// DATASETS },
"train_portion": 0.1, // dataset portion used for training. It is mainly for internal experiments.
"eval_portion": 0.1, // dataset portion used for training. It is mainly for internal experiments. // DATASETS
"datasets": // List of datasets. They all merged and they get different speaker_ids. "train_portion": 0.1, // dataset portion used for training. It is mainly for internal experiments.
[ "eval_portion": 0.1, // dataset portion used for training. It is mainly for internal experiments.
{ "datasets": // List of datasets. They all merged and they get different speaker_ids.
"name": "ljspeech", [
"path": "tests/data/ljspeech/", {
"meta_file_train": "metadata.csv", "name": "ljspeech",
"meta_file_val": "metadata.csv" "path": "tests/data/ljspeech/",
} "meta_file_train": "metadata.csv",
] "meta_file_val": "metadata.csv"
}
} ]
}

View File

@ -131,6 +131,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run. "print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints. "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING // DATA LOADING

View File

@ -101,6 +101,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run. "print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 10000, // Number of training steps expected to plot training stats on TB and save model checkpoints. "save_step": 10000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING // DATA LOADING

View File

@ -97,6 +97,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run. "print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints. "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step" "checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING // DATA LOADING