mirror of https://github.com/coqui-ai/TTS.git

commit d0454461de
Merge branch 'pr/gerazov/650-2' into dev
@@ -500,6 +500,7 @@ def main(args): # pylint: disable=redefined-outer-name
     criterion = GlowTTSLoss()
 
     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)} ...")
         checkpoint = torch.load(args.restore_path, map_location='cpu')
         try:
             # TODO: fix optimizer init, model.cuda() needs to be called before
@@ -517,7 +518,7 @@ def main(args): # pylint: disable=redefined-outer-name
 
         for group in optimizer.param_groups:
             group['initial_lr'] = c.lr
-        print(" > Model restored from step %d" % checkpoint['step'],
+        print(f" > Model restored from step {checkpoint['step']:d}",
              flush=True)
         args.restore_step = checkpoint['step']
     else:
@@ -541,8 +542,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_params = count_parameters(model)
     print("\n > Model has {} parameters".format(num_params), flush=True)
 
-    if 'best_loss' not in locals():
+    if args.restore_step == 0 or not args.best_path:
         best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with loaded last best loss {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     # define dataloaders
     train_loader = setup_loader(ap, 1, is_val=False, verbose=True)
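
Note: the same restore logic recurs in every trainer touched by this commit. A
minimal, self-contained sketch of what it does (the helper name
`init_best_loss` is hypothetical, not part of the commit):

    import os
    import torch

    def init_best_loss(restore_step, best_path, c):
        # start from +inf unless a previous best model is available
        if restore_step == 0 or not best_path:
            best_loss = float('inf')
        else:
            # 'model_loss' is written into the checkpoint by save_best_model()
            best_loss = torch.load(best_path,
                                   map_location='cpu')['model_loss']
        keep_all_best = c.get('keep_all_best', False)
        keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
        return best_loss, keep_all_best, keep_after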
@@ -552,7 +562,8 @@ def main(args): # pylint: disable=redefined-outer-name
     model = data_depended_init(train_loader, model)
     for epoch in range(0, c.epochs):
         c_logger.print_epoch_start(epoch, c.epochs)
-        train_avg_loss_dict, global_step = train(train_loader, model, criterion, optimizer,
+        train_avg_loss_dict, global_step = train(train_loader, model,
+                                                 criterion, optimizer,
                                                  scheduler, ap, global_step,
                                                  epoch)
         eval_avg_loss_dict = evaluate(eval_loader, model, criterion, ap,
@@ -561,8 +572,9 @@ def main(args): # pylint: disable=redefined-outer-name
         target_loss = train_avg_loss_dict['avg_loss']
         if c.run_eval:
             target_loss = eval_avg_loss_dict['avg_loss']
-        best_loss = save_best_model(target_loss, best_loss, model, optimizer, global_step, epoch, c.r,
-                                    OUT_PATH, model_characters)
+        best_loss = save_best_model(target_loss, best_loss, model, optimizer,
+                                    global_step, epoch, c.r, OUT_PATH, model_characters,
+                                    keep_all_best=keep_all_best, keep_after=keep_after)
 
 
 if __name__ == '__main__':
@@ -464,6 +464,7 @@ def main(args): # pylint: disable=redefined-outer-name
     criterion = SpeedySpeechLoss(c)
 
     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)} ...")
         checkpoint = torch.load(args.restore_path, map_location='cpu')
         try:
             # TODO: fix optimizer init, model.cuda() needs to be called before
@@ -505,8 +506,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_params = count_parameters(model)
     print("\n > Model has {} parameters".format(num_params), flush=True)
 
-    if 'best_loss' not in locals():
+    if args.restore_step == 0 or not args.best_path:
         best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with loaded last best loss {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     # define dataloaders
     train_loader = setup_loader(ap, 1, is_val=False, verbose=True)
@@ -525,8 +535,8 @@ def main(args): # pylint: disable=redefined-outer-name
         if c.run_eval:
             target_loss = eval_avg_loss_dict['avg_loss']
         best_loss = save_best_model(target_loss, best_loss, model, optimizer,
-                                    global_step, epoch, c.r,
-                                    OUT_PATH, model_characters)
+                                    global_step, epoch, c.r, OUT_PATH, model_characters,
+                                    keep_all_best=keep_all_best, keep_after=keep_after)
 
 
 if __name__ == '__main__':
@@ -538,12 +538,13 @@ def main(args): # pylint: disable=redefined-outer-name
     # setup criterion
     criterion = TacotronLoss(c, stopnet_pos_weight=c.stopnet_pos_weight, ga_sigma=0.4)
     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)}...")
         checkpoint = torch.load(args.restore_path, map_location='cpu')
         try:
-            print(" > Restoring Model.")
+            print(" > Restoring Model...")
             model.load_state_dict(checkpoint['model'])
             # optimizer restore
-            print(" > Restoring Optimizer.")
+            print(" > Restoring Optimizer...")
             optimizer.load_state_dict(checkpoint['optimizer'])
             if "scaler" in checkpoint and c.mixed_precision:
                 print(" > Restoring AMP Scaler...")
@@ -551,7 +552,7 @@ def main(args): # pylint: disable=redefined-outer-name
             if c.reinit_layers:
                 raise RuntimeError
         except (KeyError, RuntimeError):
-            print(" > Partial model initialization.")
+            print(" > Partial model initialization...")
             model_dict = model.state_dict()
             model_dict = set_init_dict(model_dict, checkpoint['model'], c)
             # torch.save(model_dict, os.path.join(OUT_PATH, 'state_dict.pt'))
@@ -585,8 +586,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_params = count_parameters(model)
     print("\n > Model has {} parameters".format(num_params), flush=True)
 
-    if 'best_loss' not in locals():
+    if args.restore_step == 0 or not args.best_path:
         best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with loaded last best loss {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     # define data loaders
     train_loader = setup_loader(ap,
@@ -639,6 +649,8 @@ def main(args): # pylint: disable=redefined-outer-name
                 c.r,
                 OUT_PATH,
                 model_characters,
+                keep_all_best=keep_all_best,
+                keep_after=keep_after,
                 scaler=scaler.state_dict() if c.mixed_precision else None
             )
 
@@ -485,6 +485,7 @@ def main(args): # pylint: disable=redefined-outer-name
     criterion_disc = DiscriminatorLoss(c)
 
     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)}...")
         checkpoint = torch.load(args.restore_path, map_location='cpu')
         try:
             print(" > Restoring Generator Model...")
@@ -523,7 +524,7 @@ def main(args): # pylint: disable=redefined-outer-name
         for group in optimizer_disc.param_groups:
             group['lr'] = c.lr_disc
 
-        print(" > Model restored from step %d" % checkpoint['step'],
+        print(f" > Model restored from step {checkpoint['step']:d}",
              flush=True)
         args.restore_step = checkpoint['step']
     else:
@@ -545,8 +546,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_params = count_parameters(model_disc)
     print(" > Discriminator has {} parameters".format(num_params), flush=True)
 
-    if 'best_loss' not in locals():
+    if args.restore_step == 0 or not args.best_path:
         best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with best loss of {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     global_step = args.restore_step
     for epoch in range(0, c.epochs):
@@ -571,7 +581,10 @@ def main(args): # pylint: disable=redefined-outer-name
                                         global_step,
                                         epoch,
                                         OUT_PATH,
-                                        model_losses=eval_avg_loss_dict)
+                                        keep_all_best=keep_all_best,
+                                        keep_after=keep_after,
+                                        model_losses=eval_avg_loss_dict,
+                                        )
 
 
 if __name__ == '__main__':
@@ -354,6 +354,7 @@ def main(args): # pylint: disable=redefined-outer-name
     criterion.cuda()
 
     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)}...")
         checkpoint = torch.load(args.restore_path, map_location='cpu')
         try:
             print(" > Restoring Model...")
@@ -393,8 +394,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_params = count_parameters(model)
     print(" > WaveGrad has {} parameters".format(num_params), flush=True)
 
-    if 'best_loss' not in locals():
+    if args.restore_step == 0 or not args.best_path:
         best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with loaded last best loss {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     global_step = args.restore_step
     for epoch in range(0, c.epochs):
@@ -416,6 +426,8 @@ def main(args): # pylint: disable=redefined-outer-name
                                         global_step,
                                         epoch,
                                         OUT_PATH,
+                                        keep_all_best=keep_all_best,
+                                        keep_after=keep_after,
                                         model_losses=eval_avg_loss_dict,
                                         scaler=scaler.state_dict() if c.mixed_precision else None
                                         )
@@ -383,6 +383,7 @@ def main(args): # pylint: disable=redefined-outer-name
 
     # restore any checkpoint
     if args.restore_path:
+        print(f" > Restoring from {os.path.basename(args.restore_path)}...")
         checkpoint = torch.load(args.restore_path, map_location="cpu")
         try:
             print(" > Restoring Model...")
@@ -416,8 +417,17 @@ def main(args): # pylint: disable=redefined-outer-name
     num_parameters = count_parameters(model_wavernn)
     print(" > Model has {} parameters".format(num_parameters), flush=True)
 
-    if "best_loss" not in locals():
-        best_loss = float("inf")
+    if args.restore_step == 0 or not args.best_path:
+        best_loss = float('inf')
+        print(" > Starting with inf best loss.")
+    else:
+        print(" > Restoring best loss from "
+              f"{os.path.basename(args.best_path)} ...")
+        best_loss = torch.load(args.best_path,
+                               map_location='cpu')['model_loss']
+        print(f" > Starting with loaded last best loss {best_loss}.")
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     global_step = args.restore_step
     for epoch in range(0, c.epochs):
@@ -440,6 +450,8 @@ def main(args): # pylint: disable=redefined-outer-name
                                         global_step,
                                         epoch,
                                         OUT_PATH,
+                                        keep_all_best=keep_all_best,
+                                        keep_after=keep_after,
                                         model_losses=eval_avg_loss_dict,
                                         scaler=scaler.state_dict() if c.mixed_precision else None
                                         )
@@ -121,6 +121,8 @@
     "print_eval": false, // If True, it prints intermediate loss values in evalulation.
     "save_step": 10000, // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -93,6 +93,8 @@
     "print_eval": false, // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
     "apex_amp_level": null,
 
@@ -105,6 +105,8 @@
     "print_eval": false, // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -121,6 +121,8 @@
     "print_eval": false, // If True, it prints intermediate loss values in evalulation.
     "save_step": 10000, // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -109,6 +109,8 @@
     "print_eval": false, // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.:set n
     "mixed_precision": false,
 
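
Note: the same two keys are added to every training and test config in the
repo. The trainers read them with defaults, so configs that omit the keys keep
the old behaviour:

    keep_all_best = c.get('keep_all_best', False)
    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False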
@@ -18,16 +18,11 @@ from TTS.utils.tensorboard_logger import TensorboardLogger
 def parse_arguments(argv):
     """Parse command line arguments of training scripts.
 
-    Parameters
-    ----------
-    argv : list
-        This is a list of input arguments as given by sys.argv
+    Args:
+        argv (list): This is a list of input arguments as given by sys.argv
 
-    Returns
-    -------
-    argparse.Namespace
-        Parsed arguments.
-
+    Returns:
+        argparse.Namespace: Parsed arguments.
     """
     parser = argparse.ArgumentParser()
     parser.add_argument(
@@ -42,6 +37,12 @@ def parse_arguments(argv):
         type=str,
         help="Model file to be restored. Use to finetune a model.",
         default="")
+    parser.add_argument(
+        "--best_path",
+        type=str,
+        help=("Best model file to be used for extracting best loss."
+              "If not specified, the latest best model in continue path is used"),
+        default="")
     parser.add_argument(
         "--config_path",
         type=str,
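
Note: `--best_path` is optional; a hedged usage sketch (the module path and
run folder are assumptions, not shown in this diff):

    import sys
    from TTS.utils.arguments import parse_arguments

    # e.g. sys.argv == ['train_glow_tts.py', '--continue_path', 'output/my_run']
    args = parse_arguments(sys.argv)
    # args.best_path stays "" here and is filled in later by process_args()
    # from the newest best_model_*.pth.tar found in the continue path.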
@@ -67,43 +68,51 @@ def parse_arguments(argv):
 
 
 def get_last_checkpoint(path):
-    """Get latest checkpoint from a list of filenames.
+    """Get latest checkpoint or/and best model in path.
 
     It is based on globbing for `*.pth.tar` and the RegEx
-    `checkpoint_([0-9]+)`.
+    `(checkpoint|best_model)_([0-9]+)`.
 
-    Parameters
-    ----------
-    path : list
-        Path to files to be compared.
+    Args:
+        path (list): Path to files to be compared.
 
-    Raises
-    ------
-    ValueError
-        If no checkpoint files are found.
+    Raises:
+        ValueError: If no checkpoint or best_model files are found.
 
-    Returns
-    -------
-    last_checkpoint : str
-        Last checkpoint filename.
-
+    Returns:
+        last_checkpoint (str): Last checkpoint filename.
     """
-    last_checkpoint_num = 0
-    last_checkpoint = None
-    filenames = glob.glob(
-        os.path.join(path, "/*.pth.tar"))
-    for filename in filenames:
-        try:
-            checkpoint_num = int(
-                re.search(r"checkpoint_([0-9]+)", filename).groups()[0])
-            if checkpoint_num > last_checkpoint_num:
-                last_checkpoint_num = checkpoint_num
-                last_checkpoint = filename
-        except AttributeError:  # if there's no match in the filename
-            pass
-    if last_checkpoint is None:
-        raise ValueError(f"No checkpoints in {path}!")
-    return last_checkpoint
+    file_names = glob.glob(os.path.join(path, "*.pth.tar"))
+    last_models = {}
+    last_model_nums = {}
+    for key in ['checkpoint', 'best_model']:
+        last_model_num = 0
+        last_model = None
+        for file_name in file_names:
+            try:
+                model_num = int(re.search(
+                    f"{key}_([0-9]+)", file_name).groups()[0])
+                if model_num > last_model_num:
+                    last_model_num = model_num
+                    last_model = file_name
+            except AttributeError:  # if there's no match in the filename
+                continue
+        last_models[key] = last_model
+        last_model_nums[key] = last_model_num
+
+    # check what models were found
+    if not last_models:
+        raise ValueError(f"No models found in continue path {path}!")
+    elif 'checkpoint' not in last_models:  # no checkpoint just best model
+        last_models['checkpoint'] = last_models['best_model']
+    elif 'best_model' not in last_models:  # no best model
+        # this shouldn't happen, but let's handle it just in case
+        last_models['best_model'] = None
+    # finally check if last best model is more recent than checkpoint
+    elif last_model_nums['best_model'] > last_model_nums['checkpoint']:
+        last_models['checkpoint'] = last_models['best_model']
+
+    return last_models['checkpoint'], last_models['best_model']
 
 
 def process_args(args, model_type):
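
Note: `get_last_checkpoint()` now returns a pair; a sketch of how it behaves
(file names are illustrative):

    # given output/my_run contains checkpoint_30000.pth.tar,
    # best_model_25000.pth.tar and best_model_35000.pth.tar:
    last_checkpoint, last_best = get_last_checkpoint("output/my_run")
    # last_best       -> 'output/my_run/best_model_35000.pth.tar'
    # last_checkpoint -> the same best model, because its step (35000)
    #                    is newer than the last checkpoint's (30000)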
@@ -111,8 +120,8 @@ def process_args(args, model_type):
 
     Args:
         args (argparse.Namespace or dict like): Parsed input arguments.
-        model_type (str): Model type used to check config parameters and setup the TensorBoard
-            logger. One of:
+        model_type (str): Model type used to check config parameters and setup
+            the TensorBoard logger. One of:
             - tacotron
             - glow_tts
             - speedy_speech
@@ -121,26 +130,23 @@ def process_args(args, model_type):
             - wavernn
 
     Raises:
-        ValueError
-            If `model_type` is not one of implemented choices.
+        ValueError: If `model_type` is not one of implemented choices.
 
     Returns:
         c (TTS.utils.io.AttrDict): Config paramaters.
         out_path (str): Path to save models and logging.
         audio_path (str): Path to save generated test audios.
-        c_logger (TTS.utils.console_logger.ConsoleLogger): Class that does logging to the console.
-        tb_logger (TTS.utils.tensorboard.TensorboardLogger): Class that does the TensorBoard loggind.
+        c_logger (TTS.utils.console_logger.ConsoleLogger): Class that does
+            logging to the console.
+        tb_logger (TTS.utils.tensorboard.TensorboardLogger): Class that does
+            the TensorBoard loggind.
     """
-    if args.continue_path != "":
+    if args.continue_path:
         args.output_path = args.continue_path
         args.config_path = os.path.join(args.continue_path, "config.json")
-        list_of_files = glob.glob(
-            os.path.join(args.continue_path, "*.pth.tar")
-        )  # * means all if need specific format then *.csv
-        args.restore_path = max(list_of_files, key=os.path.getctime)
-        # checkpoint number based continuing
-        # args.restore_path = get_last_checkpoint(args.continue_path)
-        print(f" > Training continues for {args.restore_path}")
+        args.restore_path, best_model = get_last_checkpoint(args.continue_path)
+        if not args.best_path:
+            args.best_path = best_model
 
     # setup output paths and read configs
     c = load_config(args.config_path)
@@ -154,8 +160,7 @@ def process_args(args, model_type):
     if model_class == "TTS":
         check_config_tts(c)
     elif model_class == "VOCODER":
-        print("Vocoder config checker not implemented, "
-              "skipping ...")
+        print("Vocoder config checker not implemented, skipping ...")
     else:
         raise ValueError(f"model type {model_type} not recognized!")
 
@@ -165,7 +170,7 @@ def process_args(args, model_type):
         print(" > Mixed precision mode is ON")
 
     out_path = args.continue_path
-    if args.continue_path == "":
+    if not out_path:
         out_path = create_experiment_folder(c.output_path, c.run_name,
                                             args.debug)
 
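
Note: with this change a run continued via `--continue_path` is restored from
the highest-numbered model file instead of the most recently written one, and
that file may itself be a best model. A hedged sketch of the resulting state
(folder name is illustrative):

    # output/my_run: checkpoint_30000.pth.tar, best_model_35000.pth.tar
    args.restore_path  # -> 'output/my_run/best_model_35000.pth.tar'
    args.best_path     # -> 'output/my_run/best_model_35000.pth.tar'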
@@ -138,6 +138,8 @@
     "print_eval": false, // If True, it prints loss values for each step in eval run.
     "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -128,6 +128,8 @@
     "print_eval": false, // If True, it prints loss values for each step in eval run.
     "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -141,6 +141,8 @@
     "print_eval": false, // If True, it prints loss values for each step in eval run.
     "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -130,6 +130,8 @@
     "print_eval": false, // If True, it prints loss values for each step in eval run.
     "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -124,6 +124,8 @@
     "print_eval": false, // If True, it prints loss values for each step in eval run.
     "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -103,6 +103,8 @@
     "print_eval": false, // If True, it prints loss values for each step in eval run.
     "save_step": 5000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -89,6 +89,8 @@
     "print_eval": false, // If True, it prints loss values for each step in eval run.
     "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -1,4 +1,5 @@
 import os
+import glob
 import torch
 import datetime
 import pickle as pickle_tts
@@ -61,12 +62,13 @@ def save_checkpoint(model, optimizer, scheduler, model_disc, optimizer_disc,
                scheduler_disc, current_step, epoch, checkpoint_path, **kwargs)
 
 
-def save_best_model(target_loss, best_loss, model, optimizer, scheduler,
+def save_best_model(current_loss, best_loss, model, optimizer, scheduler,
                     model_disc, optimizer_disc, scheduler_disc, current_step,
-                    epoch, output_folder, **kwargs):
-    if target_loss < best_loss:
-        file_name = 'best_model.pth.tar'
-        checkpoint_path = os.path.join(output_folder, file_name)
+                    epoch, out_path, keep_all_best=False, keep_after=10000,
+                    **kwargs):
+    if current_loss < best_loss:
+        best_model_name = f'best_model_{current_step}.pth.tar'
+        checkpoint_path = os.path.join(out_path, best_model_name)
         print(" > BEST MODEL : {}".format(checkpoint_path))
         save_model(model,
                    optimizer,
@@ -77,7 +79,21 @@ def save_best_model(target_loss, best_loss, model, optimizer, scheduler,
                    current_step,
                    epoch,
                    checkpoint_path,
-                   model_loss=target_loss,
+                   model_loss=current_loss,
                    **kwargs)
-        best_loss = target_loss
+        # only delete previous if current is saved successfully
+        if not keep_all_best or (current_step < keep_after):
+            model_names = glob.glob(
+                os.path.join(out_path, 'best_model*.pth.tar'))
+            for model_name in model_names:
+                if os.path.basename(model_name) == best_model_name:
+                    continue
+                os.remove(model_name)
+        # create symlink to best model for convinience
+        link_name = 'best_model.pth.tar'
+        link_path = os.path.join(out_path, link_name)
+        if os.path.islink(link_path) or os.path.isfile(link_path):
+            os.remove(link_path)
+        os.symlink(best_model_name, os.path.join(out_path, link_name))
+        best_loss = current_loss
     return best_loss
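
Note: `best_model.pth.tar` is no longer a model file itself but a symlink to
the newest `best_model_<step>.pth.tar`, so existing tooling that loads the
fixed name keeps working. A small sketch (the run folder is illustrative):

    import os

    link_path = os.path.join("output/my_run", "best_model.pth.tar")
    if os.path.islink(link_path):
        print(os.readlink(link_path))  # e.g. 'best_model_42000.pth.tar'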
@@ -106,6 +106,8 @@
     "print_eval": false, // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": true, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
     "apex_amp_level": null,
 
@@ -111,6 +111,8 @@
     "print_eval": false, // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": true, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.:set n
     "mixed_precision": false,
 
@@ -122,6 +122,8 @@
     "print_eval": false, // If True, it prints intermediate loss values in evalulation.
     "save_step": 10000, // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": true, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -131,6 +131,8 @@
     "print_eval": false, // If True, it prints loss values for each step in eval run.
     "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": true, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -101,6 +101,8 @@
     "print_eval": false, // If True, it prints loss values for each step in eval run.
     "save_step": 10000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": true, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
@@ -97,6 +97,8 @@
     "print_eval": false, // If True, it prints loss values for each step in eval run.
     "save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true, // If true, it saves checkpoints per "save_step"
+    "keep_all_best": true, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING