Merge branch 'pr/gerazov/650-2' into dev

Eren Gölge 2021-02-17 13:40:45 +00:00
commit d0454461de
26 changed files with 728 additions and 600 deletions

View File

@ -500,6 +500,7 @@ def main(args): # pylint: disable=redefined-outer-name
criterion = GlowTTSLoss()
if args.restore_path:
print(f" > Restoring from {os.path.basename(args.restore_path)} ...")
checkpoint = torch.load(args.restore_path, map_location='cpu')
try:
# TODO: fix optimizer init, model.cuda() needs to be called before
@ -517,7 +518,7 @@ def main(args): # pylint: disable=redefined-outer-name
for group in optimizer.param_groups:
group['initial_lr'] = c.lr
print(" > Model restored from step %d" % checkpoint['step'],
print(f" > Model restored from step {checkpoint['step']:d}",
flush=True)
args.restore_step = checkpoint['step']
else:
@ -541,8 +542,17 @@ def main(args): # pylint: disable=redefined-outer-name
num_params = count_parameters(model)
print("\n > Model has {} parameters".format(num_params), flush=True)
if 'best_loss' not in locals():
if args.restore_step == 0 or not args.best_path:
best_loss = float('inf')
print(" > Starting with inf best loss.")
else:
print(" > Restoring best loss from "
f"{os.path.basename(args.best_path)} ...")
best_loss = torch.load(args.best_path,
map_location='cpu')['model_loss']
print(f" > Starting with loaded last best loss {best_loss}.")
keep_all_best = c.get('keep_all_best', False)
keep_after = c.get('keep_after', 10000) # void if keep_all_best False
# define dataloaders
train_loader = setup_loader(ap, 1, is_val=False, verbose=True)
@ -552,7 +562,8 @@ def main(args): # pylint: disable=redefined-outer-name
model = data_depended_init(train_loader, model)
for epoch in range(0, c.epochs):
c_logger.print_epoch_start(epoch, c.epochs)
train_avg_loss_dict, global_step = train(train_loader, model, criterion, optimizer,
train_avg_loss_dict, global_step = train(train_loader, model,
criterion, optimizer,
scheduler, ap, global_step,
epoch)
eval_avg_loss_dict = evaluate(eval_loader, model, criterion, ap,
@ -561,8 +572,9 @@ def main(args): # pylint: disable=redefined-outer-name
target_loss = train_avg_loss_dict['avg_loss']
if c.run_eval:
target_loss = eval_avg_loss_dict['avg_loss']
best_loss = save_best_model(target_loss, best_loss, model, optimizer, global_step, epoch, c.r,
OUT_PATH, model_characters)
best_loss = save_best_model(target_loss, best_loss, model, optimizer,
global_step, epoch, c.r, OUT_PATH, model_characters,
keep_all_best=keep_all_best, keep_after=keep_after)
if __name__ == '__main__':
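
This same best-loss restore pattern is repeated verbatim in every training script touched below (SpeedySpeech, Tacotron, the GAN, WaveGrad and WaveRNN vocoders). A minimal sketch of the logic, using the hypothetical helper name init_best_loss — the scripts inline this rather than share a function:

import os
import torch

def init_best_loss(restore_step, best_path):
    # Fresh run: no restore step or no --best_path, start from infinity.
    if restore_step == 0 or not best_path:
        print(" > Starting with inf best loss.")
        return float('inf')
    # Resumed run: reuse the loss stored in the best-model checkpoint so
    # a worse model cannot overwrite the best one saved before restart.
    print(f" > Restoring best loss from {os.path.basename(best_path)} ...")
    best_loss = torch.load(best_path, map_location='cpu')['model_loss']
    print(f" > Starting with loaded last best loss {best_loss}.")
    return best_loss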

View File

@ -464,6 +464,7 @@ def main(args): # pylint: disable=redefined-outer-name
criterion = SpeedySpeechLoss(c)
if args.restore_path:
print(f" > Restoring from {os.path.basename(args.restore_path)} ...")
checkpoint = torch.load(args.restore_path, map_location='cpu')
try:
# TODO: fix optimizer init, model.cuda() needs to be called before
@ -505,8 +506,17 @@ def main(args): # pylint: disable=redefined-outer-name
num_params = count_parameters(model)
print("\n > Model has {} parameters".format(num_params), flush=True)
if 'best_loss' not in locals():
if args.restore_step == 0 or not args.best_path:
best_loss = float('inf')
print(" > Starting with inf best loss.")
else:
print(" > Restoring best loss from "
f"{os.path.basename(args.best_path)} ...")
best_loss = torch.load(args.best_path,
map_location='cpu')['model_loss']
print(f" > Starting with loaded last best loss {best_loss}.")
keep_all_best = c.get('keep_all_best', False)
keep_after = c.get('keep_after', 10000) # void if keep_all_best False
# define dataloaders
train_loader = setup_loader(ap, 1, is_val=False, verbose=True)
@ -525,8 +535,8 @@ def main(args): # pylint: disable=redefined-outer-name
if c.run_eval:
target_loss = eval_avg_loss_dict['avg_loss']
best_loss = save_best_model(target_loss, best_loss, model, optimizer,
global_step, epoch, c.r,
OUT_PATH, model_characters)
global_step, epoch, c.r, OUT_PATH, model_characters,
keep_all_best=keep_all_best, keep_after=keep_after)
if __name__ == '__main__':

View File

@ -538,12 +538,13 @@ def main(args): # pylint: disable=redefined-outer-name
# setup criterion
criterion = TacotronLoss(c, stopnet_pos_weight=c.stopnet_pos_weight, ga_sigma=0.4)
if args.restore_path:
print(f" > Restoring from {os.path.basename(args.restore_path)}...")
checkpoint = torch.load(args.restore_path, map_location='cpu')
try:
print(" > Restoring Model.")
print(" > Restoring Model...")
model.load_state_dict(checkpoint['model'])
# optimizer restore
print(" > Restoring Optimizer.")
print(" > Restoring Optimizer...")
optimizer.load_state_dict(checkpoint['optimizer'])
if "scaler" in checkpoint and c.mixed_precision:
print(" > Restoring AMP Scaler...")
@ -551,7 +552,7 @@ def main(args): # pylint: disable=redefined-outer-name
if c.reinit_layers:
raise RuntimeError
except (KeyError, RuntimeError):
print(" > Partial model initialization.")
print(" > Partial model initialization...")
model_dict = model.state_dict()
model_dict = set_init_dict(model_dict, checkpoint['model'], c)
# torch.save(model_dict, os.path.join(OUT_PATH, 'state_dict.pt'))
@ -585,8 +586,17 @@ def main(args): # pylint: disable=redefined-outer-name
num_params = count_parameters(model)
print("\n > Model has {} parameters".format(num_params), flush=True)
if 'best_loss' not in locals():
if args.restore_step == 0 or not args.best_path:
best_loss = float('inf')
print(" > Starting with inf best loss.")
else:
print(" > Restoring best loss from "
f"{os.path.basename(args.best_path)} ...")
best_loss = torch.load(args.best_path,
map_location='cpu')['model_loss']
print(f" > Starting with loaded last best loss {best_loss}.")
keep_all_best = c.get('keep_all_best', False)
keep_after = c.get('keep_after', 10000) # void if keep_all_best False
# define data loaders
train_loader = setup_loader(ap,
@ -639,6 +649,8 @@ def main(args): # pylint: disable=redefined-outer-name
c.r,
OUT_PATH,
model_characters,
keep_all_best=keep_all_best,
keep_after=keep_after,
scaler=scaler.state_dict() if c.mixed_precision else None
)
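
This hunk also threads the AMP scaler state through save and restore. A short sketch of that round trip, assuming torch.cuda.amp and a stand-in for the c.mixed_precision flag:

from torch.cuda.amp import GradScaler

mixed_precision = True  # stands in for c.mixed_precision
scaler = GradScaler(enabled=mixed_precision)

# Saving: the scaler state rides along only when mixed precision is on,
# mirroring `scaler=scaler.state_dict() if c.mixed_precision else None`.
checkpoint = {'scaler': scaler.state_dict() if mixed_precision else None}

# Restoring: mirrors `if "scaler" in checkpoint and c.mixed_precision`.
if checkpoint.get('scaler') and mixed_precision:
    print(" > Restoring AMP Scaler...")
    scaler.load_state_dict(checkpoint['scaler'])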

View File

@ -485,6 +485,7 @@ def main(args): # pylint: disable=redefined-outer-name
criterion_disc = DiscriminatorLoss(c)
if args.restore_path:
print(f" > Restoring from {os.path.basename(args.restore_path)}...")
checkpoint = torch.load(args.restore_path, map_location='cpu')
try:
print(" > Restoring Generator Model...")
@ -523,7 +524,7 @@ def main(args): # pylint: disable=redefined-outer-name
for group in optimizer_disc.param_groups:
group['lr'] = c.lr_disc
print(" > Model restored from step %d" % checkpoint['step'],
print(f" > Model restored from step {checkpoint['step']:d}",
flush=True)
args.restore_step = checkpoint['step']
else:
@ -545,8 +546,17 @@ def main(args): # pylint: disable=redefined-outer-name
num_params = count_parameters(model_disc)
print(" > Discriminator has {} parameters".format(num_params), flush=True)
if 'best_loss' not in locals():
if args.restore_step == 0 or not args.best_path:
best_loss = float('inf')
print(" > Starting with inf best loss.")
else:
print(" > Restoring best loss from "
f"{os.path.basename(args.best_path)} ...")
best_loss = torch.load(args.best_path,
map_location='cpu')['model_loss']
print(f" > Starting with best loss of {best_loss}.")
keep_all_best = c.get('keep_all_best', False)
keep_after = c.get('keep_after', 10000) # void if keep_all_best False
global_step = args.restore_step
for epoch in range(0, c.epochs):
@ -571,7 +581,10 @@ def main(args): # pylint: disable=redefined-outer-name
global_step,
epoch,
OUT_PATH,
model_losses=eval_avg_loss_dict)
keep_all_best=keep_all_best,
keep_after=keep_after,
model_losses=eval_avg_loss_dict,
)
if __name__ == '__main__':

View File

@ -354,6 +354,7 @@ def main(args): # pylint: disable=redefined-outer-name
criterion.cuda()
if args.restore_path:
print(f" > Restoring from {os.path.basename(args.restore_path)}...")
checkpoint = torch.load(args.restore_path, map_location='cpu')
try:
print(" > Restoring Model...")
@ -393,8 +394,17 @@ def main(args): # pylint: disable=redefined-outer-name
num_params = count_parameters(model)
print(" > WaveGrad has {} parameters".format(num_params), flush=True)
if 'best_loss' not in locals():
if args.restore_step == 0 or not args.best_path:
best_loss = float('inf')
print(" > Starting with inf best loss.")
else:
print(" > Restoring best loss from "
f"{os.path.basename(args.best_path)} ...")
best_loss = torch.load(args.best_path,
map_location='cpu')['model_loss']
print(f" > Starting with loaded last best loss {best_loss}.")
keep_all_best = c.get('keep_all_best', False)
keep_after = c.get('keep_after', 10000) # void if keep_all_best False
global_step = args.restore_step
for epoch in range(0, c.epochs):
@ -416,6 +426,8 @@ def main(args): # pylint: disable=redefined-outer-name
global_step,
epoch,
OUT_PATH,
keep_all_best=keep_all_best,
keep_after=keep_after,
model_losses=eval_avg_loss_dict,
scaler=scaler.state_dict() if c.mixed_precision else None
)

View File

@ -383,6 +383,7 @@ def main(args): # pylint: disable=redefined-outer-name
# restore any checkpoint
if args.restore_path:
print(f" > Restoring from {os.path.basename(args.restore_path)}...")
checkpoint = torch.load(args.restore_path, map_location="cpu")
try:
print(" > Restoring Model...")
@ -416,8 +417,17 @@ def main(args): # pylint: disable=redefined-outer-name
num_parameters = count_parameters(model_wavernn)
print(" > Model has {} parameters".format(num_parameters), flush=True)
if "best_loss" not in locals():
best_loss = float("inf")
if args.restore_step == 0 or not args.best_path:
best_loss = float('inf')
print(" > Starting with inf best loss.")
else:
print(" > Restoring best loss from "
f"{os.path.basename(args.best_path)} ...")
best_loss = torch.load(args.best_path,
map_location='cpu')['model_loss']
print(f" > Starting with loaded last best loss {best_loss}.")
keep_all_best = c.get('keep_all_best', False)
keep_after = c.get('keep_after', 10000) # void if keep_all_best False
global_step = args.restore_step
for epoch in range(0, c.epochs):
@ -440,6 +450,8 @@ def main(args): # pylint: disable=redefined-outer-name
global_step,
epoch,
OUT_PATH,
keep_all_best=keep_all_best,
keep_after=keep_after,
model_losses=eval_avg_loss_dict,
scaler=scaler.state_dict() if c.mixed_precision else None
)

View File

@ -121,6 +121,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 10000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING
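
The two new keys gate the best-model retention behaviour added in this commit and default to the old behaviour when absent. A hedged sketch of how the training scripts above read them (the plain dict stands in for the loaded AttrDict config):

c = {"keep_all_best": False, "keep_after": 10000}  # stand-in for load_config()

keep_all_best = c.get('keep_all_best', False)  # absent key == old behaviour
keep_after = c.get('keep_after', 10000)  # ignored while keep_all_best is False

# With keep_all_best=true and keep_after=10000: a best model found at step
# 8000 is still replaced by the next improvement, while best models found
# at steps 12000 and 15000 are both kept on disk.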

View File

@ -93,6 +93,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
"apex_amp_level": null,

View File

@ -105,6 +105,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -121,6 +121,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 10000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -109,6 +109,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.:set n
"mixed_precision": false,

View File

@ -18,16 +18,11 @@ from TTS.utils.tensorboard_logger import TensorboardLogger
def parse_arguments(argv):
"""Parse command line arguments of training scripts.
Parameters
----------
argv : list
This is a list of input arguments as given by sys.argv
Returns
-------
argparse.Namespace
Parsed arguments.
Args:
argv (list): This is a list of input arguments as given by sys.argv
Returns:
argparse.Namespace: Parsed arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
@ -42,6 +37,12 @@ def parse_arguments(argv):
type=str,
help="Model file to be restored. Use to finetune a model.",
default="")
parser.add_argument(
"--best_path",
type=str,
help=("Best model file to be used for extracting best loss."
"If not specified, the latest best model in continue path is used"),
default="")
parser.add_argument(
"--config_path",
type=str,
@ -67,43 +68,51 @@ def parse_arguments(argv):
def get_last_checkpoint(path):
"""Get latest checkpoint from a list of filenames.
"""Get latest checkpoint or/and best model in path.
It is based on globbing for `*.pth.tar` and the RegEx
`checkpoint_([0-9]+)`.
`(checkpoint|best_model)_([0-9]+)`.
Parameters
----------
path : list
Path to files to be compared.
Args:
path (list): Path to files to be compared.
Raises
------
ValueError
If no checkpoint files are found.
Returns
-------
last_checkpoint : str
Last checkpoint filename.
Raises:
ValueError: If no checkpoint or best_model files are found.
Returns:
last_checkpoint (str): Last checkpoint filename.
"""
last_checkpoint_num = 0
last_checkpoint = None
filenames = glob.glob(
os.path.join(path, "/*.pth.tar"))
for filename in filenames:
file_names = glob.glob(os.path.join(path, "*.pth.tar"))
last_models = {}
last_model_nums = {}
for key in ['checkpoint', 'best_model']:
last_model_num = 0
last_model = None
for file_name in file_names:
try:
checkpoint_num = int(
re.search(r"checkpoint_([0-9]+)", filename).groups()[0])
if checkpoint_num > last_checkpoint_num:
last_checkpoint_num = checkpoint_num
last_checkpoint = filename
model_num = int(re.search(
f"{key}_([0-9]+)", file_name).groups()[0])
if model_num > last_model_num:
last_model_num = model_num
last_model = file_name
except AttributeError: # if there's no match in the filename
pass
if last_checkpoint is None:
raise ValueError(f"No checkpoints in {path}!")
return last_checkpoint
continue
last_models[key] = last_model
last_model_nums[key] = last_model_num
# check what models were found
if not last_models:
raise ValueError(f"No models found in continue path {path}!")
elif 'checkpoint' not in last_models: # no checkpoint just best model
last_models['checkpoint'] = last_models['best_model']
elif 'best_model' not in last_models: # no best model
# this shouldn't happen, but let's handle it just in case
last_models['best_model'] = None
# finally check if last best model is more recent than checkpoint
elif last_model_nums['best_model'] > last_model_nums['checkpoint']:
last_models['checkpoint'] = last_models['best_model']
return last_models['checkpoint'], last_models['best_model']
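
A hedged usage sketch of the rewritten helper: it globs both checkpoint_*.pth.tar and best_model_*.pth.tar, tracks the newest of each, and returns the best model as the restore point when it is more recent than the last checkpoint (the import path assumes this file is TTS/utils/arguments.py):

import os
import tempfile

from TTS.utils.arguments import get_last_checkpoint

with tempfile.TemporaryDirectory() as run_dir:
    # fake a run folder: two checkpoints and a newer best model
    for name in ('checkpoint_5000.pth.tar', 'checkpoint_10000.pth.tar',
                 'best_model_12000.pth.tar'):
        open(os.path.join(run_dir, name), 'w').close()
    restore_from, best_model = get_last_checkpoint(run_dir)
    # best_model_12000 outranks checkpoint_10000, so it fills both slots
    assert os.path.basename(restore_from) == 'best_model_12000.pth.tar'
    assert os.path.basename(best_model) == 'best_model_12000.pth.tar'
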
def process_args(args, model_type):
@ -111,8 +120,8 @@ def process_args(args, model_type):
Args:
args (argparse.Namespace or dict like): Parsed input arguments.
model_type (str): Model type used to check config parameters and setup the TensorBoard
logger. One of:
model_type (str): Model type used to check config parameters and setup
the TensorBoard logger. One of:
- tacotron
- glow_tts
- speedy_speech
@ -121,26 +130,23 @@ def process_args(args, model_type):
- wavernn
Raises:
ValueError
If `model_type` is not one of implemented choices.
ValueError: If `model_type` is not one of implemented choices.
Returns:
c (TTS.utils.io.AttrDict): Config parameters.
out_path (str): Path to save models and logging.
audio_path (str): Path to save generated test audios.
c_logger (TTS.utils.console_logger.ConsoleLogger): Class that does logging to the console.
tb_logger (TTS.utils.tensorboard.TensorboardLogger): Class that does the TensorBoard logging.
c_logger (TTS.utils.console_logger.ConsoleLogger): Class that does
logging to the console.
tb_logger (TTS.utils.tensorboard.TensorboardLogger): Class that does
the TensorBoard logging.
"""
if args.continue_path != "":
if args.continue_path:
args.output_path = args.continue_path
args.config_path = os.path.join(args.continue_path, "config.json")
list_of_files = glob.glob(
os.path.join(args.continue_path, "*.pth.tar")
) # * means all if need specific format then *.csv
args.restore_path = max(list_of_files, key=os.path.getctime)
# checkpoint number based continuing
# args.restore_path = get_last_checkpoint(args.continue_path)
print(f" > Training continues for {args.restore_path}")
args.restore_path, best_model = get_last_checkpoint(args.continue_path)
if not args.best_path:
args.best_path = best_model
# setup output paths and read configs
c = load_config(args.config_path)
@ -154,8 +160,7 @@ def process_args(args, model_type):
if model_class == "TTS":
check_config_tts(c)
elif model_class == "VOCODER":
print("Vocoder config checker not implemented, "
"skipping ...")
print("Vocoder config checker not implemented, skipping ...")
else:
raise ValueError(f"model type {model_type} not recognized!")
@ -165,7 +170,7 @@ def process_args(args, model_type):
print(" > Mixed precision mode is ON")
out_path = args.continue_path
if args.continue_path == "":
if not out_path:
out_path = create_experiment_folder(c.output_path, c.run_name,
args.debug)

View File

@ -138,6 +138,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -128,6 +128,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -141,6 +141,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -130,6 +130,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -124,6 +124,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -103,6 +103,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 5000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -89,6 +89,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": false, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -1,4 +1,5 @@
import os
import glob
import torch
import datetime
import pickle as pickle_tts
@ -61,12 +62,13 @@ def save_checkpoint(model, optimizer, scheduler, model_disc, optimizer_disc,
scheduler_disc, current_step, epoch, checkpoint_path, **kwargs)
def save_best_model(target_loss, best_loss, model, optimizer, scheduler,
def save_best_model(current_loss, best_loss, model, optimizer, scheduler,
model_disc, optimizer_disc, scheduler_disc, current_step,
epoch, output_folder, **kwargs):
if target_loss < best_loss:
file_name = 'best_model.pth.tar'
checkpoint_path = os.path.join(output_folder, file_name)
epoch, out_path, keep_all_best=False, keep_after=10000,
**kwargs):
if current_loss < best_loss:
best_model_name = f'best_model_{current_step}.pth.tar'
checkpoint_path = os.path.join(out_path, best_model_name)
print(" > BEST MODEL : {}".format(checkpoint_path))
save_model(model,
optimizer,
@ -77,7 +79,21 @@ def save_best_model(target_loss, best_loss, model, optimizer, scheduler,
current_step,
epoch,
checkpoint_path,
model_loss=target_loss,
model_loss=current_loss,
**kwargs)
best_loss = target_loss
# only delete previous if current is saved successfully
if not keep_all_best or (current_step < keep_after):
model_names = glob.glob(
os.path.join(out_path, 'best_model*.pth.tar'))
for model_name in model_names:
if os.path.basename(model_name) == best_model_name:
continue
os.remove(model_name)
# create symlink to best model for convenience
link_name = 'best_model.pth.tar'
link_path = os.path.join(out_path, link_name)
if os.path.islink(link_path) or os.path.isfile(link_path):
os.remove(link_path)
os.symlink(best_model_name, os.path.join(out_path, link_name))
best_loss = current_loss
return best_loss
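
The deletion rule added above condenses to a single predicate; a hedged sketch with the hypothetical name should_prune_previous_best:

def should_prune_previous_best(keep_all_best, current_step, keep_after):
    # Previous best_model_<step>.pth.tar files are removed unless
    # keep_all_best is on AND training has passed keep_after steps,
    # mirroring `if not keep_all_best or (current_step < keep_after):`.
    return not keep_all_best or current_step < keep_after

assert should_prune_previous_best(False, 20000, 10000)  # default: keep one
assert should_prune_previous_best(True, 8000, 10000)  # too early, prune
assert not should_prune_previous_best(True, 12000, 10000)  # keep every best

Whatever survives, best_model.pth.tar is recreated as a symlink to the newest best model, so external tooling can keep loading a fixed filename.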

View File

@ -106,6 +106,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
"apex_amp_level": null,

View File

@ -111,6 +111,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 5000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.:set n
"mixed_precision": false,

View File

@ -122,6 +122,8 @@
"print_eval": false, // If True, it prints intermediate loss values in evalulation.
"save_step": 10000, // Number of training steps expected to save traninpg stats and checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -131,6 +131,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -101,6 +101,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 10000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING

View File

@ -97,6 +97,8 @@
"print_eval": false, // If True, it prints loss values for each step in eval run.
"save_step": 25000, // Number of training steps expected to plot training stats on TB and save model checkpoints.
"checkpoint": true, // If true, it saves checkpoints per "save_step"
"keep_all_best": true, // If true, keeps all best_models after keep_after steps
"keep_after": 10000, // Global step after which to keep best models if keep_all_best is true
"tb_model_param_stats": false, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
// DATA LOADING