save schedulers with checkpoints

This commit is contained in:
erogol 2020-06-09 23:04:16 +02:00
parent dcfd626784
commit b445bcb962
1 changed file with 25 additions and 13 deletions

View File

@ -2,18 +2,27 @@ import os
import torch
import datetime
def save_model(model, optimizer, scheduler, model_disc, optimizer_disc,
               scheduler_disc, current_step, epoch, output_path, **kwargs):
    """Serialize training state to ``output_path`` with ``torch.save``.

    Collects the ``state_dict`` of the generator model and — when present —
    its optimizer, LR scheduler, and the discriminator counterparts, together
    with step/epoch bookkeeping and a human-readable date stamp.

    Args:
        model: generator network; its ``state_dict()`` is always saved.
        optimizer: generator optimizer or None.
        scheduler: generator LR scheduler or None.
        model_disc: discriminator network or None (e.g. GAN-less vocoders).
        optimizer_disc: discriminator optimizer or None.
        scheduler_disc: discriminator LR scheduler or None.
        current_step: global training step stored under ``'step'``.
        epoch: current epoch stored under ``'epoch'``.
        output_path: destination file path for ``torch.save``.
        **kwargs: extra entries merged into the checkpoint dict as-is.
    """
    model_state = model.state_dict()
    # Each auxiliary object is optional; store None instead of crashing so a
    # checkpoint can be written at any point of a partially-configured run.
    model_disc_state = model_disc.state_dict() \
        if model_disc is not None else None
    optimizer_state = optimizer.state_dict() \
        if optimizer is not None else None
    optimizer_disc_state = optimizer_disc.state_dict() \
        if optimizer_disc is not None else None
    scheduler_state = scheduler.state_dict() \
        if scheduler is not None else None
    scheduler_disc_state = scheduler_disc.state_dict() \
        if scheduler_disc is not None else None
    state = {
        'model': model_state,
        'optimizer': optimizer_state,
        'scheduler': scheduler_state,
        'model_disc': model_disc_state,
        'optimizer_disc': optimizer_disc_state,
        'scheduler_disc': scheduler_disc_state,
        'step': current_step,
        'epoch': epoch,
        'date': datetime.date.today().strftime("%B %d, %Y"),
    }
    # Caller-supplied extras (e.g. config, loss values) ride along unchanged.
    state.update(kwargs)
    torch.save(state, output_path)
def save_checkpoint(model, optimizer, scheduler, model_disc, optimizer_disc,
                    scheduler_disc, current_step, epoch, output_folder,
                    **kwargs):
    """Write a step-numbered checkpoint into ``output_folder``.

    Builds a ``checkpoint_{step}.pth.tar`` filename, announces the path on
    stdout, and delegates the actual serialization to :func:`save_model`.

    Args:
        model: generator network.
        optimizer: generator optimizer or None.
        scheduler: generator LR scheduler or None.
        model_disc: discriminator network or None.
        optimizer_disc: discriminator optimizer or None.
        scheduler_disc: discriminator LR scheduler or None.
        current_step: global training step; embedded in the file name.
        epoch: current epoch.
        output_folder: directory that receives the checkpoint file.
        **kwargs: extra entries forwarded into the saved state dict.
    """
    file_name = 'checkpoint_{}.pth.tar'.format(current_step)
    checkpoint_path = os.path.join(output_folder, file_name)
    print(" > CHECKPOINT : {}".format(checkpoint_path))
    save_model(model, optimizer, scheduler, model_disc, optimizer_disc,
               scheduler_disc, current_step, epoch, checkpoint_path, **kwargs)
def save_best_model(target_loss, best_loss, model, optimizer, model_disc,
optimizer_disc, current_step, epoch, output_folder,
**kwargs):
def save_best_model(target_loss, best_loss, model, optimizer, scheduler,
model_disc, optimizer_disc, scheduler_disc, current_step,
epoch, output_folder, **kwargs):
if target_loss < best_loss:
file_name = 'best_model.pth.tar'
checkpoint_path = os.path.join(output_folder, file_name)
print(" > BEST MODEL : {}".format(checkpoint_path))
save_model(model,
optimizer,
scheduler,
model_disc,
optimizer_disc,
scheduler_disc,
current_step,
epoch,
checkpoint_path,