mirror of https://github.com/coqui-ai/TTS.git
save schedulers with checkpoints
This commit is contained in:
parent
dcfd626784
commit
b445bcb962
|
@ -2,18 +2,27 @@ import os
|
||||||
import torch
|
import torch
|
||||||
import datetime
|
import datetime
|
||||||
|
|
||||||
def save_model(model, optimizer, scheduler, model_disc, optimizer_disc,
               scheduler_disc, current_step, epoch, output_path, **kwargs):
    """Serialize training state to ``output_path`` with ``torch.save``.

    Saves the generator ``model`` state_dict together with the (optional)
    optimizer, LR scheduler, discriminator model, discriminator optimizer
    and discriminator scheduler states, plus step/epoch bookkeeping.

    Args:
        model: generator network (required; its state_dict is always saved).
        optimizer: generator optimizer or None.
        scheduler: generator LR scheduler or None.
        model_disc: discriminator network or None.
        optimizer_disc: discriminator optimizer or None.
        scheduler_disc: discriminator LR scheduler or None.
        current_step (int): global training step, stored under ``'step'``.
        epoch (int): current epoch, stored under ``'epoch'``.
        output_path (str): target file path for ``torch.save``.
        **kwargs: extra entries merged into the saved state dict as-is.
    """
    def _state(obj):
        # None-safe state extraction: optional components may be absent.
        return obj.state_dict() if obj is not None else None

    state = {
        'model': model.state_dict(),
        'optimizer': _state(optimizer),
        'scheduler': _state(scheduler),
        'model_disc': _state(model_disc),
        'optimizer_disc': _state(optimizer_disc),
        'scheduler_disc': _state(scheduler_disc),
        'step': current_step,
        'epoch': epoch,
        # Human-readable save date, e.g. "January 01, 2024".
        'date': datetime.date.today().strftime("%B %d, %Y"),
    }
    # Caller-provided extras (e.g. losses) ride along in the checkpoint.
    state.update(kwargs)
    torch.save(state, output_path)
def save_checkpoint(model, optimizer, scheduler, model_disc, optimizer_disc,
                    scheduler_disc, current_step, epoch, output_folder,
                    **kwargs):
    """Write a step-stamped checkpoint file into ``output_folder``.

    Builds a ``checkpoint_<step>.pth.tar`` path, announces it on stdout and
    delegates the actual serialization to ``save_model``. Extra ``kwargs``
    are forwarded unchanged.
    """
    checkpoint_path = os.path.join(
        output_folder, 'checkpoint_{}.pth.tar'.format(current_step))
    print(" > CHECKPOINT : {}".format(checkpoint_path))
    save_model(model, optimizer, scheduler, model_disc, optimizer_disc,
               scheduler_disc, current_step, epoch, checkpoint_path, **kwargs)
def save_best_model(target_loss, best_loss, model, optimizer, model_disc,
|
def save_best_model(target_loss, best_loss, model, optimizer, scheduler,
|
||||||
optimizer_disc, current_step, epoch, output_folder,
|
model_disc, optimizer_disc, scheduler_disc, current_step,
|
||||||
**kwargs):
|
epoch, output_folder, **kwargs):
|
||||||
if target_loss < best_loss:
|
if target_loss < best_loss:
|
||||||
file_name = 'best_model.pth.tar'
|
file_name = 'best_model.pth.tar'
|
||||||
checkpoint_path = os.path.join(output_folder, file_name)
|
checkpoint_path = os.path.join(output_folder, file_name)
|
||||||
print(" > BEST MODEL : {}".format(checkpoint_path))
|
print(" > BEST MODEL : {}".format(checkpoint_path))
|
||||||
save_model(model,
|
save_model(model,
|
||||||
optimizer,
|
optimizer,
|
||||||
|
scheduler,
|
||||||
model_disc,
|
model_disc,
|
||||||
optimizer_disc,
|
optimizer_disc,
|
||||||
|
scheduler_disc,
|
||||||
current_step,
|
current_step,
|
||||||
epoch,
|
epoch,
|
||||||
checkpoint_path,
|
checkpoint_path,
|
||||||
|
|
Loading…
Reference in New Issue