mirror of https://github.com/coqui-ai/TTS.git
train_encoder refactoring for coqpit
This commit is contained in:
parent 9ee70af9bb
commit 3fde2001b1
@@ -13,7 +13,7 @@ from torch.utils.data import DataLoader
 from TTS.speaker_encoder.dataset import MyDataset
 from TTS.speaker_encoder.losses import AngleProtoLoss, GE2ELoss
 from TTS.speaker_encoder.model import SpeakerEncoder
-from TTS.speaker_encoder.utils.generic_utils import check_config_speaker_encoder, save_best_model
+from TTS.speaker_encoder.utils.io import save_best_model, save_checkpoint
 from TTS.speaker_encoder.utils.visual import plot_embeddings
 from TTS.tts.datasets.preprocess import load_meta_data
 from TTS.utils.audio import AudioProcessor

@@ -28,6 +28,8 @@ from TTS.utils.io import copy_model_files, load_config
 from TTS.utils.radam import RAdam
 from TTS.utils.tensorboard_logger import TensorboardLogger
 from TTS.utils.training import NoamLR, check_update
+from TTS.utils.arguments import init_training
+
 
 torch.backends.cudnn.enabled = True
 torch.backends.cudnn.benchmark = True

@@ -105,8 +107,9 @@ def train(model, criterion, optimizer, scheduler, ap, global_step):
     # Averaged Loss and Averaged Loader Time
     avg_loss = 0.01 * loss.item() + 0.99 * avg_loss if avg_loss != 0 else loss.item()
+    num_loader_workers = c.num_loader_workers if c.num_loader_workers > 0 else 1
     avg_loader_time = (
-        1 / c.num_loader_workers * loader_time + (c.num_loader_workers - 1) / c.num_loader_workers * avg_loader_time
+        1 / num_loader_workers * loader_time + (num_loader_workers - 1) / num_loader_workers * avg_loader_time
         if avg_loader_time != 0
         else loader_time
     )

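Note: the `num_loader_workers` guard added above exists because `c.num_loader_workers` may legitimately be 0 (data loading in the main process), which would otherwise divide by zero in the running average. A standalone sketch of the same smoothing rule, with a hypothetical function name and made-up numbers (not from the codebase):

def smooth_loader_time(loader_time, avg_loader_time, num_loader_workers):
    # Fall back to one "virtual" worker so the weights below stay defined.
    workers = num_loader_workers if num_loader_workers > 0 else 1
    # Running average: the newest sample gets weight 1/workers,
    # the history keeps the remaining (workers - 1)/workers.
    if avg_loader_time != 0:
        return loader_time / workers + (workers - 1) / workers * avg_loader_time
    return loader_time  # the first measurement seeds the average


print(smooth_loader_time(0.8, 0.4, 4))  # 0.8/4 + 3/4 * 0.4 = 0.5
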
@@ -139,8 +142,13 @@ def train(model, criterion, optimizer, scheduler, ap, global_step):
     # save best model
     best_loss = save_best_model(model, optimizer, avg_loss, best_loss, OUT_PATH, global_step)

     end_time = time.time()

+    # checkpoint and check stop train cond.
+    if global_step >= c.max_train_step or global_step % c.save_step == 0:
+        save_checkpoint(model, optimizer, avg_loss, OUT_PATH, global_step)
+        if global_step >= c.max_train_step:
+            break
 return avg_loss, global_step

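Note: the added block ties checkpointing to two conditions: save every `save_step` iterations, and save once more before stopping at `max_train_step`. A generic sketch of that pattern using plain `torch.save` rather than the project's `save_checkpoint` helper (whose full signature is not shown in this diff); names below are illustrative:

import os
import torch


def run_steps(model, optimizer, data_loader, max_train_step, save_step, out_path):
    global_step = 0
    for _batch in data_loader:
        # ... forward pass, loss, backward, optimizer.step() elided ...
        global_step += 1
        if global_step >= max_train_step or global_step % save_step == 0:
            # Persist enough state to resume later: weights, optimizer, step counter.
            state = {
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "step": global_step,
            }
            torch.save(state, os.path.join(out_path, f"checkpoint_{global_step}.pth"))
            if global_step >= max_train_step:
                break
    return global_step
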
@@ -149,12 +157,12 @@ def main(args): # pylint: disable=redefined-outer-name
     global meta_data_train
     global meta_data_eval

-    ap = AudioProcessor(**c.audio)
+    ap = AudioProcessor(**c.audio.to_dict())
     model = SpeakerEncoder(
-        input_dim=c.model["input_dim"],
-        proj_dim=c.model["proj_dim"],
-        lstm_dim=c.model["lstm_dim"],
-        num_lstm_layers=c.model["num_lstm_layers"],
+        input_dim=c.model_params["input_dim"],
+        proj_dim=c.model_params["proj_dim"],
+        lstm_dim=c.model_params["lstm_dim"],
+        num_lstm_layers=c.model_params["num_lstm_layers"],
     )
     optimizer = RAdam(model.parameters(), lr=c.lr)

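Note: these changes follow from moving the config onto Coqpit dataclasses: `model_params` becomes a field on the parsed config, and `c.audio` is a dataclass rather than a plain dict, so it has to be converted with `.to_dict()` before being splatted into `AudioProcessor`. A minimal sketch of why that conversion is needed, using the `coqpit` package; the config classes and field values below are illustrative, not the project's actual config:

from dataclasses import dataclass, field

from coqpit import Coqpit


@dataclass
class AudioConfig(Coqpit):
    sample_rate: int = 16000
    num_mels: int = 80


@dataclass
class EncoderConfig(Coqpit):
    audio: AudioConfig = field(default_factory=AudioConfig)
    model_params: dict = field(default_factory=lambda: {"input_dim": 80, "proj_dim": 256})


c = EncoderConfig()
print(c.model_params["proj_dim"])  # attribute access on the parsed config
print(c.audio.to_dict())           # dataclass -> plain dict, ready for **kwargs
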
@@ -168,11 +176,6 @@ def main(args): # pylint: disable=redefined-outer-name
     if args.restore_path:
         checkpoint = torch.load(args.restore_path)
         try:
-            # TODO: fix optimizer init, model.cuda() needs to be called before
-            # optimizer restore
-            # optimizer.load_state_dict(checkpoint['optimizer'])
-            if c.reinit_layers:
-                raise RuntimeError
             model.load_state_dict(checkpoint["model"])
         except KeyError:
             print(" > Partial model initialization.")

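Note: the surviving try/except keeps restore best-effort: if the checkpoint's keys do not match the model, the script only reports "Partial model initialization." One common way to actually perform such a partial restore (not what the script above does, just an illustration of the idea) is to filter the checkpoint to matching entries before loading; the helper name is hypothetical:

def partial_restore(model, checkpoint_state):
    model_state = model.state_dict()
    # Keep only entries whose name and tensor shape match the current model.
    filtered = {
        k: v for k, v in checkpoint_state.items()
        if k in model_state and v.shape == model_state[k].shape
    }
    model_state.update(filtered)
    model.load_state_dict(model_state)
    print(f" > Restored {len(filtered)}/{len(model_state)} tensors.")
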
@@ -207,47 +210,7 @@ def main(args): # pylint: disable=redefined-outer-name

 if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--restore_path", type=str, help="Path to model outputs (checkpoint, tensorboard etc.).", default=0
-    )
-    parser.add_argument(
-        "--config_path",
-        type=str,
-        required=True,
-        help="Path to config file for training.",
-    )
-    parser.add_argument("--debug", type=bool, default=True, help="Do not verify commit integrity to run training.")
-    parser.add_argument("--data_path", type=str, default="", help="Defines the data path. It overwrites config.json.")
-    parser.add_argument("--output_path", type=str, help="path for training outputs.", default="")
-    parser.add_argument("--output_folder", type=str, default="", help="folder name for training outputs.")
-    args = parser.parse_args()
-
-    # setup output paths and read configs
-    c = load_config(args.config_path)
-    check_config_speaker_encoder(c)
-    _ = os.path.dirname(os.path.realpath(__file__))
-    if args.data_path != "":
-        c.data_path = args.data_path
-
-    if args.output_path == "":
-        OUT_PATH = os.path.join(_, c.output_path)
-    else:
-        OUT_PATH = args.output_path
-
-    if args.output_folder == "":
-        OUT_PATH = create_experiment_folder(OUT_PATH, c.run_name, args.debug)
-    else:
-        OUT_PATH = os.path.join(OUT_PATH, args.output_folder)
-
-    new_fields = {}
-    if args.restore_path:
-        new_fields["restore_path"] = args.restore_path
-        new_fields["github_branch"] = get_git_branch()
-    copy_model_files(c, args.config_path, OUT_PATH, new_fields)
-
-    LOG_DIR = OUT_PATH
-    tb_logger = TensorboardLogger(LOG_DIR, model_name="Speaker_Encoder")
+    args, c, OUT_PATH, AUDIO_PATH, c_logger, tb_logger = init_training(sys.argv)

     try:
         main(args)

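Note: the net effect of this hunk is that the manual argparse, output-folder, config-copy and Tensorboard setup moves into `TTS.utils.arguments.init_training`, which hands back the same objects the script used to build by hand. A rough sketch of what such a helper has to do, assuming it parses `--config_path`/`--restore_path` style arguments and reuses the helpers already imported in this file; this is an illustration of the pattern, not the actual implementation:

import argparse
import os
import time

from TTS.utils.io import copy_model_files, load_config
from TTS.utils.tensorboard_logger import TensorboardLogger


def init_training_sketch(argv):
    # Parse the two arguments the old __main__ block also understood.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--restore_path", type=str, default="")
    args = parser.parse_args(argv[1:])

    # Load the (coqpit) config and create a fresh experiment folder.
    config = load_config(args.config_path)
    out_path = os.path.join(config.output_path, f"run-{int(time.time())}")
    os.makedirs(out_path, exist_ok=True)

    # Keep a copy of the config next to the outputs and set up logging.
    new_fields = {"restore_path": args.restore_path} if args.restore_path else {}
    copy_model_files(config, args.config_path, out_path, new_fields)
    audio_path = os.path.join(out_path, "test_audios")
    tb_logger = TensorboardLogger(out_path, model_name="Speaker_Encoder")
    c_logger = None  # console logger omitted in this sketch
    return args, config, out_path, audio_path, c_logger, tb_logger
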
@@ -9,7 +9,7 @@ from TTS.utils.generic_utils import find_module
 def _search_configs(model_name):
     config_class = None
-    paths = ["TTS.tts.configs", "TTS.vocoder.configs"]
+    paths = ["TTS.tts.configs", "TTS.vocoder.configs", "TTS.speaker_encoder"]
     for path in paths:
         try:
             config_class = find_module(path, model_name + "_config")
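Note: adding "TTS.speaker_encoder" to the search list lets the same lookup resolve speaker-encoder configs by model name. Assuming `find_module` behaves like a thin wrapper over `importlib` that imports `<path>.<model_name>_config` and returns the matching class, a sketch of the idea with hypothetical names:

import importlib


def find_config_class(model_name, paths=("TTS.tts.configs", "TTS.vocoder.configs", "TTS.speaker_encoder")):
    for path in paths:
        try:
            # e.g. model_name "speaker_encoder" -> module "TTS.speaker_encoder.speaker_encoder_config"
            module = importlib.import_module(f"{path}.{model_name}_config")
        except ModuleNotFoundError:
            continue
        # Assume the config class follows the CamelCase + "Config" naming convention.
        class_name = "".join(part.title() for part in model_name.split("_")) + "Config"
        return getattr(module, class_name)
    raise ValueError(f"No config found for model: {model_name}")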