From 9af37e43a4bb61f6b148743ca780fe11e7864701 Mon Sep 17 00:00:00 2001
From: Eren Golge
Date: Fri, 26 Jan 2018 02:53:01 -0800
Subject: [PATCH] Better config for training

---
 config.json | 4 +++-
 train.py    | 2 +-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/config.json b/config.json
index 7eca8172..0ad25529 100644
--- a/config.json
+++ b/config.json
@@ -12,7 +12,7 @@
     "text_cleaner": "english_cleaners",
 
     "epochs": 2000,
-    "lr": 0.01,
+    "lr": 0.001,
     "lr_patience": 2,
     "lr_decay": 0.5,
     "batch_size": 256,
@@ -20,6 +20,8 @@
     "power": 1.5,
     "r": 5,
 
+    "num_loader_workers": 32,
+
     "save_step": 200,
     "data_path": "/data/shared/KeithIto/LJSpeech-1.0",
     "output_path": "result",
diff --git a/train.py b/train.py
index bb297233..470e0d16 100644
--- a/train.py
+++ b/train.py
@@ -106,7 +106,7 @@ def main(args):
 
         dataloader = DataLoader(dataset, batch_size=c.batch_size,
                                 shuffle=True, collate_fn=dataset.collate_fn,
-                                drop_last=True, num_workers=32)
+                                drop_last=True, num_workers=c.num_loader_workers)
         print("\n | > Epoch {}/{}".format(epoch, c.epochs))
         progbar = Progbar(len(dataset) / c.batch_size)
 
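
Note: the practical effect of the train.py hunk is that the DataLoader worker count is read from config.json instead of being hard-coded to 32. Below is a minimal, self-contained sketch of that pattern, not the repository's actual code: ToyDataset, the SimpleNamespace wrapper, and the config path are illustrative assumptions.

import json
from types import SimpleNamespace

import torch
from torch.utils.data import DataLoader, Dataset


class ToyDataset(Dataset):
    """Stand-in for the project's LJSpeech dataset; yields dummy tensors."""

    def __len__(self):
        return 1024

    def __getitem__(self, idx):
        return torch.tensor([float(idx)])


if __name__ == "__main__":  # guard is needed because num_workers > 0 spawns subprocesses
    # Parse config.json into an attribute-style object so keys such as
    # batch_size and num_loader_workers can be read as c.batch_size, etc.
    with open("config.json") as f:
        c = SimpleNamespace(**json.load(f))

    dataset = ToyDataset()
    dataloader = DataLoader(dataset,
                            batch_size=c.batch_size,
                            shuffle=True,
                            drop_last=True,
                            num_workers=c.num_loader_workers)  # configurable, no longer hard-coded

    for batch in dataloader:
        pass  # a training step would go here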