number of workers as config parameter

thllwg 2020-05-15 10:19:52 +02:00
parent 7a9247f91f
commit 65b9c7d3d6
2 changed files with 2 additions and 1 deletion

@@ -34,6 +34,7 @@
     "save_step": 1000, // Number of training steps expected to save traning stats and checkpoints.
     "print_step": 1, // Number of steps to log traning on console.
     "output_path": "/media/erogol/data_ssd/Models/libri_tts/speaker_encoder/", // DATASET-RELATED: output path for all training outputs.
+    "num_loader_workers": 0, // number of training data loader processes. Don't set it too big. 4-8 are good values.
     "model": {
         "input_dim": 40,
         "proj_dim": 128,

@@ -44,7 +44,7 @@ def setup_loader(ap, is_val=False, verbose=False):
     loader = DataLoader(dataset,
                         batch_size=c.num_speakers_in_batch,
                         shuffle=False,
-                        num_workers=0,
+                        num_workers=c.num_loader_workers,
                         collate_fn=dataset.collate_fn)
     return loader
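
On the PyTorch side, num_workers controls how many subprocesses a DataLoader uses to prepare batches; 0 means batches are loaded in the main process. A minimal, self-contained sketch of the same idea, with a toy TensorDataset and made-up sizes standing in for the project's speaker dataset and collate_fn:

import torch
from torch.utils.data import DataLoader, TensorDataset

if __name__ == "__main__":
    # Toy stand-in for the speaker-encoder dataset: 64 samples of 40-dim features.
    dataset = TensorDataset(torch.randn(64, 40))

    # num_workers > 0 spawns that many worker processes for data loading;
    # num_workers=0 keeps loading in the main process (the config default above).
    loader = DataLoader(dataset,
                        batch_size=8,
                        shuffle=False,
                        num_workers=2)

    for (batch,) in loader:
        print(batch.shape)  # torch.Size([8, 40])
        break

The __main__ guard matters once num_workers is greater than 0 on platforms that spawn worker processes; the 4-8 range mentioned in the new config comment is a typical choice when the disk and CPU can keep up.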