mirror of https://github.com/coqui-ai/TTS.git
Merge pull request #407 from thllwg/dev
Configurable number of workers for DataLoader
This commit is contained in: commit d023872f0d
@@ -34,6 +34,7 @@
     "save_step": 1000, // Number of training steps expected to save traning stats and checkpoints.
     "print_step": 1, // Number of steps to log traning on console.
     "output_path": "/media/erogol/data_ssd/Models/libri_tts/speaker_encoder/", // DATASET-RELATED: output path for all training outputs.
+    "num_loader_workers": 0, // number of training data loader processes. Don't set it too big. 4-8 are good values.
     "model": {
         "input_dim": 40,
         "proj_dim": 128,
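The new num_loader_workers key controls how many worker processes the training DataLoader spawns; 0 keeps data loading in the main process. Below is a minimal sketch of reading and sanity-checking the value, assuming c is the parsed JSON config with attribute access; the helper name and the CPU-count cap are assumptions for illustration, not part of this patch.

import os

def resolve_num_workers(c, default=0):
    # Fall back to single-process loading if the key is missing, and cap the
    # requested count at the machine's CPU count, since the config comment
    # warns against setting it too big.
    requested = getattr(c, "num_loader_workers", default)
    return max(0, min(requested, os.cpu_count() or 1))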
@@ -44,7 +44,7 @@ def setup_loader(ap, is_val=False, verbose=False):
     loader = DataLoader(dataset,
                         batch_size=c.num_speakers_in_batch,
                         shuffle=False,
-                        num_workers=0,
+                        num_workers=c.num_loader_workers,
                         collate_fn=dataset.collate_fn)
     return loader
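For context, the pattern this change enables looks like the following self-contained sketch. The dataset, feature size, and batch size are placeholders, not the actual speaker-encoder dataset or its collate_fn.

import torch
from torch.utils.data import DataLoader, Dataset

class DummyDataset(Dataset):
    """Placeholder dataset standing in for the speaker-encoder dataset."""
    def __len__(self):
        return 64

    def __getitem__(self, idx):
        return torch.randn(40), idx  # (40-dim feature, label) stand-in

def setup_loader(num_loader_workers=0, batch_size=8):
    dataset = DummyDataset()
    # num_workers=0 loads batches in the main process; >0 spawns that many
    # worker processes, which is what the new config key now controls.
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=False,
                      num_workers=num_loader_workers)

if __name__ == "__main__":
    loader = setup_loader(num_loader_workers=4)
    features, labels = next(iter(loader))
    print(features.shape, labels.shape)  # torch.Size([8, 40]) torch.Size([8])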