From 70f2cb9c0ed7a7c6452a89e09d1bf055d818038f Mon Sep 17 00:00:00 2001
From: Edresson Casanova
Date: Fri, 24 Nov 2023 15:53:34 -0300
Subject: [PATCH] Update gradio demo

---
 TTS/demos/xtts_ft_demo/utils/gpt_train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/TTS/demos/xtts_ft_demo/utils/gpt_train.py b/TTS/demos/xtts_ft_demo/utils/gpt_train.py
index 2c51e436..4d33d6fc 100644
--- a/TTS/demos/xtts_ft_demo/utils/gpt_train.py
+++ b/TTS/demos/xtts_ft_demo/utils/gpt_train.py
@@ -23,7 +23,7 @@ def train_gpt(language, num_epochs, batch_size, train_csv, eval_csv, output_path
     OPTIMIZER_WD_ONLY_ON_WEIGHTS = True  # for multi-gpu training please make it False
     START_WITH_EVAL = True  # if True it will star with evaluation
     BATCH_SIZE = batch_size  # set here the batch size
-    GRAD_ACUMM_STEPS = 4  # set here the grad accumulation steps
+    GRAD_ACUMM_STEPS = 1  # set here the grad accumulation steps
     # Note: we recommend that BATCH_SIZE * GRAD_ACUMM_STEPS need to be at least 252 for more efficient training. You can increase/decrease BATCH_SIZE but then set GRAD_ACUMM_STEPS accordingly.
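The note in the patched hunk amounts to: the effective batch size is BATCH_SIZE * GRAD_ACUMM_STEPS, and it should stay at or above 252. With GRAD_ACUMM_STEPS now fixed at 1, the demo relies on batch_size alone to reach that target. A minimal sketch of that arithmetic, assuming a hypothetical helper grad_accum_for that is not part of the repository:

import math

RECOMMENDED_EFFECTIVE_BATCH = 252  # from the comment in gpt_train.py

def grad_accum_for(batch_size: int, target: int = RECOMMENDED_EFFECTIVE_BATCH) -> int:
    """Hypothetical helper: smallest GRAD_ACUMM_STEPS such that
    batch_size * steps >= target (the recommended effective batch)."""
    return max(1, math.ceil(target / batch_size))

# Examples: a batch size of 252 needs only 1 accumulation step (matching the
# patch's new default when the batch is large enough); a batch size of 32
# would need 8 steps to keep the effective batch at 256 >= 252.
for bs in (4, 32, 252):
    print(bs, grad_accum_for(bs))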