diff --git a/TTS/demos/xtts_ft_demo/utils/gpt_train.py b/TTS/demos/xtts_ft_demo/utils/gpt_train.py
index 4d33d6fc..2c51e436 100644
--- a/TTS/demos/xtts_ft_demo/utils/gpt_train.py
+++ b/TTS/demos/xtts_ft_demo/utils/gpt_train.py
@@ -23,7 +23,7 @@ def train_gpt(language, num_epochs, batch_size, train_csv, eval_csv, output_path
     OPTIMIZER_WD_ONLY_ON_WEIGHTS = True # for multi-gpu training please make it False
     START_WITH_EVAL = True # if True it will star with evaluation
     BATCH_SIZE = batch_size # set here the batch size
-    GRAD_ACUMM_STEPS = 1 # set here the grad accumulation steps
+    GRAD_ACUMM_STEPS = 4 # set here the grad accumulation steps
     # Note: we recommend that BATCH_SIZE * GRAD_ACUMM_STEPS need to be at least 252 for more efficient training. You can increase/decrease BATCH_SIZE but then set GRAD_ACUMM_STEPS accordingly.
diff --git a/TTS/demos/xtts_ft_demo/xtts_demo.py b/TTS/demos/xtts_ft_demo/xtts_demo.py
index 24a449ec..69d2dd60 100644
--- a/TTS/demos/xtts_ft_demo/xtts_demo.py
+++ b/TTS/demos/xtts_ft_demo/xtts_demo.py
@@ -189,7 +189,7 @@ with gr.Blocks() as demo:
                 minimum=2,
                 maximum=512,
                 step=1,
-                value=16,
+                value=4,
             )
             progress_train = gr.Label(
                 label="Progress:"
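
Context on the defaults: the in-repo note recommends `BATCH_SIZE * GRAD_ACUMM_STEPS` of at least 252 for more efficient training, and this diff's new defaults (batch size 4, 4 accumulation steps) keep the same effective batch of 16 as the old defaults (16 × 1). Below is a minimal sketch of how an accumulation step count could be derived from a target effective batch; `choose_grad_accum_steps` and `target_effective_batch` are illustrative names, not identifiers from the repo:

```python
import math

def choose_grad_accum_steps(batch_size: int, target_effective_batch: int = 252) -> int:
    """Smallest GRAD_ACUMM_STEPS so that batch_size * steps reaches the
    recommended effective batch (252, per the comment in gpt_train.py)."""
    return max(1, math.ceil(target_effective_batch / batch_size))

# With the new slider default of 4, hitting the 252 recommendation would
# take 63 accumulation steps; the diff's fixed value of 4 instead keeps
# the previous effective batch of 16 (formerly 16 * 1).
print(choose_grad_accum_steps(4))  # 63
print(4 * 4, 16 * 1)               # 16 16
```

Since gradient accumulation runs several forward/backward passes before each optimizer step, raising `GRAD_ACUMM_STEPS` while lowering the slider's batch-size default trades a little wall-clock time for a smaller per-step memory footprint at the same effective batch.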