diff --git a/TTS/bin/train_tts.py b/TTS/bin/train_tts.py
index 0c01a79e..f5bc364f 100644
--- a/TTS/bin/train_tts.py
+++ b/TTS/bin/train_tts.py
@@ -637,7 +637,7 @@ if __name__ == '__main__':
     check_config(c)
     _ = os.path.dirname(os.path.realpath(__file__))
 
-    if c.apex_amp_level:
+    if c.apex_amp_level == 'O1':
         print(" > apex AMP level: ", c.apex_amp_level)
 
     OUT_PATH = args.continue_path
diff --git a/TTS/tts/configs/config.json b/TTS/tts/configs/config.json
index d98802f7..6a60fe81 100644
--- a/TTS/tts/configs/config.json
+++ b/TTS/tts/configs/config.json
@@ -67,7 +67,7 @@
     "gradual_training": [[0, 7, 64], [1, 5, 64], [50000, 3, 32], [130000, 2, 32], [290000, 1, 32]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceed.
     "loss_masking": true, // enable / disable loss masking against the sequence padding.
     "ga_alpha": 10.0, // weight for guided attention loss. If > 0, guided attention is enabled.
-    "apex_amp_level": "", // level of optimization with NVIDIA's apex feature for automatic mixed FP16/FP32 precision (AMP), NOTE: currently only O1 is supported, use "" (empty string) to deactivate
+    "apex_amp_level": null, // level of optimization with NVIDIA's apex feature for automatic mixed FP16/FP32 precision (AMP). NOTE: currently only "O1" is supported; set to "O1" to activate, null to deactivate.
 
     // VALIDATION
     "run_eval": true,
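
For context, here is a minimal sketch of how an apex AMP opt level such as the one gated above is typically consumed at training time. The `model`, `optimizer`, and loss construction below are illustrative stand-ins, not code from this patch; only `amp.initialize` and `amp.scale_loss` are the actual NVIDIA apex API:

```python
# Illustrative sketch of apex AMP "O1" usage (model/optimizer are assumptions,
# not code from this patch).
import torch
from apex import amp  # NVIDIA apex: https://github.com/NVIDIA/apex

model = torch.nn.Linear(80, 80).cuda()  # stand-in for the TTS model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

amp_level = "O1"  # mirrors c.apex_amp_level; null/None in the config skips AMP
if amp_level == "O1":
    # Patches model and optimizer so whitelisted ops run in FP16
    # while FP32 master weights are kept for the update.
    model, optimizer = amp.initialize(model, optimizer, opt_level=amp_level)

x = torch.randn(8, 80).cuda()
loss = model(x).pow(2).mean()

if amp_level == "O1":
    # Dynamic loss scaling guards against FP16 gradient underflow.
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
else:
    loss.backward()
optimizer.step()
```

Note that the gate compares with `==` rather than `is`: `is` against a string literal tests object identity, which depends on CPython string interning and is not guaranteed to hold, and newer Python versions emit a SyntaxWarning for it.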