diff --git a/TTS/bin/compute_embeddings.py b/TTS/bin/compute_embeddings.py
index 4e242825..a3230401 100644
--- a/TTS/bin/compute_embeddings.py
+++ b/TTS/bin/compute_embeddings.py
@@ -1,6 +1,7 @@
 import argparse
 import os
 from argparse import RawTextHelpFormatter
+import torch
 
 from tqdm import tqdm
 
@@ -22,11 +23,13 @@ parser.add_argument("config_path", type=str, help="Path to model config file.")
 parser.add_argument("config_dataset_path", type=str, help="Path to dataset config file.")
 parser.add_argument("--output_path", type=str, help="Path for output `pth` or `json` file.", default="speakers.pth")
 parser.add_argument("--old_file", type=str, help="Previous embedding file to only compute new audios.", default=None)
-parser.add_argument("--use_cuda", type=bool, help="flag to set cuda. Default False", default=False)
+parser.add_argument("--disable_cuda", action="store_true", help="Flag to disable cuda.")
 parser.add_argument("--no_eval", type=bool, help="Do not compute eval?. Default False", default=False)
 
 args = parser.parse_args()
 
+use_cuda = torch.cuda.is_available() and not args.disable_cuda
+
 c_dataset = load_config(args.config_dataset_path)
 meta_data_train, meta_data_eval = load_tts_samples(c_dataset.datasets, eval_split=not args.no_eval)
 
@@ -40,7 +43,7 @@ encoder_manager = SpeakerManager(
     encoder_model_path=args.model_path,
     encoder_config_path=args.config_path,
     d_vectors_file_path=args.old_file,
-    use_cuda=args.use_cuda,
+    use_cuda=use_cuda,
 )
 
 class_name_key = encoder_manager.encoder_config.class_name_key