mirror of https://github.com/coqui-ai/TTS.git
Fix unit tests
parent a3ecaf3bdd
commit d1ab3298ba
@@ -1,5 +1,6 @@
 import argparse
 import os
+import torch
 from argparse import RawTextHelpFormatter
 
 from tqdm import tqdm
@@ -28,12 +29,13 @@ parser.add_argument(
     type=str,
     help="Path to dataset config file.",
 )
-parser.add_argument("output_path", type=str, help="path for output speakers.json and/or speakers.npy.")
+parser.add_argument("output_path", type=str, help="path for output .json file.")
 parser.add_argument(
-    "--old_file", type=str, help="Previous speakers.json file, only compute for new audios.", default=None
+    "--old_file", type=str, help="Previous .json file, only compute for new audios.", default=None
 )
 parser.add_argument("--use_cuda", type=bool, help="flag to set cuda. Default False", default=False)
 parser.add_argument("--no_eval", type=bool, help="Do not compute eval?. Default False", default=False)
+parser.add_argument("--use_predicted_label", type=bool, help="If True and a predicted label is available, it will be used.", default=False)
 
 args = parser.parse_args()
 
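The new --use_predicted_label option follows the existing type=bool pattern of the other flags. Note that argparse applies bool() to the raw string value, so any non-empty argument, including the literal string "False", parses as True. A minimal standalone sketch (not part of this diff) illustrating that behaviour:

import argparse

# Reproduce the flag parsing used above in isolation.
parser = argparse.ArgumentParser()
parser.add_argument("--use_predicted_label", type=bool, default=False)

print(parser.parse_args([]).use_predicted_label)                                   # False (default)
print(parser.parse_args(["--use_predicted_label", "1"]).use_predicted_label)       # True
print(parser.parse_args(["--use_predicted_label", "False"]).use_predicted_label)   # True, because bool("False") is True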
@@ -56,10 +58,10 @@ encoder_manager = SpeakerManager(
 class_name_key = encoder_manager.encoder_config.class_name_key
 
 # compute speaker embeddings
-speaker_mapping = {}
+class_mapping = {}
 for idx, wav_file in enumerate(tqdm(wav_files)):
     if isinstance(wav_file, dict):
-        class_name = wav_file[class_name_key]
+        class_name = wav_file[class_name_key] if class_name_key in wav_file else None
         wav_file = wav_file["audio_file"]
     else:
         class_name = None
@@ -72,20 +74,37 @@ for idx, wav_file in enumerate(tqdm(wav_files)):
     # extract the embedding
     embedd = encoder_manager.compute_embedding_from_clip(wav_file)
 
-    # create speaker_mapping if target dataset is defined
-    speaker_mapping[wav_file_name] = {}
-    speaker_mapping[wav_file_name]["name"] = class_name
-    speaker_mapping[wav_file_name]["embedding"] = embedd
+    if args.use_predicted_label:
+        map_classid_to_classname = getattr(encoder_manager.encoder_config, "map_classid_to_classname", None)
+        if encoder_manager.encoder_criterion is not None and map_classid_to_classname is not None:
+            embedding = torch.FloatTensor(embedd).unsqueeze(0)
+            if encoder_manager.use_cuda:
+                embedding = embedding.cuda()
+
+            class_id = encoder_manager.encoder_criterion.softmax.inference(embedding).item()
+            class_name = map_classid_to_classname[str(class_id)]
+        else:
+            raise RuntimeError(
+                " [!] use_predicted_label is enabled and predicted labels are not available !!"
+            )
+
+    # create class_mapping if target dataset is defined
+    class_mapping[wav_file_name] = {}
+    class_mapping[wav_file_name]["name"] = class_name
+    class_mapping[wav_file_name]["embedding"] = embedd
 
-if speaker_mapping:
-    # save speaker_mapping if target dataset is defined
+if class_mapping:
+    # save class_mapping if target dataset is defined
     if ".json" not in args.output_path:
-        mapping_file_path = os.path.join(args.output_path, "speakers.json")
+        if class_name_key == "speaker_name":
+            mapping_file_path = os.path.join(args.output_path, "speakers.json")
+        else:
+            mapping_file_path = os.path.join(args.output_path, "emotions.json")
     else:
         mapping_file_path = args.output_path
 
     os.makedirs(os.path.dirname(mapping_file_path), exist_ok=True)
 
     # pylint: disable=W0212
-    encoder_manager._save_json(mapping_file_path, speaker_mapping)
-    print("Speaker embeddings saved at:", mapping_file_path)
+    encoder_manager._save_json(mapping_file_path, class_mapping)
+    print("Embeddings saved at:", mapping_file_path)
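For context (not part of the diff): each entry in class_mapping is keyed by the audio file name and stores the class name and the raw embedding, which encoder_manager._save_json() then writes to the chosen .json path. A hedged sketch of roughly what the saved file contains, with made-up file names and heavily truncated embeddings:

import json

# Hypothetical example of the mapping produced above (illustrative values only;
# real embeddings have hundreds of dimensions and keys come from the dataset).
class_mapping = {
    "speaker1_utt0.wav": {
        "name": "speaker1",                # speaker (or emotion) class name
        "embedding": [0.12, -0.05, 0.33],  # truncated for readability
    },
    "speaker2_utt0.wav": {
        "name": "speaker2",
        "embedding": [0.48, 0.07, -0.21],
    },
}

with open("speakers.json", "w", encoding="utf-8") as f:
    json.dump(class_mapping, f, indent=2)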
@@ -237,6 +237,7 @@ If you don't specify any models, then it uses LJSpeech based English model.
     model_path = None
     config_path = None
     speakers_file_path = None
+    emotions_file_path = None
     language_ids_file_path = None
     vocoder_path = None
     vocoder_config_path = None
@@ -265,9 +265,9 @@ class Synthesizer(object):
 
         # handle emotion
         emotion_embedding, emotion_id = None, None
-        if self.tts_emotions_file or hasattr(self.tts_model.emotion_manager, "ids"):
+        if self.tts_emotions_file or (getattr(self.tts_model, "emotion_manager", None) and getattr(self.tts_model.emotion_manager, "ids", None)):
             if emotion_name and isinstance(emotion_name, str):
-                if getattr(self.tts_config, "use_external_emotions_embeddings", False) or getattr(self.tts_config.model_args, "use_external_emotions_embeddings", False):
+                if getattr(self.tts_config, "use_external_emotions_embeddings", False) or (getattr(self.tts_config, "model_args", None) and getattr(self.tts_config.model_args, "use_external_emotions_embeddings", False)):
                     # get the average speaker embedding from the saved embeddings.
                     emotion_embedding = self.tts_model.emotion_manager.get_mean_embedding(emotion_name, num_samples=None, randomize=False)
                     emotion_embedding = np.array(emotion_embedding)[None, :]  # [1 x embedding_dim]
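For context (not part of the diff): the reworked conditions avoid raising when the model has no emotion_manager or the config has no model_args. hasattr(self.tts_model.emotion_manager, "ids") still evaluates self.tts_model.emotion_manager first, so a missing manager raises AttributeError before hasattr() runs; chaining getattr() with defaults short-circuits to a falsy value instead. A minimal standalone sketch with a hypothetical Model stand-in:

class Model:
    """Hypothetical stand-in for a TTS model without an emotion_manager attribute."""

model = Model()

# Old-style check: the attribute access inside hasattr() raises before hasattr() can help.
try:
    has_ids = hasattr(model.emotion_manager, "ids")
except AttributeError as err:
    print("old form fails:", err)

# New-style check: every lookup has a default, so the expression is simply falsy.
manager = getattr(model, "emotion_manager", None)
has_ids = bool(manager and getattr(manager, "ids", None))
print("new form:", has_ids)  # False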