mirror of https://github.com/coqui-ai/TTS.git
Use fsspec and torch for embedding file IO (#1581)
* Use fsspec and torch for embedding file
* Fixup
* Fix load and save files
* Fix compute embedding script
* Set use_cuda to true if available
* Add dummy speakers.pth file
* Make style
* Change default speakers file extension

Co-authored-by: WeberJulian <julian.weber@hotmail.fr>
parent: b6bd74a9a9
commit: f70e82cd19
@@ -117,6 +117,7 @@ venv.bak/
 # pytorch models
 *.pth
 *.pth.tar
+!dummy_speakers.pth
 result/

 # setup.py
@@ -2,41 +2,34 @@ import argparse
 import os
 from argparse import RawTextHelpFormatter

+import torch
 from tqdm import tqdm

 from TTS.config import load_config
 from TTS.tts.datasets import load_tts_samples
+from TTS.tts.utils.managers import save_file
 from TTS.tts.utils.speakers import SpeakerManager

 parser = argparse.ArgumentParser(
     description="""Compute embedding vectors for each wav file in a dataset.\n\n"""
     """
     Example runs:
-    python TTS/bin/compute_embeddings.py speaker_encoder_model.pth speaker_encoder_config.json dataset_config.json embeddings_output_path/
+    python TTS/bin/compute_embeddings.py speaker_encoder_model.pth speaker_encoder_config.json dataset_config.json
     """,
     formatter_class=RawTextHelpFormatter,
 )
 parser.add_argument("model_path", type=str, help="Path to model checkpoint file.")
-parser.add_argument(
-    "config_path",
-    type=str,
-    help="Path to model config file.",
-)
-
-parser.add_argument(
-    "config_dataset_path",
-    type=str,
-    help="Path to dataset config file.",
-)
-parser.add_argument("output_path", type=str, help="path for output speakers.json and/or speakers.npy.")
-parser.add_argument(
-    "--old_file", type=str, help="Previous speakers.json file, only compute for new audios.", default=None
-)
-parser.add_argument("--use_cuda", type=bool, help="flag to set cuda. Default False", default=False)
+parser.add_argument("config_path", type=str, help="Path to model config file.")
+parser.add_argument("config_dataset_path", type=str, help="Path to dataset config file.")
+parser.add_argument("--output_path", type=str, help="Path for output `pth` or `json` file.", default="speakers.pth")
+parser.add_argument("--old_file", type=str, help="Previous embedding file to only compute new audios.", default=None)
+parser.add_argument("--disable_cuda", type=bool, help="Flag to disable cuda.", default=False)
 parser.add_argument("--no_eval", type=bool, help="Do not compute eval?. Default False", default=False)

 args = parser.parse_args()

+use_cuda = torch.cuda.is_available() and not args.disable_cuda
+
 c_dataset = load_config(args.config_dataset_path)

 meta_data_train, meta_data_eval = load_tts_samples(c_dataset.datasets, eval_split=not args.no_eval)

@@ -50,7 +43,7 @@ encoder_manager = SpeakerManager(
     encoder_model_path=args.model_path,
     encoder_config_path=args.config_path,
     d_vectors_file_path=args.old_file,
-    use_cuda=args.use_cuda,
+    use_cuda=use_cuda,
 )

 class_name_key = encoder_manager.encoder_config.class_name_key

@@ -79,13 +72,13 @@ for idx, wav_file in enumerate(tqdm(wav_files)):

 if speaker_mapping:
     # save speaker_mapping if target dataset is defined
-    if ".json" not in args.output_path:
-        mapping_file_path = os.path.join(args.output_path, "speakers.json")
+    if os.path.isdir(args.output_path):
+        mapping_file_path = os.path.join(args.output_path, "speakers.pth")
     else:
         mapping_file_path = args.output_path

-    os.makedirs(os.path.dirname(mapping_file_path), exist_ok=True)
+    if os.path.dirname(mapping_file_path) != "":
+        os.makedirs(os.path.dirname(mapping_file_path), exist_ok=True)

-    # pylint: disable=W0212
-    encoder_manager._save_json(mapping_file_path, speaker_mapping)
+    save_file(speaker_mapping, mapping_file_path)

     print("Speaker embeddings saved at:", mapping_file_path)
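In effect, the updated compute_embeddings.py treats `--output_path` as either a directory (a default `speakers.pth` is written inside it) or a full file path used verbatim. A minimal standalone sketch of that resolution logic; the helper name `resolve_output_path` is made up for illustration and is not part of the script:

import os


def resolve_output_path(output_path: str) -> str:
    # A directory gets a default `speakers.pth` inside it,
    # any other value is used as-is.
    if os.path.isdir(output_path):
        mapping_file_path = os.path.join(output_path, "speakers.pth")
    else:
        mapping_file_path = output_path
    # Create the parent directory only when the path actually has one.
    if os.path.dirname(mapping_file_path) != "":
        os.makedirs(os.path.dirname(mapping_file_path), exist_ok=True)
    return mapping_file_path


print(resolve_output_path("speakers.pth"))         # -> speakers.pth
print(resolve_output_path("out/embeddings.json"))  # -> out/embeddings.json (creates out/)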
@@ -407,16 +407,16 @@ class BaseTTS(BaseTrainerModel):
         return test_figures, test_audios

     def on_init_start(self, trainer):
-        """Save the speaker.json and language_ids.json at the beginning of the training. Also update both paths."""
+        """Save the speaker.pth and language_ids.json at the beginning of the training. Also update both paths."""
         if self.speaker_manager is not None:
-            output_path = os.path.join(trainer.output_path, "speakers.json")
+            output_path = os.path.join(trainer.output_path, "speakers.pth")
             self.speaker_manager.save_ids_to_file(output_path)
             trainer.config.speakers_file = output_path
             # some models don't have `model_args` set
             if hasattr(trainer.config, "model_args"):
                 trainer.config.model_args.speakers_file = output_path
             trainer.config.save_json(os.path.join(trainer.output_path, "config.json"))
-            print(f" > `speakers.json` is saved to {output_path}.")
+            print(f" > `speakers.pth` is saved to {output_path}.")
             print(" > `speakers_file` is updated in the config.json.")

         if hasattr(self, "language_manager") and self.language_manager is not None:
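The practical effect is that the run directory now receives a `speakers.pth`, and every config field pointing at the speaker file is updated to that path. A toy sketch of just the path propagation, using stand-in objects (the `SimpleNamespace` trainer below is a placeholder for illustration, not the real Trainer API):

import os
from types import SimpleNamespace

# Stand-ins for trainer.output_path and trainer.config, illustration only.
trainer = SimpleNamespace(
    output_path="run_dir",
    config=SimpleNamespace(speakers_file=None, model_args=SimpleNamespace(speakers_file=None)),
)

output_path = os.path.join(trainer.output_path, "speakers.pth")
trainer.config.speakers_file = output_path
# Some models don't have `model_args` set, hence the hasattr guard in the real code.
if hasattr(trainer.config, "model_args"):
    trainer.config.model_args.speakers_file = output_path

print(trainer.config.speakers_file)             # run_dir/speakers.pth
print(trainer.config.model_args.speakers_file)  # run_dir/speakers.pth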
@@ -11,6 +11,28 @@ from TTS.encoder.utils.generic_utils import setup_encoder_model
 from TTS.utils.audio import AudioProcessor


+def load_file(path: str):
+    if path.endswith(".json"):
+        with fsspec.open(path, "r") as f:
+            return json.load(f)
+    elif path.endswith(".pth"):
+        with fsspec.open(path, "rb") as f:
+            return torch.load(f, map_location="cpu")
+    else:
+        raise ValueError("Unsupported file type")
+
+
+def save_file(obj: Any, path: str):
+    if path.endswith(".json"):
+        with fsspec.open(path, "w") as f:
+            json.dump(obj, f, indent=4)
+    elif path.endswith(".pth"):
+        with fsspec.open(path, "wb") as f:
+            torch.save(obj, f)
+    else:
+        raise ValueError("Unsupported file type")
+
+
 class BaseIDManager:
     """Base `ID` Manager class. Every new `ID` manager must inherit this.
     It defines common `ID` manager specific functions.

@@ -46,7 +68,7 @@ class BaseIDManager:
         Args:
             file_path (str): Path to the file.
         """
-        self.ids = self._load_json(file_path)
+        self.ids = load_file(file_path)

     def save_ids_to_file(self, file_path: str) -> None:
         """Save IDs to a json file.

@@ -54,7 +76,7 @@ class BaseIDManager:
         Args:
             file_path (str): Path to the output file.
         """
-        self._save_json(file_path, self.ids)
+        save_file(self.ids, file_path)

     def get_random_id(self) -> Any:
         """Get a random embedding.

@@ -125,7 +147,7 @@ class EmbeddingManager(BaseIDManager):
         Args:
             file_path (str): Path to the output file.
         """
-        self._save_json(file_path, self.embeddings)
+        save_file(self.embeddings, file_path)

     def load_embeddings_from_file(self, file_path: str) -> None:
         """Load embeddings from a json file.

@@ -133,7 +155,7 @@ class EmbeddingManager(BaseIDManager):
         Args:
             file_path (str): Path to the target json file.
         """
-        self.embeddings = self._load_json(file_path)
+        self.embeddings = load_file(file_path)

         speakers = sorted({x["name"] for x in self.embeddings.values()})
         self.ids = {name: i for i, name in enumerate(speakers)}
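A minimal usage sketch of the new module-level helpers in `TTS.tts.utils.managers`, assuming a TTS install that includes this commit; the file names and the mapping contents below are illustrative placeholders, not data from the repo. `.pth` paths go through `torch.save`/`torch.load` via `fsspec`, while `.json` keeps the old human-readable format:

from TTS.tts.utils.managers import load_file, save_file

speaker_mapping = {
    "clip_0001.wav": {"name": "speaker_a", "embedding": [0.1, 0.2, 0.3]},
}

# Torch serialization for `.pth` ...
save_file(speaker_mapping, "dummy_speakers.pth")
assert load_file("dummy_speakers.pth") == speaker_mapping

# ... and plain JSON for `.json`.
save_file(speaker_mapping, "dummy_speakers.json")
assert load_file("dummy_speakers.json") == speaker_mapping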
@ -16,6 +16,7 @@ encoder_model_path = os.path.join(get_tests_input_path(), "checkpoint_0.pth")
|
||||||
sample_wav_path = os.path.join(get_tests_input_path(), "../data/ljspeech/wavs/LJ001-0001.wav")
|
sample_wav_path = os.path.join(get_tests_input_path(), "../data/ljspeech/wavs/LJ001-0001.wav")
|
||||||
sample_wav_path2 = os.path.join(get_tests_input_path(), "../data/ljspeech/wavs/LJ001-0002.wav")
|
sample_wav_path2 = os.path.join(get_tests_input_path(), "../data/ljspeech/wavs/LJ001-0002.wav")
|
||||||
d_vectors_file_path = os.path.join(get_tests_input_path(), "../data/dummy_speakers.json")
|
d_vectors_file_path = os.path.join(get_tests_input_path(), "../data/dummy_speakers.json")
|
||||||
|
d_vectors_file_pth_path = os.path.join(get_tests_input_path(), "../data/dummy_speakers.pth")
|
||||||
|
|
||||||
|
|
||||||
class SpeakerManagerTest(unittest.TestCase):
|
class SpeakerManagerTest(unittest.TestCase):
|
||||||
|
@ -58,12 +59,13 @@ class SpeakerManagerTest(unittest.TestCase):
|
||||||
# remove dummy model
|
# remove dummy model
|
||||||
os.remove(encoder_model_path)
|
os.remove(encoder_model_path)
|
||||||
|
|
||||||
@staticmethod
|
def test_speakers_file_processing(self):
|
||||||
def test_speakers_file_processing():
|
|
||||||
manager = SpeakerManager(d_vectors_file_path=d_vectors_file_path)
|
manager = SpeakerManager(d_vectors_file_path=d_vectors_file_path)
|
||||||
print(manager.num_speakers)
|
self.assertEqual(manager.num_speakers, 1)
|
||||||
print(manager.embedding_dim)
|
self.assertEqual(manager.embedding_dim, 256)
|
||||||
print(manager.clip_ids)
|
manager = SpeakerManager(d_vectors_file_path=d_vectors_file_pth_path)
|
||||||
|
self.assertEqual(manager.num_speakers, 1)
|
||||||
|
self.assertEqual(manager.embedding_dim, 256)
|
||||||
d_vector = manager.get_embedding_by_clip(manager.clip_ids[0])
|
d_vector = manager.get_embedding_by_clip(manager.clip_ids[0])
|
||||||
assert len(d_vector) == 256
|
assert len(d_vector) == 256
|
||||||
d_vectors = manager.get_embeddings_by_name(manager.speaker_names[0])
|
d_vectors = manager.get_embeddings_by_name(manager.speaker_names[0])
|
||||||
|
|
Binary file not shown (the new dummy_speakers.pth test fixture).