From c437db15fdecd7ea2a525e0b95a416e5def40856 Mon Sep 17 00:00:00 2001 From: Eren Gölge Date: Tue, 17 May 2022 13:34:38 +0200 Subject: [PATCH] Fix dirt --- TTS/encoder/utils/generic_utils.py | 2 +- TTS/tts/datasets/__init__.py | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/TTS/encoder/utils/generic_utils.py b/TTS/encoder/utils/generic_utils.py index 91a896f6..e18aa0ee 100644 --- a/TTS/encoder/utils/generic_utils.py +++ b/TTS/encoder/utils/generic_utils.py @@ -9,7 +9,7 @@ from scipy import signal from TTS.encoder.models.lstm import LSTMSpeakerEncoder from TTS.encoder.models.resnet import ResNetSpeakerEncoder -from TTS.utils.io import save_fsspec +from trainer.io import save_fsspec class AugmentWAV(object): diff --git a/TTS/tts/datasets/__init__.py b/TTS/tts/datasets/__init__.py index 6c7c9edd..7fe3d65b 100644 --- a/TTS/tts/datasets/__init__.py +++ b/TTS/tts/datasets/__init__.py @@ -13,7 +13,6 @@ def split_dataset(items, eval_split_max_size=None, eval_split_size=0.01): """Split a dataset into train and eval. Consider speaker distribution in multi-speaker training. Args: - <<<<<<< HEAD items (List[List]): A list of samples. Each sample is a list of `[audio_path, text, speaker_id]`. @@ -23,9 +22,6 @@ def split_dataset(items, eval_split_max_size=None, eval_split_size=0.01): eval_split_size (float): If between 0.0 and 1.0 represents the proportion of the dataset to include in the evaluation set. If > 1, represents the absolute number of evaluation samples. Defaults to 0.01 (1%). - ======= - items (List[List]): A list of samples. Each sample is a list of `[text, audio_path, speaker_id]`. - >>>>>>> Fix docstring """ speakers = [item["speaker_name"] for item in items] is_multi_speaker = len(set(speakers)) > 1