mirror of https://github.com/coqui-ai/TTS.git
Allow ignore speakers for all multispeaker datasets
parent 234a4aacb3
commit c9c1960040
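The commit threads a new "ununsed_speakers" entry (sic: the misspelling is the actual identifier used throughout this commit) from each dataset config into every formatter, so the listed speakers are skipped while samples are loaded. A minimal, hypothetical usage sketch follows; it is not part of the commit, and the "name" key, the import path, the dataset path, and the returned tuple are assumptions based on the surrounding code:

# Hypothetical sketch: drop two VCTK speakers while loading samples.
# The dict keys mirror the lookups in the first hunk below.
from TTS.tts.datasets import load_tts_samples

datasets = [{
    "name": "vctk",                        # resolved via _get_formatter_by_name
    "path": "/data/VCTK-Corpus",           # illustrative path
    "meta_file_train": None,               # vctk globs its own txt files
    "meta_file_val": None,
    "ununsed_speakers": ["p225", "p226"],  # speakers to exclude
}]

meta_data_train, meta_data_eval = load_tts_samples(datasets, eval_split=True)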
@@ -67,16 +67,17 @@ def load_tts_samples(
         root_path = dataset["path"]
         meta_file_train = dataset["meta_file_train"]
         meta_file_val = dataset["meta_file_val"]
+        ununsed_speakers = dataset["ununsed_speakers"]
         # setup the right data processor
         if formatter is None:
             formatter = _get_formatter_by_name(name)
         # load train set
-        meta_data_train = formatter(root_path, meta_file_train)
+        meta_data_train = formatter(root_path, meta_file_train, ununsed_speakers=ununsed_speakers)
         print(f" | > Found {len(meta_data_train)} files in {Path(root_path).resolve()}")
         # load evaluation split if set
         if eval_split:
             if meta_file_val:
-                meta_data_eval = formatter(root_path, meta_file_val)
+                meta_data_eval = formatter(root_path, meta_file_val, ununsed_speakers=ununsed_speakers)
             else:
                 meta_data_eval, meta_data_train = split_dataset(meta_data_train)
             meta_data_eval_all += meta_data_eval
@@ -12,7 +12,7 @@ from tqdm import tqdm
 ########################


-def tweb(root_path, meta_file):
+def tweb(root_path, meta_file, **kwargs):
     """Normalize TWEB dataset.
     https://www.kaggle.com/bryanpark/the-world-english-bible-speech-dataset
     """
@@ -28,7 +28,7 @@ def tweb(root_path, meta_file):
     return items


-def mozilla(root_path, meta_file):
+def mozilla(root_path, meta_file, **kwargs):
     """Normalizes Mozilla meta data files to TTS format"""
     txt_file = os.path.join(root_path, meta_file)
     items = []
@@ -43,7 +43,7 @@ def mozilla(root_path, meta_file):
     return items


-def mozilla_de(root_path, meta_file):
+def mozilla_de(root_path, meta_file, **kwargs):
     """Normalizes Mozilla meta data files to TTS format"""
     txt_file = os.path.join(root_path, meta_file)
     items = []
@@ -82,6 +82,10 @@ def mailabs(root_path, meta_files=None):
         if speaker_name_match is None:
             continue
         speaker_name = speaker_name_match.group("speaker_name")
+        # ignore speakers
+        if isinstance(ununsed_speakers, list):
+            if speaker_name in ununsed_speakers:
+                continue
         print(" | > {}".format(csv_file))
         with open(txt_file, "r", encoding="utf-8") as ttf:
             for line in ttf:
@@ -98,7 +102,7 @@ def mailabs(root_path, meta_files=None):
     return items


-def ljspeech(root_path, meta_file):
+def ljspeech(root_path, meta_file, **kwargs):
     """Normalizes the LJSpeech meta data file to TTS format
     https://keithito.com/LJ-Speech-Dataset/"""
     txt_file = os.path.join(root_path, meta_file)
@@ -113,7 +117,7 @@ def ljspeech(root_path, meta_file):
     return items


-def ljspeech_test(root_path, meta_file):
+def ljspeech_test(root_path, meta_file, **kwargs):
     """Normalizes the LJSpeech meta data file for TTS testing
     https://keithito.com/LJ-Speech-Dataset/"""
     txt_file = os.path.join(root_path, meta_file)
@@ -127,7 +131,7 @@ def ljspeech_test(root_path, meta_file):
     return items


-def sam_accenture(root_path, meta_file):
+def sam_accenture(root_path, meta_file, **kwargs):
     """Normalizes the sam-accenture meta data file to TTS format
     https://github.com/Sam-Accenture-Non-Binary-Voice/non-binary-voice-files"""
     xml_file = os.path.join(root_path, "voice_over_recordings", meta_file)
@@ -144,12 +148,12 @@ def sam_accenture(root_path, meta_file):
     return items


-def ruslan(root_path, meta_file):
+def ruslan(root_path, meta_file, **kwargs):
     """Normalizes the RUSLAN meta data file to TTS format
     https://ruslan-corpus.github.io/"""
     txt_file = os.path.join(root_path, meta_file)
     items = []
-    speaker_name = "ljspeech"
+    speaker_name = "ruslan"
     with open(txt_file, "r", encoding="utf-8") as ttf:
         for line in ttf:
             cols = line.split("|")
@@ -159,11 +163,11 @@ def ruslan(root_path, meta_file):
     return items


-def css10(root_path, meta_file):
+def css10(root_path, meta_file, **kwargs):
     """Normalizes the CSS10 dataset file to TTS format"""
     txt_file = os.path.join(root_path, meta_file)
     items = []
-    speaker_name = "ljspeech"
+    speaker_name = "css10"
     with open(txt_file, "r", encoding="utf-8") as ttf:
         for line in ttf:
             cols = line.split("|")
@@ -173,7 +177,7 @@ def css10(root_path, meta_file):
     return items


-def nancy(root_path, meta_file):
+def nancy(root_path, meta_file, **kwargs):
     """Normalizes the Nancy meta data file to TTS format"""
     txt_file = os.path.join(root_path, meta_file)
     items = []
@@ -187,7 +191,7 @@ def nancy(root_path, meta_file):
     return items


-def common_voice(root_path, meta_file):
+def common_voice(root_path, meta_file, ununsed_speakers=None):
     """Normalize the common voice meta data file to TTS format."""
     txt_file = os.path.join(root_path, meta_file)
     items = []
@@ -198,12 +202,16 @@ def common_voice(root_path, meta_file):
             cols = line.split("\t")
             text = cols[2]
             speaker_name = cols[0]
+            # ignore speakers
+            if isinstance(ununsed_speakers, list):
+                if speaker_name in ununsed_speakers:
+                    continue
             wav_file = os.path.join(root_path, "clips", cols[1].replace(".mp3", ".wav"))
             items.append([text, wav_file, "MCV_" + speaker_name])
     return items


-def libri_tts(root_path, meta_files=None):
+def libri_tts(root_path, meta_files=None, ununsed_speakers=None):
     """https://ai.google/tools/datasets/libri-tts/"""
     items = []
     if meta_files is None:
@@ -222,13 +230,17 @@ def libri_tts(root_path, meta_files=None):
                 _root_path = os.path.join(root_path, f"{speaker_name}/{chapter_id}")
                 wav_file = os.path.join(_root_path, file_name + ".wav")
                 text = cols[2]
+                # ignore speakers
+                if isinstance(ununsed_speakers, list):
+                    if speaker_name in ununsed_speakers:
+                        continue
                 items.append([text, wav_file, "LTTS_" + speaker_name])
     for item in items:
         assert os.path.exists(item[1]), f" [!] wav files don't exist - {item[1]}"
     return items


-def custom_turkish(root_path, meta_file):
+def custom_turkish(root_path, meta_file, **kwargs):
     txt_file = os.path.join(root_path, meta_file)
     items = []
     speaker_name = "turkish-female"
@@ -247,7 +259,7 @@ def custom_turkish(root_path, meta_file):


 # ToDo: add the dataset link when the dataset is released publicly
-def brspeech(root_path, meta_file):
+def brspeech(root_path, meta_file, ununsed_speakers=None):
     """BRSpeech 3.0 beta"""
     txt_file = os.path.join(root_path, meta_file)
     items = []
@@ -258,21 +270,25 @@ def brspeech(root_path, meta_file):
             cols = line.split("|")
             wav_file = os.path.join(root_path, cols[0])
             text = cols[2]
-            speaker_name = cols[3]
-            items.append([text, wav_file, speaker_name])
+            speaker_id = cols[3]
+            # ignore speakers
+            if isinstance(ununsed_speakers, list):
+                if speaker_id in ununsed_speakers:
+                    continue
+            items.append([text, wav_file, speaker_id])
     return items


-def vctk(root_path, meta_files=None, wavs_path="wav48"):
+def vctk(root_path, meta_files=None, wavs_path="wav48", ununsed_speakers=None):
     """homepages.inf.ed.ac.uk/jyamagis/release/VCTK-Corpus.tar.gz"""
-    test_speakers = meta_files
     items = []
     meta_files = glob(f"{os.path.join(root_path,'txt')}/**/*.txt", recursive=True)
     for meta_file in meta_files:
         _, speaker_id, txt_file = os.path.relpath(meta_file, root_path).split(os.sep)
         file_id = txt_file.split(".")[0]
-        if isinstance(test_speakers, list):  # if is list ignore this speakers ids
-            if speaker_id in test_speakers:
+        # ignore speakers
+        if isinstance(ununsed_speakers, list):
+            if speaker_id in ununsed_speakers:
                 continue
         with open(meta_file, "r", encoding="utf-8") as file_text:
             text = file_text.readlines()[0]
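Note the behavioral change in vctk visible in the removed lines: meta_files previously doubled as the speaker-exclusion list, while the new code gives exclusion its own parameter. A hypothetical before/after call, with an illustrative path:

# before this commit: speakers to skip were passed through meta_files
items = vctk("/data/VCTK-Corpus", meta_files=["p225"])
# after: exclusion is explicit and meta_files is no longer overloaded
items = vctk("/data/VCTK-Corpus", ununsed_speakers=["p225"])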
@@ -282,15 +298,16 @@ def vctk(root_path, meta_files=None, wavs_path="wav48"):
     return items


-def vctk_slim(root_path, meta_files=None, wavs_path="wav48"):
+def vctk_slim(root_path, meta_files=None, wavs_path="wav48", ununsed_speakers=None):
     """homepages.inf.ed.ac.uk/jyamagis/release/VCTK-Corpus.tar.gz"""
     items = []
     txt_files = glob(f"{os.path.join(root_path,'txt')}/**/*.txt", recursive=True)
     for text_file in txt_files:
         _, speaker_id, txt_file = os.path.relpath(text_file, root_path).split(os.sep)
         file_id = txt_file.split(".")[0]
-        if isinstance(meta_files, list):  # if is list ignore this speakers ids
-            if speaker_id in meta_files:
+        # ignore speakers
+        if isinstance(ununsed_speakers, list):
+            if speaker_id in ununsed_speakers:
                 continue
         wav_file = os.path.join(root_path, wavs_path, speaker_id, file_id + ".wav")
         items.append([None, wav_file, "VCTK_" + speaker_id])
@@ -298,7 +315,7 @@ def vctk_slim(root_path, meta_files=None, wavs_path="wav48"):
     return items


-def mls(root_path, meta_files=None):
+def mls(root_path, meta_files=None, ununsed_speakers=None):
     """http://www.openslr.org/94/"""
     items = []
     with open(os.path.join(root_path, meta_files), "r", encoding="utf-8") as meta:
@@ -307,19 +324,23 @@ def mls(root_path, meta_files=None):
             text = text[:-1]
             speaker, book, *_ = file.split("_")
             wav_file = os.path.join(root_path, os.path.dirname(meta_files), "audio", speaker, book, file + ".wav")
+            # ignore speakers
+            if isinstance(ununsed_speakers, list):
+                if speaker in ununsed_speakers:
+                    continue
             items.append([text, wav_file, "MLS_" + speaker])
     return items


 # ======================================== VOX CELEB ===========================================
-def voxceleb2(root_path, meta_file=None):
+def voxceleb2(root_path, meta_file=None, **kwargs):
     """
     :param meta_file Used only for consistency with load_tts_samples api
     """
     return _voxcel_x(root_path, meta_file, voxcel_idx="2")


-def voxceleb1(root_path, meta_file=None):
+def voxceleb1(root_path, meta_file=None, **kwargs):
     """
     :param meta_file Used only for consistency with load_tts_samples api
     """
@@ -361,7 +382,7 @@ def _voxcel_x(root_path, meta_file, voxcel_idx):
         return [x.strip().split("|") for x in f.readlines()]


-def baker(root_path: str, meta_file: str) -> List[List[str]]:
+def baker(root_path: str, meta_file: str, **kwargs) -> List[List[str]]:
     """Normalizes the Baker meta data file to TTS format

     Args:
|
|||
return items
|
||||
|
||||
|
||||
def kokoro(root_path, meta_file):
|
||||
def kokoro(root_path, meta_file, **kwargs):
|
||||
"""Japanese single-speaker dataset from https://github.com/kaiidams/Kokoro-Speech-Dataset"""
|
||||
txt_file = os.path.join(root_path, meta_file)
|
||||
items = []
|
||||
|
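Every multi-speaker formatter above inlines the same four-line filter. A self-contained sketch of that pattern with made-up sample data (not code from the commit):

# Recurring filter: keep only items whose speaker is not listed in
# ununsed_speakers; None (the default) disables filtering entirely.
def filter_speakers(items, ununsed_speakers=None):
    kept = []
    for text, wav_file, speaker_name in items:
        # ignore speakers, exactly as the formatters above do inline
        if isinstance(ununsed_speakers, list):
            if speaker_name in ununsed_speakers:
                continue
        kept.append([text, wav_file, speaker_name])
    return kept

samples = [["hello", "a.wav", "VCTK_p225"], ["world", "b.wav", "VCTK_p300"]]
print(filter_speakers(samples, ununsed_speakers=["VCTK_p225"]))
# -> [['world', 'b.wav', 'VCTK_p300']]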