mirror of https://github.com/coqui-ai/TTS.git
update processors for loading attention maps
This commit is contained in:
parent
fa6907fa0e
commit
27a75de15f
|
@ -8,6 +8,9 @@ from tqdm import tqdm
|
|||
|
||||
from TTS.tts.utils.generic_utils import split_dataset
|
||||
|
||||
####################
|
||||
# UTILITIES
|
||||
####################
|
||||
|
||||
def load_meta_data(datasets, eval_split=True):
|
||||
meta_data_train_all = []
|
||||
|
@ -17,9 +20,12 @@ def load_meta_data(datasets, eval_split=True):
|
|||
root_path = dataset['path']
|
||||
meta_file_train = dataset['meta_file_train']
|
||||
meta_file_val = dataset['meta_file_val']
|
||||
# setup the right data processor
|
||||
preprocessor = get_preprocessor_by_name(name)
|
||||
# load train set
|
||||
meta_data_train = preprocessor(root_path, meta_file_train)
|
||||
print(f" | > Found {len(meta_data_train)} files in {Path(root_path).resolve()}")
|
||||
# load evaluation split if set
|
||||
if eval_split:
|
||||
if meta_file_val is None:
|
||||
meta_data_eval, meta_data_train = split_dataset(meta_data_train)
|
||||
|
@ -27,15 +33,41 @@ def load_meta_data(datasets, eval_split=True):
|
|||
meta_data_eval = preprocessor(root_path, meta_file_val)
|
||||
meta_data_eval_all += meta_data_eval
|
||||
meta_data_train_all += meta_data_train
|
||||
# load attention masks for duration predictor training
|
||||
if 'meta_file_attn_mask' in dataset:
|
||||
meta_data = dict(load_attention_mask_meta_data(dataset['meta_file_attn_mask']))
|
||||
for idx, ins in enumerate(meta_data_train_all):
|
||||
attn_file = meta_data[ins[1]].strip()
|
||||
meta_data_train_all[idx].append(attn_file)
|
||||
if meta_data_eval_all is not None:
|
||||
for idx, ins in enumerate(meta_data_eval_all):
|
||||
attn_file = meta_data[ins[1]].strip()
|
||||
meta_data_eval_all[idx].append(attn_file)
|
||||
return meta_data_train_all, meta_data_eval_all
|
||||
|
||||
|
||||
def load_attention_mask_meta_data(metafile_path):
    """Load the meta data file created by compute_attention_masks.py.

    Each non-empty line of the file is expected to have the format
    ``<wav_file>|<attn_file>``.

    Args:
        metafile_path (str): path to the attention-mask meta data file.

    Returns:
        list: ``[wav_file, attn_file]`` pairs with surrounding whitespace
        (including the trailing newline) stripped from both fields.
    """
    meta_data = []
    with open(metafile_path, 'r') as f:
        # iterate lazily instead of readlines() - no need to hold the
        # whole file in memory
        for line in f:
            # skip blank lines so a trailing newline at EOF does not
            # crash the 2-value unpacking below
            if not line.strip():
                continue
            wav_file, attn_file = line.split('|')
            # strip the newline left by line iteration so callers do not
            # have to post-process the attention file path themselves
            meta_data.append([wav_file.strip(), attn_file.strip()])
    return meta_data
|
||||
|
||||
|
||||
def get_preprocessor_by_name(name):
    """Resolve a dataset name to its preprocessing function.

    The name is lower-cased and looked up among the functions defined in
    this module, e.g. ``"TWEB"`` resolves to :func:`tweb`.

    Raises:
        AttributeError: if no preprocessor with that name exists here.
    """
    return getattr(sys.modules[__name__], name.lower())
|
||||
|
||||
|
||||
########################
|
||||
# DATASETS
|
||||
########################
|
||||
|
||||
def tweb(root_path, meta_file):
|
||||
"""Normalize TWEB dataset.
|
||||
https://www.kaggle.com/bryanpark/the-world-english-bible-speech-dataset
|
||||
|
@ -52,19 +84,6 @@ def tweb(root_path, meta_file):
|
|||
return items
|
||||
|
||||
|
||||
# def kusal(root_path, meta_file):
|
||||
# txt_file = os.path.join(root_path, meta_file)
|
||||
# texts = []
|
||||
# wavs = []
|
||||
# with open(txt_file, "r", encoding="utf8") as f:
|
||||
# frames = [
|
||||
# line.split('\t') for line in f
|
||||
# if line.split('\t')[0] in self.wav_files_dict.keys()
|
||||
# ]
|
||||
# # TODO: code the rest
|
||||
# return {'text': texts, 'wavs': wavs}
|
||||
|
||||
|
||||
def mozilla(root_path, meta_file):
|
||||
"""Normalizes Mozilla meta data files to TTS format"""
|
||||
txt_file = os.path.join(root_path, meta_file)
|
||||
|
|
Loading…
Reference in New Issue