mirror of https://github.com/coqui-ai/TTS.git
remove the `ignore_generated_eval` flag from `load_meta_data` and its callers
This commit is contained in:
parent
d906fea08c
commit
b1620d1f3f
|
@ -32,8 +32,8 @@ args = parser.parse_args()
|
||||||
|
|
||||||
c_dataset = load_config(args.config_dataset_path)
|
c_dataset = load_config(args.config_dataset_path)
|
||||||
|
|
||||||
train_files, dev_files = load_meta_data(c_dataset.datasets, eval_split=args.eval, ignore_generated_eval=True)
|
meta_data_train, meta_data_eval = load_meta_data(c_dataset.datasets, eval_split=args.eval)
|
||||||
wav_files = train_files + dev_files
|
wav_files = meta_data_train + meta_data_eval
|
||||||
|
|
||||||
speaker_manager = SpeakerManager(encoder_model_path=args.model_path, encoder_config_path=args.config_path, use_cuda=args.use_cuda)
|
speaker_manager = SpeakerManager(encoder_model_path=args.model_path, encoder_config_path=args.config_path, use_cuda=args.use_cuda)
|
||||||
|
|
||||||
|
|
|
@ -227,7 +227,7 @@ def main(args): # pylint: disable=redefined-outer-name
|
||||||
ap = AudioProcessor(**c.audio)
|
ap = AudioProcessor(**c.audio)
|
||||||
|
|
||||||
# load data instances
|
# load data instances
|
||||||
meta_data_train, meta_data_eval = load_meta_data(c.datasets, eval_split=args.eval, ignore_generated_eval=True)
|
meta_data_train, meta_data_eval = load_meta_data(c.datasets, eval_split=args.eval)
|
||||||
|
|
||||||
# use eval and training partitions
|
# use eval and training partitions
|
||||||
meta_data = meta_data_train + meta_data_eval
|
meta_data = meta_data_train + meta_data_eval
|
||||||
|
|
|
@ -24,7 +24,7 @@ def main():
|
||||||
c = load_config(args.config_path)
|
c = load_config(args.config_path)
|
||||||
|
|
||||||
# load all datasets
|
# load all datasets
|
||||||
train_items, eval_items = load_meta_data(c.datasets, eval_split=True, ignore_generated_eval=True)
|
train_items, eval_items = load_meta_data(c.datasets, eval_split=True)
|
||||||
items = train_items + eval_items
|
items = train_items + eval_items
|
||||||
|
|
||||||
texts = "".join(item[0] for item in items)
|
texts = "".join(item[0] for item in items)
|
||||||
|
|
|
@ -30,7 +30,7 @@ def split_dataset(items):
|
||||||
return items[:eval_split_size], items[eval_split_size:]
|
return items[:eval_split_size], items[eval_split_size:]
|
||||||
|
|
||||||
|
|
||||||
def load_meta_data(datasets, eval_split=True, ignore_generated_eval=False):
|
def load_meta_data(datasets, eval_split=True):
|
||||||
meta_data_train_all = []
|
meta_data_train_all = []
|
||||||
meta_data_eval_all = [] if eval_split else None
|
meta_data_eval_all = [] if eval_split else None
|
||||||
for dataset in datasets:
|
for dataset in datasets:
|
||||||
|
@ -47,11 +47,9 @@ def load_meta_data(datasets, eval_split=True, ignore_generated_eval=False):
|
||||||
if eval_split:
|
if eval_split:
|
||||||
if meta_file_val:
|
if meta_file_val:
|
||||||
meta_data_eval = preprocessor(root_path, meta_file_val)
|
meta_data_eval = preprocessor(root_path, meta_file_val)
|
||||||
meta_data_eval_all += meta_data_eval
|
else:
|
||||||
elif not ignore_generated_eval:
|
|
||||||
meta_data_eval, meta_data_train = split_dataset(meta_data_train)
|
meta_data_eval, meta_data_train = split_dataset(meta_data_train)
|
||||||
meta_data_eval_all += meta_data_eval
|
meta_data_eval_all += meta_data_eval
|
||||||
|
|
||||||
meta_data_train_all += meta_data_train
|
meta_data_train_all += meta_data_train
|
||||||
# load attention masks for duration predictor training
|
# load attention masks for duration predictor training
|
||||||
if dataset.meta_file_attn_mask:
|
if dataset.meta_file_attn_mask:
|
||||||
|
|
Loading…
Reference in New Issue