mirror of https://github.com/coqui-ai/TTS.git
Enforce phonemizer definition for synthesis (#1441)
* Enforce phonemizer definition for synthesis
* Fix train_tts, tokenizer init can now edit config
* Add small change to trigger CI pipeline
* Fix wrong output path for one tts_test
* Fix style
* Test config overrides by args and tokenizer
* Fix style
parent 37896e1743
commit c66a6241fd
@@ -57,7 +57,7 @@ def main():
     # init the trainer and 🚀
     trainer = Trainer(
         train_args,
-        config,
+        model.config,
         config.output_path,
         model=model,
         train_samples=train_samples,
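This one-line change is the heart of the `train_tts` fix: tokenizer initialization can now edit the config, the model keeps that edited copy, so the `Trainer` must be handed `model.config` rather than the stale `config` object it was created from. A minimal sketch of the failure mode this avoids, using illustrative stand-in names (`Config` and `init_tokenizer` are not the repo's API):

```python
from dataclasses import dataclass, replace
from typing import Optional

@dataclass(frozen=True)
class Config:
    use_phonemes: bool = True
    phonemizer: Optional[str] = None  # resolved later, during tokenizer init

def init_tokenizer(config: Config) -> Config:
    # Stand-in for TTSTokenizer.init_from_config resolving e.g. "en-us" -> espeak.
    return replace(config, phonemizer="espeak")

config = Config()
model_config = init_tokenizer(config)        # the edited copy the model keeps
assert config.phonemizer is None             # stale copy: synthesis would reject it
assert model_config.phonemizer == "espeak"   # what the Trainer should persist
```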
@@ -191,6 +191,7 @@ class TTSTokenizer:
                 phonemizer = get_phonemizer_by_name(
                     DEF_LANG_TO_PHONEMIZER[config.phoneme_language], **phonemizer_kwargs
                 )
+                new_config.phonemizer = phonemizer.name()
             except KeyError as e:
                 raise ValueError(
                     f"""No phonemizer found for language {config.phoneme_language}.
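The added assignment records the resolved phonemizer's name back into `new_config`, so it lands in the training config that gets saved to disk and synthesis can rely on it later. A hedged sketch of this lookup-and-record pattern on plain dicts (the table below is a toy stand-in for `DEF_LANG_TO_PHONEMIZER`):

```python
# Toy stand-in for the language -> phonemizer table used in the hunk above.
DEF_LANG_TO_PHONEMIZER = {"en-us": "espeak", "zh-cn": "zh_cn_phonemizer"}

def resolve_phonemizer(new_config: dict) -> dict:
    """Resolve a phonemizer from the config language and record its name."""
    language = new_config["phoneme_language"]
    try:
        new_config["phonemizer"] = DEF_LANG_TO_PHONEMIZER[language]
    except KeyError as e:
        raise ValueError(f"No phonemizer found for language {language}.") from e
    return new_config

assert resolve_phonemizer({"phoneme_language": "en-us"})["phonemizer"] == "espeak"
```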
@@ -112,6 +112,9 @@ class Synthesizer(object):
         self.use_phonemes = self.tts_config.use_phonemes
         self.tts_model = setup_tts_model(config=self.tts_config)

+        if self.use_phonemes and self.tts_config["phonemizer"] is None:
+            raise ValueError("Phonemizer is not defined in the TTS config.")
+
         if not self.encoder_checkpoint:
             self._set_speaker_encoder_paths_from_tts_config()
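With the phonemizer name persisted in the config, the `Synthesizer` can now fail fast at load time instead of running phoneme-based synthesis with an undefined phonemizer. A self-contained sketch of the guard's behavior, using plain dicts in place of the real `tts_config`:

```python
def check_phonemizer(tts_config: dict) -> None:
    # Mirrors the guard added above: phoneme-based configs must name a phonemizer.
    if tts_config.get("use_phonemes") and tts_config.get("phonemizer") is None:
        raise ValueError("Phonemizer is not defined in the TTS config.")

check_phonemizer({"use_phonemes": False})                         # character input: ok
check_phonemizer({"use_phonemes": True, "phonemizer": "espeak"})  # ok
try:
    check_phonemizer({"use_phonemes": True, "phonemizer": None})
except ValueError as err:
    print(err)  # -> Phonemizer is not defined in the TTS config.
```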
@@ -25,7 +25,7 @@ tensorboardX
 pyworld
 # coqui stack
 coqui-trainer
-coqpit # config managemenr
+coqpit # config management
 # chinese g2p deps
 jieba
 pypinyin
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -42,7 +43,7 @@ command_train = (
     "--coqpit.datasets.0.meta_file_train metadata.csv "
     "--coqpit.datasets.0.meta_file_val metadata.csv "
     "--coqpit.datasets.0.path tests/data/ljspeech "
-    "--coqpit.test_delay_epochs -1"
+    "--coqpit.test_delay_epochs 0 "
 )
 run_cli(command_train)

@@ -54,6 +55,14 @@ continue_config_path = os.path.join(continue_path, "config.json")
 continue_restore_path, _ = get_last_checkpoint(continue_path)
 out_wav_path = os.path.join(get_tests_output_path(), "output.wav")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
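The same eight added lines recur in each of the restore tests that follow: they check that CLI `--coqpit.*` overrides (`test_delay_epochs`, `output_path`) and the tokenizer's edits (`characters`) were actually persisted into the saved `config.json`. A hedged sketch of the check factored into a helper (the function name is illustrative, not part of the repo):

```python
import json

def assert_config_integrity(config_path: str, continue_path: str) -> None:
    """Verify that arg and tokenizer overrides survived into the saved config."""
    with open(config_path, "r", encoding="utf-8") as f:
        cfg = json.load(f)
    assert cfg["characters"] is not None        # tokenizer wrote characters back
    assert cfg["output_path"] in continue_path  # --coqpit.output_path took effect
    assert cfg["test_delay_epochs"] == 0        # CLI override was persisted
```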
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -74,6 +75,14 @@ out_wav_path = os.path.join(get_tests_output_path(), "output.wav")
 speaker_id = "ljspeech-1"
 continue_speakers_path = os.path.join(continue_path, "speakers.json")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -73,6 +74,14 @@ continue_config_path = os.path.join(continue_path, "config.json")
 continue_restore_path, _ = get_last_checkpoint(continue_path)
 out_wav_path = os.path.join(get_tests_output_path(), "output.wav")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -61,6 +62,14 @@ out_wav_path = os.path.join(get_tests_output_path(), "output.wav")
 speaker_id = "ljspeech-1"
 continue_speakers_path = config.d_vector_file

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -58,6 +59,14 @@ out_wav_path = os.path.join(get_tests_output_path(), "output.wav")
 speaker_id = "ljspeech-1"
 continue_speakers_path = os.path.join(continue_path, "speakers.json")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -55,6 +56,14 @@ continue_config_path = os.path.join(continue_path, "config.json")
 continue_restore_path, _ = get_last_checkpoint(continue_path)
 out_wav_path = os.path.join(get_tests_output_path(), "output.wav")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -54,6 +55,14 @@ continue_config_path = os.path.join(continue_path, "config.json")
 continue_restore_path, _ = get_last_checkpoint(continue_path)
 out_wav_path = os.path.join(get_tests_output_path(), "output.wav")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example for it.' --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -61,6 +62,14 @@ out_wav_path = os.path.join(get_tests_output_path(), "output.wav")
 speaker_id = "ljspeech-1"
 continue_speakers_path = config.d_vector_file

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -59,6 +60,14 @@ out_wav_path = os.path.join(get_tests_output_path(), "output.wav")
 speaker_id = "ljspeech-1"
 continue_speakers_path = os.path.join(continue_path, "speakers.json")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -54,6 +55,14 @@ continue_config_path = os.path.join(continue_path, "config.json")
 continue_restore_path, _ = get_last_checkpoint(continue_path)
 out_wav_path = os.path.join(get_tests_output_path(), "output.wav")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,55 +0,0 @@
-import glob
-import os
-import shutil
-
-from tests import get_device_id, get_tests_output_path, run_cli
-from TTS.tts.configs.tacotron2_config import Tacotron2Config
-
-config_path = os.path.join(get_tests_output_path(), "test_model_config.json")
-output_path = os.path.join(get_tests_output_path(), "train_outputs")
-
-config = Tacotron2Config(
-    r=5,
-    batch_size=8,
-    eval_batch_size=8,
-    num_loader_workers=0,
-    num_eval_loader_workers=0,
-    text_cleaner="english_cleaners",
-    use_phonemes=False,
-    phoneme_language="en-us",
-    phoneme_cache_path=os.path.join(get_tests_output_path(), "train_outputs/phoneme_cache/"),
-    run_eval=True,
-    test_delay_epochs=-1,
-    epochs=1,
-    print_step=1,
-    test_sentences=[
-        "Be a voice, not an echo.",
-    ],
-    print_eval=True,
-    max_decoder_steps=50,
-)
-config.audio.do_trim_silence = True
-config.audio.trim_db = 60
-config.save_json(config_path)
-
-# train the model for one epoch
-command_train = (
-    f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --config_path file://{config_path} "
-    f"--coqpit.output_path file://{output_path} "
-    "--coqpit.datasets.0.name ljspeech "
-    "--coqpit.datasets.0.meta_file_train metadata.csv "
-    "--coqpit.datasets.0.meta_file_val metadata.csv "
-    "--coqpit.datasets.0.path tests/data/ljspeech "
-    "--coqpit.test_delay_epochs 0 "
-)
-run_cli(command_train)
-
-# Find latest folder
-continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)
-
-# restore the model and continue training for one more epoch
-command_train = (
-    f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --continue_path file://{continue_path} "
-)
-run_cli(command_train)
-shutil.rmtree(continue_path)
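The file removed above was the test variant that drove `train_tts.py` through fsspec-style `file://` URLs instead of plain local paths. A small sketch of the equivalence those URLs rely on, assuming the `fsspec` package (used by the coqui trainer stack; the path below is illustrative):

```python
import fsspec

# A "file://" URL and a plain local path address the same file on disk.
with fsspec.open("file:///tmp/tts_example_config.json", "w") as f:
    f.write("{}")

with open("/tmp/tts_example_config.json", "r") as f:
    assert f.read() == "{}"
```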
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -92,6 +93,14 @@ languae_id = "en"
 continue_speakers_path = os.path.join(continue_path, "speakers.json")
 continue_languages_path = os.path.join(continue_path, "language_ids.json")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --language_ids_file_path {continue_languages_path} --language_idx {languae_id} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -99,6 +100,14 @@ languae_id = "en"
 continue_speakers_path = config.d_vector_file
 continue_languages_path = os.path.join(continue_path, "language_ids.json")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --language_ids_file_path {continue_languages_path} --language_idx {languae_id} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -65,6 +66,14 @@ out_wav_path = os.path.join(get_tests_output_path(), "output.wav")
 speaker_id = "ljspeech-1"
 continue_speakers_path = os.path.join(continue_path, "speakers.json")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --speaker_idx {speaker_id} --speakers_file_path {continue_speakers_path} --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)
@@ -1,4 +1,5 @@
 import glob
+import json
 import os
 import shutil

@@ -54,6 +55,14 @@ continue_config_path = os.path.join(continue_path, "config.json")
 continue_restore_path, _ = get_last_checkpoint(continue_path)
 out_wav_path = os.path.join(get_tests_output_path(), "output.wav")

+# Check integrity of the config
+with open(continue_config_path, "r", encoding="utf-8") as f:
+    config_loaded = json.load(f)
+assert config_loaded["characters"] is not None
+assert config_loaded["output_path"] in continue_path
+assert config_loaded["test_delay_epochs"] == 0
+
+# Load the model and run inference
 inference_command = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' --config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
 run_cli(inference_command)