From 48f6bb405ac90295368ec53329e87055fbf809bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 10 Mar 2022 11:36:38 +0100
Subject: [PATCH] Fix recipes for the recent API changes. (#1367)

* Fix recipes -> #1366

* Fix trainer docs
---
 docs/source/main_classes/trainer_api.md          | 16 +---------------
 recipes/ljspeech/hifigan/train_hifigan.py        |  5 ++---
 .../multiband_melgan/train_multiband_melgan.py   |  5 ++---
 .../ljspeech/tacotron2-DDC/train_tacotron_ddc.py |  3 +--
 recipes/ljspeech/univnet/train.py                |  5 ++---
 recipes/multilingual/vits_tts/train_vits_tts.py  |  3 +--
 6 files changed, 9 insertions(+), 28 deletions(-)

diff --git a/docs/source/main_classes/trainer_api.md b/docs/source/main_classes/trainer_api.md
index a5c3cfb7..f765fff7 100644
--- a/docs/source/main_classes/trainer_api.md
+++ b/docs/source/main_classes/trainer_api.md
@@ -1,17 +1,3 @@
 # Trainer API
 
-The {class}`TTS.trainer.Trainer` provides a lightweight, extensible, and feature-complete training run-time. We optimized it for 🐸 but
-can also be used for any DL training in different domains. It supports distributed multi-gpu, mixed-precision (apex or torch.amp) training.
-
-
-## Trainer
-```{eval-rst}
-.. autoclass:: TTS.trainer.Trainer
-    :members:
-```
-
-## TrainingArgs
-```{eval-rst}
-.. autoclass:: TTS.trainer.TrainingArgs
-    :members:
-```
\ No newline at end of file
+We made the trainer a separate project at https://github.com/coqui-ai/Trainer
diff --git a/recipes/ljspeech/hifigan/train_hifigan.py b/recipes/ljspeech/hifigan/train_hifigan.py
index 1e5bbf30..6a739009 100644
--- a/recipes/ljspeech/hifigan/train_hifigan.py
+++ b/recipes/ljspeech/hifigan/train_hifigan.py
@@ -37,7 +37,7 @@ ap = AudioProcessor(**config.audio.to_dict())
 eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
 
 # init model
-model = GAN(config)
+model = GAN(config, ap)
 
 # init the trainer and 🚀
 trainer = Trainer(
@@ -46,7 +46,6 @@ trainer = Trainer(
     output_path,
     model=model,
     train_samples=train_samples,
-    eval_samples=eval_samples,
-    training_assets={"audio_processor": ap},
+    eval_samples=eval_samples
 )
 trainer.fit()
diff --git a/recipes/ljspeech/multiband_melgan/train_multiband_melgan.py b/recipes/ljspeech/multiband_melgan/train_multiband_melgan.py
index 40ff5a00..d5ca9a76 100644
--- a/recipes/ljspeech/multiband_melgan/train_multiband_melgan.py
+++ b/recipes/ljspeech/multiband_melgan/train_multiband_melgan.py
@@ -37,7 +37,7 @@ ap = AudioProcessor(**config.audio.to_dict())
 eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
 
 # init model
-model = GAN(config)
+model = GAN(config, ap)
 
 # init the trainer and 🚀
 trainer = Trainer(
@@ -46,7 +46,6 @@ trainer = Trainer(
     output_path,
     model=model,
     train_samples=train_samples,
-    eval_samples=eval_samples,
-    training_assets={"audio_processor": ap},
+    eval_samples=eval_samples
 )
 trainer.fit()
diff --git a/recipes/ljspeech/tacotron2-DDC/train_tacotron_ddc.py b/recipes/ljspeech/tacotron2-DDC/train_tacotron_ddc.py
index d00f8ed7..a0ff8b02 100644
--- a/recipes/ljspeech/tacotron2-DDC/train_tacotron_ddc.py
+++ b/recipes/ljspeech/tacotron2-DDC/train_tacotron_ddc.py
@@ -89,7 +89,6 @@ trainer = Trainer(
     output_path,
     model=model,
     train_samples=train_samples,
-    eval_samples=eval_samples,
-    training_assets={"audio_processor": ap},
+    eval_samples=eval_samples
 )
 trainer.fit()
diff --git a/recipes/ljspeech/univnet/train.py b/recipes/ljspeech/univnet/train.py
index 19c91925..592b9a76 100644
--- a/recipes/ljspeech/univnet/train.py
+++ b/recipes/ljspeech/univnet/train.py
@@ -36,7 +36,7 @@ ap = AudioProcessor(**config.audio.to_dict())
 eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
 
 # init model
-model = GAN(config)
+model = GAN(config, ap)
 
 # init the trainer and 🚀
 trainer = Trainer(
@@ -45,7 +45,6 @@ trainer = Trainer(
     output_path,
     model=model,
     train_samples=train_samples,
-    eval_samples=eval_samples,
-    training_assets={"audio_processor": ap},
+    eval_samples=eval_samples
 )
 trainer.fit()
diff --git a/recipes/multilingual/vits_tts/train_vits_tts.py b/recipes/multilingual/vits_tts/train_vits_tts.py
index 3f35275a..c4ed0dda 100644
--- a/recipes/multilingual/vits_tts/train_vits_tts.py
+++ b/recipes/multilingual/vits_tts/train_vits_tts.py
@@ -136,7 +136,6 @@ trainer = Trainer(
     output_path,
     model=model,
     train_samples=train_samples,
-    eval_samples=eval_samples,
-    training_assets={"audio_processor": ap},
+    eval_samples=eval_samples
 )
 trainer.fit()
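For context, the recipe pattern after this patch looks roughly like the sketch below. It is a minimal, abridged take on recipes/ljspeech/hifigan/train_hifigan.py: the AudioProcessor now goes to the model constructor instead of being handed to the Trainer through the removed `training_assets` argument. The import paths, the `TrainerArgs()`/`config` positional arguments, the `HifiganConfig` fields, and the `data_path` placeholder are assumptions based on the recipes of this era; only the `GAN(config, ap)` and `eval_samples=eval_samples` changes come from the diff above.

```python
import os

# The Trainer lives in its own package now (https://github.com/coqui-ai/Trainer);
# this import path is assumed, not shown in the diff itself.
from trainer import Trainer, TrainerArgs

from TTS.utils.audio import AudioProcessor
from TTS.vocoder.configs import HifiganConfig
from TTS.vocoder.datasets.preprocess import load_wav_data
from TTS.vocoder.models.gan import GAN

output_path = os.path.dirname(os.path.abspath(__file__))

# Abridged config; the real recipe sets many more fields. The data_path is a placeholder.
config = HifiganConfig(
    data_path="/path/to/LJSpeech-1.1/wavs/",
    output_path=output_path,
)

# Audio processing and data loading are unchanged by this patch.
ap = AudioProcessor(**config.audio.to_dict())
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)

# After this patch the AudioProcessor is passed to the model constructor ...
model = GAN(config, ap)

# ... and the recipes no longer pass a `training_assets` dict to the Trainer.
trainer = Trainer(
    TrainerArgs(),
    config,
    output_path,
    model=model,
    train_samples=train_samples,
    eval_samples=eval_samples,
)
trainer.fit()
```

The same substitution applies to the multiband_melgan and univnet recipes; the tacotron2-DDC and vits_tts recipes only drop the `training_assets` argument.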