mirror of https://github.com/coqui-ai/TTS.git
Fix recipes according to the recent API changes. (#1367)
* Fix recipes -> #1366 * Fix trainer docs
This commit is contained in:
parent
d792b78703
commit
48f6bb405a
|
@ -1,17 +1,3 @@
|
||||||
# Trainer API
|
# Trainer API
|
||||||
|
|
||||||
The {class}`TTS.trainer.Trainer` provides a lightweight, extensible, and feature-complete training run-time. We optimized it for 🐸 but
|
We made the trainer a separate project at https://github.com/coqui-ai/Trainer
|
||||||
can also be used for any DL training in different domains. It supports distributed multi-gpu, mixed-precision (apex or torch.amp) training.
|
|
||||||
|
|
||||||
|
|
||||||
## Trainer
|
|
||||||
```{eval-rst}
|
|
||||||
.. autoclass:: TTS.trainer.Trainer
|
|
||||||
:members:
|
|
||||||
```
|
|
||||||
|
|
||||||
## TrainingArgs
|
|
||||||
```{eval-rst}
|
|
||||||
.. autoclass:: TTS.trainer.TrainingArgs
|
|
||||||
:members:
|
|
||||||
```
|
|
||||||
|
|
|
@ -37,7 +37,7 @@ ap = AudioProcessor(**config.audio.to_dict())
|
||||||
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
|
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
|
||||||
|
|
||||||
# init model
|
# init model
|
||||||
model = GAN(config)
|
model = GAN(config, ap)
|
||||||
|
|
||||||
# init the trainer and 🚀
|
# init the trainer and 🚀
|
||||||
trainer = Trainer(
|
trainer = Trainer(
|
||||||
|
@ -46,7 +46,6 @@ trainer = Trainer(
|
||||||
output_path,
|
output_path,
|
||||||
model=model,
|
model=model,
|
||||||
train_samples=train_samples,
|
train_samples=train_samples,
|
||||||
eval_samples=eval_samples,
|
eval_samples=eval_samples
|
||||||
training_assets={"audio_processor": ap},
|
|
||||||
)
|
)
|
||||||
trainer.fit()
|
trainer.fit()
|
||||||
|
|
|
@ -37,7 +37,7 @@ ap = AudioProcessor(**config.audio.to_dict())
|
||||||
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
|
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
|
||||||
|
|
||||||
# init model
|
# init model
|
||||||
model = GAN(config)
|
model = GAN(config, ap)
|
||||||
|
|
||||||
# init the trainer and 🚀
|
# init the trainer and 🚀
|
||||||
trainer = Trainer(
|
trainer = Trainer(
|
||||||
|
@ -46,7 +46,6 @@ trainer = Trainer(
|
||||||
output_path,
|
output_path,
|
||||||
model=model,
|
model=model,
|
||||||
train_samples=train_samples,
|
train_samples=train_samples,
|
||||||
eval_samples=eval_samples,
|
eval_samples=eval_samples
|
||||||
training_assets={"audio_processor": ap},
|
|
||||||
)
|
)
|
||||||
trainer.fit()
|
trainer.fit()
|
||||||
|
|
|
@ -89,7 +89,6 @@ trainer = Trainer(
|
||||||
output_path,
|
output_path,
|
||||||
model=model,
|
model=model,
|
||||||
train_samples=train_samples,
|
train_samples=train_samples,
|
||||||
eval_samples=eval_samples,
|
eval_samples=eval_samples
|
||||||
training_assets={"audio_processor": ap},
|
|
||||||
)
|
)
|
||||||
trainer.fit()
|
trainer.fit()
|
||||||
|
|
|
@ -36,7 +36,7 @@ ap = AudioProcessor(**config.audio.to_dict())
|
||||||
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
|
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
|
||||||
|
|
||||||
# init model
|
# init model
|
||||||
model = GAN(config)
|
model = GAN(config, ap)
|
||||||
|
|
||||||
# init the trainer and 🚀
|
# init the trainer and 🚀
|
||||||
trainer = Trainer(
|
trainer = Trainer(
|
||||||
|
@ -45,7 +45,6 @@ trainer = Trainer(
|
||||||
output_path,
|
output_path,
|
||||||
model=model,
|
model=model,
|
||||||
train_samples=train_samples,
|
train_samples=train_samples,
|
||||||
eval_samples=eval_samples,
|
eval_samples=eval_samples
|
||||||
training_assets={"audio_processor": ap},
|
|
||||||
)
|
)
|
||||||
trainer.fit()
|
trainer.fit()
|
||||||
|
|
|
@ -136,7 +136,6 @@ trainer = Trainer(
|
||||||
output_path,
|
output_path,
|
||||||
model=model,
|
model=model,
|
||||||
train_samples=train_samples,
|
train_samples=train_samples,
|
||||||
eval_samples=eval_samples,
|
eval_samples=eval_samples
|
||||||
training_assets={"audio_processor": ap},
|
|
||||||
)
|
)
|
||||||
trainer.fit()
|
trainer.fit()
|
||||||
|
|
Loading…
Reference in New Issue