From 8c381e3e48662097e661c0d45b61ad19de56cd30 Mon Sep 17 00:00:00 2001
From: Enno Hermann
Date: Tue, 3 Dec 2024 22:06:39 +0100
Subject: [PATCH] docs: use .to("cuda") instead of deprecated gpu=True

---
 TTS/api.py                 |  8 ++++----
 docs/source/models/bark.md |  4 ++--
 docs/source/models/xtts.md | 10 +++++-----
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/TTS/api.py b/TTS/api.py
index ed828250..86787e03 100644
--- a/TTS/api.py
+++ b/TTS/api.py
@@ -35,21 +35,21 @@ class TTS(nn.Module):
         >>> tts.tts_to_file(text="Hello world!", speaker=tts.speakers[0], language=tts.languages[0], file_path="output.wav")
 
     Example with a single-speaker model:
-        >>> tts = TTS(model_name="tts_models/de/thorsten/tacotron2-DDC", progress_bar=False, gpu=False)
+        >>> tts = TTS(model_name="tts_models/de/thorsten/tacotron2-DDC", progress_bar=False)
         >>> tts.tts_to_file(text="Ich bin eine Testnachricht.", file_path="output.wav")
 
     Example loading a model from a path:
-        >>> tts = TTS(model_path="/path/to/checkpoint_100000.pth", config_path="/path/to/config.json", progress_bar=False, gpu=False)
+        >>> tts = TTS(model_path="/path/to/checkpoint_100000.pth", config_path="/path/to/config.json", progress_bar=False)
         >>> tts.tts_to_file(text="Ich bin eine Testnachricht.", file_path="output.wav")
 
     Example voice cloning with YourTTS in English, French and Portuguese:
-        >>> tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False, gpu=True)
+        >>> tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False).to("cuda")
         >>> tts.tts_to_file("This is voice cloning.", speaker_wav="my/cloning/audio.wav", language="en", file_path="thisisit.wav")
         >>> tts.tts_to_file("C'est le clonage de la voix.", speaker_wav="my/cloning/audio.wav", language="fr", file_path="thisisit.wav")
         >>> tts.tts_to_file("Isso é clonagem de voz.", speaker_wav="my/cloning/audio.wav", language="pt", file_path="thisisit.wav")
 
     Example Fairseq TTS models (uses ISO language codes in https://dl.fbaipublicfiles.com/mms/tts/all-tts-languages.html):
-        >>> tts = TTS(model_name="tts_models/eng/fairseq/vits", progress_bar=False, gpu=True)
+        >>> tts = TTS(model_name="tts_models/eng/fairseq/vits", progress_bar=False).to("cuda")
         >>> tts.tts_to_file("This is a test.", file_path="output.wav")
 
     Args:
diff --git a/docs/source/models/bark.md b/docs/source/models/bark.md
index a180afbb..77f99c0d 100644
--- a/docs/source/models/bark.md
+++ b/docs/source/models/bark.md
@@ -37,7 +37,7 @@ from TTS.api import TTS
 
 # Load the model to GPU
 # Bark is really slow on CPU, so we recommend using GPU.
-tts = TTS("tts_models/multilingual/multi-dataset/bark", gpu=True)
+tts = TTS("tts_models/multilingual/multi-dataset/bark").to("cuda")
 
 
 # Cloning a new speaker
@@ -57,7 +57,7 @@ tts.tts_to_file(text="Hello, my name is Manmay , how are you?",
 
 
 # random speaker
-tts = TTS("tts_models/multilingual/multi-dataset/bark", gpu=True)
+tts = TTS("tts_models/multilingual/multi-dataset/bark").to("cuda")
 tts.tts_to_file("hello world", file_path="out.wav")
 ```
 
diff --git a/docs/source/models/xtts.md b/docs/source/models/xtts.md
index c07d879f..7c0f1c4a 100644
--- a/docs/source/models/xtts.md
+++ b/docs/source/models/xtts.md
@@ -118,7 +118,7 @@ You can optionally disable sentence splitting for better coherence but more VRAM
 ```python
 from TTS.api import TTS
 
-tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=True)
+tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cuda")
 
 # generate speech by cloning a voice using default settings
 tts.tts_to_file(text="It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
@@ -137,15 +137,15 @@ You can pass multiple audio files to the `speaker_wav` argument for better voice
 from TTS.api import TTS
 
 # using the default version set in 🐸TTS
-tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=True)
+tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cuda")
 
 # using a specific version
 # 👀 see the branch names for versions on https://huggingface.co/coqui/XTTS-v2/tree/main
 # ❗some versions might be incompatible with the API
-tts = TTS("xtts_v2.0.2", gpu=True)
+tts = TTS("xtts_v2.0.2").to("cuda")
 
 # getting the latest XTTS_v2
-tts = TTS("xtts", gpu=True)
+tts = TTS("xtts").to("cuda")
 
 # generate speech by cloning a voice using default settings
 tts.tts_to_file(text="It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
@@ -160,7 +160,7 @@ You can do inference using one of the available speakers using the following cod
 ```python
 from TTS.api import TTS
 
-tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=True)
+tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cuda")
 
 # generate speech by cloning a voice using default settings
 tts.tts_to_file(text="It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",