{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "intro-tortoise-demo",
   "metadata": {},
   "source": [
    "# Tortoise text-to-speech demo\n",
    "\n",
    "Generate speech from text with the Tortoise model bundled in Coqui TTS.\n",
    "Run the cells top to bottom: the setup cell downloads the models, then you\n",
    "pick the text to speak, a quality preset, and a reference voice, and the\n",
    "final cell renders the result to `generated.wav` and plays it inline."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d50310e-f094-42e0-af30-1e42b13ceb95",
   "metadata": {},
   "outputs": [],
   "source": [
    "#@title # Setup\n",
    "# Imports used through the rest of the notebook.\n",
    "import torch\n",
    "import torchaudio\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "import IPython\n",
    "\n",
    "from TTS.tts.models.tortoise import TextToSpeech\n",
    "from TTS.tts.layers.tortoise.audio_utils import load_audio, load_voice, load_voices\n",
    "\n",
    "# This will download all the models used by Tortoise from the HuggingFace hub.\n",
    "tts = TextToSpeech()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e126c3c3-d90a-492f-b5bb-0d86587f15cc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# This is the text that will be spoken.\n",
    "text = \"Joining two modalities results in a surprising increase in generalization! What would happen if we combined them all?\" #@param {type:\"string\"}\n",
    "#@markdown Show code for multiline text input\n",
    "# Here's something for the poetically inclined.. (set text=)\n",
    "\"\"\"\n",
    "Then took the other, as just as fair,\n",
    "And having perhaps the better claim,\n",
    "Because it was grassy and wanted wear;\n",
    "Though as for that the passing there\n",
    "Had worn them really about the same,\"\"\"\n",
    "\n",
    "# Pick a \"preset mode\" to determine quality. Options: {\"ultra_fast\", \"fast\" (default), \"standard\", \"high_quality\"}. See docs in api.py\n",
    "preset = \"fast\" #@param [\"ultra_fast\", \"fast\", \"standard\", \"high_quality\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9413f553-5bd0-4820-bad4-edd7fd7d2370",
   "metadata": {},
   "outputs": [],
   "source": [
    "%ls ../TTS/tts/utils/assets/tortoise/voices/\n",
    "IPython.display.Audio(filename='../TTS/tts/utils/assets/tortoise/voices/tom/1.wav')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "96a98ae5-313b-40d1-9311-5a785f2c9a4e",
   "metadata": {},
   "outputs": [],
   "source": [
    "#@markdown Pick one of the voices from the output above\n",
    "voice = 'tom' #@param {type:\"string\"}\n",
    "\n",
    "#@markdown Load it and send it through Tortoise.\n",
    "voice_samples, conditioning_latents = load_voice(voice)\n",
    "gen = tts.tts_with_preset(text, voice_samples=voice_samples, conditioning_latents=conditioning_latents,\n",
    "                          preset=preset)\n",
    "torchaudio.save('generated.wav', gen.squeeze(0).cpu(), 24000)\n",
    "IPython.display.Audio('generated.wav')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}