From 5ae369d6296959db652d29ac33c06218098ce07e Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Tue, 31 Oct 2023 16:56:25 +0200 Subject: [PATCH 01/67] Move FreeVCConfig to TTS.vc.configs (like all other config classes) --- TTS/vc/configs/freevc_config.py | 277 ++++++++++++++++++++++++++++++- TTS/vc/models/freevc.py | 279 +------------------------------- 2 files changed, 278 insertions(+), 278 deletions(-) diff --git a/TTS/vc/configs/freevc_config.py b/TTS/vc/configs/freevc_config.py index 890a2693..207181b3 100644 --- a/TTS/vc/configs/freevc_config.py +++ b/TTS/vc/configs/freevc_config.py @@ -1,5 +1,278 @@ from dataclasses import dataclass, field -from typing import List +from typing import List, Optional + +from coqpit import Coqpit from TTS.vc.configs.shared_configs import BaseVCConfig -from TTS.vc.models.freevc import FreeVCArgs, FreeVCAudioConfig, FreeVCConfig + + +@dataclass +class FreeVCAudioConfig(Coqpit): + """Audio configuration + + Args: + max_wav_value (float): + The maximum value of the waveform. + + input_sample_rate (int): + The sampling rate of the input waveform. + + output_sample_rate (int): + The sampling rate of the output waveform. + + filter_length (int): + The length of the filter. + + hop_length (int): + The hop length. + + win_length (int): + The window length. + + n_mel_channels (int): + The number of mel channels. + + mel_fmin (float): + The minimum frequency of the mel filterbank. + + mel_fmax (Optional[float]): + The maximum frequency of the mel filterbank. + """ + + max_wav_value: float = field(default=32768.0) + input_sample_rate: int = field(default=16000) + output_sample_rate: int = field(default=24000) + filter_length: int = field(default=1280) + hop_length: int = field(default=320) + win_length: int = field(default=1280) + n_mel_channels: int = field(default=80) + mel_fmin: float = field(default=0.0) + mel_fmax: Optional[float] = field(default=None) + + +@dataclass +class FreeVCArgs(Coqpit): + """FreeVC model arguments + + Args: + spec_channels (int): + The number of channels in the spectrogram. + + inter_channels (int): + The number of channels in the intermediate layers. + + hidden_channels (int): + The number of channels in the hidden layers. + + filter_channels (int): + The number of channels in the filter layers. + + n_heads (int): + The number of attention heads. + + n_layers (int): + The number of layers. + + kernel_size (int): + The size of the kernel. + + p_dropout (float): + The dropout probability. + + resblock (str): + The type of residual block. + + resblock_kernel_sizes (List[int]): + The kernel sizes for the residual blocks. + + resblock_dilation_sizes (List[List[int]]): + The dilation sizes for the residual blocks. + + upsample_rates (List[int]): + The upsample rates. + + upsample_initial_channel (int): + The number of channels in the initial upsample layer. + + upsample_kernel_sizes (List[int]): + The kernel sizes for the upsample layers. + + n_layers_q (int): + The number of layers in the quantization network. + + use_spectral_norm (bool): + Whether to use spectral normalization. + + gin_channels (int): + The number of channels in the global conditioning vector. + + ssl_dim (int): + The dimension of the self-supervised learning embedding. + + use_spk (bool): + Whether to use external speaker encoder. 
+ """ + + spec_channels: int = field(default=641) + inter_channels: int = field(default=192) + hidden_channels: int = field(default=192) + filter_channels: int = field(default=768) + n_heads: int = field(default=2) + n_layers: int = field(default=6) + kernel_size: int = field(default=3) + p_dropout: float = field(default=0.1) + resblock: str = field(default="1") + resblock_kernel_sizes: List[int] = field(default_factory=lambda: [3, 7, 11]) + resblock_dilation_sizes: List[List[int]] = field(default_factory=lambda: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]) + upsample_rates: List[int] = field(default_factory=lambda: [10, 8, 2, 2]) + upsample_initial_channel: int = field(default=512) + upsample_kernel_sizes: List[int] = field(default_factory=lambda: [16, 16, 4, 4]) + n_layers_q: int = field(default=3) + use_spectral_norm: bool = field(default=False) + gin_channels: int = field(default=256) + ssl_dim: int = field(default=1024) + use_spk: bool = field(default=False) + num_spks: int = field(default=0) + segment_size: int = field(default=8960) + + +@dataclass +class FreeVCConfig(BaseVCConfig): + """Defines parameters for FreeVC End2End TTS model. + + Args: + model (str): + Model name. Do not change unless you know what you are doing. + + model_args (FreeVCArgs): + Model architecture arguments. Defaults to `FreeVCArgs()`. + + audio (FreeVCAudioConfig): + Audio processing configuration. Defaults to `FreeVCAudioConfig()`. + + grad_clip (List): + Gradient clipping thresholds for each optimizer. Defaults to `[1000.0, 1000.0]`. + + lr_gen (float): + Initial learning rate for the generator. Defaults to 0.0002. + + lr_disc (float): + Initial learning rate for the discriminator. Defaults to 0.0002. + + lr_scheduler_gen (str): + Name of the learning rate scheduler for the generator. One of the `torch.optim.lr_scheduler.*`. Defaults to + `ExponentialLR`. + + lr_scheduler_gen_params (dict): + Parameters for the learning rate scheduler of the generator. Defaults to `{'gamma': 0.999875, "last_epoch":-1}`. + + lr_scheduler_disc (str): + Name of the learning rate scheduler for the discriminator. One of the `torch.optim.lr_scheduler.*`. Defaults to + `ExponentialLR`. + + lr_scheduler_disc_params (dict): + Parameters for the learning rate scheduler of the discriminator. Defaults to `{'gamma': 0.999875, "last_epoch":-1}`. + + scheduler_after_epoch (bool): + If true, step the schedulers after each epoch else after each step. Defaults to `False`. + + optimizer (str): + Name of the optimizer to use with both the generator and the discriminator networks. One of the + `torch.optim.*`. Defaults to `AdamW`. + + kl_loss_alpha (float): + Loss weight for KL loss. Defaults to 1.0. + + disc_loss_alpha (float): + Loss weight for the discriminator loss. Defaults to 1.0. + + gen_loss_alpha (float): + Loss weight for the generator loss. Defaults to 1.0. + + feat_loss_alpha (float): + Loss weight for the feature matching loss. Defaults to 1.0. + + mel_loss_alpha (float): + Loss weight for the mel loss. Defaults to 45.0. + + return_wav (bool): + If true, data loader returns the waveform as well as the other outputs. Do not change. Defaults to `True`. + + compute_linear_spec (bool): + If true, the linear spectrogram is computed and returned alongside the mel output. Do not change. Defaults to `True`. + + use_weighted_sampler (bool): + If true, use weighted sampler with bucketing for balancing samples between datasets used in training. Defaults to `False`. 
+ + weighted_sampler_attrs (dict): + Key retuned by the formatter to be used for weighted sampler. For example `{"root_path": 2.0, "speaker_name": 1.0}` sets sample probabilities + by overweighting `root_path` by 2.0. Defaults to `{}`. + + weighted_sampler_multipliers (dict): + Weight each unique value of a key returned by the formatter for weighted sampling. + For example `{"root_path":{"/raid/datasets/libritts-clean-16khz-bwe-coqui_44khz/LibriTTS/train-clean-100/":1.0, "/raid/datasets/libritts-clean-16khz-bwe-coqui_44khz/LibriTTS/train-clean-360/": 0.5}`. + It will sample instances from `train-clean-100` 2 times more than `train-clean-360`. Defaults to `{}`. + + r (int): + Number of spectrogram frames to be generated at a time. Do not change. Defaults to `1`. + + add_blank (bool): + If true, a blank token is added in between every character. Defaults to `True`. + + test_sentences (List[List]): + List of sentences with speaker and language information to be used for testing. + + language_ids_file (str): + Path to the language ids file. + + use_language_embedding (bool): + If true, language embedding is used. Defaults to `False`. + + Note: + Check :class:`TTS.tts.configs.shared_configs.BaseTTSConfig` for the inherited parameters. + + Example: + + >>> from TTS.vc.configs.freevc_config import FreeVCConfig + >>> config = FreeVCConfig() + """ + + model: str = "freevc" + # model specific params + model_args: FreeVCArgs = field(default_factory=FreeVCArgs) + audio: FreeVCAudioConfig = field(default_factory=FreeVCAudioConfig) + + # optimizer + # TODO with training support + + # loss params + # TODO with training support + + # data loader params + return_wav: bool = True + compute_linear_spec: bool = True + + # sampler params + use_weighted_sampler: bool = False # TODO: move it to the base config + weighted_sampler_attrs: dict = field(default_factory=lambda: {}) + weighted_sampler_multipliers: dict = field(default_factory=lambda: {}) + + # overrides + r: int = 1 # DO NOT CHANGE + add_blank: bool = True + + # multi-speaker settings + # use speaker embedding layer + num_speakers: int = 0 + speakers_file: str = None + speaker_embedding_channels: int = 256 + + # use d-vectors + use_d_vector_file: bool = False + d_vector_file: List[str] = None + d_vector_dim: int = None + + def __post_init__(self): + for key, val in self.model_args.items(): + if hasattr(self, key): + self[key] = val diff --git a/TTS/vc/models/freevc.py b/TTS/vc/models/freevc.py index ae22ad28..fd53a77f 100644 --- a/TTS/vc/models/freevc.py +++ b/TTS/vc/models/freevc.py @@ -1,4 +1,3 @@ -from dataclasses import dataclass, field from typing import Dict, List, Optional, Tuple, Union import librosa @@ -13,8 +12,8 @@ from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm import TTS.vc.modules.freevc.commons as commons import TTS.vc.modules.freevc.modules as modules from TTS.tts.utils.speakers import SpeakerManager -from TTS.utils.io import load_fsspec, save_checkpoint -from TTS.vc.configs.shared_configs import BaseVCConfig +from TTS.utils.io import load_fsspec +from TTS.vc.configs.freevc_config import FreeVCConfig from TTS.vc.models.base_vc import BaseVC from TTS.vc.modules.freevc.commons import get_padding, init_weights from TTS.vc.modules.freevc.mel_processing import mel_spectrogram_torch @@ -294,136 +293,6 @@ class SpeakerEncoder(torch.nn.Module): return embed -@dataclass -class FreeVCAudioConfig(Coqpit): - """Audio configuration - - Args: - max_wav_value (float): - The maximum value of the waveform. 
- - input_sample_rate (int): - The sampling rate of the input waveform. - - output_sample_rate (int): - The sampling rate of the output waveform. - - filter_length (int): - The length of the filter. - - hop_length (int): - The hop length. - - win_length (int): - The window length. - - n_mel_channels (int): - The number of mel channels. - - mel_fmin (float): - The minimum frequency of the mel filterbank. - - mel_fmax (Optional[float]): - The maximum frequency of the mel filterbank. - """ - - max_wav_value: float = field(default=32768.0) - input_sample_rate: int = field(default=16000) - output_sample_rate: int = field(default=24000) - filter_length: int = field(default=1280) - hop_length: int = field(default=320) - win_length: int = field(default=1280) - n_mel_channels: int = field(default=80) - mel_fmin: float = field(default=0.0) - mel_fmax: Optional[float] = field(default=None) - - -@dataclass -class FreeVCArgs(Coqpit): - """FreeVC model arguments - - Args: - spec_channels (int): - The number of channels in the spectrogram. - - inter_channels (int): - The number of channels in the intermediate layers. - - hidden_channels (int): - The number of channels in the hidden layers. - - filter_channels (int): - The number of channels in the filter layers. - - n_heads (int): - The number of attention heads. - - n_layers (int): - The number of layers. - - kernel_size (int): - The size of the kernel. - - p_dropout (float): - The dropout probability. - - resblock (str): - The type of residual block. - - resblock_kernel_sizes (List[int]): - The kernel sizes for the residual blocks. - - resblock_dilation_sizes (List[List[int]]): - The dilation sizes for the residual blocks. - - upsample_rates (List[int]): - The upsample rates. - - upsample_initial_channel (int): - The number of channels in the initial upsample layer. - - upsample_kernel_sizes (List[int]): - The kernel sizes for the upsample layers. - - n_layers_q (int): - The number of layers in the quantization network. - - use_spectral_norm (bool): - Whether to use spectral normalization. - - gin_channels (int): - The number of channels in the global conditioning vector. - - ssl_dim (int): - The dimension of the self-supervised learning embedding. - - use_spk (bool): - Whether to use external speaker encoder. - """ - - spec_channels: int = field(default=641) - inter_channels: int = field(default=192) - hidden_channels: int = field(default=192) - filter_channels: int = field(default=768) - n_heads: int = field(default=2) - n_layers: int = field(default=6) - kernel_size: int = field(default=3) - p_dropout: float = field(default=0.1) - resblock: str = field(default="1") - resblock_kernel_sizes: List[int] = field(default_factory=lambda: [3, 7, 11]) - resblock_dilation_sizes: List[List[int]] = field(default_factory=lambda: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]) - upsample_rates: List[int] = field(default_factory=lambda: [10, 8, 2, 2]) - upsample_initial_channel: int = field(default=512) - upsample_kernel_sizes: List[int] = field(default_factory=lambda: [16, 16, 4, 4]) - n_layers_q: int = field(default=3) - use_spectral_norm: bool = field(default=False) - gin_channels: int = field(default=256) - ssl_dim: int = field(default=1024) - use_spk: bool = field(default=False) - num_spks: int = field(default=0) - segment_size: int = field(default=8960) - - class FreeVC(BaseVC): """ @@ -677,7 +546,7 @@ class FreeVC(BaseVC): ... 
@staticmethod - def init_from_config(config: "VitsConfig", samples: Union[List[List], List[Dict]] = None, verbose=True): + def init_from_config(config: FreeVCConfig, samples: Union[List[List], List[Dict]] = None, verbose=True): model = FreeVC(config) return model @@ -689,145 +558,3 @@ class FreeVC(BaseVC): def train_step(): ... - - -@dataclass -class FreeVCConfig(BaseVCConfig): - """Defines parameters for FreeVC End2End TTS model. - - Args: - model (str): - Model name. Do not change unless you know what you are doing. - - model_args (FreeVCArgs): - Model architecture arguments. Defaults to `FreeVCArgs()`. - - audio (FreeVCAudioConfig): - Audio processing configuration. Defaults to `FreeVCAudioConfig()`. - - grad_clip (List): - Gradient clipping thresholds for each optimizer. Defaults to `[1000.0, 1000.0]`. - - lr_gen (float): - Initial learning rate for the generator. Defaults to 0.0002. - - lr_disc (float): - Initial learning rate for the discriminator. Defaults to 0.0002. - - lr_scheduler_gen (str): - Name of the learning rate scheduler for the generator. One of the `torch.optim.lr_scheduler.*`. Defaults to - `ExponentialLR`. - - lr_scheduler_gen_params (dict): - Parameters for the learning rate scheduler of the generator. Defaults to `{'gamma': 0.999875, "last_epoch":-1}`. - - lr_scheduler_disc (str): - Name of the learning rate scheduler for the discriminator. One of the `torch.optim.lr_scheduler.*`. Defaults to - `ExponentialLR`. - - lr_scheduler_disc_params (dict): - Parameters for the learning rate scheduler of the discriminator. Defaults to `{'gamma': 0.999875, "last_epoch":-1}`. - - scheduler_after_epoch (bool): - If true, step the schedulers after each epoch else after each step. Defaults to `False`. - - optimizer (str): - Name of the optimizer to use with both the generator and the discriminator networks. One of the - `torch.optim.*`. Defaults to `AdamW`. - - kl_loss_alpha (float): - Loss weight for KL loss. Defaults to 1.0. - - disc_loss_alpha (float): - Loss weight for the discriminator loss. Defaults to 1.0. - - gen_loss_alpha (float): - Loss weight for the generator loss. Defaults to 1.0. - - feat_loss_alpha (float): - Loss weight for the feature matching loss. Defaults to 1.0. - - mel_loss_alpha (float): - Loss weight for the mel loss. Defaults to 45.0. - - return_wav (bool): - If true, data loader returns the waveform as well as the other outputs. Do not change. Defaults to `True`. - - compute_linear_spec (bool): - If true, the linear spectrogram is computed and returned alongside the mel output. Do not change. Defaults to `True`. - - use_weighted_sampler (bool): - If true, use weighted sampler with bucketing for balancing samples between datasets used in training. Defaults to `False`. - - weighted_sampler_attrs (dict): - Key retuned by the formatter to be used for weighted sampler. For example `{"root_path": 2.0, "speaker_name": 1.0}` sets sample probabilities - by overweighting `root_path` by 2.0. Defaults to `{}`. - - weighted_sampler_multipliers (dict): - Weight each unique value of a key returned by the formatter for weighted sampling. - For example `{"root_path":{"/raid/datasets/libritts-clean-16khz-bwe-coqui_44khz/LibriTTS/train-clean-100/":1.0, "/raid/datasets/libritts-clean-16khz-bwe-coqui_44khz/LibriTTS/train-clean-360/": 0.5}`. - It will sample instances from `train-clean-100` 2 times more than `train-clean-360`. Defaults to `{}`. - - r (int): - Number of spectrogram frames to be generated at a time. Do not change. Defaults to `1`. 
- - add_blank (bool): - If true, a blank token is added in between every character. Defaults to `True`. - - test_sentences (List[List]): - List of sentences with speaker and language information to be used for testing. - - language_ids_file (str): - Path to the language ids file. - - use_language_embedding (bool): - If true, language embedding is used. Defaults to `False`. - - Note: - Check :class:`TTS.tts.configs.shared_configs.BaseTTSConfig` for the inherited parameters. - - Example: - - >>> from TTS.tts.configs.freevc_config import FreeVCConfig - >>> config = FreeVCConfig() - """ - - model: str = "freevc" - # model specific params - model_args: FreeVCArgs = field(default_factory=FreeVCArgs) - audio: FreeVCAudioConfig = field(default_factory=FreeVCAudioConfig) - - # optimizer - # TODO with training support - - # loss params - # TODO with training support - - # data loader params - return_wav: bool = True - compute_linear_spec: bool = True - - # sampler params - use_weighted_sampler: bool = False # TODO: move it to the base config - weighted_sampler_attrs: dict = field(default_factory=lambda: {}) - weighted_sampler_multipliers: dict = field(default_factory=lambda: {}) - - # overrides - r: int = 1 # DO NOT CHANGE - add_blank: bool = True - - # multi-speaker settings - # use speaker embedding layer - num_speakers: int = 0 - speakers_file: str = None - speaker_embedding_channels: int = 256 - - # use d-vectors - use_d_vector_file: bool = False - d_vector_file: List[str] = None - d_vector_dim: int = None - - def __post_init__(self): - for key, val in self.model_args.items(): - if hasattr(self, key): - self[key] = val From 38f6f8f0bb456088c7c41a1b975e3d22dd27a4c8 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 6 Nov 2023 12:36:37 +0200 Subject: [PATCH 02/67] Run `make style` & re-enable it in CI (#3127) --- .github/workflows/style_check.yml | 5 +- TTS/api.py | 6 +- TTS/bin/synthesize.py | 4 +- TTS/tts/layers/xtts/tokenizer.py | 132 ++++++++++++--------- TTS/tts/layers/xtts/zh_num2words.py | 9 +- TTS/tts/models/xtts.py | 58 ++++++--- TTS/utils/audio/numpy_transforms.py | 2 +- TTS/utils/audio/processor.py | 2 +- TTS/utils/synthesizer.py | 2 +- recipes/ljspeech/xtts_v1/train_gpt_xtts.py | 11 +- tests/api_tests/test_synthesize_api.py | 5 +- 11 files changed, 141 insertions(+), 95 deletions(-) diff --git a/.github/workflows/style_check.yml b/.github/workflows/style_check.yml index c167f7ca..b7c6393b 100644 --- a/.github/workflows/style_check.yml +++ b/.github/workflows/style_check.yml @@ -42,6 +42,5 @@ jobs: run: | python3 -m pip install .[all] python3 setup.py egg_info - # - name: Lint check - # run: | - # make lint \ No newline at end of file + - name: Style check + run: make style diff --git a/TTS/api.py b/TTS/api.py index 39f53f57..5d1fbb5a 100644 --- a/TTS/api.py +++ b/TTS/api.py @@ -264,7 +264,7 @@ class TTS(nn.Module): language: str = None, emotion: str = None, speed: float = 1.0, - pipe_out = None, + pipe_out=None, file_path: str = None, ) -> Union[np.ndarray, str]: """Convert text to speech using Coqui Studio models. Use `CS_API` class if you are only interested in the API. @@ -359,7 +359,7 @@ class TTS(nn.Module): speaker_wav: str = None, emotion: str = None, speed: float = 1.0, - pipe_out = None, + pipe_out=None, file_path: str = "output.wav", **kwargs, ): @@ -460,7 +460,7 @@ class TTS(nn.Module): """ with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp: # Lazy code... 
save it to a temp file to resample it while reading it for VC - self.tts_to_file(text=text, speaker=None, language=language, file_path=fp.name,speaker_wav=speaker_wav) + self.tts_to_file(text=text, speaker=None, language=language, file_path=fp.name, speaker_wav=speaker_wav) if self.voice_converter is None: self.load_vc_model_by_name("voice_conversion_models/multilingual/vctk/freevc24") wav = self.voice_converter.voice_conversion(source_wav=fp.name, target_wav=speaker_wav) diff --git a/TTS/bin/synthesize.py b/TTS/bin/synthesize.py index 78a20c25..ef41c8e1 100755 --- a/TTS/bin/synthesize.py +++ b/TTS/bin/synthesize.py @@ -427,7 +427,9 @@ def main(): tts_path = model_path tts_config_path = config_path if "default_vocoder" in model_item: - args.vocoder_name = model_item["default_vocoder"] if args.vocoder_name is None else args.vocoder_name + args.vocoder_name = ( + model_item["default_vocoder"] if args.vocoder_name is None else args.vocoder_name + ) # voice conversion model if model_item["model_type"] == "voice_conversion_models": diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py index c25d4296..456f8081 100644 --- a/TTS/tts/layers/xtts/tokenizer.py +++ b/TTS/tts/layers/xtts/tokenizer.py @@ -1,12 +1,12 @@ +import json import os import re -import json - -import torch -from tokenizers import Tokenizer import pypinyin +import torch from num2words import num2words +from tokenizers import Tokenizer + from TTS.tts.layers.xtts.zh_num2words import TextNorm as zh_num2words _whitespace_re = re.compile(r"\s+") @@ -87,7 +87,7 @@ _abbreviations = { "it": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ - #("sig.ra", "signora"), + # ("sig.ra", "signora"), ("sig", "signore"), ("dr", "dottore"), ("st", "santo"), @@ -121,49 +121,51 @@ _abbreviations = { "cs": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ - ("dr", "doktor"), # doctor - ("ing", "inženýr"), # engineer - ("p", "pan"), # Could also map to pani for woman but no easy way to do it + ("dr", "doktor"), # doctor + ("ing", "inženýr"), # engineer + ("p", "pan"), # Could also map to pani for woman but no easy way to do it # Other abbreviations would be specialized and not as common. ] ], "ru": [ (re.compile("\\b%s\\b" % x[0], re.IGNORECASE), x[1]) for x in [ - ("г-жа", "госпожа"), # Mrs. - ("г-н", "господин"), # Mr. - ("д-р", "доктор"), # doctor + ("г-жа", "госпожа"), # Mrs. + ("г-н", "господин"), # Mr. + ("д-р", "доктор"), # doctor # Other abbreviations are less common or specialized. ] ], "nl": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ - ("dhr", "de heer"), # Mr. + ("dhr", "de heer"), # Mr. ("mevr", "mevrouw"), # Mrs. - ("dr", "dokter"), # doctor - ("jhr", "jonkheer"), # young lord or nobleman + ("dr", "dokter"), # doctor + ("jhr", "jonkheer"), # young lord or nobleman # Dutch uses more abbreviations, but these are the most common ones. ] ], "tr": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ - ("b", "bay"), # Mr. + ("b", "bay"), # Mr. ("byk", "büyük"), # büyük - ("dr", "doktor"), # doctor + ("dr", "doktor"), # doctor # Add other Turkish abbreviations here if needed. 
] ], } -def expand_abbreviations_multilingual(text, lang='en'): + +def expand_abbreviations_multilingual(text, lang="en"): for regex, replacement in _abbreviations[lang]: text = re.sub(regex, replacement, text) return text + _symbols_multilingual = { - 'en': [ + "en": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " and "), @@ -172,10 +174,10 @@ _symbols_multilingual = { ("#", " hash "), ("$", " dollar "), ("£", " pound "), - ("°", " degree ") + ("°", " degree "), ] ], - 'es': [ + "es": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " y "), @@ -184,10 +186,10 @@ _symbols_multilingual = { ("#", " numeral "), ("$", " dolar "), ("£", " libra "), - ("°", " grados ") + ("°", " grados "), ] ], - 'fr': [ + "fr": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " et "), @@ -196,10 +198,10 @@ _symbols_multilingual = { ("#", " dièse "), ("$", " dollar "), ("£", " livre "), - ("°", " degrés ") + ("°", " degrés "), ] ], - 'de': [ + "de": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " und "), @@ -208,10 +210,10 @@ _symbols_multilingual = { ("#", " raute "), ("$", " dollar "), ("£", " pfund "), - ("°", " grad ") + ("°", " grad "), ] ], - 'pt': [ + "pt": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " e "), @@ -220,10 +222,10 @@ _symbols_multilingual = { ("#", " cardinal "), ("$", " dólar "), ("£", " libra "), - ("°", " graus ") + ("°", " graus "), ] ], - 'it': [ + "it": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " e "), @@ -232,10 +234,10 @@ _symbols_multilingual = { ("#", " cancelletto "), ("$", " dollaro "), ("£", " sterlina "), - ("°", " gradi ") + ("°", " gradi "), ] ], - 'pl': [ + "pl": [ (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ ("&", " i "), @@ -244,7 +246,7 @@ _symbols_multilingual = { ("#", " krzyżyk "), ("$", " dolar "), ("£", " funt "), - ("°", " stopnie ") + ("°", " stopnie "), ] ], "ar": [ @@ -257,7 +259,7 @@ _symbols_multilingual = { ("#", " رقم "), ("$", " دولار "), ("£", " جنيه "), - ("°", " درجة ") + ("°", " درجة "), ] ], "zh-cn": [ @@ -270,7 +272,7 @@ _symbols_multilingual = { ("#", " 号 "), ("$", " 美元 "), ("£", " 英镑 "), - ("°", " 度 ") + ("°", " 度 "), ] ], "cs": [ @@ -283,7 +285,7 @@ _symbols_multilingual = { ("#", " křížek "), ("$", " dolar "), ("£", " libra "), - ("°", " stupně ") + ("°", " stupně "), ] ], "ru": [ @@ -296,7 +298,7 @@ _symbols_multilingual = { ("#", " номер "), ("$", " доллар "), ("£", " фунт "), - ("°", " градус ") + ("°", " градус "), ] ], "nl": [ @@ -309,7 +311,7 @@ _symbols_multilingual = { ("#", " hekje "), ("$", " dollar "), ("£", " pond "), - ("°", " graden ") + ("°", " graden "), ] ], "tr": [ @@ -321,15 +323,16 @@ _symbols_multilingual = { ("#", " diyez "), ("$", " dolar "), ("£", " sterlin "), - ("°", " derece ") + ("°", " derece "), ] ], } -def expand_symbols_multilingual(text, lang='en'): + +def expand_symbols_multilingual(text, lang="en"): for regex, replacement in _symbols_multilingual[lang]: text = re.sub(regex, replacement, text) - text = text.replace(' ', ' ') # Ensure there are no double spaces + text = text.replace(" ", " ") # Ensure there are no double spaces return text.strip() @@ -342,41 +345,45 @@ _ordinal_re = { "it": re.compile(r"([0-9]+)(º|°|ª|o|a|i|e)"), "pl": re.compile(r"([0-9]+)(º|ª|st|nd|rd|th)"), "ar": re.compile(r"([0-9]+)(ون|ين|ث|ر|ى)"), - "cs": re.compile(r"([0-9]+)\.(?=\s|$)"), # In Czech, a dot is often used after the 
number to indicate ordinals. + "cs": re.compile(r"([0-9]+)\.(?=\s|$)"), # In Czech, a dot is often used after the number to indicate ordinals. "ru": re.compile(r"([0-9]+)(-й|-я|-е|-ое|-ье|-го)"), "nl": re.compile(r"([0-9]+)(de|ste|e)"), "tr": re.compile(r"([0-9]+)(\.|inci|nci|uncu|üncü|\.)"), } _number_re = re.compile(r"[0-9]+") _currency_re = { - 'USD': re.compile(r"((\$[0-9\.\,]*[0-9]+)|([0-9\.\,]*[0-9]+\$))"), - 'GBP': re.compile(r"((£[0-9\.\,]*[0-9]+)|([0-9\.\,]*[0-9]+£))"), - 'EUR': re.compile(r"(([0-9\.\,]*[0-9]+€)|((€[0-9\.\,]*[0-9]+)))") + "USD": re.compile(r"((\$[0-9\.\,]*[0-9]+)|([0-9\.\,]*[0-9]+\$))"), + "GBP": re.compile(r"((£[0-9\.\,]*[0-9]+)|([0-9\.\,]*[0-9]+£))"), + "EUR": re.compile(r"(([0-9\.\,]*[0-9]+€)|((€[0-9\.\,]*[0-9]+)))"), } _comma_number_re = re.compile(r"\b\d{1,3}(,\d{3})*(\.\d+)?\b") _dot_number_re = re.compile(r"\b\d{1,3}(.\d{3})*(\,\d+)?\b") _decimal_number_re = re.compile(r"([0-9]+[.,][0-9]+)") + def _remove_commas(m): text = m.group(0) if "," in text: text = text.replace(",", "") return text + def _remove_dots(m): text = m.group(0) if "." in text: text = text.replace(".", "") return text -def _expand_decimal_point(m, lang='en'): + +def _expand_decimal_point(m, lang="en"): amount = m.group(1).replace(",", ".") return num2words(float(amount), lang=lang if lang != "cs" else "cz") -def _expand_currency(m, lang='en', currency='USD'): - amount = float((re.sub(r'[^\d.]', '', m.group(0).replace(",", ".")))) - full_amount = num2words(amount, to='currency', currency=currency, lang=lang if lang != "cs" else "cz") + +def _expand_currency(m, lang="en", currency="USD"): + amount = float((re.sub(r"[^\d.]", "", m.group(0).replace(",", ".")))) + full_amount = num2words(amount, to="currency", currency=currency, lang=lang if lang != "cs" else "cz") and_equivalents = { "en": ", ", @@ -400,13 +407,16 @@ def _expand_currency(m, lang='en', currency='USD'): return full_amount -def _expand_ordinal(m, lang='en'): + +def _expand_ordinal(m, lang="en"): return num2words(int(m.group(1)), ordinal=True, lang=lang if lang != "cs" else "cz") -def _expand_number(m, lang='en'): + +def _expand_number(m, lang="en"): return num2words(int(m.group(0)), lang=lang if lang != "cs" else "cz") -def expand_numbers_multilingual(text, lang='en'): + +def expand_numbers_multilingual(text, lang="en"): if lang == "zh-cn": text = zh_num2words()(text) else: @@ -415,9 +425,9 @@ def expand_numbers_multilingual(text, lang='en'): else: text = re.sub(_dot_number_re, _remove_dots, text) try: - text = re.sub(_currency_re['GBP'], lambda m: _expand_currency(m, lang, 'GBP'), text) - text = re.sub(_currency_re['USD'], lambda m: _expand_currency(m, lang, 'USD'), text) - text = re.sub(_currency_re['EUR'], lambda m: _expand_currency(m, lang, 'EUR'), text) + text = re.sub(_currency_re["GBP"], lambda m: _expand_currency(m, lang, "GBP"), text) + text = re.sub(_currency_re["USD"], lambda m: _expand_currency(m, lang, "USD"), text) + text = re.sub(_currency_re["EUR"], lambda m: _expand_currency(m, lang, "EUR"), text) except: pass if lang != "tr": @@ -426,15 +436,18 @@ def expand_numbers_multilingual(text, lang='en'): text = re.sub(_number_re, lambda m: _expand_number(m, lang), text) return text + def lowercase(text): return text.lower() + def collapse_whitespace(text): return re.sub(_whitespace_re, " ", text) + def multilingual_cleaners(text, lang): - text = text.replace('"', '') - if lang=="tr": + text = text.replace('"', "") + if lang == "tr": text = text.replace("İ", "i") text = text.replace("Ö", "ö") text = text.replace("Ü", 
"ü") @@ -445,20 +458,26 @@ def multilingual_cleaners(text, lang): text = collapse_whitespace(text) return text + def basic_cleaners(text): """Basic pipeline that lowercases and collapses whitespace without transliteration.""" text = lowercase(text) text = collapse_whitespace(text) return text + def chinese_transliterate(text): - return "".join([p[0] for p in pypinyin.pinyin(text, style=pypinyin.Style.TONE3, heteronym=False, neutral_tone_with_five=True)]) + return "".join( + p[0] for p in pypinyin.pinyin(text, style=pypinyin.Style.TONE3, heteronym=False, neutral_tone_with_five=True) + ) + def japanese_cleaners(text, katsu): text = katsu.romaji(text) text = lowercase(text) return text + class VoiceBpeTokenizer: def __init__(self, vocab_file=None, preprocess=None): self.tokenizer = None @@ -485,6 +504,7 @@ class VoiceBpeTokenizer: elif lang == "ja": if self.katsu is None: import cutlet + self.katsu = cutlet.Cutlet() txt = japanese_cleaners(txt, self.katsu) else: diff --git a/TTS/tts/layers/xtts/zh_num2words.py b/TTS/tts/layers/xtts/zh_num2words.py index d5117474..2c56e3bb 100644 --- a/TTS/tts/layers/xtts/zh_num2words.py +++ b/TTS/tts/layers/xtts/zh_num2words.py @@ -2,9 +2,14 @@ # 2019.5 Zhiyang Zhou (https://github.com/Joee1995/chn_text_norm.git) # 2019.9 - 2022 Jiayu DU -import sys, os, argparse -import string, re +import argparse import csv +import os +import re +import string +import sys + +# fmt: off # ================================================================================ # # basic constant diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 60af2d1e..c0532b36 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -2,10 +2,10 @@ import os from contextlib import contextmanager from dataclasses import dataclass +import librosa import torch import torch.nn.functional as F import torchaudio -import librosa from coqpit import Coqpit from TTS.tts.layers.tortoise.audio_utils import denormalize_tacotron_mel, wav_to_univnet_mel @@ -386,10 +386,12 @@ class Xtts(BaseTTS): @torch.inference_mode() def get_speaker_embedding(self, audio, sr): audio_16k = torchaudio.functional.resample(audio, sr, 16000) - return self.hifigan_decoder.speaker_encoder.forward( - audio_16k.to(self.device), l2_norm=True - ).unsqueeze(-1).to(self.device) - + return ( + self.hifigan_decoder.speaker_encoder.forward(audio_16k.to(self.device), l2_norm=True) + .unsqueeze(-1) + .to(self.device) + ) + @torch.inference_mode() def get_conditioning_latents( self, @@ -398,7 +400,7 @@ class Xtts(BaseTTS): max_ref_length=10, librosa_trim_db=None, sound_norm_refs=False, - ): + ): speaker_embedding = None diffusion_cond_latents = None @@ -647,13 +649,19 @@ class Xtts(BaseTTS): break if decoder == "hifigan": - assert hasattr(self, "hifigan_decoder"), "You must enable hifigan decoder to use it by setting config `use_hifigan: true`" + assert hasattr( + self, "hifigan_decoder" + ), "You must enable hifigan decoder to use it by setting config `use_hifigan: true`" wav = self.hifigan_decoder(gpt_latents, g=speaker_embedding) elif decoder == "ne_hifigan": - assert hasattr(self, "ne_hifigan_decoder"), "You must enable ne_hifigan decoder to use it by setting config `use_ne_hifigan: true`" + assert hasattr( + self, "ne_hifigan_decoder" + ), "You must enable ne_hifigan decoder to use it by setting config `use_ne_hifigan: true`" wav = self.ne_hifigan_decoder(gpt_latents, g=speaker_embedding) else: - assert hasattr(self, "diffusion_decoder"), "You must disable hifigan decoders to use difffusion by setting config 
`use_ne_hifigan: false` and `use_hifigan: false`" + assert hasattr( + self, "diffusion_decoder" + ), "You must disable hifigan decoders to use difffusion by setting config `use_ne_hifigan: false` and `use_hifigan: false`" mel = do_spectrogram_diffusion( self.diffusion_decoder, diffuser, @@ -742,10 +750,14 @@ class Xtts(BaseTTS): if is_end or (stream_chunk_size > 0 and len(last_tokens) >= stream_chunk_size): gpt_latents = torch.cat(all_latents, dim=0)[None, :] if decoder == "hifigan": - assert hasattr(self, "hifigan_decoder"), "You must enable hifigan decoder to use it by setting config `use_hifigan: true`" + assert hasattr( + self, "hifigan_decoder" + ), "You must enable hifigan decoder to use it by setting config `use_hifigan: true`" wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) elif decoder == "ne_hifigan": - assert hasattr(self, "ne_hifigan_decoder"), "You must enable ne_hifigan decoder to use it by setting config `use_ne_hifigan: true`" + assert hasattr( + self, "ne_hifigan_decoder" + ), "You must enable ne_hifigan decoder to use it by setting config `use_ne_hifigan: true`" wav_gen = self.ne_hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) else: raise NotImplementedError("Diffusion for streaming inference not implemented.") @@ -756,10 +768,14 @@ class Xtts(BaseTTS): yield wav_chunk def forward(self): - raise NotImplementedError("XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training") + raise NotImplementedError( + "XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training" + ) def eval_step(self): - raise NotImplementedError("XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training") + raise NotImplementedError( + "XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training" + ) @staticmethod def init_from_config(config: "XttsConfig", **kwargs): # pylint: disable=unused-argument @@ -835,12 +851,18 @@ class Xtts(BaseTTS): self.load_state_dict(checkpoint, strict=strict) if eval: - if hasattr(self, "hifigan_decoder"): self.hifigan_decoder.eval() - if hasattr(self, "ne_hifigan_decoder"): self.hifigan_decoder.eval() - if hasattr(self, "diffusion_decoder"): self.diffusion_decoder.eval() - if hasattr(self, "vocoder"): self.vocoder.eval() + if hasattr(self, "hifigan_decoder"): + self.hifigan_decoder.eval() + if hasattr(self, "ne_hifigan_decoder"): + self.hifigan_decoder.eval() + if hasattr(self, "diffusion_decoder"): + self.diffusion_decoder.eval() + if hasattr(self, "vocoder"): + self.vocoder.eval() self.gpt.init_gpt_for_inference(kv_cache=self.args.kv_cache, use_deepspeed=use_deepspeed) self.gpt.eval() def train_step(self): - raise NotImplementedError("XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training") + raise NotImplementedError( + "XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training" + ) diff --git a/TTS/utils/audio/numpy_transforms.py b/TTS/utils/audio/numpy_transforms.py index e2b71fb2..b701e767 100644 --- a/TTS/utils/audio/numpy_transforms.py +++ b/TTS/utils/audio/numpy_transforms.py @@ -428,7 +428,7 @@ def load_wav(*, filename: str, sample_rate: int = None, resample: bool = False, return x -def save_wav(*, wav: np.ndarray, path: str, sample_rate: int = None, 
pipe_out = None, **kwargs) -> None: +def save_wav(*, wav: np.ndarray, path: str, sample_rate: int = None, pipe_out=None, **kwargs) -> None: """Save float waveform to a file using Scipy. Args: diff --git a/TTS/utils/audio/processor.py b/TTS/utils/audio/processor.py index 248e15b8..4ceb7da4 100644 --- a/TTS/utils/audio/processor.py +++ b/TTS/utils/audio/processor.py @@ -694,7 +694,7 @@ class AudioProcessor(object): x = self.rms_volume_norm(x, self.db_level) return x - def save_wav(self, wav: np.ndarray, path: str, sr: int = None, pipe_out = None) -> None: + def save_wav(self, wav: np.ndarray, path: str, sr: int = None, pipe_out=None) -> None: """Save a waveform to a file using Scipy. Args: diff --git a/TTS/utils/synthesizer.py b/TTS/utils/synthesizer.py index a7370cd2..8efe608b 100644 --- a/TTS/utils/synthesizer.py +++ b/TTS/utils/synthesizer.py @@ -235,7 +235,7 @@ class Synthesizer(nn.Module): """ return self.seg.segment(text) - def save_wav(self, wav: List[int], path: str, pipe_out = None) -> None: + def save_wav(self, wav: List[int], path: str, pipe_out=None) -> None: """Save the waveform as a file. Args: diff --git a/recipes/ljspeech/xtts_v1/train_gpt_xtts.py b/recipes/ljspeech/xtts_v1/train_gpt_xtts.py index 94f3975c..9134be0d 100644 --- a/recipes/ljspeech/xtts_v1/train_gpt_xtts.py +++ b/recipes/ljspeech/xtts_v1/train_gpt_xtts.py @@ -7,7 +7,6 @@ from TTS.tts.datasets import load_tts_samples from TTS.tts.layers.xtts.trainer.gpt_trainer import GPTArgs, GPTTrainer, GPTTrainerConfig, XttsAudioConfig from TTS.utils.manage import ModelManager - # Logging parameters RUN_NAME = "GPT_XTTS_LJSpeech_FT" PROJECT_NAME = "XTTS_trainer" @@ -60,13 +59,15 @@ TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/vo XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/model.pth" # XTTS transfer learning parameters: You we need to provide the paths of XTTS model checkpoint that you want to do the fine tuning. -TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, TOKENIZER_FILE_LINK.split("/")[-1]) # vocab.json file -XTTS_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, XTTS_CHECKPOINT_LINK.split("/")[-1]) # model.pth file +TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, TOKENIZER_FILE_LINK.split("/")[-1]) # vocab.json file +XTTS_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, XTTS_CHECKPOINT_LINK.split("/")[-1]) # model.pth file # download XTTS v1.1 files if needed if not os.path.isfile(TOKENIZER_FILE) or not os.path.isfile(XTTS_CHECKPOINT): print(" > Downloading XTTS v1.1 files!") - ModelManager._download_model_files([TOKENIZER_FILE_LINK, XTTS_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True) + ModelManager._download_model_files( + [TOKENIZER_FILE_LINK, XTTS_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True + ) # Training sentences generations @@ -93,7 +94,7 @@ def main(): gpt_num_audio_tokens=8194, gpt_start_audio_token=8192, gpt_stop_audio_token=8193, - use_ne_hifigan=True, # if it is true it will keep the non-enhanced keys on the output checkpoint + use_ne_hifigan=True, # if it is true it will keep the non-enhanced keys on the output checkpoint ) # define audio config audio_config = XttsAudioConfig( diff --git a/tests/api_tests/test_synthesize_api.py b/tests/api_tests/test_synthesize_api.py index 084f81d4..e7b4f120 100644 --- a/tests/api_tests/test_synthesize_api.py +++ b/tests/api_tests/test_synthesize_api.py @@ -22,7 +22,4 @@ def test_synthesize(): ) # test pipe_out command - run_cli( - 'tts --text "test." 
--pipe_out ' - f'--out_path "{output_path}" | aplay' - ) + run_cli(f'tts --text "test." --pipe_out --out_path "{output_path}" | aplay') From e45227d9ff77a98cddf38bd6238a592393abaab8 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 10:58:18 -0300 Subject: [PATCH 03/67] XTTS v2.0 (#3137) * Implement most similar ref training approach * Use non-enhanced hifigan for test samples * Add Perceiver * Update GPT Trainer for perceiver support * Update XTTS docs * Bug fix masking with XTTS perceiver * Bug fix on gpt forward * Bug Fix on XTTS v2.0 training * Add XTTS v2.0 unit tests * Add XTTS v2.0 inference unit tests * Bug Fix on diffusion inference * Add XTTS v2.0 training recipe * Placeholder model entry * Add cloning params to config * Make prompt embedding configurable * Make cloning configurable * Cheap fix for a cheaper fix * Prevent resampling * Update model entry * Update docs * Update requirements * Code linting * Add xtts v2 to sep tests * Bug fix on XTTS get_gpt_cond_latents * Bug fix on rebase * Make style * Bug fix in Japenese tokenizer * Add num2words to deps * Remove unused kwarg and added num_beams=1 as default --------- Co-authored-by: Eren G??lge --- .gitignore | 1 + TTS/.models.json | 14 + TTS/tts/configs/xtts_config.py | 34 +- TTS/tts/layers/tortoise/dpm_solver.py | 21 +- TTS/tts/layers/xtts/gpt.py | 65 +- TTS/tts/layers/xtts/perceiver_encoder.py | 319 ++++++ TTS/tts/layers/xtts/tokenizer.py | 285 ++++- TTS/tts/layers/xtts/trainer/dataset.py | 38 +- TTS/tts/layers/xtts/trainer/gpt_trainer.py | 59 +- TTS/tts/layers/xtts/zh_num2words.py | 1033 +++++++++--------- TTS/tts/models/base_tacotron.py | 7 +- TTS/tts/models/tortoise.py | 7 +- TTS/tts/models/xtts.py | 122 ++- TTS/utils/manage.py | 4 +- docs/source/models/xtts.md | 29 +- recipes/ljspeech/xtts_v2/train_gpt_xtts.py | 180 +++ requirements.txt | 3 + tests/xtts_tests/test_xtts_gpt_train.py | 1 + tests/xtts_tests/test_xtts_v2-0_gpt_train.py | 162 +++ tests/zoo_tests/test_models.py | 53 + 20 files changed, 1779 insertions(+), 658 deletions(-) create mode 100644 TTS/tts/layers/xtts/perceiver_encoder.py create mode 100644 recipes/ljspeech/xtts_v2/train_gpt_xtts.py create mode 100644 tests/xtts_tests/test_xtts_v2-0_gpt_train.py diff --git a/.gitignore b/.gitignore index 563040e8..22ec6e41 100644 --- a/.gitignore +++ b/.gitignore @@ -169,3 +169,4 @@ wandb depot/* coqui_recipes/* local_scripts/* +coqui_demos/* \ No newline at end of file diff --git a/TTS/.models.json b/TTS/.models.json index 0c318740..b33e4fd3 100644 --- a/TTS/.models.json +++ b/TTS/.models.json @@ -2,6 +2,20 @@ "tts_models": { "multilingual": { "multi-dataset": { + "xtts_v2": { + "description": "XTTS-v2 by Coqui with 16 languages.", + "hf_url": [ + "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/model.pth", + "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/config.json", + "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json", + "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/hash.md5" + ], + "default_vocoder": null, + "commit": "480a6cdf7", + "license": "CPML", + "contact": "info@coqui.ai", + "tos_required": true + }, "xtts_v1": { "description": "XTTS-v1 by Coqui with 13 languages and cross-language voice cloning.", "hf_url": [ diff --git a/TTS/tts/configs/xtts_config.py b/TTS/tts/configs/xtts_config.py index 4e5031ba..1865a3fd 100644 --- a/TTS/tts/configs/xtts_config.py +++ b/TTS/tts/configs/xtts_config.py @@ -59,6 +59,16 @@ class XttsConfig(BaseTTSConfig): decoder_sampler (str): Diffusion sampler to be used. 
`ddim` or `dpm++2m`. Defaults to `ddim`. + + gpt_cond_len (int): + Secs audio to be used as conditioning for the autoregressive model. Defaults to `3`. + + max_ref_len (int): + Maximum number of seconds of audio to be used as conditioning for the decoder. Defaults to `10`. + + sound_norm_refs (bool): + Whether to normalize the conditioning audio. Defaults to `False`. + Note: Check :class:`TTS.tts.configs.shared_configs.BaseTTSConfig` for the inherited parameters. @@ -74,7 +84,24 @@ class XttsConfig(BaseTTSConfig): audio: XttsAudioConfig = field(default_factory=XttsAudioConfig) model_dir: str = None languages: List[str] = field( - default_factory=lambda: ["en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh-cn"] + default_factory=lambda: [ + "en", + "es", + "fr", + "de", + "it", + "pt", + "pl", + "tr", + "ru", + "nl", + "cs", + "ar", + "zh-cn", + "hu", + "ko", + "ja", + ] ) # inference params @@ -88,3 +115,8 @@ class XttsConfig(BaseTTSConfig): num_gpt_outputs: int = 1 decoder_iterations: int = 30 decoder_sampler: str = "ddim" + + # cloning + gpt_cond_len: int = 3 + max_ref_len: int = 10 + sound_norm_refs: bool = False diff --git a/TTS/tts/layers/tortoise/dpm_solver.py b/TTS/tts/layers/tortoise/dpm_solver.py index c70888df..cb540577 100644 --- a/TTS/tts/layers/tortoise/dpm_solver.py +++ b/TTS/tts/layers/tortoise/dpm_solver.py @@ -562,21 +562,15 @@ class DPM_Solver: if order == 3: K = steps // 3 + 1 if steps % 3 == 0: - orders = [ - 3, - ] * ( + orders = [3,] * ( K - 2 ) + [2, 1] elif steps % 3 == 1: - orders = [ - 3, - ] * ( + orders = [3,] * ( K - 1 ) + [1] else: - orders = [ - 3, - ] * ( + orders = [3,] * ( K - 1 ) + [2] elif order == 2: @@ -587,9 +581,7 @@ class DPM_Solver: ] * K else: K = steps // 2 + 1 - orders = [ - 2, - ] * ( + orders = [2,] * ( K - 1 ) + [1] elif order == 1: @@ -1448,10 +1440,7 @@ class DPM_Solver: model_prev_list[-1] = self.model_fn(x, t) elif method in ["singlestep", "singlestep_fixed"]: if method == "singlestep": - ( - timesteps_outer, - orders, - ) = self.get_orders_and_timesteps_for_singlestep_solver( + (timesteps_outer, orders,) = self.get_orders_and_timesteps_for_singlestep_solver( steps=steps, order=order, skip_type=skip_type, diff --git a/TTS/tts/layers/xtts/gpt.py b/TTS/tts/layers/xtts/gpt.py index dfd7774e..683104d8 100644 --- a/TTS/tts/layers/xtts/gpt.py +++ b/TTS/tts/layers/xtts/gpt.py @@ -11,6 +11,7 @@ from transformers import GPT2Config from TTS.tts.layers.xtts.gpt_inference import GPT2InferenceModel from TTS.tts.layers.xtts.latent_encoder import ConditioningEncoder +from TTS.tts.layers.xtts.perceiver_encoder import PerceiverResampler def null_position_embeddings(range, dim): @@ -105,6 +106,8 @@ class GPT(nn.Module): checkpointing=False, average_conditioning_embeddings=False, label_smoothing=0.0, + use_perceiver_resampler=False, + perceiver_cond_length_compression=256, ): """ Args: @@ -132,13 +135,12 @@ class GPT(nn.Module): self.conditioning_encoder = ConditioningEncoder(80, model_dim, num_attn_heads=heads) self.conditioning_dropout = nn.Dropout1d(0.1) self.average_conditioning_embeddings = average_conditioning_embeddings + self.use_perceiver_resampler = use_perceiver_resampler + self.perceiver_cond_length_compression = perceiver_cond_length_compression self.text_embedding = nn.Embedding(self.number_text_tokens, model_dim) self.mel_embedding = nn.Embedding(self.num_audio_tokens, model_dim) - self.prompt_embedding = nn.Embedding(self.num_audio_tokens, model_dim) - self.prompt_pos_embedding = LearnedPositionEmbeddings(24 * 9, 
model_dim) - ( self.gpt, self.mel_pos_embedding, @@ -165,9 +167,29 @@ class GPT(nn.Module): self.text_head = nn.Linear(model_dim, self.number_text_tokens) self.mel_head = nn.Linear(model_dim, self.num_audio_tokens) + if self.use_perceiver_resampler: + # XTTS v2 + self.conditioning_perceiver = PerceiverResampler( + dim=model_dim, + depth=2, + dim_context=model_dim, + num_latents=32, + dim_head=64, + heads=8, + ff_mult=4, + use_flash_attn=False, + ) + else: + # XTTS v1 + self.prompt_embedding = nn.Embedding(self.num_audio_tokens, model_dim) + self.prompt_pos_embedding = LearnedPositionEmbeddings(24 * 9, model_dim) + def get_grad_norm_parameter_groups(self): return { "conditioning_encoder": list(self.conditioning_encoder.parameters()), + "conditioning_perceiver": list(self.conditioning_perceiver.parameters()) + if self.use_perceiver_resampler + else None, "gpt": list(self.gpt.parameters()), "heads": list(self.text_head.parameters()) + list(self.mel_head.parameters()), } @@ -250,11 +272,8 @@ class GPT(nn.Module): if attn_mask_text is not None: attn_mask = torch.cat([attn_mask_text, attn_mask_mel], dim=1) if prompt is not None: - if attn_mask_cond is not None: - attn_mask = torch.cat([attn_mask_cond, attn_mask], dim=1) - else: - attn_mask_cond = torch.ones(prompt.shape[0], offset, dtype=torch.bool, device=emb.device) - attn_mask = torch.cat([attn_mask_cond, attn_mask], dim=1) + attn_mask_cond = torch.ones(prompt.shape[0], offset, dtype=torch.bool, device=emb.device) + attn_mask = torch.cat([attn_mask_cond, attn_mask], dim=1) gpt_out = self.gpt( inputs_embeds=emb, @@ -318,7 +337,6 @@ class GPT(nn.Module): prompt_len = 3 prompt_len = prompt_len * 24 # in frames if prompt_codes.shape[-1] >= prompt_len: - new_prompt = [] for i in range(prompt_codes.shape[0]): if lengths[i] < prompt_len: start = 0 @@ -340,7 +358,9 @@ class GPT(nn.Module): if not return_latent: if cond_input.ndim == 4: cond_input = cond_input.squeeze(1) - conds = self.conditioning_encoder(cond_input) + conds = self.conditioning_encoder(cond_input) # (b, d, s) + if self.use_perceiver_resampler: + conds = self.conditioning_perceiver(conds.permute(0, 2, 1)).transpose(1, 2) # (b, d, 32) else: # already computed conds = cond_input.unsqueeze(1) @@ -354,6 +374,7 @@ class GPT(nn.Module): wav_lengths, cond_mels=None, cond_idxs=None, + cond_lens=None, cond_latents=None, return_attentions=False, return_latent=False, @@ -379,10 +400,24 @@ class GPT(nn.Module): max_text_len = text_lengths.max() code_lengths = torch.ceil(wav_lengths / self.code_stride_len).long() + 3 + if cond_lens is not None: + if self.use_perceiver_resampler: + cond_lens = cond_lens // self.perceiver_cond_length_compression + else: + cond_lens = cond_lens // self.code_stride_len + if cond_idxs is not None: # recompute cond idxs for mel lengths - for idx, l in enumerate(code_lengths): - cond_idxs[idx] = cond_idxs[idx] / self.code_stride_len + for idx in range(cond_idxs.size(0)): + if self.use_perceiver_resampler: + cond_idxs[idx] = cond_idxs[idx] // self.perceiver_cond_length_compression + else: + cond_idxs[idx] = cond_idxs[idx] // self.code_stride_len + + # ensure that the cond_mel does not have padding + # if cond_lens is not None and cond_idxs is None: + # min_cond_len = torch.min(cond_lens) + # cond_mels = cond_mels[:, :, :, :min_cond_len] # If len(codes) + 3 is larger than maxiumum allowed length, we truncate the codes. 
max_mel_len = code_lengths.max() @@ -450,9 +485,13 @@ class GPT(nn.Module): ) if cond_idxs is not None: + # use masking approach for idx, r in enumerate(cond_idxs): l = r[1] - r[0] attn_mask_cond[idx, l:] = 0.0 + elif cond_lens is not None: + for idx, l in enumerate(cond_lens): + attn_mask_cond[idx, l:] = 0.0 for idx, l in enumerate(text_lengths): attn_mask_text[idx, l + 1 :] = 0.0 @@ -523,7 +562,7 @@ class GPT(nn.Module): def inference(self, cond_latents, text_inputs, **hf_generate_kwargs): self.compute_embeddings(cond_latents, text_inputs) - return self.generate(cond_latents, text_inputs, input_tokens=None, **hf_generate_kwargs) + return self.generate(cond_latents, text_inputs, **hf_generate_kwargs) def compute_embeddings( self, diff --git a/TTS/tts/layers/xtts/perceiver_encoder.py b/TTS/tts/layers/xtts/perceiver_encoder.py new file mode 100644 index 00000000..7b7ee79b --- /dev/null +++ b/TTS/tts/layers/xtts/perceiver_encoder.py @@ -0,0 +1,319 @@ +# Adapted from https://github.com/lucidrains/naturalspeech2-pytorch/blob/659bec7f7543e7747e809e950cc2f84242fbeec7/naturalspeech2_pytorch/naturalspeech2_pytorch.py#L532 + +from collections import namedtuple +from functools import wraps + +import torch +import torch.nn.functional as F +from einops import rearrange, repeat +from einops.layers.torch import Rearrange +from packaging import version +from torch import einsum, nn + + +def exists(val): + return val is not None + + +def once(fn): + called = False + + @wraps(fn) + def inner(x): + nonlocal called + if called: + return + called = True + return fn(x) + + return inner + + +print_once = once(print) + +# main class + + +class Attend(nn.Module): + def __init__(self, dropout=0.0, causal=False, use_flash=False): + super().__init__() + self.dropout = dropout + self.attn_dropout = nn.Dropout(dropout) + + self.causal = causal + self.register_buffer("mask", None, persistent=False) + + self.use_flash = use_flash + assert not ( + use_flash and version.parse(torch.__version__) < version.parse("2.0.0") + ), "in order to use flash attention, you must be using pytorch 2.0 or above" + + # determine efficient attention configs for cuda and cpu + self.config = namedtuple("EfficientAttentionConfig", ["enable_flash", "enable_math", "enable_mem_efficient"]) + self.cpu_config = self.config(True, True, True) + self.cuda_config = None + + if not torch.cuda.is_available() or not use_flash: + return + + device_properties = torch.cuda.get_device_properties(torch.device("cuda")) + + if device_properties.major == 8 and device_properties.minor == 0: + print_once("A100 GPU detected, using flash attention if input tensor is on cuda") + self.cuda_config = self.config(True, False, False) + else: + print_once("Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda") + self.cuda_config = self.config(False, True, True) + + def get_mask(self, n, device): + if exists(self.mask) and self.mask.shape[-1] >= n: + return self.mask[:n, :n] + + mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1) + self.register_buffer("mask", mask, persistent=False) + return mask + + def flash_attn(self, q, k, v, mask=None): + _, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda + + # Recommended for multi-query single-key-value attention by Tri Dao + # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64]) + + if k.ndim == 3: + k = rearrange(k, "b ... -> b 1 ...").expand_as(q) + + if v.ndim == 3: + v = rearrange(v, "b ... 
-> b 1 ...").expand_as(q) + + # Check if mask exists and expand to compatible shape + # The mask is B L, so it would have to be expanded to B H N L + + if exists(mask): + mask = rearrange(mask, "b j -> b 1 1 j") + mask = mask.expand(-1, heads, q_len, -1) + + # Check if there is a compatible device for flash attention + + config = self.cuda_config if is_cuda else self.cpu_config + + # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale + + with torch.backends.cuda.sdp_kernel(**config._asdict()): + out = F.scaled_dot_product_attention( + q, k, v, attn_mask=mask, dropout_p=self.dropout if self.training else 0.0, is_causal=self.causal + ) + + return out + + def forward(self, q, k, v, mask=None): + """ + einstein notation + b - batch + h - heads + n, i, j - sequence length (base sequence length, source, target) + d - feature dimension + """ + + n, device = q.shape[-2], q.device + + scale = q.shape[-1] ** -0.5 + + if self.use_flash: + return self.flash_attn(q, k, v, mask=mask) + + kv_einsum_eq = "b j d" if k.ndim == 3 else "b h j d" + + # similarity + + sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale + + # key padding mask + + if exists(mask): + mask = rearrange(mask, "b j -> b 1 1 j") + sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max) + + # causal mask + + if self.causal: + causal_mask = self.get_mask(n, device) + sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max) + + # attention + + attn = sim.softmax(dim=-1) + attn = self.attn_dropout(attn) + + # aggregate values + + out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v) + + return out + + +def Sequential(*mods): + return nn.Sequential(*filter(exists, mods)) + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if callable(d) else d + + +class RMSNorm(nn.Module): + def __init__(self, dim, scale=True, dim_cond=None): + super().__init__() + self.cond = exists(dim_cond) + self.to_gamma_beta = nn.Linear(dim_cond, dim * 2) if self.cond else None + + self.scale = dim**0.5 + self.gamma = nn.Parameter(torch.ones(dim)) if scale else None + + def forward(self, x, cond=None): + gamma = default(self.gamma, 1) + out = F.normalize(x, dim=-1) * self.scale * gamma + + if not self.cond: + return out + + assert exists(cond) + gamma, beta = self.to_gamma_beta(cond).chunk(2, dim=-1) + gamma, beta = map(lambda t: rearrange(t, "b d -> b 1 d"), (gamma, beta)) + return out * gamma + beta + + +class CausalConv1d(nn.Conv1d): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + (kernel_size,) = self.kernel_size + (dilation,) = self.dilation + (stride,) = self.stride + + assert stride == 1 + self.causal_padding = dilation * (kernel_size - 1) + + def forward(self, x): + causal_padded_x = F.pad(x, (self.causal_padding, 0), value=0.0) + return super().forward(causal_padded_x) + + +class GEGLU(nn.Module): + def forward(self, x): + x, gate = x.chunk(2, dim=-1) + return F.gelu(gate) * x + + +def FeedForward(dim, mult=4, causal_conv=False): + dim_inner = int(dim * mult * 2 / 3) + + conv = None + if causal_conv: + conv = nn.Sequential( + Rearrange("b n d -> b d n"), + CausalConv1d(dim_inner, dim_inner, 3), + Rearrange("b d n -> b n d"), + ) + + return Sequential(nn.Linear(dim, dim_inner * 2), GEGLU(), conv, nn.Linear(dim_inner, dim)) + + +class PerceiverResampler(nn.Module): + def __init__( + self, + *, + dim, + depth=2, + dim_context=None, + num_latents=32, + dim_head=64, + heads=8, + ff_mult=4, + use_flash_attn=False, + ): + 
super().__init__() + dim_context = default(dim_context, dim) + + self.proj_context = nn.Linear(dim_context, dim) if dim_context != dim else nn.Identity() + + self.latents = nn.Parameter(torch.randn(num_latents, dim)) + nn.init.normal_(self.latents, std=0.02) + + self.layers = nn.ModuleList([]) + for _ in range(depth): + self.layers.append( + nn.ModuleList( + [ + Attention( + dim=dim, + dim_head=dim_head, + heads=heads, + use_flash=use_flash_attn, + cross_attn_include_queries=True, + ), + FeedForward(dim=dim, mult=ff_mult), + ] + ) + ) + + self.norm = RMSNorm(dim) + + def forward(self, x, mask=None): + batch = x.shape[0] + + x = self.proj_context(x) + + latents = repeat(self.latents, "n d -> b n d", b=batch) + + for attn, ff in self.layers: + latents = attn(latents, x, mask=mask) + latents + latents = ff(latents) + latents + + return self.norm(latents) + + +class Attention(nn.Module): + def __init__( + self, + dim, + *, + dim_context=None, + causal=False, + dim_head=64, + heads=8, + dropout=0.0, + use_flash=False, + cross_attn_include_queries=False, + ): + super().__init__() + self.scale = dim_head**-0.5 + self.heads = heads + self.cross_attn_include_queries = cross_attn_include_queries + + dim_inner = dim_head * heads + dim_context = default(dim_context, dim) + + self.attend = Attend(causal=causal, dropout=dropout, use_flash=use_flash) + self.to_q = nn.Linear(dim, dim_inner, bias=False) + self.to_kv = nn.Linear(dim_context, dim_inner * 2, bias=False) + self.to_out = nn.Linear(dim_inner, dim, bias=False) + + def forward(self, x, context=None, mask=None): + h, has_context = self.heads, exists(context) + + context = default(context, x) + + if has_context and self.cross_attn_include_queries: + context = torch.cat((x, context), dim=-2) + + q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim=-1)) + q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v)) + + out = self.attend(q, k, v, mask=mask) + + out = rearrange(out, "b h n d -> b n (h d)") + return self.to_out(out) diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py index 456f8081..4f2da02d 100644 --- a/TTS/tts/layers/xtts/tokenizer.py +++ b/TTS/tts/layers/xtts/tokenizer.py @@ -4,6 +4,8 @@ import re import pypinyin import torch +from hangul_romanize import Transliter +from hangul_romanize.rule import academic from num2words import num2words from tokenizers import Tokenizer @@ -112,7 +114,7 @@ _abbreviations = { # There are not many common abbreviations in Arabic as in English. ] ], - "zh-cn": [ + "zh": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ # Chinese doesn't typically use abbreviations in the same way as Latin-based scripts. @@ -155,6 +157,21 @@ _abbreviations = { # Add other Turkish abbreviations here if needed. ] ], + "hu": [ + (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) + for x in [ + ("dr", "doktor"), # doctor + ("b", "bácsi"), # Mr. + ("nőv", "nővér"), # nurse + # Add other Hungarian abbreviations here if needed. + ] + ], + "ko": [ + (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) + for x in [ + # Korean doesn't typically use abbreviations in the same way as Latin-based scripts. 
+ ] + ], } @@ -262,7 +279,7 @@ _symbols_multilingual = { ("°", " درجة "), ] ], - "zh-cn": [ + "zh": [ # Chinese (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ @@ -326,6 +343,31 @@ _symbols_multilingual = { ("°", " derece "), ] ], + "hu": [ + (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) + for x in [ + ("&", " és "), + ("@", " kukac "), + ("%", " százalék "), + ("#", " kettőskereszt "), + ("$", " dollár "), + ("£", " font "), + ("°", " fok "), + ] + ], + "ko": [ + # Korean + (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) + for x in [ + ("&", " 그리고 "), + ("@", " 에 "), + ("%", " 퍼센트 "), + ("#", " 번호 "), + ("$", " 달러 "), + ("£", " 파운드 "), + ("°", " 도 "), + ] + ], } @@ -349,6 +391,8 @@ _ordinal_re = { "ru": re.compile(r"([0-9]+)(-й|-я|-е|-ое|-ье|-го)"), "nl": re.compile(r"([0-9]+)(de|ste|e)"), "tr": re.compile(r"([0-9]+)(\.|inci|nci|uncu|üncü|\.)"), + "hu": re.compile(r"([0-9]+)(\.|adik|edik|odik|edik|ödik|ödike|ik)"), + "ko": re.compile(r"([0-9]+)(번째|번|차|째)"), } _number_re = re.compile(r"[0-9]+") _currency_re = { @@ -398,6 +442,8 @@ def _expand_currency(m, lang="en", currency="USD"): "nl": ", ", "ar": ", ", "tr": ", ", + "hu": ", ", + "ko": ", ", } if amount.is_integer(): @@ -417,7 +463,7 @@ def _expand_number(m, lang="en"): def expand_numbers_multilingual(text, lang="en"): - if lang == "zh-cn": + if lang == "zh" or lang == "zh-cn": text = zh_num2words()(text) else: if lang in ["en", "ru"]: @@ -468,7 +514,7 @@ def basic_cleaners(text): def chinese_transliterate(text): return "".join( - p[0] for p in pypinyin.pinyin(text, style=pypinyin.Style.TONE3, heteronym=False, neutral_tone_with_five=True) + [p[0] for p in pypinyin.pinyin(text, style=pypinyin.Style.TONE3, heteronym=False, neutral_tone_with_five=True)] ) @@ -478,42 +524,23 @@ def japanese_cleaners(text, katsu): return text +def korean_cleaners(text): + r = Transliter(academic) + return r.translit(text) + + +DEFAULT_VOCAB_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../data/tokenizer.json") + + class VoiceBpeTokenizer: - def __init__(self, vocab_file=None, preprocess=None): + def __init__(self, vocab_file=None): self.tokenizer = None self.katsu = None - if vocab_file is not None: - with open(vocab_file, "r", encoding="utf-8") as f: - vocab = json.load(f) - - self.language = vocab["model"]["language"] if "language" in vocab["model"] else None - - if preprocess is None: - self.preprocess = "pre_tokenizer" in vocab and vocab["pre_tokenizer"] - else: - self.preprocess = preprocess - self.tokenizer = Tokenizer.from_file(vocab_file) - def preprocess_text(self, txt, lang): - if lang in ["en", "es", "fr", "de", "pt", "it", "pl", "ar", "cs", "ru", "nl", "tr", "zh-cn"]: - txt = multilingual_cleaners(txt, lang) - if lang == "zh-cn": - txt = chinese_transliterate(txt) - elif lang == "ja": - if self.katsu is None: - import cutlet - - self.katsu = cutlet.Cutlet() - txt = japanese_cleaners(txt, self.katsu) - else: - raise NotImplementedError() - return txt - def encode(self, txt, lang): - if self.preprocess: - txt = self.preprocess_text(txt, lang) + txt = self.preprocess_text(txt, lang) txt = f"[{lang}]{txt}" txt = txt.replace(" ", "[SPACE]") return self.tokenizer.encode(txt).ids @@ -527,8 +554,200 @@ class VoiceBpeTokenizer: txt = txt.replace("[UNK]", "") return txt + def preprocess_text(self, txt, lang): + if lang in ["en", "es", "fr", "de", "pt", "it", "pl", "zh", "ar", "cs", "ru", "nl", "tr", "hu"]: + txt = multilingual_cleaners(txt, lang) + elif lang == "ja": + if self.katsu is None: 
+ import cutlet + + self.katsu = cutlet.Cutlet() + txt = japanese_cleaners(txt, self.katsu) + elif lang == "zh-cn" or lang == "zh": + txt = chinese_transliterate(txt) + elif lang == "ko": + txt = korean_cleaners(txt) + else: + raise NotImplementedError() + return txt + def __len__(self): return self.tokenizer.get_vocab_size() def get_number_tokens(self): return max(self.tokenizer.get_vocab().values()) + 1 + + +def test_expand_numbers_multilingual(): + test_cases = [ + # English + ("In 12.5 seconds.", "In twelve point five seconds.", "en"), + ("There were 50 soldiers.", "There were fifty soldiers.", "en"), + ("This is a 1st test", "This is a first test", "en"), + ("That will be $20 sir.", "That will be twenty dollars sir.", "en"), + ("That will be 20€ sir.", "That will be twenty euro sir.", "en"), + ("That will be 20.15€ sir.", "That will be twenty euro, fifteen cents sir.", "en"), + ("That's 100,000.5.", "That's one hundred thousand point five.", "en"), + # French + ("En 12,5 secondes.", "En douze virgule cinq secondes.", "fr"), + ("Il y avait 50 soldats.", "Il y avait cinquante soldats.", "fr"), + ("Ceci est un 1er test", "Ceci est un premier test", "fr"), + ("Cela vous fera $20 monsieur.", "Cela vous fera vingt dollars monsieur.", "fr"), + ("Cela vous fera 20€ monsieur.", "Cela vous fera vingt euros monsieur.", "fr"), + ("Cela vous fera 20,15€ monsieur.", "Cela vous fera vingt euros et quinze centimes monsieur.", "fr"), + ("Ce sera 100.000,5.", "Ce sera cent mille virgule cinq.", "fr"), + # German + ("In 12,5 Sekunden.", "In zwölf Komma fünf Sekunden.", "de"), + ("Es gab 50 Soldaten.", "Es gab fünfzig Soldaten.", "de"), + ("Dies ist ein 1. Test", "Dies ist ein erste Test", "de"), # Issue with gender + ("Das macht $20 Herr.", "Das macht zwanzig Dollar Herr.", "de"), + ("Das macht 20€ Herr.", "Das macht zwanzig Euro Herr.", "de"), + ("Das macht 20,15€ Herr.", "Das macht zwanzig Euro und fünfzehn Cent Herr.", "de"), + # Spanish + ("En 12,5 segundos.", "En doce punto cinco segundos.", "es"), + ("Había 50 soldados.", "Había cincuenta soldados.", "es"), + ("Este es un 1er test", "Este es un primero test", "es"), + ("Eso le costará $20 señor.", "Eso le costará veinte dólares señor.", "es"), + ("Eso le costará 20€ señor.", "Eso le costará veinte euros señor.", "es"), + ("Eso le costará 20,15€ señor.", "Eso le costará veinte euros con quince céntimos señor.", "es"), + # Italian + ("In 12,5 secondi.", "In dodici virgola cinque secondi.", "it"), + ("C'erano 50 soldati.", "C'erano cinquanta soldati.", "it"), + ("Questo è un 1° test", "Questo è un primo test", "it"), + ("Ti costerà $20 signore.", "Ti costerà venti dollari signore.", "it"), + ("Ti costerà 20€ signore.", "Ti costerà venti euro signore.", "it"), + ("Ti costerà 20,15€ signore.", "Ti costerà venti euro e quindici centesimi signore.", "it"), + # Portuguese + ("Em 12,5 segundos.", "Em doze vírgula cinco segundos.", "pt"), + ("Havia 50 soldados.", "Havia cinquenta soldados.", "pt"), + ("Este é um 1º teste", "Este é um primeiro teste", "pt"), + ("Isso custará $20 senhor.", "Isso custará vinte dólares senhor.", "pt"), + ("Isso custará 20€ senhor.", "Isso custará vinte euros senhor.", "pt"), + ( + "Isso custará 20,15€ senhor.", + "Isso custará vinte euros e quinze cêntimos senhor.", + "pt", + ), # "cêntimos" should be "centavos" num2words issue + # Polish + ("W 12,5 sekundy.", "W dwanaście przecinek pięć sekundy.", "pl"), + ("Było 50 żołnierzy.", "Było pięćdziesiąt żołnierzy.", "pl"), + ("To będzie kosztować 20€ panie.", "To będzie kosztować 
dwadzieścia euro panie.", "pl"), + ("To będzie kosztować 20,15€ panie.", "To będzie kosztować dwadzieścia euro, piętnaście centów panie.", "pl"), + # Arabic + ("في الـ 12,5 ثانية.", "في الـ اثنا عشر , خمسون ثانية.", "ar"), + ("كان هناك 50 جنديًا.", "كان هناك خمسون جنديًا.", "ar"), + # ("ستكون النتيجة $20 يا سيد.", 'ستكون النتيجة عشرون دولار يا سيد.', 'ar'), # $ and € are mising from num2words + # ("ستكون النتيجة 20€ يا سيد.", 'ستكون النتيجة عشرون يورو يا سيد.', 'ar'), + # Czech + ("Za 12,5 vteřiny.", "Za dvanáct celá pět vteřiny.", "cs"), + ("Bylo tam 50 vojáků.", "Bylo tam padesát vojáků.", "cs"), + ("To bude stát 20€ pane.", "To bude stát dvacet euro pane.", "cs"), + ("To bude 20.15€ pane.", "To bude dvacet euro, patnáct centů pane.", "cs"), + # Russian + ("Через 12.5 секунды.", "Через двенадцать запятая пять секунды.", "ru"), + ("Там было 50 солдат.", "Там было пятьдесят солдат.", "ru"), + ("Это будет 20.15€ сэр.", "Это будет двадцать евро, пятнадцать центов сэр.", "ru"), + ("Это будет стоить 20€ господин.", "Это будет стоить двадцать евро господин.", "ru"), + # Dutch + ("In 12,5 seconden.", "In twaalf komma vijf seconden.", "nl"), + ("Er waren 50 soldaten.", "Er waren vijftig soldaten.", "nl"), + ("Dat wordt dan $20 meneer.", "Dat wordt dan twintig dollar meneer.", "nl"), + ("Dat wordt dan 20€ meneer.", "Dat wordt dan twintig euro meneer.", "nl"), + # Chinese (Simplified) + ("在12.5秒内", "在十二点五秒内", "zh"), + ("有50名士兵", "有五十名士兵", "zh"), + # ("那将是$20先生", '那将是二十美元先生', 'zh'), currency doesn't work + # ("那将是20€先生", '那将是二十欧元先生', 'zh'), + # Turkish + # ("12,5 saniye içinde.", 'On iki virgül beş saniye içinde.', 'tr'), # decimal doesn't work for TR + ("50 asker vardı.", "elli asker vardı.", "tr"), + ("Bu 1. test", "Bu birinci test", "tr"), + # ("Bu 100.000,5.", 'Bu yüz bin virgül beş.', 'tr'), + # Hungarian + ("12,5 másodperc alatt.", "tizenkettő egész öt tized másodperc alatt.", "hu"), + ("50 katona volt.", "ötven katona volt.", "hu"), + ("Ez az 1. teszt", "Ez az első teszt", "hu"), + # Korean + ("12.5 초 안에.", "십이 점 다섯 초 안에.", "ko"), + ("50 명의 병사가 있었다.", "오십 명의 병사가 있었다.", "ko"), + ("이것은 1 번째 테스트입니다", "이것은 첫 번째 테스트입니다", "ko"), + ] + for a, b, lang in test_cases: + out = expand_numbers_multilingual(a, lang=lang) + assert out == b, f"'{out}' vs '{b}'" + + +def test_abbreviations_multilingual(): + test_cases = [ + # English + ("Hello Mr. Smith.", "Hello mister Smith.", "en"), + ("Dr. Jones is here.", "doctor Jones is here.", "en"), + # Spanish + ("Hola Sr. Garcia.", "Hola señor Garcia.", "es"), + ("La Dra. Martinez es muy buena.", "La doctora Martinez es muy buena.", "es"), + # French + ("Bonjour Mr. Dupond.", "Bonjour monsieur Dupond.", "fr"), + ("Mme. Moreau est absente aujourd'hui.", "madame Moreau est absente aujourd'hui.", "fr"), + # German + ("Frau Dr. Müller ist sehr klug.", "Frau doktor Müller ist sehr klug.", "de"), + # Portuguese + ("Olá Sr. Silva.", "Olá senhor Silva.", "pt"), + ("Dra. Costa, você está disponível?", "doutora Costa, você está disponível?", "pt"), + # Italian + ("Buongiorno, Sig. Rossi.", "Buongiorno, signore Rossi.", "it"), + # ("Sig.ra Bianchi, posso aiutarti?", 'signora Bianchi, posso aiutarti?', 'it'), # Issue with matching that pattern + # Polish + ("Dzień dobry, P. Kowalski.", "Dzień dobry, pani Kowalski.", "pl"), + ("M. Nowak, czy mogę zadać pytanie?", "pan Nowak, czy mogę zadać pytanie?", "pl"), + # Czech + ("P. Novák", "pan Novák", "cs"), + ("Dr. Vojtěch", "doktor Vojtěch", "cs"), + # Dutch + ("Dhr. Jansen", "de heer Jansen", "nl"), + ("Mevr. 
de Vries", "mevrouw de Vries", "nl"), + # Russian + ("Здравствуйте Г-н Иванов.", "Здравствуйте господин Иванов.", "ru"), + ("Д-р Смирнов здесь, чтобы увидеть вас.", "доктор Смирнов здесь, чтобы увидеть вас.", "ru"), + # Turkish + ("Merhaba B. Yılmaz.", "Merhaba bay Yılmaz.", "tr"), + ("Dr. Ayşe burada.", "doktor Ayşe burada.", "tr"), + # Hungarian + ("Dr. Szabó itt van.", "doktor Szabó itt van.", "hu"), + ] + + for a, b, lang in test_cases: + out = expand_abbreviations_multilingual(a, lang=lang) + assert out == b, f"'{out}' vs '{b}'" + + +def test_symbols_multilingual(): + test_cases = [ + ("I have 14% battery", "I have 14 percent battery", "en"), + ("Te veo @ la fiesta", "Te veo arroba la fiesta", "es"), + ("J'ai 14° de fièvre", "J'ai 14 degrés de fièvre", "fr"), + ("Die Rechnung beträgt £ 20", "Die Rechnung beträgt pfund 20", "de"), + ("O meu email é ana&joao@gmail.com", "O meu email é ana e joao arroba gmail.com", "pt"), + ("linguaggio di programmazione C#", "linguaggio di programmazione C cancelletto", "it"), + ("Moja temperatura to 36.6°", "Moja temperatura to 36.6 stopnie", "pl"), + ("Mám 14% baterie", "Mám 14 procento baterie", "cs"), + ("Těším se na tebe @ party", "Těším se na tebe na party", "cs"), + ("У меня 14% заряда", "У меня 14 процентов заряда", "ru"), + ("Я буду @ дома", "Я буду собака дома", "ru"), + ("Ik heb 14% batterij", "Ik heb 14 procent batterij", "nl"), + ("Ik zie je @ het feest", "Ik zie je bij het feest", "nl"), + ("لدي 14% في البطارية", "لدي 14 في المئة في البطارية", "ar"), + ("我的电量为 14%", "我的电量为 14 百分之", "zh"), + ("Pilim %14 dolu.", "Pilim yüzde 14 dolu.", "tr"), + ("Az akkumulátorom töltöttsége 14%", "Az akkumulátorom töltöttsége 14 százalék", "hu"), + ("배터리 잔량이 14%입니다.", "배터리 잔량이 14 퍼센트입니다.", "ko"), + ] + + for a, b, lang in test_cases: + out = expand_symbols_multilingual(a, lang=lang) + assert out == b, f"'{out}' vs '{b}'" + + +if __name__ == "__main__": + test_expand_numbers_multilingual() + test_abbreviations_multilingual() + test_symbols_multilingual() diff --git a/TTS/tts/layers/xtts/trainer/dataset.py b/TTS/tts/layers/xtts/trainer/dataset.py index 41401fd6..5d8b2ae6 100644 --- a/TTS/tts/layers/xtts/trainer/dataset.py +++ b/TTS/tts/layers/xtts/trainer/dataset.py @@ -88,6 +88,7 @@ class XTTSDataset(torch.utils.data.Dataset): self.sample_rate = sample_rate self.max_wav_len = model_args.max_wav_length self.max_text_len = model_args.max_text_length + self.use_masking_gt_prompt_approach = model_args.gpt_use_masking_gt_prompt_approach assert self.max_wav_len is not None and self.max_text_len is not None self.samples = samples @@ -109,7 +110,7 @@ class XTTSDataset(torch.utils.data.Dataset): try: tseq, _, wav, _, _, _ = self.load_item(sample) except: - pass + continue # Basically, this audio file is nonexistent or too long to be supported by the dataset. if ( wav is None @@ -140,10 +141,24 @@ class XTTSDataset(torch.utils.data.Dataset): # Ultra short clips are also useless (and can cause problems within some models). 
raise ValueError - # get a slice from GT to condition the model - cond, cond_len, cond_idxs = get_prompt_slice( - audiopath, self.max_conditioning_length, self.min_conditioning_length, self.sample_rate, self.is_eval - ) + if self.use_masking_gt_prompt_approach: + # get a slice from GT to condition the model + cond, _, cond_idxs = get_prompt_slice( + audiopath, self.max_conditioning_length, self.min_conditioning_length, self.sample_rate, self.is_eval + ) + # if use masking do not use cond_len + cond_len = torch.nan + else: + ref_sample = ( + sample["reference_path"] + if "reference_path" in sample and sample["reference_path"] is not None + else audiopath + ) + cond, cond_len, _ = get_prompt_slice( + ref_sample, self.max_conditioning_length, self.min_conditioning_length, self.sample_rate, self.is_eval + ) + # if do not use masking use cond_len + cond_idxs = torch.nan return tseq, audiopath, wav, cond, cond_len, cond_idxs @@ -199,8 +214,10 @@ class XTTSDataset(torch.utils.data.Dataset): "wav_lengths": torch.tensor(wav.shape[-1], dtype=torch.long), "filenames": audiopath, "conditioning": cond.unsqueeze(1), - "cond_lens": torch.tensor(cond_len, dtype=torch.long), - "cond_idxs": torch.tensor(cond_idxs), + "cond_lens": torch.tensor(cond_len, dtype=torch.long) + if cond_len is not torch.nan + else torch.tensor([cond_len]), + "cond_idxs": torch.tensor(cond_idxs) if cond_idxs is not torch.nan else torch.tensor([cond_idxs]), } return res @@ -221,6 +238,13 @@ class XTTSDataset(torch.utils.data.Dataset): batch["conditioning"] = torch.stack(batch["conditioning"]) batch["cond_lens"] = torch.stack(batch["cond_lens"]) batch["cond_idxs"] = torch.stack(batch["cond_idxs"]) + + if torch.any(batch["cond_idxs"].isnan()): + batch["cond_idxs"] = None + + if torch.any(batch["cond_lens"].isnan()): + batch["cond_lens"] = None + max_text_len = batch["text_lengths"].max() max_wav_len = batch["wav_lengths"].max() diff --git a/TTS/tts/layers/xtts/trainer/gpt_trainer.py b/TTS/tts/layers/xtts/trainer/gpt_trainer.py index e93063fa..ef32a4ab 100644 --- a/TTS/tts/layers/xtts/trainer/gpt_trainer.py +++ b/TTS/tts/layers/xtts/trainer/gpt_trainer.py @@ -141,17 +141,30 @@ class GPTTrainer(BaseTTS): print(">> GPT weights restored from:", self.args.gpt_checkpoint) # Mel spectrogram extractor for conditioning - self.torch_mel_spectrogram_style_encoder = TorchMelSpectrogram( - filter_length=4096, - hop_length=1024, - win_length=4096, - normalize=False, - sampling_rate=config.audio.sample_rate, - mel_fmin=0, - mel_fmax=8000, - n_mel_channels=80, - mel_norm_file=self.args.mel_norm_file, - ) + if self.args.gpt_use_perceiver_resampler: + self.torch_mel_spectrogram_style_encoder = TorchMelSpectrogram( + filter_length=2048, + hop_length=256, + win_length=1024, + normalize=False, + sampling_rate=config.audio.sample_rate, + mel_fmin=0, + mel_fmax=8000, + n_mel_channels=80, + mel_norm_file=self.args.mel_norm_file, + ) + else: + self.torch_mel_spectrogram_style_encoder = TorchMelSpectrogram( + filter_length=4096, + hop_length=1024, + win_length=4096, + normalize=False, + sampling_rate=config.audio.sample_rate, + mel_fmin=0, + mel_fmax=8000, + n_mel_channels=80, + mel_norm_file=self.args.mel_norm_file, + ) # Load DVAE self.dvae = DiscreteVAE( @@ -186,7 +199,7 @@ class GPTTrainer(BaseTTS): def device(self): return next(self.parameters()).device - def forward(self, text_inputs, text_lengths, audio_codes, wav_lengths, cond_mels, cond_idxs): + def forward(self, text_inputs, text_lengths, audio_codes, wav_lengths, cond_mels, cond_idxs, cond_lens): 
""" Forward pass that uses both text and voice in either text conditioning mode or voice conditioning mode (actuated by `text_first`). @@ -197,9 +210,16 @@ class GPTTrainer(BaseTTS): wav_lengths: long tensor, (b,) cond_mels: MEL float tensor, (b, num_samples, 80,t_m) cond_idxs: cond start and end indexs, (b, 2) + cond_lens: long tensor, (b,) """ losses = self.xtts.gpt( - text_inputs, text_lengths, audio_codes, wav_lengths, cond_mels=cond_mels, cond_idxs=cond_idxs + text_inputs, + text_lengths, + audio_codes, + wav_lengths, + cond_mels=cond_mels, + cond_idxs=cond_idxs, + cond_lens=cond_lens, ) return losses @@ -213,7 +233,12 @@ class GPTTrainer(BaseTTS): print(" | > Synthesizing test sentences.") for idx, s_info in enumerate(self.config.test_sentences): wav = self.xtts.synthesize( - s_info["text"], self.config, s_info["speaker_wav"], s_info["language"], gpt_cond_len=3 + s_info["text"], + self.config, + s_info["speaker_wav"], + s_info["language"], + gpt_cond_len=3, + decoder="ne_hifigan", )["wav"] test_audios["{}-audio".format(idx)] = wav @@ -269,7 +294,6 @@ class GPTTrainer(BaseTTS): del batch["padded_text"] del batch["wav"] del batch["conditioning"] - del batch["cond_lens"] return batch def train_step(self, batch, criterion): @@ -280,8 +304,11 @@ class GPTTrainer(BaseTTS): audio_codes = batch["audio_codes"] wav_lengths = batch["wav_lengths"] cond_idxs = batch["cond_idxs"] + cond_lens = batch["cond_lens"] - loss_text, loss_mel, _ = self.forward(text_inputs, text_lengths, audio_codes, wav_lengths, cond_mels, cond_idxs) + loss_text, loss_mel, _ = self.forward( + text_inputs, text_lengths, audio_codes, wav_lengths, cond_mels, cond_idxs, cond_lens + ) loss_dict["loss_text_ce"] = loss_text * self.args.gpt_loss_text_ce_weight loss_dict["loss_mel_ce"] = loss_mel * self.args.gpt_loss_mel_ce_weight loss_dict["loss"] = loss_dict["loss_text_ce"] + loss_dict["loss_mel_ce"] diff --git a/TTS/tts/layers/xtts/zh_num2words.py b/TTS/tts/layers/xtts/zh_num2words.py index 2c56e3bb..ea6d98d3 100644 --- a/TTS/tts/layers/xtts/zh_num2words.py +++ b/TTS/tts/layers/xtts/zh_num2words.py @@ -14,375 +14,379 @@ import sys # ================================================================================ # # basic constant # ================================================================================ # -CHINESE_DIGIS = u'零一二三四五六七八九' -BIG_CHINESE_DIGIS_SIMPLIFIED = u'零壹贰叁肆伍陆柒捌玖' -BIG_CHINESE_DIGIS_TRADITIONAL = u'零壹貳參肆伍陸柒捌玖' -SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'十百千万' -SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'拾佰仟萬' -LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'亿兆京垓秭穰沟涧正载' -LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'億兆京垓秭穰溝澗正載' -SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'十百千万' -SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'拾佰仟萬' +CHINESE_DIGIS = "零一二三四五六七八九" +BIG_CHINESE_DIGIS_SIMPLIFIED = "零壹贰叁肆伍陆柒捌玖" +BIG_CHINESE_DIGIS_TRADITIONAL = "零壹貳參肆伍陸柒捌玖" +SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = "十百千万" +SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = "拾佰仟萬" +LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = "亿兆京垓秭穰沟涧正载" +LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = "億兆京垓秭穰溝澗正載" +SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = "十百千万" +SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = "拾佰仟萬" -ZERO_ALT = u'〇' -ONE_ALT = u'幺' -TWO_ALTS = [u'两', u'兩'] +ZERO_ALT = "〇" +ONE_ALT = "幺" +TWO_ALTS = ["两", "兩"] -POSITIVE = [u'正', u'正'] -NEGATIVE = [u'负', u'負'] -POINT = [u'点', u'點'] +POSITIVE = ["正", "正"] +NEGATIVE = ["负", "負"] +POINT = ["点", "點"] # PLUS = [u'加', u'加'] # SIL = [u'杠', u'槓'] -FILLER_CHARS = ['呃', '啊'] +FILLER_CHARS = ["呃", "啊"] 
-ER_WHITELIST = '(儿女|儿子|儿孙|女儿|儿媳|妻儿|' \ - '胎儿|婴儿|新生儿|婴幼儿|幼儿|少儿|小儿|儿歌|儿童|儿科|托儿所|孤儿|' \ - '儿戏|儿化|台儿庄|鹿儿岛|正儿八经|吊儿郎当|生儿育女|托儿带女|养儿防老|痴儿呆女|' \ - '佳儿佳妇|儿怜兽扰|儿无常父|儿不嫌母丑|儿行千里母担忧|儿大不由爷|苏乞儿)' +ER_WHITELIST = ( + "(儿女|儿子|儿孙|女儿|儿媳|妻儿|" + "胎儿|婴儿|新生儿|婴幼儿|幼儿|少儿|小儿|儿歌|儿童|儿科|托儿所|孤儿|" + "儿戏|儿化|台儿庄|鹿儿岛|正儿八经|吊儿郎当|生儿育女|托儿带女|养儿防老|痴儿呆女|" + "佳儿佳妇|儿怜兽扰|儿无常父|儿不嫌母丑|儿行千里母担忧|儿大不由爷|苏乞儿)" +) ER_WHITELIST_PATTERN = re.compile(ER_WHITELIST) # 中文数字系统类型 -NUMBERING_TYPES = ['low', 'mid', 'high'] +NUMBERING_TYPES = ["low", "mid", "high"] -CURRENCY_NAMES = '(人民币|美元|日元|英镑|欧元|马克|法郎|加拿大元|澳元|港币|先令|芬兰马克|爱尔兰镑|' \ - '里拉|荷兰盾|埃斯库多|比塞塔|印尼盾|林吉特|新西兰元|比索|卢布|新加坡元|韩元|泰铢)' -CURRENCY_UNITS = '((亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)' -COM_QUANTIFIERS = '(匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|' \ - '砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|' \ - '针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|' \ - '毫|厘|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|' \ - '盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|' \ - '纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块)' +CURRENCY_NAMES = "(人民币|美元|日元|英镑|欧元|马克|法郎|加拿大元|澳元|港币|先令|芬兰马克|爱尔兰镑|" "里拉|荷兰盾|埃斯库多|比塞塔|印尼盾|林吉特|新西兰元|比索|卢布|新加坡元|韩元|泰铢)" +CURRENCY_UNITS = "((亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)" +COM_QUANTIFIERS = ( + "(匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|" + "砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|" + "针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|" + "毫|厘|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|" + "盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|" + "纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块)" +) # Punctuation information are based on Zhon project (https://github.com/tsroten/zhon.git) -CN_PUNCS_STOP = '!?。。' -CN_PUNCS_NONSTOP = '"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏·〈〉-' +CN_PUNCS_STOP = "!?。。" +CN_PUNCS_NONSTOP = ""#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏·〈〉-" CN_PUNCS = CN_PUNCS_STOP + CN_PUNCS_NONSTOP PUNCS = CN_PUNCS + string.punctuation -PUNCS_TRANSFORM = str.maketrans(PUNCS, ' ' * len(PUNCS), '') # replace puncs with space +PUNCS_TRANSFORM = str.maketrans(PUNCS, " " * len(PUNCS), "") # replace puncs with space # https://zh.wikipedia.org/wiki/全行和半行 QJ2BJ = { - ' ': ' ', - '!': '!', - '"': '"', - '#': '#', - '$': '$', - '%': '%', - '&': '&', - ''': "'", - '(': '(', - ')': ')', - '*': '*', - '+': '+', - ',': ',', - '-': '-', - '.': '.', - '/': '/', - '0': '0', - '1': '1', - '2': '2', - '3': '3', - '4': '4', - '5': '5', - '6': '6', - '7': '7', - '8': '8', - '9': '9', - ':': ':', - ';': ';', - '<': '<', - '=': '=', - '>': '>', - '?': '?', - '@': '@', - 'A': 'A', - 'B': 'B', - 'C': 'C', - 'D': 'D', - 'E': 'E', - 'F': 'F', - 'G': 'G', - 'H': 'H', - 'I': 'I', - 'J': 'J', - 'K': 'K', - 'L': 'L', - 'M': 'M', - 'N': 'N', - 'O': 'O', - 'P': 'P', - 'Q': 'Q', - 'R': 'R', - 'S': 'S', - 'T': 'T', - 'U': 'U', - 'V': 'V', - 'W': 'W', - 'X': 'X', - 'Y': 'Y', - 'Z': 'Z', - '[': '[', - '\': '\\', - ']': ']', - '^': '^', - '_': '_', - '`': '`', - 'a': 'a', - 'b': 'b', - 'c': 'c', - 'd': 'd', - 'e': 'e', - 'f': 'f', - 'g': 'g', - 'h': 'h', - 'i': 'i', - 'j': 'j', - 'k': 'k', - 'l': 'l', - 'm': 'm', - 'n': 'n', - 'o': 'o', - 'p': 'p', - 'q': 'q', - 'r': 'r', - 's': 's', - 't': 't', - 'u': 'u', - 'v': 'v', - 'w': 'w', - 'x': 'x', - 'y': 'y', - 'z': 'z', - '{': '{', - '|': '|', - '}': '}', - '~': '~', + " ": " ", + "!": "!", + """: '"', + "#": 
"#", + "$": "$", + "%": "%", + "&": "&", + "'": "'", + "(": "(", + ")": ")", + "*": "*", + "+": "+", + ",": ",", + "-": "-", + ".": ".", + "/": "/", + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7", + "8": "8", + "9": "9", + ":": ":", + ";": ";", + "<": "<", + "=": "=", + ">": ">", + "?": "?", + "@": "@", + "A": "A", + "B": "B", + "C": "C", + "D": "D", + "E": "E", + "F": "F", + "G": "G", + "H": "H", + "I": "I", + "J": "J", + "K": "K", + "L": "L", + "M": "M", + "N": "N", + "O": "O", + "P": "P", + "Q": "Q", + "R": "R", + "S": "S", + "T": "T", + "U": "U", + "V": "V", + "W": "W", + "X": "X", + "Y": "Y", + "Z": "Z", + "[": "[", + "\": "\\", + "]": "]", + "^": "^", + "_": "_", + "`": "`", + "a": "a", + "b": "b", + "c": "c", + "d": "d", + "e": "e", + "f": "f", + "g": "g", + "h": "h", + "i": "i", + "j": "j", + "k": "k", + "l": "l", + "m": "m", + "n": "n", + "o": "o", + "p": "p", + "q": "q", + "r": "r", + "s": "s", + "t": "t", + "u": "u", + "v": "v", + "w": "w", + "x": "x", + "y": "y", + "z": "z", + "{": "{", + "|": "|", + "}": "}", + "~": "~", } -QJ2BJ_TRANSFORM = str.maketrans(''.join(QJ2BJ.keys()), ''.join(QJ2BJ.values()), '') +QJ2BJ_TRANSFORM = str.maketrans("".join(QJ2BJ.keys()), "".join(QJ2BJ.values()), "") # 2013 China National Standard: https://zh.wikipedia.org/wiki/通用规范汉字表, raw resources: # https://github.com/mozillazg/pinyin-data/blob/master/kMandarin_8105.txt with 8105 chinese chars in total CN_CHARS_COMMON = ( - '一丁七万丈三上下不与丏丐丑专且丕世丘丙业丛东丝丞丢两严丧个丫中丰串临丸丹为主丽举' - '乂乃久么义之乌乍乎乏乐乒乓乔乖乘乙乜九乞也习乡书乩买乱乳乸乾了予争事二亍于亏云互' - '亓五井亘亚些亟亡亢交亥亦产亨亩享京亭亮亲亳亵亶亸亹人亿什仁仂仃仄仅仆仇仉今介仍从' - '仑仓仔仕他仗付仙仝仞仟仡代令以仨仪仫们仰仲仳仵件价任份仿企伈伉伊伋伍伎伏伐休众优' - '伙会伛伞伟传伢伣伤伥伦伧伪伫伭伯估伲伴伶伸伺似伽伾佁佃但位低住佐佑体何佖佗佘余佚' - '佛作佝佞佟你佣佤佥佩佬佯佰佳佴佶佸佺佻佼佽佾使侁侂侃侄侈侉例侍侏侑侔侗侘供依侠侣' - '侥侦侧侨侩侪侬侮侯侴侵侹便促俄俅俊俍俎俏俐俑俗俘俙俚俜保俞俟信俣俦俨俩俪俫俭修俯' - '俱俳俵俶俸俺俾倌倍倏倒倓倔倕倘候倚倜倞借倡倥倦倧倨倩倪倬倭倮倴债倻值倾偁偃假偈偌' - '偎偏偓偕做停偡健偬偭偰偲偶偷偻偾偿傀傃傅傈傉傍傒傕傣傥傧储傩催傲傺傻僇僎像僔僖僚' - '僦僧僬僭僮僰僳僵僻儆儇儋儒儡儦儳儴儿兀允元兄充兆先光克免兑兔兕兖党兜兢入全八公六' - '兮兰共关兴兵其具典兹养兼兽冀冁内冈冉册再冏冒冔冕冗写军农冠冢冤冥冬冮冯冰冱冲决况' - '冶冷冻冼冽净凄准凇凉凋凌减凑凓凘凛凝几凡凤凫凭凯凰凳凶凸凹出击凼函凿刀刁刃分切刈' - '刊刍刎刑划刖列刘则刚创初删判刨利别刬刭刮到刳制刷券刹刺刻刽刿剀剁剂剃剅削剋剌前剐' - '剑剔剕剖剜剞剟剡剥剧剩剪副割剽剿劁劂劄劈劐劓力劝办功加务劢劣动助努劫劬劭励劲劳劼' - '劾势勃勇勉勋勍勐勒勔勖勘勚募勠勤勰勺勾勿匀包匆匈匍匏匐匕化北匙匜匝匠匡匣匦匪匮匹' - '区医匼匾匿十千卅升午卉半华协卑卒卓单卖南博卜卞卟占卡卢卣卤卦卧卫卬卮卯印危即却卵' - '卷卸卺卿厂厄厅历厉压厌厍厕厖厘厚厝原厢厣厥厦厨厩厮去厾县叁参叆叇又叉及友双反发叔' - '叕取受变叙叚叛叟叠口古句另叨叩只叫召叭叮可台叱史右叵叶号司叹叻叼叽吁吃各吆合吉吊' - '同名后吏吐向吒吓吕吖吗君吝吞吟吠吡吣否吧吨吩含听吭吮启吱吲吴吵吸吹吻吼吽吾呀呃呆' - '呇呈告呋呐呒呓呔呕呖呗员呙呛呜呢呣呤呦周呱呲味呵呶呷呸呻呼命咀咂咄咆咇咉咋和咍咎' - '咏咐咒咔咕咖咙咚咛咝咡咣咤咥咦咧咨咩咪咫咬咯咱咳咴咸咺咻咽咿哀品哂哃哄哆哇哈哉哌' - '响哎哏哐哑哒哓哔哕哗哙哚哝哞哟哢哥哦哧哨哩哪哭哮哱哲哳哺哼哽哿唁唆唇唉唏唐唑唔唛' - '唝唠唢唣唤唧唪唬售唯唰唱唳唵唷唼唾唿啁啃啄商啉啊啐啕啖啜啡啤啥啦啧啪啫啬啭啮啰啴' - '啵啶啷啸啻啼啾喀喁喂喃善喆喇喈喉喊喋喏喑喔喘喙喜喝喟喤喧喱喳喵喷喹喻喽喾嗄嗅嗉嗌' - '嗍嗐嗑嗒嗓嗔嗖嗜嗝嗞嗟嗡嗣嗤嗥嗦嗨嗪嗫嗬嗯嗲嗳嗵嗷嗽嗾嘀嘁嘈嘉嘌嘎嘏嘘嘚嘛嘞嘟嘡' - '嘣嘤嘧嘬嘭嘱嘲嘴嘶嘹嘻嘿噀噂噇噌噍噎噔噗噘噙噜噢噤器噩噪噫噬噱噶噻噼嚄嚅嚆嚎嚏嚓' - '嚚嚣嚭嚯嚷嚼囊囔囚四回囟因囡团囤囫园困囱围囵囷囹固国图囿圃圄圆圈圉圊圌圐圙圜土圢' - '圣在圩圪圫圬圭圮圯地圲圳圹场圻圾址坂均坉坊坋坌坍坎坏坐坑坒块坚坛坜坝坞坟坠坡坤坥' - '坦坨坩坪坫坬坭坯坰坳坷坻坼坽垂垃垄垆垈型垌垍垎垏垒垓垕垙垚垛垞垟垠垡垢垣垤垦垧垩' - '垫垭垮垯垱垲垴垵垸垺垾垿埂埃埆埇埋埌城埏埒埔埕埗埘埙埚埝域埠埤埪埫埭埯埴埵埸培基' - '埼埽堂堃堆堇堉堋堌堍堎堐堑堕堙堞堠堡堤堧堨堪堰堲堵堼堽堾塄塅塆塌塍塑塔塘塝塞塥填' - '塬塱塾墀墁境墅墈墉墐墒墓墕墘墙墚增墟墡墣墦墨墩墼壁壅壑壕壤士壬壮声壳壶壸壹处备复' - '夏夐夔夕外夙多夜够夤夥大天太夫夬夭央夯失头夷夸夹夺夼奁奂奄奇奈奉奋奎奏契奓奔奕奖' - '套奘奚奠奡奢奥奭女奴奶奸她好妁如妃妄妆妇妈妊妍妒妓妖妗妘妙妞妣妤妥妧妨妩妪妫妭妮' - '妯妲妹妻妾姆姈姊始姐姑姒姓委姗姘姚姜姝姞姣姤姥姨姬姮姱姶姹姻姽姿娀威娃娄娅娆娇娈' - '娉娌娑娓娘娜娟娠娣娥娩娱娲娴娵娶娼婀婆婉婊婌婍婕婘婚婞婠婢婤婧婪婫婳婴婵婶婷婺婻' - '婼婿媂媄媆媒媓媖媚媛媞媪媭媱媲媳媵媸媾嫁嫂嫄嫉嫌嫒嫔嫕嫖嫘嫚嫜嫠嫡嫣嫦嫩嫪嫫嫭嫱' - '嫽嬉嬖嬗嬛嬥嬬嬴嬷嬿孀孅子孑孓孔孕孖字存孙孚孛孜孝孟孢季孤孥学孩孪孬孰孱孳孵孺孽' - '宁它宄宅宇守安宋完宏宓宕宗官宙定宛宜宝实宠审客宣室宥宦宧宪宫宬宰害宴宵家宸容宽宾' - '宿寁寂寄寅密寇富寐寒寓寝寞察寡寤寥寨寮寰寸对寺寻导寿封射将尉尊小少尔尕尖尘尚尜尝' - '尢尤尥尧尨尪尬就尴尸尹尺尻尼尽尾尿局屁层屃居屈屉届屋屎屏屐屑展屙属屠屡屣履屦屯山' - '屹屺屼屾屿岁岂岈岊岌岍岐岑岔岖岗岘岙岚岛岜岞岠岢岣岨岩岫岬岭岱岳岵岷岸岽岿峁峂峃' - '峄峋峒峗峘峙峛峡峣峤峥峦峧峨峪峭峰峱峻峿崀崁崂崃崄崆崇崌崎崒崔崖崚崛崞崟崡崤崦崧' - '崩崭崮崴崶崽崾崿嵁嵅嵇嵊嵋嵌嵎嵖嵘嵚嵛嵝嵩嵫嵬嵯嵲嵴嶂嶅嶍嶒嶓嶙嶝嶟嶦嶲嶷巅巇巉' - '巍川州巡巢工左巧巨巩巫差巯己已巳巴巷巽巾币市布帅帆师希帏帐帑帔帕帖帘帙帚帛帜帝帡' - '带帧帨席帮帱帷常帻帼帽幂幄幅幌幔幕幖幛幞幡幢幪干平年并幸幺幻幼幽广庄庆庇床庋序庐' - 
'庑库应底庖店庙庚府庞废庠庤庥度座庭庱庳庵庶康庸庹庼庾廆廉廊廋廑廒廓廖廙廛廨廪延廷' - '建廿开弁异弃弄弆弇弈弊弋式弑弓引弗弘弛弟张弢弥弦弧弨弩弭弯弱弶弸弹强弼彀归当录彖' - '彗彘彝彟形彤彦彧彩彪彬彭彰影彳彷役彻彼往征徂径待徇很徉徊律徐徒徕得徘徙徛徜御徨循' - '徭微徵德徼徽心必忆忉忌忍忏忐忑忒忖志忘忙忝忞忠忡忤忧忪快忭忮忱忳念忸忺忻忽忾忿怀' - '态怂怃怄怅怆怊怍怎怏怒怔怕怖怙怛怜思怠怡急怦性怨怩怪怫怯怵总怼怿恁恂恃恋恍恐恒恓' - '恔恕恙恚恝恢恣恤恧恨恩恪恫恬恭息恰恳恶恸恹恺恻恼恽恿悃悄悆悈悉悌悍悒悔悖悚悛悝悟' - '悠悢患悦您悫悬悭悯悰悱悲悴悸悻悼情惆惇惊惋惎惑惔惕惘惙惚惛惜惝惟惠惦惧惨惩惫惬惭' - '惮惯惰想惴惶惹惺愀愁愃愆愈愉愍愎意愐愔愕愚感愠愣愤愦愧愫愭愿慆慈慊慌慎慑慕慝慢慥' - '慧慨慬慭慰慵慷憋憎憔憕憙憧憨憩憬憭憷憺憾懂懈懊懋懑懒懔懦懵懿戆戈戊戋戌戍戎戏成我' - '戒戕或戗战戚戛戟戡戢戣戤戥截戬戭戮戳戴户戽戾房所扁扂扃扅扆扇扈扉扊手才扎扑扒打扔' - '托扛扞扣扦执扩扪扫扬扭扮扯扰扳扶批扺扼扽找承技抃抄抉把抑抒抓抔投抖抗折抚抛抟抠抡' - '抢护报抨披抬抱抵抹抻押抽抿拂拃拄担拆拇拈拉拊拌拍拎拐拒拓拔拖拗拘拙招拜拟拢拣拤拥' - '拦拧拨择括拭拮拯拱拳拴拶拷拼拽拾拿持挂指挈按挎挑挓挖挚挛挝挞挟挠挡挣挤挥挦挨挪挫' - '振挲挹挺挽捂捃捅捆捉捋捌捍捎捏捐捕捞损捡换捣捧捩捭据捯捶捷捺捻捽掀掂掇授掉掊掌掎' - '掏掐排掖掘掞掠探掣接控推掩措掬掭掮掰掳掴掷掸掺掼掾揄揆揉揍描提插揕揖揠握揣揩揪揭' - '揳援揶揸揽揿搀搁搂搅搋搌搏搐搒搓搔搛搜搞搠搡搦搪搬搭搴携搽摁摄摅摆摇摈摊摏摒摔摘' - '摛摞摧摩摭摴摸摹摽撂撄撅撇撑撒撕撖撙撞撤撩撬播撮撰撵撷撸撺撼擀擂擅操擎擐擒擘擞擢' - '擤擦擿攀攉攒攘攥攫攮支收攸改攻攽放政故效敉敌敏救敔敕敖教敛敝敞敢散敦敩敫敬数敲整' - '敷文斋斌斐斑斓斗料斛斜斝斟斠斡斤斥斧斩斫断斯新斶方於施旁旃旄旅旆旋旌旎族旐旒旖旗' - '旞无既日旦旧旨早旬旭旮旯旰旱旴旵时旷旸旺旻旿昀昂昃昄昆昇昈昉昊昌明昏昒易昔昕昙昝' - '星映昡昣昤春昧昨昪昫昭是昱昳昴昵昶昺昼昽显晁晃晅晊晋晌晏晐晒晓晔晕晖晗晙晚晞晟晡' - '晢晤晦晨晪晫普景晰晱晴晶晷智晾暂暄暅暇暌暑暕暖暗暝暧暨暮暲暴暵暶暹暾暿曈曌曙曛曜' - '曝曦曩曰曲曳更曷曹曼曾替最月有朋服朏朐朓朔朕朗望朝期朦木未末本札术朱朳朴朵朸机朽' - '杀杂权杄杆杈杉杌李杏材村杓杕杖杙杜杞束杠条来杧杨杩杪杭杯杰杲杳杵杷杻杼松板极构枅' - '枇枉枋枍析枕林枘枚果枝枞枢枣枥枧枨枪枫枭枯枰枲枳枵架枷枸枹柁柃柄柈柊柏某柑柒染柔' - '柖柘柙柚柜柝柞柠柢查柩柬柯柰柱柳柴柷柽柿栀栅标栈栉栊栋栌栎栏栐树栒栓栖栗栝栟校栩' - '株栲栳栴样核根栻格栽栾桀桁桂桃桄桅框案桉桊桌桎桐桑桓桔桕桠桡桢档桤桥桦桧桨桩桫桯' - '桲桴桶桷桹梁梃梅梆梌梏梓梗梠梢梣梦梧梨梭梯械梳梴梵梼梽梾梿检棁棂棉棋棍棐棒棓棕棘' - '棚棠棣棤棨棪棫棬森棰棱棵棹棺棻棼棽椀椁椅椆椋植椎椐椑椒椓椟椠椤椪椭椰椴椸椹椽椿楂' - '楒楔楗楙楚楝楞楠楣楦楩楪楫楮楯楷楸楹楼概榃榄榅榆榇榈榉榍榑榔榕榖榛榜榧榨榫榭榰榱' - '榴榷榻槁槃槊槌槎槐槔槚槛槜槟槠槭槱槲槽槿樊樗樘樟模樨横樯樱樵樽樾橄橇橐橑橘橙橛橞' - '橡橥橦橱橹橼檀檄檎檐檑檗檞檠檩檫檬櫆欂欠次欢欣欤欧欲欸欹欺欻款歃歅歆歇歉歌歙止正' - '此步武歧歪歹死歼殁殂殃殄殆殇殉殊残殍殒殓殖殚殛殡殣殪殳殴段殷殿毁毂毅毋毌母每毐毒' - '毓比毕毖毗毙毛毡毪毫毯毳毵毹毽氅氆氇氍氏氐民氓气氕氖氘氙氚氛氟氡氢氤氦氧氨氩氪氮' - '氯氰氲水永氾氿汀汁求汆汇汈汉汊汋汐汔汕汗汛汜汝汞江池污汤汧汨汩汪汫汭汰汲汴汶汹汽' - '汾沁沂沃沄沅沆沇沈沉沌沏沐沓沔沘沙沚沛沟没沣沤沥沦沧沨沩沪沫沭沮沱河沸油沺治沼沽' - '沾沿泂泃泄泅泇泉泊泌泐泓泔法泖泗泙泚泛泜泞泠泡波泣泥注泪泫泮泯泰泱泳泵泷泸泺泻泼' - '泽泾洁洄洇洈洋洌洎洑洒洓洗洘洙洚洛洞洢洣津洧洨洪洫洭洮洱洲洳洴洵洸洹洺活洼洽派洿' - '流浃浅浆浇浈浉浊测浍济浏浐浑浒浓浔浕浙浚浛浜浞浟浠浡浣浥浦浩浪浬浭浮浯浰浲浴海浸' - '浼涂涄涅消涉涌涍涎涐涑涓涔涕涘涛涝涞涟涠涡涢涣涤润涧涨涩涪涫涮涯液涴涵涸涿淀淄淅' - '淆淇淋淌淏淑淖淘淙淜淝淞淟淠淡淤淦淫淬淮淯深淳淴混淹添淼清渊渌渍渎渐渑渔渗渚渝渟' - '渠渡渣渤渥温渫渭港渰渲渴游渺渼湃湄湉湍湎湑湓湔湖湘湛湜湝湟湣湫湮湲湴湾湿溁溃溅溆' - '溇溉溍溏源溘溚溜溞溟溠溢溥溦溧溪溯溱溲溴溵溶溷溹溺溻溽滁滂滃滆滇滉滋滍滏滑滓滔滕' - '滗滘滚滞滟滠满滢滤滥滦滧滨滩滪滫滴滹漂漆漈漉漋漏漓演漕漖漠漤漦漩漪漫漭漯漱漳漴漶' - '漷漹漻漼漾潆潇潋潍潏潖潘潜潞潟潢潦潩潭潮潲潴潵潸潺潼潽潾澂澄澈澉澌澍澎澛澜澡澥澧' - '澪澭澳澴澶澹澼澽激濂濉濋濑濒濞濠濡濩濮濯瀌瀍瀑瀔瀚瀛瀣瀱瀵瀹瀼灈灌灏灞火灭灯灰灵' - '灶灸灼灾灿炀炅炆炉炊炌炎炒炔炕炖炘炙炜炝炟炣炫炬炭炮炯炱炳炷炸点炻炼炽烀烁烂烃烈' - '烊烔烘烙烛烜烝烟烠烤烦烧烨烩烫烬热烯烶烷烹烺烻烽焆焉焊焌焐焓焕焖焗焘焙焚焜焞焦焯' - '焰焱然煁煃煅煊煋煌煎煓煜煞煟煤煦照煨煮煲煳煴煸煺煽熄熇熊熏熔熘熙熛熜熟熠熥熨熬熵' - '熹熻燃燊燋燎燏燔燕燚燠燥燧燮燹爆爇爔爚爝爟爨爪爬爰爱爵父爷爸爹爻爽爿牁牂片版牌牍' - '牒牖牙牚牛牝牟牡牢牤牥牦牧物牮牯牲牵特牺牻牾牿犀犁犄犇犊犋犍犏犒犟犨犬犯犰犴状犷' - '犸犹狁狂狃狄狈狉狍狎狐狒狗狙狝狞狠狡狨狩独狭狮狯狰狱狲狳狴狷狸狺狻狼猁猃猄猇猊猎' - '猕猖猗猛猜猝猞猡猢猥猩猪猫猬献猯猰猱猴猷猹猺猾猿獍獐獒獗獠獬獭獯獴獾玃玄率玉王玎' - '玑玒玓玕玖玘玙玚玛玞玟玠玡玢玤玥玦玩玫玭玮环现玱玲玳玶玷玹玺玻玼玿珀珂珅珇珈珉珊' - '珋珌珍珏珐珑珒珕珖珙珛珝珞珠珢珣珥珦珧珩珪珫班珰珲珵珷珸珹珺珽琀球琄琅理琇琈琉琊' - '琎琏琐琔琚琛琟琡琢琤琥琦琨琪琫琬琭琮琯琰琲琳琴琵琶琼瑀瑁瑂瑃瑄瑅瑆瑑瑓瑔瑕瑖瑗瑙' - '瑚瑛瑜瑝瑞瑟瑢瑧瑨瑬瑭瑰瑱瑳瑶瑷瑾璀璁璃璆璇璈璋璎璐璒璘璜璞璟璠璥璧璨璩璪璬璮璱' - '璲璺瓀瓒瓖瓘瓜瓞瓠瓢瓣瓤瓦瓮瓯瓴瓶瓷瓻瓿甄甍甏甑甓甗甘甚甜生甡甥甦用甩甪甫甬甭甯' - '田由甲申电男甸町画甾畀畅畈畋界畎畏畔畖留畚畛畜畤略畦番畬畯畲畴畸畹畿疁疃疆疍疏疐' - '疑疔疖疗疙疚疝疟疠疡疢疣疤疥疫疬疭疮疯疰疱疲疳疴疵疸疹疼疽疾痂痃痄病症痈痉痊痍痒' - '痓痔痕痘痛痞痢痣痤痦痧痨痪痫痰痱痴痹痼痿瘀瘁瘃瘅瘆瘊瘌瘐瘕瘗瘘瘙瘛瘟瘠瘢瘤瘥瘦瘩' - '瘪瘫瘭瘰瘳瘴瘵瘸瘼瘾瘿癀癃癌癍癔癖癗癜癞癣癫癯癸登白百癿皂的皆皇皈皋皎皑皓皕皖皙' - '皛皞皤皦皭皮皱皲皴皿盂盅盆盈盉益盍盎盏盐监盒盔盖盗盘盛盟盥盦目盯盱盲直盷相盹盼盾' - '省眄眇眈眉眊看眍眙眚真眠眢眦眨眩眬眭眯眵眶眷眸眺眼着睁睃睄睇睎睐睑睚睛睡睢督睥睦' - '睨睫睬睹睽睾睿瞀瞄瞅瞋瞌瞍瞎瞑瞒瞟瞠瞢瞥瞧瞩瞪瞫瞬瞭瞰瞳瞵瞻瞽瞿矍矗矛矜矞矢矣知' - '矧矩矫矬短矮矰石矶矸矻矼矾矿砀码砂砄砆砉砌砍砑砒研砖砗砘砚砜砝砟砠砣砥砧砫砬砭砮' - '砰破砵砷砸砹砺砻砼砾础硁硅硇硊硌硍硎硐硒硔硕硖硗硙硚硝硪硫硬硭确硼硿碃碇碈碉碌碍' - '碎碏碑碓碗碘碚碛碜碟碡碣碥碧碨碰碱碲碳碴碶碹碾磁磅磉磊磋磏磐磔磕磙磜磡磨磬磲磴磷' - '磹磻礁礅礌礓礞礴礵示礼社祀祁祃祆祇祈祉祊祋祎祏祐祓祕祖祗祚祛祜祝神祟祠祢祥祧票祭' - '祯祲祷祸祺祼祾禀禁禄禅禊禋福禒禔禘禚禛禤禧禳禹禺离禽禾秀私秃秆秉秋种科秒秕秘租秣' - '秤秦秧秩秫秬秭积称秸移秽秾稀稂稃稆程稌稍税稑稔稗稙稚稞稠稣稳稷稹稻稼稽稿穄穆穑穗' - '穙穜穟穰穴究穷穸穹空穿窀突窃窄窅窈窊窍窎窑窒窕窖窗窘窜窝窟窠窣窥窦窨窬窭窳窸窿立' - '竑竖竘站竞竟章竣童竦竫竭端竹竺竽竿笃笄笆笈笊笋笏笑笔笕笙笛笞笠笤笥符笨笪笫第笮笯' - '笱笳笸笺笼笾筀筅筇等筋筌筏筐筑筒答策筘筚筛筜筝筠筢筤筥筦筮筱筲筵筶筷筹筻筼签简箅' - '箍箐箓箔箕箖算箜管箢箦箧箨箩箪箫箬箭箱箴箸篁篆篇篌篑篓篙篚篝篡篥篦篪篮篯篱篷篼篾' - '簃簇簉簋簌簏簕簖簝簟簠簧簪簰簸簿籀籁籍籥米籴类籼籽粉粑粒粕粗粘粜粝粞粟粢粤粥粪粮' - '粱粲粳粹粼粽精粿糁糅糇糈糊糌糍糒糕糖糗糙糜糟糠糨糯糵系紊素索紧紫累絜絮絷綦綮縠縢' - '縻繁繄繇纂纛纠纡红纣纤纥约级纨纩纪纫纬纭纮纯纰纱纲纳纴纵纶纷纸纹纺纻纼纽纾线绀绁' - '绂练组绅细织终绉绊绋绌绍绎经绐绑绒结绔绕绖绗绘给绚绛络绝绞统绠绡绢绣绤绥绦继绨绩' - 
'绪绫续绮绯绰绱绲绳维绵绶绷绸绹绺绻综绽绾绿缀缁缂缃缄缅缆缇缈缉缊缌缎缐缑缒缓缔缕' - '编缗缘缙缚缛缜缝缞缟缠缡缢缣缤缥缦缧缨缩缪缫缬缭缮缯缰缱缲缳缴缵缶缸缺罂罄罅罍罐' - '网罔罕罗罘罚罟罡罢罨罩罪置罱署罴罶罹罽罾羁羊羌美羑羓羔羕羖羚羝羞羟羡群羧羯羰羱羲' - '羸羹羼羽羿翀翁翂翃翅翈翊翌翎翔翕翘翙翚翛翟翠翡翥翦翩翮翯翰翱翳翷翻翼翾耀老考耄者' - '耆耇耋而耍耏耐耑耒耔耕耖耗耘耙耜耠耢耤耥耦耧耨耩耪耰耱耳耵耶耷耸耻耽耿聂聃聆聊聋' - '职聍聒联聘聚聩聪聱聿肃肄肆肇肉肋肌肓肖肘肚肛肝肟肠股肢肤肥肩肪肫肭肮肯肱育肴肷肸' - '肺肼肽肾肿胀胁胂胃胄胆胈背胍胎胖胗胙胚胛胜胝胞胠胡胣胤胥胧胨胩胪胫胬胭胯胰胱胲胳' - '胴胶胸胺胼能脂脆脉脊脍脎脏脐脑脒脓脔脖脘脚脞脟脩脬脯脱脲脶脸脾脿腆腈腊腋腌腐腑腒' - '腓腔腕腘腙腚腠腥腧腨腩腭腮腯腰腱腴腹腺腻腼腽腾腿膀膂膈膊膏膑膘膙膛膜膝膦膨膳膺膻' - '臀臂臃臆臊臌臑臜臣臧自臬臭至致臻臼臾舀舁舂舄舅舆舌舍舐舒舔舛舜舞舟舠舢舣舥航舫般' - '舭舯舰舱舲舳舴舵舶舷舸船舻舾艄艅艇艉艋艎艏艘艚艟艨艮良艰色艳艴艺艽艾艿节芃芄芈芊' - '芋芍芎芏芑芒芗芘芙芜芝芟芠芡芣芤芥芦芨芩芪芫芬芭芮芯芰花芳芴芷芸芹芼芽芾苁苄苇苈' - '苉苊苋苌苍苎苏苑苒苓苔苕苗苘苛苜苞苟苠苡苣苤若苦苧苫苯英苴苷苹苻苾茀茁茂范茄茅茆' - '茈茉茋茌茎茏茑茓茔茕茗茚茛茜茝茧茨茫茬茭茯茱茳茴茵茶茸茹茺茼茽荀荁荃荄荆荇草荏荐' - '荑荒荓荔荖荙荚荛荜荞荟荠荡荣荤荥荦荧荨荩荪荫荬荭荮药荷荸荻荼荽莅莆莉莎莒莓莘莙莛' - '莜莝莞莠莨莩莪莫莰莱莲莳莴莶获莸莹莺莼莽莿菀菁菂菅菇菉菊菌菍菏菔菖菘菜菝菟菠菡菥' - '菩菪菰菱菲菹菼菽萁萃萄萆萋萌萍萎萏萑萘萚萜萝萣萤营萦萧萨萩萱萳萸萹萼落葆葎葑葖著' - '葙葚葛葜葡董葩葫葬葭葰葱葳葴葵葶葸葺蒂蒄蒇蒈蒉蒋蒌蒎蒐蒗蒙蒜蒟蒡蒨蒯蒱蒲蒴蒸蒹蒺' - '蒻蒽蒿蓁蓂蓄蓇蓉蓊蓍蓏蓐蓑蓓蓖蓝蓟蓠蓢蓣蓥蓦蓬蓰蓼蓿蔀蔃蔈蔊蔌蔑蔓蔗蔚蔟蔡蔫蔬蔷' - '蔸蔹蔺蔻蔼蔽蕃蕈蕉蕊蕖蕗蕙蕞蕤蕨蕰蕲蕴蕹蕺蕻蕾薁薄薅薇薏薛薜薢薤薨薪薮薯薰薳薷薸' - '薹薿藁藉藏藐藓藕藜藟藠藤藦藨藩藻藿蘅蘑蘖蘘蘧蘩蘸蘼虎虏虐虑虒虓虔虚虞虢虤虫虬虮虱' - '虷虸虹虺虻虼虽虾虿蚀蚁蚂蚄蚆蚊蚋蚌蚍蚓蚕蚜蚝蚣蚤蚧蚨蚩蚪蚬蚯蚰蚱蚲蚴蚶蚺蛀蛃蛄蛆' - '蛇蛉蛊蛋蛎蛏蛐蛑蛔蛘蛙蛛蛞蛟蛤蛩蛭蛮蛰蛱蛲蛳蛴蛸蛹蛾蜀蜂蜃蜇蜈蜉蜊蜍蜎蜐蜒蜓蜕蜗' - '蜘蜚蜜蜞蜡蜢蜣蜥蜩蜮蜱蜴蜷蜻蜾蜿蝇蝈蝉蝌蝎蝓蝗蝘蝙蝠蝣蝤蝥蝮蝰蝲蝴蝶蝻蝼蝽蝾螂螃' - '螅螈螋融螗螟螠螣螨螫螬螭螯螱螳螵螺螽蟀蟆蟊蟋蟏蟑蟒蟛蟠蟥蟪蟫蟮蟹蟾蠃蠊蠋蠓蠕蠖蠡' - '蠢蠲蠹蠼血衃衄衅行衍衎衒衔街衙衠衡衢衣补表衩衫衬衮衰衲衷衽衾衿袁袂袄袅袆袈袋袍袒' - '袖袗袜袢袤袪被袭袯袱袷袼裁裂装裆裈裉裎裒裔裕裘裙裛裟裢裣裤裥裨裰裱裳裴裸裹裼裾褂' - '褊褐褒褓褕褙褚褛褟褡褥褪褫褯褰褴褶襁襄襕襚襜襞襟襦襫襻西要覃覆见观觃规觅视觇览觉' - '觊觋觌觎觏觐觑角觖觚觜觞觟解觥触觫觭觯觱觳觿言訄訇訚訾詈詟詹誉誊誓謇警譬计订讣认' - '讥讦讧讨让讪讫训议讯记讱讲讳讴讵讶讷许讹论讻讼讽设访诀证诂诃评诅识诇诈诉诊诋诌词' - '诎诏诐译诒诓诔试诖诗诘诙诚诛诜话诞诟诠诡询诣诤该详诧诨诩诫诬语诮误诰诱诲诳说诵请' - '诸诹诺读诼诽课诿谀谁谂调谄谅谆谇谈谊谋谌谍谎谏谐谑谒谓谔谕谖谗谙谚谛谜谝谞谟谠谡' - '谢谣谤谥谦谧谨谩谪谫谬谭谮谯谰谱谲谳谴谵谶谷谼谿豁豆豇豉豌豕豚象豢豨豪豫豮豳豸豹' - '豺貂貅貆貉貊貌貔貘贝贞负贡财责贤败账货质贩贪贫贬购贮贯贰贱贲贳贴贵贶贷贸费贺贻贼' - '贽贾贿赀赁赂赃资赅赆赇赈赉赊赋赌赍赎赏赐赑赒赓赔赕赖赗赘赙赚赛赜赝赞赟赠赡赢赣赤' - '赦赧赪赫赭走赳赴赵赶起趁趄超越趋趑趔趟趣趯趱足趴趵趸趺趼趾趿跂跃跄跆跋跌跎跏跐跑' - '跖跗跚跛距跞跟跣跤跨跪跬路跱跳践跶跷跸跹跺跻跽踅踉踊踌踏踒踔踝踞踟踢踣踦踩踪踬踮' - '踯踱踵踶踹踺踽蹀蹁蹂蹄蹅蹇蹈蹉蹊蹋蹐蹑蹒蹙蹚蹜蹢蹦蹩蹬蹭蹯蹰蹲蹴蹶蹼蹽蹾蹿躁躅躇' - '躏躐躔躜躞身躬躯躲躺车轧轨轩轪轫转轭轮软轰轱轲轳轴轵轶轷轸轹轺轻轼载轾轿辀辁辂较' - '辄辅辆辇辈辉辊辋辌辍辎辏辐辑辒输辔辕辖辗辘辙辚辛辜辞辟辣辨辩辫辰辱边辽达辿迁迂迄' - '迅过迈迎运近迓返迕还这进远违连迟迢迤迥迦迨迩迪迫迭迮述迳迷迸迹迺追退送适逃逄逅逆' - '选逊逋逍透逐逑递途逖逗通逛逝逞速造逡逢逦逭逮逯逴逵逶逸逻逼逾遁遂遄遆遇遍遏遐遑遒' - '道遗遘遛遢遣遥遨遭遮遴遵遹遽避邀邂邃邈邋邑邓邕邗邘邙邛邝邠邡邢那邦邨邪邬邮邯邰邱' - '邲邳邴邵邶邸邹邺邻邽邾邿郁郃郄郅郇郈郊郎郏郐郑郓郗郚郛郜郝郡郢郤郦郧部郪郫郭郯郴' - '郸都郾郿鄀鄂鄃鄄鄅鄌鄑鄗鄘鄙鄚鄜鄞鄠鄢鄣鄫鄯鄱鄹酂酃酅酆酉酊酋酌配酎酏酐酒酗酚酝' - '酞酡酢酣酤酥酦酩酪酬酮酯酰酱酲酴酵酶酷酸酹酺酽酾酿醅醇醉醋醌醍醐醑醒醚醛醢醨醪醭' - '醮醯醴醵醺醾采釉释里重野量釐金釜鉴銎銮鋆鋈錾鍪鎏鏊鏖鐾鑫钆钇针钉钊钋钌钍钎钏钐钒' - '钓钔钕钖钗钘钙钚钛钜钝钞钟钠钡钢钣钤钥钦钧钨钩钪钫钬钭钮钯钰钱钲钳钴钵钷钹钺钻钼' - '钽钾钿铀铁铂铃铄铅铆铈铉铊铋铌铍铎铏铐铑铒铕铖铗铘铙铚铛铜铝铞铟铠铡铢铣铤铥铧铨' - '铩铪铫铬铭铮铯铰铱铲铳铴铵银铷铸铹铺铻铼铽链铿销锁锂锃锄锅锆锇锈锉锊锋锌锍锎锏锐' - '锑锒锓锔锕锖锗锘错锚锛锜锝锞锟锡锢锣锤锥锦锧锨锩锪锫锬锭键锯锰锱锲锳锴锵锶锷锸锹' - '锺锻锼锽锾锿镀镁镂镃镄镅镆镇镈镉镊镋镌镍镎镏镐镑镒镓镔镕镖镗镘镚镛镜镝镞镠镡镢镣' - '镤镥镦镧镨镩镪镫镬镭镮镯镰镱镲镳镴镵镶长门闩闪闫闭问闯闰闱闲闳间闵闶闷闸闹闺闻闼' - '闽闾闿阀阁阂阃阄阅阆阇阈阉阊阋阌阍阎阏阐阑阒阔阕阖阗阘阙阚阜队阡阪阮阱防阳阴阵阶' - '阻阼阽阿陀陂附际陆陇陈陉陋陌降陎限陑陔陕陛陞陟陡院除陧陨险陪陬陲陴陵陶陷隃隅隆隈' - '隋隍随隐隔隗隘隙障隧隩隰隳隶隹隺隼隽难雀雁雄雅集雇雉雊雌雍雎雏雒雕雠雨雩雪雯雱雳' - '零雷雹雾需霁霄霅霆震霈霉霍霎霏霓霖霜霞霨霪霭霰露霸霹霾青靓靖静靛非靠靡面靥革靬靰' - '靳靴靶靸靺靼靽靿鞁鞅鞋鞍鞑鞒鞔鞘鞠鞡鞣鞧鞨鞫鞬鞭鞮鞯鞲鞳鞴韂韦韧韨韩韪韫韬韭音韵' - '韶页顶顷顸项顺须顼顽顾顿颀颁颂颃预颅领颇颈颉颊颋颌颍颎颏颐频颓颔颖颗题颙颚颛颜额' - '颞颟颠颡颢颤颥颦颧风飏飐飑飒飓飔飕飗飘飙飞食飧飨餍餐餮饔饕饥饧饨饩饪饫饬饭饮饯饰' - '饱饲饳饴饵饶饷饸饹饺饻饼饽饿馁馃馄馅馆馇馈馉馊馋馌馍馏馐馑馒馓馔馕首馗馘香馝馞馥' - '馧馨马驭驮驯驰驱驲驳驴驵驶驷驸驹驺驻驼驽驾驿骀骁骂骃骄骅骆骇骈骉骊骋验骍骎骏骐骑' - '骒骓骕骖骗骘骙骚骛骜骝骞骟骠骡骢骣骤骥骦骧骨骰骱骶骷骸骺骼髀髁髂髃髅髋髌髎髑髓高' - '髡髢髦髫髭髯髹髻髽鬃鬈鬏鬒鬓鬘鬟鬣鬯鬲鬶鬷鬻鬼魁魂魃魄魅魆魇魈魉魋魍魏魑魔鱼鱽鱾' - '鱿鲀鲁鲂鲃鲅鲆鲇鲈鲉鲊鲋鲌鲍鲎鲏鲐鲑鲒鲔鲕鲖鲗鲘鲙鲚鲛鲜鲝鲞鲟鲠鲡鲢鲣鲤鲥鲦鲧鲨' - '鲩鲪鲫鲬鲭鲮鲯鲰鲱鲲鲳鲴鲵鲷鲸鲹鲺鲻鲼鲽鲾鲿鳀鳁鳂鳃鳄鳅鳇鳈鳉鳊鳌鳍鳎鳏鳐鳑鳒鳓' - '鳔鳕鳖鳗鳘鳙鳚鳛鳜鳝鳞鳟鳠鳡鳢鳣鳤鸟鸠鸡鸢鸣鸤鸥鸦鸧鸨鸩鸪鸫鸬鸭鸮鸯鸰鸱鸲鸳鸵鸶' - '鸷鸸鸹鸺鸻鸼鸽鸾鸿鹀鹁鹂鹃鹄鹅鹆鹇鹈鹉鹊鹋鹌鹍鹎鹏鹐鹑鹒鹔鹕鹖鹗鹘鹙鹚鹛鹜鹝鹞鹟' - '鹠鹡鹢鹣鹤鹦鹧鹨鹩鹪鹫鹬鹭鹮鹯鹰鹱鹲鹳鹴鹾鹿麀麂麇麈麋麑麒麓麖麝麟麦麸麹麻麽麾黄' - '黇黉黍黎黏黑黔默黛黜黝黟黠黡黢黥黧黩黪黯黹黻黼黾鼋鼍鼎鼐鼒鼓鼗鼙鼠鼢鼩鼫鼬鼯鼱鼷' - '鼹鼻鼽鼾齁齇齉齐齑齿龀龁龂龃龄龅龆龇龈龉龊龋龌龙龚龛龟龠龢鿍鿎鿏㑇㑊㕮㘎㙍㙘㙦㛃' - '㛚㛹㟃㠇㠓㤘㥄㧐㧑㧟㫰㬊㬎㬚㭎㭕㮾㰀㳇㳘㳚㴔㵐㶲㸆㸌㺄㻬㽏㿠䁖䂮䃅䃎䅟䌹䎃䎖䏝䏡' - '䏲䐃䓖䓛䓨䓫䓬䗖䗛䗪䗴䜣䝙䢺䢼䣘䥽䦃䲟䲠䲢䴓䴔䴕䴖䴗䴘䴙䶮𠅤𠙶𠳐𡎚𡐓𣗋𣲗𣲘𣸣𤧛𤩽' - '𤫉𥔲𥕢𥖨𥻗𦈡𦒍𦙶𦝼𦭜𦰡𧿹𨐈𨙸𨚕𨟠𨭉𨱇𨱏𨱑𨱔𨺙𩽾𩾃𩾌𪟝𪣻𪤗𪨰𪨶𪩘𪾢𫄧𫄨𫄷𫄸𫇭𫌀𫍣𫍯' - '𫍲𫍽𫐄𫐐𫐓𫑡𫓧𫓯𫓶𫓹𫔍𫔎𫔶𫖮𫖯𫖳𫗧𫗴𫘜𫘝𫘦𫘧𫘨𫘪𫘬𫚕𫚖𫚭𫛭𫞩𫟅𫟦𫟹𫟼𫠆𫠊𫠜𫢸𫫇𫭟' - '𫭢𫭼𫮃𫰛𫵷𫶇𫷷𫸩𬀩𬀪𬂩𬃊𬇕𬇙𬇹𬉼𬊈𬊤𬌗𬍛𬍡𬍤𬒈𬒔𬒗𬕂𬘓𬘘𬘡𬘩𬘫𬘬𬘭𬘯𬙂𬙊𬙋𬜬𬜯𬞟' - '𬟁𬟽𬣙𬣞𬣡𬣳𬤇𬤊𬤝𬨂𬨎𬩽𬪩𬬩𬬭𬬮𬬱𬬸𬬹𬬻𬬿𬭁𬭊𬭎𬭚𬭛𬭤𬭩𬭬𬭯𬭳𬭶𬭸𬭼𬮱𬮿𬯀𬯎𬱖𬱟' - '𬳵𬳶𬳽𬳿𬴂𬴃𬴊𬶋𬶍𬶏𬶐𬶟𬶠𬶨𬶭𬶮𬷕𬸘𬸚𬸣𬸦𬸪𬹼𬺈𬺓' + "一丁七万丈三上下不与丏丐丑专且丕世丘丙业丛东丝丞丢两严丧个丫中丰串临丸丹为主丽举" + 
"乂乃久么义之乌乍乎乏乐乒乓乔乖乘乙乜九乞也习乡书乩买乱乳乸乾了予争事二亍于亏云互" + "亓五井亘亚些亟亡亢交亥亦产亨亩享京亭亮亲亳亵亶亸亹人亿什仁仂仃仄仅仆仇仉今介仍从" + "仑仓仔仕他仗付仙仝仞仟仡代令以仨仪仫们仰仲仳仵件价任份仿企伈伉伊伋伍伎伏伐休众优" + "伙会伛伞伟传伢伣伤伥伦伧伪伫伭伯估伲伴伶伸伺似伽伾佁佃但位低住佐佑体何佖佗佘余佚" + "佛作佝佞佟你佣佤佥佩佬佯佰佳佴佶佸佺佻佼佽佾使侁侂侃侄侈侉例侍侏侑侔侗侘供依侠侣" + "侥侦侧侨侩侪侬侮侯侴侵侹便促俄俅俊俍俎俏俐俑俗俘俙俚俜保俞俟信俣俦俨俩俪俫俭修俯" + "俱俳俵俶俸俺俾倌倍倏倒倓倔倕倘候倚倜倞借倡倥倦倧倨倩倪倬倭倮倴债倻值倾偁偃假偈偌" + "偎偏偓偕做停偡健偬偭偰偲偶偷偻偾偿傀傃傅傈傉傍傒傕傣傥傧储傩催傲傺傻僇僎像僔僖僚" + "僦僧僬僭僮僰僳僵僻儆儇儋儒儡儦儳儴儿兀允元兄充兆先光克免兑兔兕兖党兜兢入全八公六" + "兮兰共关兴兵其具典兹养兼兽冀冁内冈冉册再冏冒冔冕冗写军农冠冢冤冥冬冮冯冰冱冲决况" + "冶冷冻冼冽净凄准凇凉凋凌减凑凓凘凛凝几凡凤凫凭凯凰凳凶凸凹出击凼函凿刀刁刃分切刈" + "刊刍刎刑划刖列刘则刚创初删判刨利别刬刭刮到刳制刷券刹刺刻刽刿剀剁剂剃剅削剋剌前剐" + "剑剔剕剖剜剞剟剡剥剧剩剪副割剽剿劁劂劄劈劐劓力劝办功加务劢劣动助努劫劬劭励劲劳劼" + "劾势勃勇勉勋勍勐勒勔勖勘勚募勠勤勰勺勾勿匀包匆匈匍匏匐匕化北匙匜匝匠匡匣匦匪匮匹" + "区医匼匾匿十千卅升午卉半华协卑卒卓单卖南博卜卞卟占卡卢卣卤卦卧卫卬卮卯印危即却卵" + "卷卸卺卿厂厄厅历厉压厌厍厕厖厘厚厝原厢厣厥厦厨厩厮去厾县叁参叆叇又叉及友双反发叔" + "叕取受变叙叚叛叟叠口古句另叨叩只叫召叭叮可台叱史右叵叶号司叹叻叼叽吁吃各吆合吉吊" + "同名后吏吐向吒吓吕吖吗君吝吞吟吠吡吣否吧吨吩含听吭吮启吱吲吴吵吸吹吻吼吽吾呀呃呆" + "呇呈告呋呐呒呓呔呕呖呗员呙呛呜呢呣呤呦周呱呲味呵呶呷呸呻呼命咀咂咄咆咇咉咋和咍咎" + "咏咐咒咔咕咖咙咚咛咝咡咣咤咥咦咧咨咩咪咫咬咯咱咳咴咸咺咻咽咿哀品哂哃哄哆哇哈哉哌" + "响哎哏哐哑哒哓哔哕哗哙哚哝哞哟哢哥哦哧哨哩哪哭哮哱哲哳哺哼哽哿唁唆唇唉唏唐唑唔唛" + "唝唠唢唣唤唧唪唬售唯唰唱唳唵唷唼唾唿啁啃啄商啉啊啐啕啖啜啡啤啥啦啧啪啫啬啭啮啰啴" + "啵啶啷啸啻啼啾喀喁喂喃善喆喇喈喉喊喋喏喑喔喘喙喜喝喟喤喧喱喳喵喷喹喻喽喾嗄嗅嗉嗌" + "嗍嗐嗑嗒嗓嗔嗖嗜嗝嗞嗟嗡嗣嗤嗥嗦嗨嗪嗫嗬嗯嗲嗳嗵嗷嗽嗾嘀嘁嘈嘉嘌嘎嘏嘘嘚嘛嘞嘟嘡" + "嘣嘤嘧嘬嘭嘱嘲嘴嘶嘹嘻嘿噀噂噇噌噍噎噔噗噘噙噜噢噤器噩噪噫噬噱噶噻噼嚄嚅嚆嚎嚏嚓" + "嚚嚣嚭嚯嚷嚼囊囔囚四回囟因囡团囤囫园困囱围囵囷囹固国图囿圃圄圆圈圉圊圌圐圙圜土圢" + "圣在圩圪圫圬圭圮圯地圲圳圹场圻圾址坂均坉坊坋坌坍坎坏坐坑坒块坚坛坜坝坞坟坠坡坤坥" + "坦坨坩坪坫坬坭坯坰坳坷坻坼坽垂垃垄垆垈型垌垍垎垏垒垓垕垙垚垛垞垟垠垡垢垣垤垦垧垩" + "垫垭垮垯垱垲垴垵垸垺垾垿埂埃埆埇埋埌城埏埒埔埕埗埘埙埚埝域埠埤埪埫埭埯埴埵埸培基" + "埼埽堂堃堆堇堉堋堌堍堎堐堑堕堙堞堠堡堤堧堨堪堰堲堵堼堽堾塄塅塆塌塍塑塔塘塝塞塥填" + "塬塱塾墀墁境墅墈墉墐墒墓墕墘墙墚增墟墡墣墦墨墩墼壁壅壑壕壤士壬壮声壳壶壸壹处备复" + "夏夐夔夕外夙多夜够夤夥大天太夫夬夭央夯失头夷夸夹夺夼奁奂奄奇奈奉奋奎奏契奓奔奕奖" + "套奘奚奠奡奢奥奭女奴奶奸她好妁如妃妄妆妇妈妊妍妒妓妖妗妘妙妞妣妤妥妧妨妩妪妫妭妮" + "妯妲妹妻妾姆姈姊始姐姑姒姓委姗姘姚姜姝姞姣姤姥姨姬姮姱姶姹姻姽姿娀威娃娄娅娆娇娈" + "娉娌娑娓娘娜娟娠娣娥娩娱娲娴娵娶娼婀婆婉婊婌婍婕婘婚婞婠婢婤婧婪婫婳婴婵婶婷婺婻" + "婼婿媂媄媆媒媓媖媚媛媞媪媭媱媲媳媵媸媾嫁嫂嫄嫉嫌嫒嫔嫕嫖嫘嫚嫜嫠嫡嫣嫦嫩嫪嫫嫭嫱" + "嫽嬉嬖嬗嬛嬥嬬嬴嬷嬿孀孅子孑孓孔孕孖字存孙孚孛孜孝孟孢季孤孥学孩孪孬孰孱孳孵孺孽" + "宁它宄宅宇守安宋完宏宓宕宗官宙定宛宜宝实宠审客宣室宥宦宧宪宫宬宰害宴宵家宸容宽宾" + "宿寁寂寄寅密寇富寐寒寓寝寞察寡寤寥寨寮寰寸对寺寻导寿封射将尉尊小少尔尕尖尘尚尜尝" + "尢尤尥尧尨尪尬就尴尸尹尺尻尼尽尾尿局屁层屃居屈屉届屋屎屏屐屑展屙属屠屡屣履屦屯山" + "屹屺屼屾屿岁岂岈岊岌岍岐岑岔岖岗岘岙岚岛岜岞岠岢岣岨岩岫岬岭岱岳岵岷岸岽岿峁峂峃" + "峄峋峒峗峘峙峛峡峣峤峥峦峧峨峪峭峰峱峻峿崀崁崂崃崄崆崇崌崎崒崔崖崚崛崞崟崡崤崦崧" + "崩崭崮崴崶崽崾崿嵁嵅嵇嵊嵋嵌嵎嵖嵘嵚嵛嵝嵩嵫嵬嵯嵲嵴嶂嶅嶍嶒嶓嶙嶝嶟嶦嶲嶷巅巇巉" + "巍川州巡巢工左巧巨巩巫差巯己已巳巴巷巽巾币市布帅帆师希帏帐帑帔帕帖帘帙帚帛帜帝帡" + "带帧帨席帮帱帷常帻帼帽幂幄幅幌幔幕幖幛幞幡幢幪干平年并幸幺幻幼幽广庄庆庇床庋序庐" + "庑库应底庖店庙庚府庞废庠庤庥度座庭庱庳庵庶康庸庹庼庾廆廉廊廋廑廒廓廖廙廛廨廪延廷" + "建廿开弁异弃弄弆弇弈弊弋式弑弓引弗弘弛弟张弢弥弦弧弨弩弭弯弱弶弸弹强弼彀归当录彖" + "彗彘彝彟形彤彦彧彩彪彬彭彰影彳彷役彻彼往征徂径待徇很徉徊律徐徒徕得徘徙徛徜御徨循" + "徭微徵德徼徽心必忆忉忌忍忏忐忑忒忖志忘忙忝忞忠忡忤忧忪快忭忮忱忳念忸忺忻忽忾忿怀" + "态怂怃怄怅怆怊怍怎怏怒怔怕怖怙怛怜思怠怡急怦性怨怩怪怫怯怵总怼怿恁恂恃恋恍恐恒恓" + "恔恕恙恚恝恢恣恤恧恨恩恪恫恬恭息恰恳恶恸恹恺恻恼恽恿悃悄悆悈悉悌悍悒悔悖悚悛悝悟" + "悠悢患悦您悫悬悭悯悰悱悲悴悸悻悼情惆惇惊惋惎惑惔惕惘惙惚惛惜惝惟惠惦惧惨惩惫惬惭" + "惮惯惰想惴惶惹惺愀愁愃愆愈愉愍愎意愐愔愕愚感愠愣愤愦愧愫愭愿慆慈慊慌慎慑慕慝慢慥" + "慧慨慬慭慰慵慷憋憎憔憕憙憧憨憩憬憭憷憺憾懂懈懊懋懑懒懔懦懵懿戆戈戊戋戌戍戎戏成我" + "戒戕或戗战戚戛戟戡戢戣戤戥截戬戭戮戳戴户戽戾房所扁扂扃扅扆扇扈扉扊手才扎扑扒打扔" + "托扛扞扣扦执扩扪扫扬扭扮扯扰扳扶批扺扼扽找承技抃抄抉把抑抒抓抔投抖抗折抚抛抟抠抡" + "抢护报抨披抬抱抵抹抻押抽抿拂拃拄担拆拇拈拉拊拌拍拎拐拒拓拔拖拗拘拙招拜拟拢拣拤拥" + "拦拧拨择括拭拮拯拱拳拴拶拷拼拽拾拿持挂指挈按挎挑挓挖挚挛挝挞挟挠挡挣挤挥挦挨挪挫" + "振挲挹挺挽捂捃捅捆捉捋捌捍捎捏捐捕捞损捡换捣捧捩捭据捯捶捷捺捻捽掀掂掇授掉掊掌掎" + "掏掐排掖掘掞掠探掣接控推掩措掬掭掮掰掳掴掷掸掺掼掾揄揆揉揍描提插揕揖揠握揣揩揪揭" + "揳援揶揸揽揿搀搁搂搅搋搌搏搐搒搓搔搛搜搞搠搡搦搪搬搭搴携搽摁摄摅摆摇摈摊摏摒摔摘" + "摛摞摧摩摭摴摸摹摽撂撄撅撇撑撒撕撖撙撞撤撩撬播撮撰撵撷撸撺撼擀擂擅操擎擐擒擘擞擢" + "擤擦擿攀攉攒攘攥攫攮支收攸改攻攽放政故效敉敌敏救敔敕敖教敛敝敞敢散敦敩敫敬数敲整" + "敷文斋斌斐斑斓斗料斛斜斝斟斠斡斤斥斧斩斫断斯新斶方於施旁旃旄旅旆旋旌旎族旐旒旖旗" + "旞无既日旦旧旨早旬旭旮旯旰旱旴旵时旷旸旺旻旿昀昂昃昄昆昇昈昉昊昌明昏昒易昔昕昙昝" + "星映昡昣昤春昧昨昪昫昭是昱昳昴昵昶昺昼昽显晁晃晅晊晋晌晏晐晒晓晔晕晖晗晙晚晞晟晡" + "晢晤晦晨晪晫普景晰晱晴晶晷智晾暂暄暅暇暌暑暕暖暗暝暧暨暮暲暴暵暶暹暾暿曈曌曙曛曜" + "曝曦曩曰曲曳更曷曹曼曾替最月有朋服朏朐朓朔朕朗望朝期朦木未末本札术朱朳朴朵朸机朽" + "杀杂权杄杆杈杉杌李杏材村杓杕杖杙杜杞束杠条来杧杨杩杪杭杯杰杲杳杵杷杻杼松板极构枅" + "枇枉枋枍析枕林枘枚果枝枞枢枣枥枧枨枪枫枭枯枰枲枳枵架枷枸枹柁柃柄柈柊柏某柑柒染柔" + "柖柘柙柚柜柝柞柠柢查柩柬柯柰柱柳柴柷柽柿栀栅标栈栉栊栋栌栎栏栐树栒栓栖栗栝栟校栩" + "株栲栳栴样核根栻格栽栾桀桁桂桃桄桅框案桉桊桌桎桐桑桓桔桕桠桡桢档桤桥桦桧桨桩桫桯" + "桲桴桶桷桹梁梃梅梆梌梏梓梗梠梢梣梦梧梨梭梯械梳梴梵梼梽梾梿检棁棂棉棋棍棐棒棓棕棘" + "棚棠棣棤棨棪棫棬森棰棱棵棹棺棻棼棽椀椁椅椆椋植椎椐椑椒椓椟椠椤椪椭椰椴椸椹椽椿楂" + "楒楔楗楙楚楝楞楠楣楦楩楪楫楮楯楷楸楹楼概榃榄榅榆榇榈榉榍榑榔榕榖榛榜榧榨榫榭榰榱" + "榴榷榻槁槃槊槌槎槐槔槚槛槜槟槠槭槱槲槽槿樊樗樘樟模樨横樯樱樵樽樾橄橇橐橑橘橙橛橞" + "橡橥橦橱橹橼檀檄檎檐檑檗檞檠檩檫檬櫆欂欠次欢欣欤欧欲欸欹欺欻款歃歅歆歇歉歌歙止正" + "此步武歧歪歹死歼殁殂殃殄殆殇殉殊残殍殒殓殖殚殛殡殣殪殳殴段殷殿毁毂毅毋毌母每毐毒" + "毓比毕毖毗毙毛毡毪毫毯毳毵毹毽氅氆氇氍氏氐民氓气氕氖氘氙氚氛氟氡氢氤氦氧氨氩氪氮" + 
"氯氰氲水永氾氿汀汁求汆汇汈汉汊汋汐汔汕汗汛汜汝汞江池污汤汧汨汩汪汫汭汰汲汴汶汹汽" + "汾沁沂沃沄沅沆沇沈沉沌沏沐沓沔沘沙沚沛沟没沣沤沥沦沧沨沩沪沫沭沮沱河沸油沺治沼沽" + "沾沿泂泃泄泅泇泉泊泌泐泓泔法泖泗泙泚泛泜泞泠泡波泣泥注泪泫泮泯泰泱泳泵泷泸泺泻泼" + "泽泾洁洄洇洈洋洌洎洑洒洓洗洘洙洚洛洞洢洣津洧洨洪洫洭洮洱洲洳洴洵洸洹洺活洼洽派洿" + "流浃浅浆浇浈浉浊测浍济浏浐浑浒浓浔浕浙浚浛浜浞浟浠浡浣浥浦浩浪浬浭浮浯浰浲浴海浸" + "浼涂涄涅消涉涌涍涎涐涑涓涔涕涘涛涝涞涟涠涡涢涣涤润涧涨涩涪涫涮涯液涴涵涸涿淀淄淅" + "淆淇淋淌淏淑淖淘淙淜淝淞淟淠淡淤淦淫淬淮淯深淳淴混淹添淼清渊渌渍渎渐渑渔渗渚渝渟" + "渠渡渣渤渥温渫渭港渰渲渴游渺渼湃湄湉湍湎湑湓湔湖湘湛湜湝湟湣湫湮湲湴湾湿溁溃溅溆" + "溇溉溍溏源溘溚溜溞溟溠溢溥溦溧溪溯溱溲溴溵溶溷溹溺溻溽滁滂滃滆滇滉滋滍滏滑滓滔滕" + "滗滘滚滞滟滠满滢滤滥滦滧滨滩滪滫滴滹漂漆漈漉漋漏漓演漕漖漠漤漦漩漪漫漭漯漱漳漴漶" + "漷漹漻漼漾潆潇潋潍潏潖潘潜潞潟潢潦潩潭潮潲潴潵潸潺潼潽潾澂澄澈澉澌澍澎澛澜澡澥澧" + "澪澭澳澴澶澹澼澽激濂濉濋濑濒濞濠濡濩濮濯瀌瀍瀑瀔瀚瀛瀣瀱瀵瀹瀼灈灌灏灞火灭灯灰灵" + "灶灸灼灾灿炀炅炆炉炊炌炎炒炔炕炖炘炙炜炝炟炣炫炬炭炮炯炱炳炷炸点炻炼炽烀烁烂烃烈" + "烊烔烘烙烛烜烝烟烠烤烦烧烨烩烫烬热烯烶烷烹烺烻烽焆焉焊焌焐焓焕焖焗焘焙焚焜焞焦焯" + "焰焱然煁煃煅煊煋煌煎煓煜煞煟煤煦照煨煮煲煳煴煸煺煽熄熇熊熏熔熘熙熛熜熟熠熥熨熬熵" + "熹熻燃燊燋燎燏燔燕燚燠燥燧燮燹爆爇爔爚爝爟爨爪爬爰爱爵父爷爸爹爻爽爿牁牂片版牌牍" + "牒牖牙牚牛牝牟牡牢牤牥牦牧物牮牯牲牵特牺牻牾牿犀犁犄犇犊犋犍犏犒犟犨犬犯犰犴状犷" + "犸犹狁狂狃狄狈狉狍狎狐狒狗狙狝狞狠狡狨狩独狭狮狯狰狱狲狳狴狷狸狺狻狼猁猃猄猇猊猎" + "猕猖猗猛猜猝猞猡猢猥猩猪猫猬献猯猰猱猴猷猹猺猾猿獍獐獒獗獠獬獭獯獴獾玃玄率玉王玎" + "玑玒玓玕玖玘玙玚玛玞玟玠玡玢玤玥玦玩玫玭玮环现玱玲玳玶玷玹玺玻玼玿珀珂珅珇珈珉珊" + "珋珌珍珏珐珑珒珕珖珙珛珝珞珠珢珣珥珦珧珩珪珫班珰珲珵珷珸珹珺珽琀球琄琅理琇琈琉琊" + "琎琏琐琔琚琛琟琡琢琤琥琦琨琪琫琬琭琮琯琰琲琳琴琵琶琼瑀瑁瑂瑃瑄瑅瑆瑑瑓瑔瑕瑖瑗瑙" + "瑚瑛瑜瑝瑞瑟瑢瑧瑨瑬瑭瑰瑱瑳瑶瑷瑾璀璁璃璆璇璈璋璎璐璒璘璜璞璟璠璥璧璨璩璪璬璮璱" + "璲璺瓀瓒瓖瓘瓜瓞瓠瓢瓣瓤瓦瓮瓯瓴瓶瓷瓻瓿甄甍甏甑甓甗甘甚甜生甡甥甦用甩甪甫甬甭甯" + "田由甲申电男甸町画甾畀畅畈畋界畎畏畔畖留畚畛畜畤略畦番畬畯畲畴畸畹畿疁疃疆疍疏疐" + "疑疔疖疗疙疚疝疟疠疡疢疣疤疥疫疬疭疮疯疰疱疲疳疴疵疸疹疼疽疾痂痃痄病症痈痉痊痍痒" + "痓痔痕痘痛痞痢痣痤痦痧痨痪痫痰痱痴痹痼痿瘀瘁瘃瘅瘆瘊瘌瘐瘕瘗瘘瘙瘛瘟瘠瘢瘤瘥瘦瘩" + "瘪瘫瘭瘰瘳瘴瘵瘸瘼瘾瘿癀癃癌癍癔癖癗癜癞癣癫癯癸登白百癿皂的皆皇皈皋皎皑皓皕皖皙" + "皛皞皤皦皭皮皱皲皴皿盂盅盆盈盉益盍盎盏盐监盒盔盖盗盘盛盟盥盦目盯盱盲直盷相盹盼盾" + "省眄眇眈眉眊看眍眙眚真眠眢眦眨眩眬眭眯眵眶眷眸眺眼着睁睃睄睇睎睐睑睚睛睡睢督睥睦" + "睨睫睬睹睽睾睿瞀瞄瞅瞋瞌瞍瞎瞑瞒瞟瞠瞢瞥瞧瞩瞪瞫瞬瞭瞰瞳瞵瞻瞽瞿矍矗矛矜矞矢矣知" + "矧矩矫矬短矮矰石矶矸矻矼矾矿砀码砂砄砆砉砌砍砑砒研砖砗砘砚砜砝砟砠砣砥砧砫砬砭砮" + "砰破砵砷砸砹砺砻砼砾础硁硅硇硊硌硍硎硐硒硔硕硖硗硙硚硝硪硫硬硭确硼硿碃碇碈碉碌碍" + "碎碏碑碓碗碘碚碛碜碟碡碣碥碧碨碰碱碲碳碴碶碹碾磁磅磉磊磋磏磐磔磕磙磜磡磨磬磲磴磷" + "磹磻礁礅礌礓礞礴礵示礼社祀祁祃祆祇祈祉祊祋祎祏祐祓祕祖祗祚祛祜祝神祟祠祢祥祧票祭" + "祯祲祷祸祺祼祾禀禁禄禅禊禋福禒禔禘禚禛禤禧禳禹禺离禽禾秀私秃秆秉秋种科秒秕秘租秣" + "秤秦秧秩秫秬秭积称秸移秽秾稀稂稃稆程稌稍税稑稔稗稙稚稞稠稣稳稷稹稻稼稽稿穄穆穑穗" + "穙穜穟穰穴究穷穸穹空穿窀突窃窄窅窈窊窍窎窑窒窕窖窗窘窜窝窟窠窣窥窦窨窬窭窳窸窿立" + "竑竖竘站竞竟章竣童竦竫竭端竹竺竽竿笃笄笆笈笊笋笏笑笔笕笙笛笞笠笤笥符笨笪笫第笮笯" + "笱笳笸笺笼笾筀筅筇等筋筌筏筐筑筒答策筘筚筛筜筝筠筢筤筥筦筮筱筲筵筶筷筹筻筼签简箅" + "箍箐箓箔箕箖算箜管箢箦箧箨箩箪箫箬箭箱箴箸篁篆篇篌篑篓篙篚篝篡篥篦篪篮篯篱篷篼篾" + "簃簇簉簋簌簏簕簖簝簟簠簧簪簰簸簿籀籁籍籥米籴类籼籽粉粑粒粕粗粘粜粝粞粟粢粤粥粪粮" + "粱粲粳粹粼粽精粿糁糅糇糈糊糌糍糒糕糖糗糙糜糟糠糨糯糵系紊素索紧紫累絜絮絷綦綮縠縢" + "縻繁繄繇纂纛纠纡红纣纤纥约级纨纩纪纫纬纭纮纯纰纱纲纳纴纵纶纷纸纹纺纻纼纽纾线绀绁" + "绂练组绅细织终绉绊绋绌绍绎经绐绑绒结绔绕绖绗绘给绚绛络绝绞统绠绡绢绣绤绥绦继绨绩" + "绪绫续绮绯绰绱绲绳维绵绶绷绸绹绺绻综绽绾绿缀缁缂缃缄缅缆缇缈缉缊缌缎缐缑缒缓缔缕" + "编缗缘缙缚缛缜缝缞缟缠缡缢缣缤缥缦缧缨缩缪缫缬缭缮缯缰缱缲缳缴缵缶缸缺罂罄罅罍罐" + "网罔罕罗罘罚罟罡罢罨罩罪置罱署罴罶罹罽罾羁羊羌美羑羓羔羕羖羚羝羞羟羡群羧羯羰羱羲" + "羸羹羼羽羿翀翁翂翃翅翈翊翌翎翔翕翘翙翚翛翟翠翡翥翦翩翮翯翰翱翳翷翻翼翾耀老考耄者" + "耆耇耋而耍耏耐耑耒耔耕耖耗耘耙耜耠耢耤耥耦耧耨耩耪耰耱耳耵耶耷耸耻耽耿聂聃聆聊聋" + "职聍聒联聘聚聩聪聱聿肃肄肆肇肉肋肌肓肖肘肚肛肝肟肠股肢肤肥肩肪肫肭肮肯肱育肴肷肸" + "肺肼肽肾肿胀胁胂胃胄胆胈背胍胎胖胗胙胚胛胜胝胞胠胡胣胤胥胧胨胩胪胫胬胭胯胰胱胲胳" + "胴胶胸胺胼能脂脆脉脊脍脎脏脐脑脒脓脔脖脘脚脞脟脩脬脯脱脲脶脸脾脿腆腈腊腋腌腐腑腒" + "腓腔腕腘腙腚腠腥腧腨腩腭腮腯腰腱腴腹腺腻腼腽腾腿膀膂膈膊膏膑膘膙膛膜膝膦膨膳膺膻" + "臀臂臃臆臊臌臑臜臣臧自臬臭至致臻臼臾舀舁舂舄舅舆舌舍舐舒舔舛舜舞舟舠舢舣舥航舫般" + "舭舯舰舱舲舳舴舵舶舷舸船舻舾艄艅艇艉艋艎艏艘艚艟艨艮良艰色艳艴艺艽艾艿节芃芄芈芊" + "芋芍芎芏芑芒芗芘芙芜芝芟芠芡芣芤芥芦芨芩芪芫芬芭芮芯芰花芳芴芷芸芹芼芽芾苁苄苇苈" + "苉苊苋苌苍苎苏苑苒苓苔苕苗苘苛苜苞苟苠苡苣苤若苦苧苫苯英苴苷苹苻苾茀茁茂范茄茅茆" + "茈茉茋茌茎茏茑茓茔茕茗茚茛茜茝茧茨茫茬茭茯茱茳茴茵茶茸茹茺茼茽荀荁荃荄荆荇草荏荐" + "荑荒荓荔荖荙荚荛荜荞荟荠荡荣荤荥荦荧荨荩荪荫荬荭荮药荷荸荻荼荽莅莆莉莎莒莓莘莙莛" + "莜莝莞莠莨莩莪莫莰莱莲莳莴莶获莸莹莺莼莽莿菀菁菂菅菇菉菊菌菍菏菔菖菘菜菝菟菠菡菥" + "菩菪菰菱菲菹菼菽萁萃萄萆萋萌萍萎萏萑萘萚萜萝萣萤营萦萧萨萩萱萳萸萹萼落葆葎葑葖著" + "葙葚葛葜葡董葩葫葬葭葰葱葳葴葵葶葸葺蒂蒄蒇蒈蒉蒋蒌蒎蒐蒗蒙蒜蒟蒡蒨蒯蒱蒲蒴蒸蒹蒺" + "蒻蒽蒿蓁蓂蓄蓇蓉蓊蓍蓏蓐蓑蓓蓖蓝蓟蓠蓢蓣蓥蓦蓬蓰蓼蓿蔀蔃蔈蔊蔌蔑蔓蔗蔚蔟蔡蔫蔬蔷" + "蔸蔹蔺蔻蔼蔽蕃蕈蕉蕊蕖蕗蕙蕞蕤蕨蕰蕲蕴蕹蕺蕻蕾薁薄薅薇薏薛薜薢薤薨薪薮薯薰薳薷薸" + "薹薿藁藉藏藐藓藕藜藟藠藤藦藨藩藻藿蘅蘑蘖蘘蘧蘩蘸蘼虎虏虐虑虒虓虔虚虞虢虤虫虬虮虱" + "虷虸虹虺虻虼虽虾虿蚀蚁蚂蚄蚆蚊蚋蚌蚍蚓蚕蚜蚝蚣蚤蚧蚨蚩蚪蚬蚯蚰蚱蚲蚴蚶蚺蛀蛃蛄蛆" + "蛇蛉蛊蛋蛎蛏蛐蛑蛔蛘蛙蛛蛞蛟蛤蛩蛭蛮蛰蛱蛲蛳蛴蛸蛹蛾蜀蜂蜃蜇蜈蜉蜊蜍蜎蜐蜒蜓蜕蜗" + "蜘蜚蜜蜞蜡蜢蜣蜥蜩蜮蜱蜴蜷蜻蜾蜿蝇蝈蝉蝌蝎蝓蝗蝘蝙蝠蝣蝤蝥蝮蝰蝲蝴蝶蝻蝼蝽蝾螂螃" + "螅螈螋融螗螟螠螣螨螫螬螭螯螱螳螵螺螽蟀蟆蟊蟋蟏蟑蟒蟛蟠蟥蟪蟫蟮蟹蟾蠃蠊蠋蠓蠕蠖蠡" + "蠢蠲蠹蠼血衃衄衅行衍衎衒衔街衙衠衡衢衣补表衩衫衬衮衰衲衷衽衾衿袁袂袄袅袆袈袋袍袒" + "袖袗袜袢袤袪被袭袯袱袷袼裁裂装裆裈裉裎裒裔裕裘裙裛裟裢裣裤裥裨裰裱裳裴裸裹裼裾褂" + "褊褐褒褓褕褙褚褛褟褡褥褪褫褯褰褴褶襁襄襕襚襜襞襟襦襫襻西要覃覆见观觃规觅视觇览觉" + "觊觋觌觎觏觐觑角觖觚觜觞觟解觥触觫觭觯觱觳觿言訄訇訚訾詈詟詹誉誊誓謇警譬计订讣认" + "讥讦讧讨让讪讫训议讯记讱讲讳讴讵讶讷许讹论讻讼讽设访诀证诂诃评诅识诇诈诉诊诋诌词" + "诎诏诐译诒诓诔试诖诗诘诙诚诛诜话诞诟诠诡询诣诤该详诧诨诩诫诬语诮误诰诱诲诳说诵请" + "诸诹诺读诼诽课诿谀谁谂调谄谅谆谇谈谊谋谌谍谎谏谐谑谒谓谔谕谖谗谙谚谛谜谝谞谟谠谡" + "谢谣谤谥谦谧谨谩谪谫谬谭谮谯谰谱谲谳谴谵谶谷谼谿豁豆豇豉豌豕豚象豢豨豪豫豮豳豸豹" + "豺貂貅貆貉貊貌貔貘贝贞负贡财责贤败账货质贩贪贫贬购贮贯贰贱贲贳贴贵贶贷贸费贺贻贼" + 
"贽贾贿赀赁赂赃资赅赆赇赈赉赊赋赌赍赎赏赐赑赒赓赔赕赖赗赘赙赚赛赜赝赞赟赠赡赢赣赤" + "赦赧赪赫赭走赳赴赵赶起趁趄超越趋趑趔趟趣趯趱足趴趵趸趺趼趾趿跂跃跄跆跋跌跎跏跐跑" + "跖跗跚跛距跞跟跣跤跨跪跬路跱跳践跶跷跸跹跺跻跽踅踉踊踌踏踒踔踝踞踟踢踣踦踩踪踬踮" + "踯踱踵踶踹踺踽蹀蹁蹂蹄蹅蹇蹈蹉蹊蹋蹐蹑蹒蹙蹚蹜蹢蹦蹩蹬蹭蹯蹰蹲蹴蹶蹼蹽蹾蹿躁躅躇" + "躏躐躔躜躞身躬躯躲躺车轧轨轩轪轫转轭轮软轰轱轲轳轴轵轶轷轸轹轺轻轼载轾轿辀辁辂较" + "辄辅辆辇辈辉辊辋辌辍辎辏辐辑辒输辔辕辖辗辘辙辚辛辜辞辟辣辨辩辫辰辱边辽达辿迁迂迄" + "迅过迈迎运近迓返迕还这进远违连迟迢迤迥迦迨迩迪迫迭迮述迳迷迸迹迺追退送适逃逄逅逆" + "选逊逋逍透逐逑递途逖逗通逛逝逞速造逡逢逦逭逮逯逴逵逶逸逻逼逾遁遂遄遆遇遍遏遐遑遒" + "道遗遘遛遢遣遥遨遭遮遴遵遹遽避邀邂邃邈邋邑邓邕邗邘邙邛邝邠邡邢那邦邨邪邬邮邯邰邱" + "邲邳邴邵邶邸邹邺邻邽邾邿郁郃郄郅郇郈郊郎郏郐郑郓郗郚郛郜郝郡郢郤郦郧部郪郫郭郯郴" + "郸都郾郿鄀鄂鄃鄄鄅鄌鄑鄗鄘鄙鄚鄜鄞鄠鄢鄣鄫鄯鄱鄹酂酃酅酆酉酊酋酌配酎酏酐酒酗酚酝" + "酞酡酢酣酤酥酦酩酪酬酮酯酰酱酲酴酵酶酷酸酹酺酽酾酿醅醇醉醋醌醍醐醑醒醚醛醢醨醪醭" + "醮醯醴醵醺醾采釉释里重野量釐金釜鉴銎銮鋆鋈錾鍪鎏鏊鏖鐾鑫钆钇针钉钊钋钌钍钎钏钐钒" + "钓钔钕钖钗钘钙钚钛钜钝钞钟钠钡钢钣钤钥钦钧钨钩钪钫钬钭钮钯钰钱钲钳钴钵钷钹钺钻钼" + "钽钾钿铀铁铂铃铄铅铆铈铉铊铋铌铍铎铏铐铑铒铕铖铗铘铙铚铛铜铝铞铟铠铡铢铣铤铥铧铨" + "铩铪铫铬铭铮铯铰铱铲铳铴铵银铷铸铹铺铻铼铽链铿销锁锂锃锄锅锆锇锈锉锊锋锌锍锎锏锐" + "锑锒锓锔锕锖锗锘错锚锛锜锝锞锟锡锢锣锤锥锦锧锨锩锪锫锬锭键锯锰锱锲锳锴锵锶锷锸锹" + "锺锻锼锽锾锿镀镁镂镃镄镅镆镇镈镉镊镋镌镍镎镏镐镑镒镓镔镕镖镗镘镚镛镜镝镞镠镡镢镣" + "镤镥镦镧镨镩镪镫镬镭镮镯镰镱镲镳镴镵镶长门闩闪闫闭问闯闰闱闲闳间闵闶闷闸闹闺闻闼" + "闽闾闿阀阁阂阃阄阅阆阇阈阉阊阋阌阍阎阏阐阑阒阔阕阖阗阘阙阚阜队阡阪阮阱防阳阴阵阶" + "阻阼阽阿陀陂附际陆陇陈陉陋陌降陎限陑陔陕陛陞陟陡院除陧陨险陪陬陲陴陵陶陷隃隅隆隈" + "隋隍随隐隔隗隘隙障隧隩隰隳隶隹隺隼隽难雀雁雄雅集雇雉雊雌雍雎雏雒雕雠雨雩雪雯雱雳" + "零雷雹雾需霁霄霅霆震霈霉霍霎霏霓霖霜霞霨霪霭霰露霸霹霾青靓靖静靛非靠靡面靥革靬靰" + "靳靴靶靸靺靼靽靿鞁鞅鞋鞍鞑鞒鞔鞘鞠鞡鞣鞧鞨鞫鞬鞭鞮鞯鞲鞳鞴韂韦韧韨韩韪韫韬韭音韵" + "韶页顶顷顸项顺须顼顽顾顿颀颁颂颃预颅领颇颈颉颊颋颌颍颎颏颐频颓颔颖颗题颙颚颛颜额" + "颞颟颠颡颢颤颥颦颧风飏飐飑飒飓飔飕飗飘飙飞食飧飨餍餐餮饔饕饥饧饨饩饪饫饬饭饮饯饰" + "饱饲饳饴饵饶饷饸饹饺饻饼饽饿馁馃馄馅馆馇馈馉馊馋馌馍馏馐馑馒馓馔馕首馗馘香馝馞馥" + "馧馨马驭驮驯驰驱驲驳驴驵驶驷驸驹驺驻驼驽驾驿骀骁骂骃骄骅骆骇骈骉骊骋验骍骎骏骐骑" + "骒骓骕骖骗骘骙骚骛骜骝骞骟骠骡骢骣骤骥骦骧骨骰骱骶骷骸骺骼髀髁髂髃髅髋髌髎髑髓高" + "髡髢髦髫髭髯髹髻髽鬃鬈鬏鬒鬓鬘鬟鬣鬯鬲鬶鬷鬻鬼魁魂魃魄魅魆魇魈魉魋魍魏魑魔鱼鱽鱾" + "鱿鲀鲁鲂鲃鲅鲆鲇鲈鲉鲊鲋鲌鲍鲎鲏鲐鲑鲒鲔鲕鲖鲗鲘鲙鲚鲛鲜鲝鲞鲟鲠鲡鲢鲣鲤鲥鲦鲧鲨" + "鲩鲪鲫鲬鲭鲮鲯鲰鲱鲲鲳鲴鲵鲷鲸鲹鲺鲻鲼鲽鲾鲿鳀鳁鳂鳃鳄鳅鳇鳈鳉鳊鳌鳍鳎鳏鳐鳑鳒鳓" + "鳔鳕鳖鳗鳘鳙鳚鳛鳜鳝鳞鳟鳠鳡鳢鳣鳤鸟鸠鸡鸢鸣鸤鸥鸦鸧鸨鸩鸪鸫鸬鸭鸮鸯鸰鸱鸲鸳鸵鸶" + "鸷鸸鸹鸺鸻鸼鸽鸾鸿鹀鹁鹂鹃鹄鹅鹆鹇鹈鹉鹊鹋鹌鹍鹎鹏鹐鹑鹒鹔鹕鹖鹗鹘鹙鹚鹛鹜鹝鹞鹟" + "鹠鹡鹢鹣鹤鹦鹧鹨鹩鹪鹫鹬鹭鹮鹯鹰鹱鹲鹳鹴鹾鹿麀麂麇麈麋麑麒麓麖麝麟麦麸麹麻麽麾黄" + "黇黉黍黎黏黑黔默黛黜黝黟黠黡黢黥黧黩黪黯黹黻黼黾鼋鼍鼎鼐鼒鼓鼗鼙鼠鼢鼩鼫鼬鼯鼱鼷" + "鼹鼻鼽鼾齁齇齉齐齑齿龀龁龂龃龄龅龆龇龈龉龊龋龌龙龚龛龟龠龢鿍鿎鿏㑇㑊㕮㘎㙍㙘㙦㛃" + "㛚㛹㟃㠇㠓㤘㥄㧐㧑㧟㫰㬊㬎㬚㭎㭕㮾㰀㳇㳘㳚㴔㵐㶲㸆㸌㺄㻬㽏㿠䁖䂮䃅䃎䅟䌹䎃䎖䏝䏡" + "䏲䐃䓖䓛䓨䓫䓬䗖䗛䗪䗴䜣䝙䢺䢼䣘䥽䦃䲟䲠䲢䴓䴔䴕䴖䴗䴘䴙䶮𠅤𠙶𠳐𡎚𡐓𣗋𣲗𣲘𣸣𤧛𤩽" + "𤫉𥔲𥕢𥖨𥻗𦈡𦒍𦙶𦝼𦭜𦰡𧿹𨐈𨙸𨚕𨟠𨭉𨱇𨱏𨱑𨱔𨺙𩽾𩾃𩾌𪟝𪣻𪤗𪨰𪨶𪩘𪾢𫄧𫄨𫄷𫄸𫇭𫌀𫍣𫍯" + "𫍲𫍽𫐄𫐐𫐓𫑡𫓧𫓯𫓶𫓹𫔍𫔎𫔶𫖮𫖯𫖳𫗧𫗴𫘜𫘝𫘦𫘧𫘨𫘪𫘬𫚕𫚖𫚭𫛭𫞩𫟅𫟦𫟹𫟼𫠆𫠊𫠜𫢸𫫇𫭟" + "𫭢𫭼𫮃𫰛𫵷𫶇𫷷𫸩𬀩𬀪𬂩𬃊𬇕𬇙𬇹𬉼𬊈𬊤𬌗𬍛𬍡𬍤𬒈𬒔𬒗𬕂𬘓𬘘𬘡𬘩𬘫𬘬𬘭𬘯𬙂𬙊𬙋𬜬𬜯𬞟" + "𬟁𬟽𬣙𬣞𬣡𬣳𬤇𬤊𬤝𬨂𬨎𬩽𬪩𬬩𬬭𬬮𬬱𬬸𬬹𬬻𬬿𬭁𬭊𬭎𬭚𬭛𬭤𬭩𬭬𬭯𬭳𬭶𬭸𬭼𬮱𬮿𬯀𬯎𬱖𬱟" + "𬳵𬳶𬳽𬳿𬴂𬴃𬴊𬶋𬶍𬶏𬶐𬶟𬶠𬶨𬶭𬶮𬷕𬸘𬸚𬸣𬸦𬸪𬹼𬺈𬺓" ) -CN_CHARS_EXT = '吶诶屌囧飚屄' +CN_CHARS_EXT = "吶诶屌囧飚屄" CN_CHARS = CN_CHARS_COMMON + CN_CHARS_EXT -IN_CH_CHARS = { c : True for c in CN_CHARS } +IN_CH_CHARS = {c: True for c in CN_CHARS} EN_CHARS = string.ascii_letters + string.digits -IN_EN_CHARS = { c : True for c in EN_CHARS } +IN_EN_CHARS = {c: True for c in EN_CHARS} + +VALID_CHARS = CN_CHARS + EN_CHARS + " " +IN_VALID_CHARS = {c: True for c in VALID_CHARS} -VALID_CHARS = CN_CHARS + EN_CHARS + ' ' -IN_VALID_CHARS = { c : True for c in VALID_CHARS } # ================================================================================ # # basic class @@ -398,7 +402,7 @@ class ChineseChar(object): def __init__(self, simplified, traditional): self.simplified = simplified self.traditional = traditional - #self.__repr__ = self.__str__ + # self.__repr__ = self.__str__ def __str__(self): return self.simplified or self.traditional or None @@ -421,26 +425,28 @@ class ChineseNumberUnit(ChineseChar): self.big_t = big_t def __str__(self): - return '10^{}'.format(self.power) + return "10^{}".format(self.power) @classmethod def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False): - if small_unit: - return ChineseNumberUnit(power=index + 1, - simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1]) + return ChineseNumberUnit( + power=index + 1, simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1] + ) elif numbering_type == NUMBERING_TYPES[0]: - return ChineseNumberUnit(power=index + 8, - simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) + 
return ChineseNumberUnit( + power=index + 8, simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1] + ) elif numbering_type == NUMBERING_TYPES[1]: - return ChineseNumberUnit(power=(index + 2) * 4, - simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) + return ChineseNumberUnit( + power=(index + 2) * 4, simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1] + ) elif numbering_type == NUMBERING_TYPES[2]: - return ChineseNumberUnit(power=pow(2, index + 3), - simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) + return ChineseNumberUnit( + power=pow(2, index + 3), simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1] + ) else: - raise ValueError( - 'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type)) + raise ValueError("Counting type should be in {0} ({1} provided).".format(NUMBERING_TYPES, numbering_type)) class ChineseNumberDigit(ChineseChar): @@ -484,6 +490,7 @@ class NumberSystem(object): """ 中文数字系统 """ + pass @@ -532,28 +539,22 @@ def create_system(numbering_type=NUMBERING_TYPES[1]): """ # chinese number units of '亿' and larger - all_larger_units = zip( - LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL) - larger_units = [CNU.create(i, v, numbering_type, False) - for i, v in enumerate(all_larger_units)] + all_larger_units = zip(LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL) + larger_units = [CNU.create(i, v, numbering_type, False) for i, v in enumerate(all_larger_units)] # chinese number units of '十, 百, 千, 万' - all_smaller_units = zip( - SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL) - smaller_units = [CNU.create(i, v, small_unit=True) - for i, v in enumerate(all_smaller_units)] + all_smaller_units = zip(SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL) + smaller_units = [CNU.create(i, v, small_unit=True) for i, v in enumerate(all_smaller_units)] # digis - chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS, - BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL) + chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS, BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL) digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)] digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT digits[1].alt_s, digits[1].alt_t = ONE_ALT, ONE_ALT digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1] # symbols - positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x) - negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x) - point_cn = CM(POINT[0], POINT[1], '.', lambda x, - y: float(str(x) + '.' + str(y))) + positive_cn = CM(POSITIVE[0], POSITIVE[1], "+", lambda x: x) + negative_cn = CM(NEGATIVE[0], NEGATIVE[1], "-", lambda x: -x) + point_cn = CM(POINT[0], POINT[1], ".", lambda x, y: float(str(x) + "." 
+ str(y))) # sil_cn = CM(SIL[0], SIL[1], '-', lambda x, y: float(str(x) + '-' + str(y))) system = NumberSystem() system.units = smaller_units + larger_units @@ -564,7 +565,6 @@ def create_system(numbering_type=NUMBERING_TYPES[1]): def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]): - def get_symbol(char, system): for u in system.units: if char in [u.traditional, u.simplified, u.big_s, u.big_t]: @@ -577,13 +577,12 @@ def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]): return m def string2symbols(chinese_string, system): - int_string, dec_string = chinese_string, '' + int_string, dec_string = chinese_string, "" for p in [system.math.point.simplified, system.math.point.traditional]: if p in chinese_string: int_string, dec_string = chinese_string.split(p) break - return [get_symbol(c, system) for c in int_string], \ - [get_symbol(c, system) for c in dec_string] + return [get_symbol(c, system) for c in int_string], [get_symbol(c, system) for c in dec_string] def correct_symbols(integer_symbols, system): """ @@ -597,8 +596,7 @@ def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]): if len(integer_symbols) > 1: if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU): - integer_symbols.append( - CNU(integer_symbols[-2].power - 1, None, None, None, None)) + integer_symbols.append(CNU(integer_symbols[-2].power - 1, None, None, None, None)) result = [] unit_count = 0 @@ -615,8 +613,7 @@ def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]): elif unit_count > 1: for i in range(len(result)): if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power: - result[-i - 1] = CNU(result[-i - 1].power + - current_unit.power, None, None, None, None) + result[-i - 1] = CNU(result[-i - 1].power + current_unit.power, None, None, None, None) return result def compute_value(integer_symbols): @@ -633,8 +630,7 @@ def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]): elif isinstance(s, CNU): value[-1] *= pow(10, s.power) if s.power > last_power: - value[:-1] = list(map(lambda v: v * - pow(10, s.power), value[:-1])) + value[:-1] = list(map(lambda v: v * pow(10, s.power), value[:-1])) last_power = s.power value.append(0) return sum(value) @@ -643,20 +639,26 @@ def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]): int_part, dec_part = string2symbols(chinese_string, system) int_part = correct_symbols(int_part, system) int_str = str(compute_value(int_part)) - dec_str = ''.join([str(d.value) for d in dec_part]) + dec_str = "".join([str(d.value) for d in dec_part]) if dec_part: - return '{0}.{1}'.format(int_str, dec_str) + return "{0}.{1}".format(int_str, dec_str) else: return int_str -def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False, - traditional=False, alt_zero=False, alt_one=False, alt_two=True, - use_zeros=True, use_units=True): - +def num2chn( + number_string, + numbering_type=NUMBERING_TYPES[1], + big=False, + traditional=False, + alt_zero=False, + alt_one=False, + alt_two=True, + use_zeros=True, + use_units=True, +): def get_value(value_string, use_zeros=True): - - striped_string = value_string.lstrip('0') + striped_string = value_string.lstrip("0") # record nothing if all zeros if not striped_string: @@ -671,14 +673,13 @@ def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False, # recursively record multiple digits else: - result_unit = next(u for u in reversed( - system.units) if u.power < len(striped_string)) - result_string = value_string[:-result_unit.power] - return 
get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:]) + result_unit = next(u for u in reversed(system.units) if u.power < len(striped_string)) + result_string = value_string[: -result_unit.power] + return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power :]) system = create_system(numbering_type) - int_dec = number_string.split('.') + int_dec = number_string.split(".") if len(int_dec) == 1: int_string = int_dec[0] dec_string = "" @@ -686,8 +687,7 @@ def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False, int_string = int_dec[0] dec_string = int_dec[1] else: - raise ValueError( - "invalid input num string with more than one dot: {}".format(number_string)) + raise ValueError("invalid input num string with more than one dot: {}".format(number_string)) if use_units and len(int_string) > 1: result_symbols = get_value(int_string) @@ -698,12 +698,10 @@ def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False, result_symbols += [system.math.point] + dec_symbols if alt_two: - liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t, - system.digits[2].big_s, system.digits[2].big_t) + liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t, system.digits[2].big_s, system.digits[2].big_t) for i, v in enumerate(result_symbols): if isinstance(v, CND) and v.value == 2: - next_symbol = result_symbols[i + - 1] if i < len(result_symbols) - 1 else None + next_symbol = result_symbols[i + 1] if i < len(result_symbols) - 1 else None previous_symbol = result_symbols[i - 1] if i > 0 else None if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))): if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)): @@ -711,38 +709,38 @@ def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False, # if big is True, '两' will not be used and `alt_two` has no impact on output if big: - attr_name = 'big_' + attr_name = "big_" if traditional: - attr_name += 't' + attr_name += "t" else: - attr_name += 's' + attr_name += "s" else: if traditional: - attr_name = 'traditional' + attr_name = "traditional" else: - attr_name = 'simplified' + attr_name = "simplified" - result = ''.join([getattr(s, attr_name) for s in result_symbols]) + result = "".join([getattr(s, attr_name) for s in result_symbols]) # if not use_zeros: # result = result.strip(getattr(system.digits[0], attr_name)) if alt_zero: - result = result.replace( - getattr(system.digits[0], attr_name), system.digits[0].alt_s) + result = result.replace(getattr(system.digits[0], attr_name), system.digits[0].alt_s) if alt_one: - result = result.replace( - getattr(system.digits[1], attr_name), system.digits[1].alt_s) + result = result.replace(getattr(system.digits[1], attr_name), system.digits[1].alt_s) for i, p in enumerate(POINT): if result.startswith(p): return CHINESE_DIGIS[0] + result # ^10, 11, .., 19 - if len(result) >= 2 and result[1] in [SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0], - SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] and \ - result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]]: + if ( + len(result) >= 2 + and result[1] in [SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0], SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] + and result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]] + ): result = result[1:] return result @@ -766,6 +764,7 @@ class Cardinal: def cardinal2chntext(self): return 
num2chn(self.cardinal) + class Digit: """ DIGIT类 @@ -800,19 +799,14 @@ class TelePhone: # return self.telephone def telephone2chntext(self, fixed=False): - if fixed: - sil_parts = self.telephone.split('-') - self.raw_chntext = ''.join([ - num2chn(part, alt_two=False, use_units=False) for part in sil_parts - ]) - self.chntext = self.raw_chntext.replace('', '') + sil_parts = self.telephone.split("-") + self.raw_chntext = "".join([num2chn(part, alt_two=False, use_units=False) for part in sil_parts]) + self.chntext = self.raw_chntext.replace("", "") else: - sp_parts = self.telephone.strip('+').split() - self.raw_chntext = ''.join([ - num2chn(part, alt_two=False, use_units=False) for part in sp_parts - ]) - self.chntext = self.raw_chntext.replace('', '') + sp_parts = self.telephone.strip("+").split() + self.raw_chntext = "".join([num2chn(part, alt_two=False, use_units=False) for part in sp_parts]) + self.chntext = self.raw_chntext.replace("", "") return self.chntext @@ -826,12 +820,12 @@ class Fraction: self.chntext = chntext def chntext2fraction(self): - denominator, numerator = self.chntext.split('分之') - return chn2num(numerator) + '/' + chn2num(denominator) + denominator, numerator = self.chntext.split("分之") + return chn2num(numerator) + "/" + chn2num(denominator) def fraction2chntext(self): - numerator, denominator = self.fraction.split('/') - return num2chn(denominator) + '分之' + num2chn(numerator) + numerator, denominator = self.fraction.split("/") + return num2chn(denominator) + "分之" + num2chn(numerator) class Date: @@ -870,23 +864,23 @@ class Date: def date2chntext(self): date = self.date try: - year, other = date.strip().split('年', 1) - year = Digit(digit=year).digit2chntext() + '年' + year, other = date.strip().split("年", 1) + year = Digit(digit=year).digit2chntext() + "年" except ValueError: other = date - year = '' + year = "" if other: try: - month, day = other.strip().split('月', 1) - month = Cardinal(cardinal=month).cardinal2chntext() + '月' + month, day = other.strip().split("月", 1) + month = Cardinal(cardinal=month).cardinal2chntext() + "月" except ValueError: day = date - month = '' + month = "" if day: day = Cardinal(cardinal=day[:-1]).cardinal2chntext() + day[-1] else: - month = '' - day = '' + month = "" + day = "" chntext = year + month + day self.chntext = chntext return self.chntext @@ -906,7 +900,7 @@ class Money: def money2chntext(self): money = self.money - pattern = re.compile(r'(\d+(\.\d+)?)') + pattern = re.compile(r"(\d+(\.\d+)?)") matchers = pattern.findall(money) if matchers: for matcher in matchers: @@ -925,20 +919,20 @@ class Percentage: self.chntext = chntext def chntext2percentage(self): - return chn2num(self.chntext.strip().strip('百分之')) + '%' + return chn2num(self.chntext.strip().strip("百分之")) + "%" def percentage2chntext(self): - return '百分之' + num2chn(self.percentage.strip().strip('%')) + return "百分之" + num2chn(self.percentage.strip().strip("%")) def normalize_nsw(raw_text): - text = '^' + raw_text + '$' + text = "^" + raw_text + "$" # 规范化日期 pattern = re.compile(r"\D+((([089]\d|(19|20)\d{2})年)?(\d{1,2}月(\d{1,2}[日号])?)?)") matchers = pattern.findall(text) if matchers: - #print('date') + # print('date') for matcher in matchers: text = text.replace(matcher[0], Date(date=matcher[0]).date2chntext(), 1) @@ -946,7 +940,7 @@ def normalize_nsw(raw_text): pattern = re.compile(r"\D+((\d+(\.\d+)?)[多余几]?" 
+ CURRENCY_UNITS + r"(\d" + CURRENCY_UNITS + r"?)?)") matchers = pattern.findall(text) if matchers: - #print('money') + # print('money') for matcher in matchers: text = text.replace(matcher[0], Money(money=matcher[0]).money2chntext(), 1) @@ -959,7 +953,7 @@ def normalize_nsw(raw_text): pattern = re.compile(r"\D((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})\D") matchers = pattern.findall(text) if matchers: - #print('telephone') + # print('telephone') for matcher in matchers: text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(), 1) # 固话 @@ -974,16 +968,16 @@ def normalize_nsw(raw_text): pattern = re.compile(r"(\d+/\d+)") matchers = pattern.findall(text) if matchers: - #print('fraction') + # print('fraction') for matcher in matchers: text = text.replace(matcher, Fraction(fraction=matcher).fraction2chntext(), 1) # 规范化百分数 - text = text.replace('%', '%') + text = text.replace("%", "%") pattern = re.compile(r"(\d+(\.\d+)?%)") matchers = pattern.findall(text) if matchers: - #print('percentage') + # print('percentage') for matcher in matchers: text = text.replace(matcher[0], Percentage(percentage=matcher[0]).percentage2chntext(), 1) @@ -991,7 +985,7 @@ def normalize_nsw(raw_text): pattern = re.compile(r"(\d+(\.\d+)?)[多余几]?" + COM_QUANTIFIERS) matchers = pattern.findall(text) if matchers: - #print('cardinal+quantifier') + # print('cardinal+quantifier') for matcher in matchers: text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1) @@ -999,7 +993,7 @@ def normalize_nsw(raw_text): pattern = re.compile(r"(\d{4,32})") matchers = pattern.findall(text) if matchers: - #print('digit') + # print('digit') for matcher in matchers: text = text.replace(matcher, Digit(digit=matcher).digit2chntext(), 1) @@ -1007,20 +1001,19 @@ def normalize_nsw(raw_text): pattern = re.compile(r"(\d+(\.\d+)?)") matchers = pattern.findall(text) if matchers: - #print('cardinal') + # print('cardinal') for matcher in matchers: text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1) - # restore P2P, O2O, B2C, B2B etc pattern = re.compile(r"(([a-zA-Z]+)二([a-zA-Z]+))") matchers = pattern.findall(text) if matchers: # print('particular') for matcher in matchers: - text = text.replace(matcher[0], matcher[1]+'2'+matcher[2], 1) + text = text.replace(matcher[0], matcher[1] + "2" + matcher[2], 1) - return text.lstrip('^').rstrip('$') + return text.lstrip("^").rstrip("$") def remove_erhua(text): @@ -1029,9 +1022,9 @@ def remove_erhua(text): 他女儿在那边儿 -> 他女儿在那边 """ - new_str='' - while re.search('儿',text): - a = re.search('儿',text).span() + new_str = "" + while re.search("儿", text): + a = re.search("儿", text).span() remove_er_flag = 0 if ER_WHITELIST_PATTERN.search(text): @@ -1039,12 +1032,12 @@ def remove_erhua(text): if b[0] <= a[0]: remove_er_flag = 1 - if remove_er_flag == 0 : - new_str = new_str + text[0:a[0]] - text = text[a[1]:] + if remove_er_flag == 0: + new_str = new_str + text[0 : a[0]] + text = text[a[1] :] else: - new_str = new_str + text[0:b[1]] - text = text[b[1]:] + new_str = new_str + text[0 : b[1]] + text = text[b[1] :] text = new_str + text return text @@ -1053,25 +1046,26 @@ def remove_erhua(text): def remove_space(text): tokens = text.split() new = [] - for k,t in enumerate(tokens): + for k, t in enumerate(tokens): if k != 0: - if IN_EN_CHARS.get(tokens[k-1][-1]) and IN_EN_CHARS.get(t[0]): - new.append(' ') + if IN_EN_CHARS.get(tokens[k - 1][-1]) and IN_EN_CHARS.get(t[0]): + new.append(" ") new.append(t) - return ''.join(new) + return 
"".join(new) class TextNorm: - def __init__(self, - to_banjiao:bool = False, - to_upper:bool = False, - to_lower:bool = False, - remove_fillers:bool = False, - remove_erhua:bool = False, - check_chars:bool = False, - remove_space:bool = False, - cc_mode:str = '', - ) : + def __init__( + self, + to_banjiao: bool = False, + to_upper: bool = False, + to_lower: bool = False, + remove_fillers: bool = False, + remove_erhua: bool = False, + check_chars: bool = False, + remove_space: bool = False, + cc_mode: str = "", + ): self.to_banjiao = to_banjiao self.to_upper = to_upper self.to_lower = to_lower @@ -1083,6 +1077,7 @@ class TextNorm: self.cc = None if cc_mode: from opencc import OpenCC # Open Chinese Convert: pip install opencc + self.cc = OpenCC(cc_mode) def __call__(self, text): @@ -1100,7 +1095,7 @@ class TextNorm: if self.remove_fillers: for c in FILLER_CHARS: - text = text.replace(c, '') + text = text.replace(c, "") if self.remove_erhua: text = remove_erhua(text) @@ -1112,8 +1107,8 @@ class TextNorm: if self.check_chars: for c in text: if not IN_VALID_CHARS.get(c): - print(f'WARNING: illegal char {c} in: {text}', file=sys.stderr) - return '' + print(f"WARNING: illegal char {c} in: {text}", file=sys.stderr) + return "" if self.remove_space: text = remove_space(text) @@ -1121,79 +1116,81 @@ class TextNorm: return text -if __name__ == '__main__': +if __name__ == "__main__": p = argparse.ArgumentParser() # normalizer options - p.add_argument('--to_banjiao', action='store_true', help='convert quanjiao chars to banjiao') - p.add_argument('--to_upper', action='store_true', help='convert to upper case') - p.add_argument('--to_lower', action='store_true', help='convert to lower case') - p.add_argument('--remove_fillers', action='store_true', help='remove filler chars such as "呃, 啊"') - p.add_argument('--remove_erhua', action='store_true', help='remove erhua chars such as "他女儿在那边儿 -> 他女儿在那边"') - p.add_argument('--check_chars', action='store_true' , help='skip sentences containing illegal chars') - p.add_argument('--remove_space', action='store_true' , help='remove whitespace') - p.add_argument('--cc_mode', choices=['', 't2s', 's2t'], default='', help='convert between traditional to simplified') + p.add_argument("--to_banjiao", action="store_true", help="convert quanjiao chars to banjiao") + p.add_argument("--to_upper", action="store_true", help="convert to upper case") + p.add_argument("--to_lower", action="store_true", help="convert to lower case") + p.add_argument("--remove_fillers", action="store_true", help='remove filler chars such as "呃, 啊"') + p.add_argument("--remove_erhua", action="store_true", help='remove erhua chars such as "他女儿在那边儿 -> 他女儿在那边"') + p.add_argument("--check_chars", action="store_true", help="skip sentences containing illegal chars") + p.add_argument("--remove_space", action="store_true", help="remove whitespace") + p.add_argument( + "--cc_mode", choices=["", "t2s", "s2t"], default="", help="convert between traditional to simplified" + ) # I/O options - p.add_argument('--log_interval', type=int, default=10000, help='log interval in number of processed lines') - p.add_argument('--has_key', action='store_true', help="will be deprecated, set --format ark instead") - p.add_argument('--format', type=str, choices=['txt', 'ark', 'tsv'], default='txt', help='input format') - p.add_argument('ifile', help='input filename, assume utf-8 encoding') - p.add_argument('ofile', help='output filename') + p.add_argument("--log_interval", type=int, default=10000, help="log interval in number 
of processed lines") + p.add_argument("--has_key", action="store_true", help="will be deprecated, set --format ark instead") + p.add_argument("--format", type=str, choices=["txt", "ark", "tsv"], default="txt", help="input format") + p.add_argument("ifile", help="input filename, assume utf-8 encoding") + p.add_argument("ofile", help="output filename") args = p.parse_args() if args.has_key: - args.format = 'ark' + args.format = "ark" normalizer = TextNorm( - to_banjiao = args.to_banjiao, - to_upper = args.to_upper, - to_lower = args.to_lower, - remove_fillers = args.remove_fillers, - remove_erhua = args.remove_erhua, - check_chars = args.check_chars, - remove_space = args.remove_space, - cc_mode = args.cc_mode, + to_banjiao=args.to_banjiao, + to_upper=args.to_upper, + to_lower=args.to_lower, + remove_fillers=args.remove_fillers, + remove_erhua=args.remove_erhua, + check_chars=args.check_chars, + remove_space=args.remove_space, + cc_mode=args.cc_mode, ) normalizer = TextNorm( - to_banjiao = args.to_banjiao, - to_upper = args.to_upper, - to_lower = args.to_lower, - remove_fillers = args.remove_fillers, - remove_erhua = args.remove_erhua, - check_chars = args.check_chars, - remove_space = args.remove_space, - cc_mode = args.cc_mode, + to_banjiao=args.to_banjiao, + to_upper=args.to_upper, + to_lower=args.to_lower, + remove_fillers=args.remove_fillers, + remove_erhua=args.remove_erhua, + check_chars=args.check_chars, + remove_space=args.remove_space, + cc_mode=args.cc_mode, ) ndone = 0 - with open(args.ifile, 'r', encoding = 'utf8') as istream, open(args.ofile, 'w+', encoding = 'utf8') as ostream: - if args.format == 'tsv': - reader = csv.DictReader(istream, delimiter = '\t') - assert('TEXT' in reader.fieldnames) - print('\t'.join(reader.fieldnames), file=ostream) + with open(args.ifile, "r", encoding="utf8") as istream, open(args.ofile, "w+", encoding="utf8") as ostream: + if args.format == "tsv": + reader = csv.DictReader(istream, delimiter="\t") + assert "TEXT" in reader.fieldnames + print("\t".join(reader.fieldnames), file=ostream) for item in reader: - text = item['TEXT'] + text = item["TEXT"] if text: text = normalizer(text) if text: - item['TEXT'] = text - print('\t'.join([ item[f] for f in reader.fieldnames ]), file = ostream) + item["TEXT"] = text + print("\t".join([item[f] for f in reader.fieldnames]), file=ostream) ndone += 1 if ndone % args.log_interval == 0: - print(f'text norm: {ndone} lines done.', file = sys.stderr, flush = True) + print(f"text norm: {ndone} lines done.", file=sys.stderr, flush=True) else: for l in istream: - key, text = '', '' - if args.format == 'ark': # KALDI archive, line format: "key text" + key, text = "", "" + if args.format == "ark": # KALDI archive, line format: "key text" cols = l.strip().split(maxsplit=1) - key, text = cols[0], cols[1] if len(cols) == 2 else '' + key, text = cols[0], cols[1] if len(cols) == 2 else "" else: text = l.strip() @@ -1201,12 +1198,12 @@ if __name__ == '__main__': text = normalizer(text) if text: - if args.format == 'ark': - print(key + '\t' + text, file = ostream) + if args.format == "ark": + print(key + "\t" + text, file=ostream) else: - print(text, file = ostream) + print(text, file=ostream) ndone += 1 if ndone % args.log_interval == 0: - print(f'text norm: {ndone} lines done.', file = sys.stderr, flush = True) - print(f'text norm: {ndone} lines done in total.', file = sys.stderr, flush = True) + print(f"text norm: {ndone} lines done.", file=sys.stderr, flush=True) + print(f"text norm: {ndone} lines done in total.", 
file=sys.stderr, flush=True) diff --git a/TTS/tts/models/base_tacotron.py b/TTS/tts/models/base_tacotron.py index f38dace2..4aaf5261 100644 --- a/TTS/tts/models/base_tacotron.py +++ b/TTS/tts/models/base_tacotron.py @@ -252,12 +252,7 @@ class BaseTacotron(BaseTTS): def compute_capacitron_VAE_embedding(self, inputs, reference_mel_info, text_info=None, speaker_embedding=None): """Capacitron Variational Autoencoder""" - ( - VAE_outputs, - posterior_distribution, - prior_distribution, - capacitron_beta, - ) = self.capacitron_vae_layer( + (VAE_outputs, posterior_distribution, prior_distribution, capacitron_beta,) = self.capacitron_vae_layer( reference_mel_info, text_info, speaker_embedding, # pylint: disable=not-callable diff --git a/TTS/tts/models/tortoise.py b/TTS/tts/models/tortoise.py index 16644ff9..c8cfcfdd 100644 --- a/TTS/tts/models/tortoise.py +++ b/TTS/tts/models/tortoise.py @@ -676,12 +676,7 @@ class Tortoise(BaseTTS): ), "Too much text provided. Break the text up into separate segments and re-try inference." if voice_samples is not None: - ( - auto_conditioning, - diffusion_conditioning, - _, - _, - ) = self.get_conditioning_latents( + (auto_conditioning, diffusion_conditioning, _, _,) = self.get_conditioning_latents( voice_samples, return_mels=True, latent_averaging_mode=latent_averaging_mode, diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index c0532b36..58f8542b 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -23,7 +23,19 @@ init_stream_support() def wav_to_mel_cloning( - wav, mel_norms_file="../experiments/clips_mel_norms.pth", mel_norms=None, device=torch.device("cpu") + wav, + mel_norms_file="../experiments/clips_mel_norms.pth", + mel_norms=None, + device=torch.device("cpu"), + n_fft=4096, + hop_length=1024, + win_length=4096, + power=2, + normalized=False, + sample_rate=22050, + f_min=0, + f_max=8000, + n_mels=80, ): """ Convert waveform to mel-spectrogram with hard-coded parameters for cloning. @@ -38,15 +50,15 @@ def wav_to_mel_cloning( torch.Tensor: Mel-spectrogram tensor. """ mel_stft = torchaudio.transforms.MelSpectrogram( - n_fft=4096, - hop_length=1024, - win_length=4096, - power=2, - normalized=False, - sample_rate=22050, - f_min=0, - f_max=8000, - n_mels=80, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + power=power, + normalized=normalized, + sample_rate=sample_rate, + f_min=f_min, + f_max=f_max, + n_mels=n_mels, norm="slaney", ).to(device) wav = wav.to(device) @@ -177,19 +189,23 @@ class XttsArgs(Coqpit): clvp_checkpoint (str, optional): The checkpoint for the ConditionalLatentVariablePerseq model. Defaults to None. decoder_checkpoint (str, optional): The checkpoint for the DiffTTS model. Defaults to None. num_chars (int, optional): The maximum number of characters to generate. Defaults to 255. - use_hifigan (bool, optional): Whether to use hifigan or diffusion + univnet as a decoder. Defaults to True. + use_hifigan (bool, optional): Whether to use hifigan with implicit enhancement or diffusion + univnet as a decoder. Defaults to True. + use_ne_hifigan (bool, optional): Whether to use regular hifigan or diffusion + univnet as a decoder. Defaults to False. For GPT model: - ar_max_audio_tokens (int, optional): The maximum mel tokens for the autoregressive model. Defaults to 604. - ar_max_text_tokens (int, optional): The maximum text tokens for the autoregressive model. Defaults to 402. - ar_max_prompt_tokens (int, optional): The maximum prompt tokens or the autoregressive model. Defaults to 70. 
- ar_layers (int, optional): The number of layers for the autoregressive model. Defaults to 30. - ar_n_model_channels (int, optional): The model dimension for the autoregressive model. Defaults to 1024. - ar_n_heads (int, optional): The number of heads for the autoregressive model. Defaults to 16. - ar_number_text_tokens (int, optional): The number of text tokens for the autoregressive model. Defaults to 255. - ar_start_text_token (int, optional): The start text token for the autoregressive model. Defaults to 255. + gpt_max_audio_tokens (int, optional): The maximum mel tokens for the autoregressive model. Defaults to 604. + gpt_max_text_tokens (int, optional): The maximum text tokens for the autoregressive model. Defaults to 402. + gpt_max_prompt_tokens (int, optional): The maximum prompt tokens or the autoregressive model. Defaults to 70. + gpt_layers (int, optional): The number of layers for the autoregressive model. Defaults to 30. + gpt_n_model_channels (int, optional): The model dimension for the autoregressive model. Defaults to 1024. + gpt_n_heads (int, optional): The number of heads for the autoregressive model. Defaults to 16. + gpt_number_text_tokens (int, optional): The number of text tokens for the autoregressive model. Defaults to 255. + gpt_start_text_token (int, optional): The start text token for the autoregressive model. Defaults to 255. gpt_checkpointing (bool, optional): Whether to use checkpointing for the autoregressive model. Defaults to False. - ar_train_solo_embeddings (bool, optional): Whether to train embeddings for the autoregressive model. Defaults to False. + gpt_train_solo_embeddings (bool, optional): Whether to train embeddings for the autoregressive model. Defaults to False. + gpt_code_stride_len (int, optional): The hop_size of dvae and consequently of the gpt output. Defaults to 1024. + gpt_use_masking_gt_prompt_approach (bool, optional): If True, it will use ground truth as prompt and it will mask the loss to avoid repetition. Defaults to True. + gpt_use_perceiver_resampler (bool, optional): If True, it will use perceiver resampler from flamingo paper - https://arxiv.org/abs/2204.14198. Defaults to False. For DiffTTS model: diff_model_channels (int, optional): The number of channels for the DiffTTS model. Defaults to 1024. 
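The renamed `gpt_*` arguments documented above are wired into the dataclass in the next hunk. As a quick orientation, here is a minimal sketch of overriding them when building an XTTS config; the field names come from this patch, the values shown are just the documented defaults, and it is assumed that `XttsConfig.model_args` accepts an `XttsArgs` instance like the other Coqui config classes.

```python
# Minimal sketch (not part of the patch): overriding the renamed GPT arguments.
from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import XttsArgs

model_args = XttsArgs(
    gpt_layers=30,                            # formerly ar_layers
    gpt_n_model_channels=1024,                # formerly ar_n_model_channels
    gpt_code_stride_len=1024,                 # dvae hop size, reused in later hunks as the HiFi-GAN mel length compression
    gpt_use_masking_gt_prompt_approach=True,  # use the ground truth as prompt and mask it in the loss
    gpt_use_perceiver_resampler=False,        # enable the Flamingo-style perceiver resampler
)
config = XttsConfig(model_args=model_args)
```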
@@ -229,6 +245,9 @@ class XttsArgs(Coqpit): gpt_num_audio_tokens: int = 8194 gpt_start_audio_token: int = 8192 gpt_stop_audio_token: int = 8193 + gpt_code_stride_len: int = 1024 + gpt_use_masking_gt_prompt_approach: bool = True + gpt_use_perceiver_resampler: bool = False # Diffusion Decoder params diff_model_channels: int = 1024 @@ -247,7 +266,6 @@ class XttsArgs(Coqpit): input_sample_rate: int = 22050 output_sample_rate: int = 24000 output_hop_length: int = 256 - ar_mel_length_compression: int = 1024 decoder_input_dim: int = 1024 d_vector_dim: int = 512 cond_d_vector_in_each_upsampling_layer: bool = True @@ -304,6 +322,8 @@ class Xtts(BaseTTS): num_audio_tokens=self.args.gpt_num_audio_tokens, start_audio_token=self.args.gpt_start_audio_token, stop_audio_token=self.args.gpt_stop_audio_token, + use_perceiver_resampler=self.args.gpt_use_perceiver_resampler, + code_stride_len=self.args.gpt_code_stride_len, ) if self.args.use_hifigan: @@ -311,7 +331,7 @@ class Xtts(BaseTTS): input_sample_rate=self.args.input_sample_rate, output_sample_rate=self.args.output_sample_rate, output_hop_length=self.args.output_hop_length, - ar_mel_length_compression=self.args.ar_mel_length_compression, + ar_mel_length_compression=self.args.gpt_code_stride_len, decoder_input_dim=self.args.decoder_input_dim, d_vector_dim=self.args.d_vector_dim, cond_d_vector_in_each_upsampling_layer=self.args.cond_d_vector_in_each_upsampling_layer, @@ -322,7 +342,7 @@ class Xtts(BaseTTS): input_sample_rate=self.args.input_sample_rate, output_sample_rate=self.args.output_sample_rate, output_hop_length=self.args.output_hop_length, - ar_mel_length_compression=self.args.ar_mel_length_compression, + ar_mel_length_compression=self.args.gpt_code_stride_len, decoder_input_dim=self.args.decoder_input_dim, d_vector_dim=self.args.d_vector_dim, cond_d_vector_in_each_upsampling_layer=self.args.cond_d_vector_in_each_upsampling_layer, @@ -354,12 +374,33 @@ class Xtts(BaseTTS): Args: audio_path (str): Path to the audio file. + sr (int): Sample rate of the audio. length (int): Length of the audio in seconds. Defaults to 3. 
""" - - audio_22k = torchaudio.functional.resample(audio, sr, 22050) - audio_22k = audio_22k[:, : 22050 * length] - mel = wav_to_mel_cloning(audio_22k, mel_norms=self.mel_stats.cpu()) + if sr != 22050: + audio = torchaudio.functional.resample(audio, sr, 22050) + audio = audio[:, : 22050 * length] + if self.args.gpt_use_perceiver_resampler: + n_fft = 2048 + hop_length = 256 + win_length = 1024 + else: + n_fft = 4096 + hop_length = 1024 + win_length = 4096 + mel = wav_to_mel_cloning( + audio, + mel_norms=self.mel_stats.cpu(), + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + power=2, + normalized=False, + sample_rate=22050, + f_min=0, + f_max=8000, + n_mels=80, + ) cond_latent = self.gpt.get_style_emb(mel.to(self.device)) return cond_latent.transpose(1, 2) @@ -461,6 +502,9 @@ class Xtts(BaseTTS): "diffusion_temperature": config.diffusion_temperature, "decoder_iterations": config.decoder_iterations, "decoder_sampler": config.decoder_sampler, + "gpt_cond_len": config.gpt_cond_len, + "max_ref_len": config.max_ref_len, + "sound_norm_refs": config.sound_norm_refs, } settings.update(kwargs) # allow overriding of preset settings with kwargs return self.full_inference(text, ref_audio_path, language, **settings) @@ -477,8 +521,11 @@ class Xtts(BaseTTS): repetition_penalty=2.0, top_k=50, top_p=0.85, - gpt_cond_len=6, do_sample=True, + # Cloning + gpt_cond_len=6, + max_ref_len=10, + sound_norm_refs=False, # Decoder inference decoder_iterations=100, cond_free=True, @@ -546,8 +593,12 @@ class Xtts(BaseTTS): Sample rate is 24kHz. """ (gpt_cond_latent, diffusion_conditioning, speaker_embedding) = self.get_conditioning_latents( - audio_path=ref_audio_path, gpt_cond_len=gpt_cond_len + audio_path=ref_audio_path, + gpt_cond_len=gpt_cond_len, + max_ref_length=max_ref_len, + sound_norm_refs=sound_norm_refs, ) + return self.inference( text, language, @@ -591,11 +642,16 @@ class Xtts(BaseTTS): diffusion_temperature=1.0, decoder_sampler="ddim", decoder="hifigan", + num_beams=1, **hf_generate_kwargs, ): text = text.strip().lower() text_tokens = torch.IntTensor(self.tokenizer.encode(text, lang=language)).unsqueeze(0).to(self.device) + # print(" > Input text: ", text) + # print(" > Input text preprocessed: ",self.tokenizer.preprocess_text(text, language)) + # print(" > Input tokens: ", text_tokens) + # print(" > Decoded text: ", self.tokenizer.decode(text_tokens[0].cpu().numpy())) assert ( text_tokens.shape[-1] < self.args.gpt_max_text_tokens ), " ❗ XTTS can only generate text with a maximum of 400 tokens." 
@@ -618,6 +674,7 @@ class Xtts(BaseTTS): top_k=top_k, temperature=temperature, num_return_sequences=self.gpt_batch_size, + num_beams=num_beams, length_penalty=length_penalty, repetition_penalty=repetition_penalty, output_attentions=False, @@ -671,7 +728,12 @@ class Xtts(BaseTTS): ) wav = self.vocoder.inference(mel) - return {"wav": wav.cpu().numpy().squeeze()} + return { + "wav": wav.cpu().numpy().squeeze(), + "gpt_latents": gpt_latents, + "speaker_embedding": speaker_embedding, + "diffusion_conditioning": diffusion_conditioning, + } def handle_chunks(self, wav_gen, wav_gen_prev, wav_overlap, overlap_len): """Handle chunk formatting in streaming mode""" diff --git a/TTS/utils/manage.py b/TTS/utils/manage.py index eef987ef..c732e1f5 100644 --- a/TTS/utils/manage.py +++ b/TTS/utils/manage.py @@ -392,7 +392,7 @@ class ModelManager(object): self.create_dir_and_download_model(model_name, model_item, output_path) # if the configs are different, redownload it # ToDo: we need a better way to handle it - if "xtts_v1" in model_name: + if "xtts" in model_name: try: self.check_if_configs_are_equal(model_name, model_item, output_path) except: @@ -406,7 +406,7 @@ class ModelManager(object): output_model_path = output_path output_config_path = None if ( - model not in ["tortoise-v2", "bark", "xtts_v1", "xtts_v1.1"] and "fairseq" not in model_name + model not in ["tortoise-v2", "bark"] and "fairseq" not in model_name and "xtts" not in model_name ): # TODO:This is stupid but don't care for now. output_model_path, output_config_path = self._find_files(output_path) # update paths in the config.json diff --git a/docs/source/models/xtts.md b/docs/source/models/xtts.md index 09373b4a..1d034aea 100644 --- a/docs/source/models/xtts.md +++ b/docs/source/models/xtts.md @@ -7,17 +7,25 @@ This is the same model that powers [Coqui Studio](https://coqui.ai/), and [Coqui a few tricks to make it faster and support streaming inference. ### Features -- Voice cloning with just a 3-second audio clip. +- Voice cloning. - Cross-language voice cloning. - Multi-lingual speech generation. - 24khz sampling rate. +- Streaming inference with < 200ms latency. (See [Streaming inference](#streaming-inference)) +- Fine-tuning support. (See [Training](#training)) + +### Updates with v2 +- Improved voice cloning. +- Voices can be cloned with a single audio file or multiple audio files, without any effect on the runtime. +- 2 new languages: Hungarian and Korean. +- Across the board quality improvements. ### Code Current implementation only supports inference. ### Languages -As of now, XTTS-v1.1 supports 14 languages: English, Spanish, French, German, Italian, Portuguese, -Polish, Turkish, Russian, Dutch, Czech, Arabic, Chinese (Simplified) and Japanese. +As of now, XTTS-v2 supports 16 languages: English, Spanish, French, German, Italian, Portuguese, +Polish, Turkish, Russian, Dutch, Czech, Arabic, Chinese (Simplified), Japanese, Hungarian, Korean Stay tuned as we continue to add support for more languages. If you have any language requests, please feel free to reach out. @@ -33,7 +41,7 @@ You can also mail us at info@coqui.ai. 
```python from TTS.api import TTS -tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1.1", gpu=True) +tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=True) # generate speech by cloning a voice using default settings tts.tts_to_file(text="It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", @@ -45,7 +53,7 @@ tts.tts_to_file(text="It took me quite a long time to develop a voice, and now t #### 🐸TTS Command line ```console - tts --model_name tts_models/multilingual/multi-dataset/xtts_v1.1 \ + tts --model_name tts_models/multilingual/multi-dataset/xtts_v2 \ --text "Bugün okula gitmek istemiyorum." \ --speaker_wav /path/to/target/speaker.wav \ --language_idx tr \ @@ -73,7 +81,7 @@ config.load_json("/path/to/xtts/config.json") model = Xtts.init_from_config(config) model.load_checkpoint(config, checkpoint_dir="/path/to/xtts/", use_deepspeed=True) model.cuda() - + print("Computing speaker latents...") gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path="reference.wav") @@ -122,7 +130,7 @@ chunks = model.inference_stream( gpt_cond_latent, speaker_embedding ) - + wav_chuncks = [] for i, chunk in enumerate(chunks): if i == 0: @@ -136,7 +144,7 @@ torchaudio.save("xtts_streaming.wav", wav.squeeze().unsqueeze(0).cpu(), 24000) ### Training -A recipe for `XTTS_v1.1` GPT encoder training using `LJSpeech` dataset is available at https://github.com/coqui-ai/TTS/tree/dev/recipes/ljspeech/xtts_v1/train_gpt_xtts.py +A recipe for `XTTS_v2` GPT encoder training using `LJSpeech` dataset is available at https://github.com/coqui-ai/TTS/tree/dev/recipes/ljspeech/xtts_v1/train_gpt_xtts.py You need to change the fields of the `BaseDatasetConfig` to match your dataset and then update `GPTArgs` and `GPTTrainerConfig` fields as you need. By default, it will use the same parameters that XTTS v1.1 model was trained with. To speed up the model convergence, as default, it will also download the XTTS v1.1 checkpoint and load it. 
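For the dataset step mentioned above, a minimal sketch of a custom `BaseDatasetConfig` is shown below; the dataset name and paths are placeholders, and the complete `GPTArgs`/`GPTTrainerConfig` setup can be copied from the recipe added later in this patch.

```python
# Hedged sketch (not the shipped recipe); point the placeholders at your own data.
from TTS.config.shared_configs import BaseDatasetConfig

config_dataset = BaseDatasetConfig(
    formatter="ljspeech",            # or any other formatter supported by load_tts_samples
    dataset_name="my_dataset",       # placeholder
    path="/data/my_dataset/",        # placeholder: dataset root
    meta_file_train="metadata.csv",  # placeholder: transcript file
    language="en",
)
DATASETS_CONFIG_LIST = [config_dataset]
```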
@@ -152,7 +160,7 @@ from TTS.tts.models.xtts import Xtts # Add here the xtts_config path CONFIG_PATH = "recipes/ljspeech/xtts_v1/run/training/GPT_XTTS_LJSpeech_FT-October-23-2023_10+36AM-653f2e75/config.json" # Add here the vocab file that you have used to train the model -TOKENIZER_PATH = "recipes/ljspeech/xtts_v1/run/training/XTTS_v1.1_original_model_files/vocab.json" +TOKENIZER_PATH = "recipes/ljspeech/xtts_v1/run/training/XTTS_v2_original_model_files/vocab.json" # Add here the checkpoint that you want to do inference with XTTS_CHECKPOINT = "recipes/ljspeech/xtts_v1/run/training/GPT_XTTS_LJSpeech_FT/best_model.pth" # Add here the speaker reference @@ -184,13 +192,14 @@ torchaudio.save(OUTPUT_WAV_PATH, torch.tensor(out["wav"]).unsqueeze(0), 24000) ``` -## Important resources & papers +## References and Acknowledgements - VallE: https://arxiv.org/abs/2301.02111 - Tortoise Repo: https://github.com/neonbjb/tortoise-tts - Faster implementation: https://github.com/152334H/tortoise-tts-fast - Univnet: https://arxiv.org/abs/2106.07889 - Latent Diffusion:https://arxiv.org/abs/2112.10752 - DALL-E: https://arxiv.org/abs/2102.12092 +- Perceiver: https://arxiv.org/abs/2103.03206 ## XttsConfig diff --git a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py new file mode 100644 index 00000000..ee6b22be --- /dev/null +++ b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py @@ -0,0 +1,180 @@ +import os + +from trainer import Trainer, TrainerArgs + +from TTS.config.shared_configs import BaseDatasetConfig +from TTS.tts.datasets import load_tts_samples +from TTS.tts.layers.xtts.trainer.gpt_trainer import GPTArgs, GPTTrainer, GPTTrainerConfig, XttsAudioConfig +from TTS.utils.manage import ModelManager + +# Logging parameters +RUN_NAME = "GPT_XTTS_v2.0_LJSpeech_FT" +PROJECT_NAME = "XTTS_trainer" +DASHBOARD_LOGGER = "tensorboard" +LOGGER_URI = None + +# Set here the path that the checkpoints will be saved. Default: ./run/training/ +OUT_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "run", "training") + +# Training Parameters +OPTIMIZER_WD_ONLY_ON_WEIGHTS = True # for multi-gpu training please make it False +START_WITH_EVAL = True # if True it will star with evaluation +BATCH_SIZE = 3 # set here the batch size +GRAD_ACUMM_STEPS = 84 # set here the grad accumulation steps +# Note: we recommend that BATCH_SIZE * GRAD_ACUMM_STEPS need to be at least 252 for more efficient training. You can increase/decrease BATCH_SIZE but then set GRAD_ACUMM_STEPS accordingly. + +# Define here the dataset that you want to use for the fine-tuning on. 
+config_dataset = BaseDatasetConfig( + formatter="ljspeech", + dataset_name="ljspeech", + path="/raid/datasets/LJSpeech-1.1_24khz/", + meta_file_train="/raid/datasets/LJSpeech-1.1_24khz/metadata.csv", + language="en", +) + +# Add here the configs of the datasets +DATASETS_CONFIG_LIST = [config_dataset] + +# Define the path where XTTS v2.0.1 files will be downloaded +CHECKPOINTS_OUT_PATH = os.path.join(OUT_PATH, "XTTS_v2.0_original_model_files/") +os.makedirs(CHECKPOINTS_OUT_PATH, exist_ok=True) + + +# DVAE files +DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/dvae.pth" +MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/mel_stats.pth" + +# Set the path to the downloaded files +DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, DVAE_CHECKPOINT_LINK.split("/")[-1]) +MEL_NORM_FILE = os.path.join(CHECKPOINTS_OUT_PATH, MEL_NORM_LINK.split("/")[-1]) + +# download DVAE files if needed +if not os.path.isfile(DVAE_CHECKPOINT) or not os.path.isfile(MEL_NORM_FILE): + print(" > Downloading DVAE files!") + ModelManager._download_model_files([MEL_NORM_LINK, DVAE_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True) + +# ToDo: Update links for XTTS v2.0 + +# Download XTTS v2.0 checkpoint if needed +TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v2.0/vocab.json" +XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v2.0/model.pth" + +# XTTS transfer learning parameters: You we need to provide the paths of XTTS model checkpoint that you want to do the fine tuning. +TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, TOKENIZER_FILE_LINK.split("/")[-1]) # vocab.json file +XTTS_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, XTTS_CHECKPOINT_LINK.split("/")[-1]) # model.pth file + +# download XTTS v2.0 files if needed +if not os.path.isfile(TOKENIZER_FILE) or not os.path.isfile(XTTS_CHECKPOINT): + print(" > Downloading XTTS v2.0 files!") + ModelManager._download_model_files( + [TOKENIZER_FILE_LINK, XTTS_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True + ) + + +# Training sentences generations +SPEAKER_REFERENCE = ( + "./tests/data/ljspeech/wavs/LJ001-0002.wav" # speaker reference to be used in training test sentences +) +LANGUAGE = config_dataset.language + + +def main(): + # init args and config + model_args = GPTArgs( + max_conditioning_length=132300, # 6 secs + min_conditioning_length=66150, # 3 secs + debug_loading_failures=False, + max_wav_length=255995, # ~11.6 seconds + max_text_length=200, + mel_norm_file=MEL_NORM_FILE, + dvae_checkpoint=DVAE_CHECKPOINT, + xtts_checkpoint=XTTS_CHECKPOINT, # checkpoint path of the model that you want to fine-tune + tokenizer_file=TOKENIZER_FILE, + gpt_num_audio_tokens=8194, + gpt_start_audio_token=8192, + gpt_stop_audio_token=8193, + use_ne_hifigan=True, # if it is true it will keep the non-enhanced keys on the output checkpoint + gpt_use_masking_gt_prompt_approach=True, + gpt_use_perceiver_resampler=True, + ) + # define audio config + audio_config = XttsAudioConfig( + sample_rate=22050, dvae_sample_rate=22050, diffusion_sample_rate=24000, output_sample_rate=24000 + ) + # training parameters config + config = GPTTrainerConfig( + output_path=OUT_PATH, + model_args=model_args, + run_name=RUN_NAME, + project_name=PROJECT_NAME, + run_description=""" + GPT XTTS training + """, + dashboard_logger=DASHBOARD_LOGGER, + logger_uri=LOGGER_URI, + audio=audio_config, + batch_size=BATCH_SIZE, + batch_group_size=48, + eval_batch_size=BATCH_SIZE, + num_loader_workers=8, + 
eval_split_max_size=256, + print_step=50, + plot_step=100, + log_model_step=1000, + save_step=10000, + save_n_checkpoints=1, + save_checkpoints=True, + # target_loss="loss", + print_eval=False, + # Optimizer values like tortoise, pytorch implementation with modifications to not apply WD to non-weight parameters. + optimizer="AdamW", + optimizer_wd_only_on_weights=OPTIMIZER_WD_ONLY_ON_WEIGHTS, + optimizer_params={"betas": [0.9, 0.96], "eps": 1e-8, "weight_decay": 1e-2}, + lr=5e-06, # learning rate + lr_scheduler="MultiStepLR", + # it was adjusted accordly for the new step scheme + lr_scheduler_params={"milestones": [50000 * 18, 150000 * 18, 300000 * 18], "gamma": 0.5, "last_epoch": -1}, + test_sentences=[ + { + "text": "It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", + "speaker_wav": SPEAKER_REFERENCE, + "language": LANGUAGE, + }, + { + "text": "This cake is great. It's so delicious and moist.", + "speaker_wav": SPEAKER_REFERENCE, + "language": LANGUAGE, + }, + ], + ) + + # init the model from config + model = GPTTrainer.init_from_config(config) + + # load training samples + train_samples, eval_samples = load_tts_samples( + DATASETS_CONFIG_LIST, + eval_split=True, + eval_split_max_size=config.eval_split_max_size, + eval_split_size=config.eval_split_size, + ) + + # init the trainer and 🚀 + trainer = Trainer( + TrainerArgs( + restore_path=None, # xtts checkpoint is restored via xtts_checkpoint key so no need of restore it using Trainer restore_path parameter + skip_train_epoch=False, + start_with_eval=START_WITH_EVAL, + grad_accum_steps=GRAD_ACUMM_STEPS, + ), + config, + output_path=OUT_PATH, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + ) + trainer.fit() + + +if __name__ == "__main__": + main() diff --git a/requirements.txt b/requirements.txt index 2837c36e..04343c84 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,6 +33,8 @@ coqpit>=0.0.16 # chinese g2p deps jieba pypinyin +# korean +hangul_romanize # gruut+supported langs gruut[de,es,fr]==2.2.3 # deps for korean @@ -51,3 +53,4 @@ transformers==4.33.* encodec==0.1.* # deps for XTTS unidecode==1.3.* +num2words diff --git a/tests/xtts_tests/test_xtts_gpt_train.py b/tests/xtts_tests/test_xtts_gpt_train.py index 5e3bc226..47b1dd7d 100644 --- a/tests/xtts_tests/test_xtts_gpt_train.py +++ b/tests/xtts_tests/test_xtts_gpt_train.py @@ -86,6 +86,7 @@ model_args = GPTArgs( gpt_num_audio_tokens=8194, gpt_start_audio_token=8192, gpt_stop_audio_token=8193, + use_ne_hifigan=True, ) audio_config = XttsAudioConfig( sample_rate=22050, dvae_sample_rate=22050, diffusion_sample_rate=24000, output_sample_rate=24000 diff --git a/tests/xtts_tests/test_xtts_v2-0_gpt_train.py b/tests/xtts_tests/test_xtts_v2-0_gpt_train.py new file mode 100644 index 00000000..6b6f1330 --- /dev/null +++ b/tests/xtts_tests/test_xtts_v2-0_gpt_train.py @@ -0,0 +1,162 @@ +import os +import shutil + +import torch +from trainer import Trainer, TrainerArgs + +from tests import get_tests_output_path +from TTS.config.shared_configs import BaseDatasetConfig +from TTS.tts.datasets import load_tts_samples +from TTS.tts.layers.xtts.dvae import DiscreteVAE +from TTS.tts.layers.xtts.trainer.gpt_trainer import GPTArgs, GPTTrainer, GPTTrainerConfig, XttsAudioConfig + +config_dataset = BaseDatasetConfig( + formatter="ljspeech", + dataset_name="ljspeech", + path="tests/data/ljspeech/", + meta_file_train="metadata.csv", + meta_file_val="metadata.csv", + language="en", +) + +DATASETS_CONFIG_LIST = 
[config_dataset] + +# Logging parameters +RUN_NAME = "GPT_XTTS_LJSpeech_FT" +PROJECT_NAME = "XTTS_trainer" +DASHBOARD_LOGGER = "tensorboard" +LOGGER_URI = None + +OUT_PATH = os.path.join(get_tests_output_path(), "train_outputs", "xtts_tests") +os.makedirs(OUT_PATH, exist_ok=True) + +# Create DVAE checkpoint and mel_norms on test time +# DVAE parameters: For the training we need the dvae to extract the dvae tokens, given that you must provide the paths for this model +DVAE_CHECKPOINT = os.path.join(OUT_PATH, "dvae.pth") # DVAE checkpoint +# Mel spectrogram norms, required for dvae mel spectrogram extraction +MEL_NORM_FILE = os.path.join(OUT_PATH, "mel_stats.pth") +dvae = DiscreteVAE( + channels=80, + normalization=None, + positional_dims=1, + num_tokens=8192, + codebook_dim=512, + hidden_dim=512, + num_resnet_blocks=3, + kernel_size=3, + num_layers=2, + use_transposed_convs=False, +) +torch.save(dvae.state_dict(), DVAE_CHECKPOINT) +mel_stats = torch.ones(80) +torch.save(mel_stats, MEL_NORM_FILE) + + +# XTTS transfer learning parameters: You we need to provide the paths of XTTS model checkpoint that you want to do the fine tuning. +TOKENIZER_FILE = "tests/inputs/xtts_vocab.json" # vocab.json file +XTTS_CHECKPOINT = None # "/raid/edresson/dev/Checkpoints/XTTS_evaluation/xtts_style_emb_repetition_fix_gt/132500_gpt_ema_coqui_tts_with_enhanced_hifigan.pth" # model.pth file + + +# Training sentences generations +SPEAKER_REFERENCE = "tests/data/ljspeech/wavs/LJ001-0002.wav" # speaker reference to be used in training test sentences +LANGUAGE = config_dataset.language + + +# Training Parameters +OPTIMIZER_WD_ONLY_ON_WEIGHTS = True # for multi-gpu training please make it False +START_WITH_EVAL = False # if True it will star with evaluation +BATCH_SIZE = 2 # set here the batch size +GRAD_ACUMM_STEPS = 1 # set here the grad accumulation steps +# Note: we recommend that BATCH_SIZE * GRAD_ACUMM_STEPS need to be at least 252 for more efficient training. You can increase/decrease BATCH_SIZE but then set GRAD_ACUMM_STEPS accordingly. + + +# init args and config +model_args = GPTArgs( + max_conditioning_length=132300, # 6 secs + min_conditioning_length=66150, # 3 secs + debug_loading_failures=False, + max_wav_length=255995, # ~11.6 seconds + max_text_length=200, + mel_norm_file=MEL_NORM_FILE, + dvae_checkpoint=DVAE_CHECKPOINT, + xtts_checkpoint=XTTS_CHECKPOINT, # checkpoint path of the model that you want to fine-tune + tokenizer_file=TOKENIZER_FILE, + gpt_num_audio_tokens=8194, + gpt_start_audio_token=8192, + gpt_stop_audio_token=8193, + gpt_use_masking_gt_prompt_approach=True, + gpt_use_perceiver_resampler=True, + use_ne_hifigan=True, +) +audio_config = XttsAudioConfig( + sample_rate=22050, dvae_sample_rate=22050, diffusion_sample_rate=24000, output_sample_rate=24000 +) +config = GPTTrainerConfig( + epochs=1, + output_path=OUT_PATH, + model_args=model_args, + run_name=RUN_NAME, + project_name=PROJECT_NAME, + run_description="GPT XTTS training", + dashboard_logger=DASHBOARD_LOGGER, + logger_uri=LOGGER_URI, + audio=audio_config, + batch_size=BATCH_SIZE, + batch_group_size=48, + eval_batch_size=BATCH_SIZE, + num_loader_workers=8, + eval_split_max_size=256, + print_step=50, + plot_step=100, + log_model_step=1000, + save_step=10000, + save_n_checkpoints=1, + save_checkpoints=True, + # target_loss="loss", + print_eval=False, + # Optimizer values like tortoise, pytorch implementation with modifications to not apply WD to non-weight parameters. 
+ optimizer="AdamW", + optimizer_wd_only_on_weights=OPTIMIZER_WD_ONLY_ON_WEIGHTS, + optimizer_params={"betas": [0.9, 0.96], "eps": 1e-8, "weight_decay": 1e-2}, + lr=5e-06, # learning rate + lr_scheduler="MultiStepLR", + # it was adjusted accordly for the new step scheme + lr_scheduler_params={"milestones": [50000 * 18, 150000 * 18, 300000 * 18], "gamma": 0.5, "last_epoch": -1}, + test_sentences=[ + { + "text": "This cake is great. It's so delicious and moist.", + "speaker_wav": SPEAKER_REFERENCE, + "language": LANGUAGE, + }, + ], +) + +# init the model from config +model = GPTTrainer.init_from_config(config) + +# load training samples +train_samples, eval_samples = load_tts_samples( + DATASETS_CONFIG_LIST, + eval_split=True, + eval_split_max_size=config.eval_split_max_size, + eval_split_size=config.eval_split_size, +) + +# init the trainer and 🚀 +trainer = Trainer( + TrainerArgs( + restore_path=None, # xtts checkpoint is restored via xtts_checkpoint key so no need of restore it using Trainer restore_path parameter + skip_train_epoch=False, + start_with_eval=True, + grad_accum_steps=GRAD_ACUMM_STEPS, + ), + config, + output_path=OUT_PATH, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, +) +trainer.fit() + +# remove output path +shutil.rmtree(OUT_PATH) diff --git a/tests/zoo_tests/test_models.py b/tests/zoo_tests/test_models.py index 7194ed5c..2f9399ad 100644 --- a/tests/zoo_tests/test_models.py +++ b/tests/zoo_tests/test_models.py @@ -16,6 +16,7 @@ MODELS_WITH_SEP_TESTS = [ "tts_models/en/multi-dataset/tortoise-v2", "tts_models/multilingual/multi-dataset/xtts_v1", "tts_models/multilingual/multi-dataset/xtts_v1.1", + "tts_models/multilingual/multi-dataset/xtts_v2", ] @@ -126,6 +127,58 @@ def test_xtts_streaming(): assert len(wav_chuncks) > 1 +def test_xtts_v2(): + """XTTS is too big to run on github actions. We need to test it locally""" + output_path = os.path.join(get_tests_output_path(), "output.wav") + speaker_wav = os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0001.wav") + use_gpu = torch.cuda.is_available() + if use_gpu: + run_cli( + "yes | " + f"tts --model_name tts_models/multilingual/multi-dataset/xtts_v2 " + f'--text "This is an example." --out_path "{output_path}" --progress_bar False --use_cuda True ' + f'--speaker_wav "{speaker_wav}" --language_idx "en"' + ) + else: + run_cli( + "yes | " + f"tts --model_name tts_models/multilingual/multi-dataset/xtts_v2 " + f'--text "This is an example." 
--out_path "{output_path}" --progress_bar False ' + f'--speaker_wav "{speaker_wav}" --language_idx "en"' + ) + + +def test_xtts_v2_streaming(): + """Testing the new inference_stream method""" + from TTS.tts.configs.xtts_config import XttsConfig + from TTS.tts.models.xtts import Xtts + + speaker_wav = os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0001.wav") + model_path = os.path.join(get_user_data_dir("tts"), "tts_models--multilingual--multi-dataset--xtts_v2") + config = XttsConfig() + config.load_json(os.path.join(model_path, "config.json")) + model = Xtts.init_from_config(config) + model.load_checkpoint(config, checkpoint_dir=model_path) + model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu")) + + print("Computing speaker latents...") + gpt_cond_latent, _, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav) + + print("Inference...") + chunks = model.inference_stream( + "It took me quite a long time to develop a voice and now that I have it I am not going to be silent.", + "en", + gpt_cond_latent, + speaker_embedding, + ) + wav_chuncks = [] + for i, chunk in enumerate(chunks): + if i == 0: + assert chunk.shape[-1] > 5000 + wav_chuncks.append(chunk) + assert len(wav_chuncks) > 1 + + def test_tortoise(): output_path = os.path.join(get_tests_output_path(), "output.wav") use_gpu = torch.cuda.is_available() From 7eedfc67daed77a031f6d4f3e3c53dc19083cfd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Mon, 6 Nov 2023 15:37:32 +0100 Subject: [PATCH 04/67] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 594777c1..1a9285eb 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,8 @@ ## 🐸Coqui.ai News +- 📣 ⓍTTSv2 is here with 16 languages and better performance across the board. +- 📣 ⓍTTS fine-tuning code is out. Check the [example recipes](https://github.com/coqui-ai/TTS/tree/dev/recipes/ljspeech). +- 📣 ⓍTTS can now stream with <200ms latency. - 📣 ⓍTTS, our production TTS model that can speak 13 languages, is released [Blog Post](https://coqui.ai/blog/tts/open_xtts), [Demo](https://huggingface.co/spaces/coqui/xtts), [Docs](https://tts.readthedocs.io/en/dev/models/xtts.html) - 📣 [🐶Bark](https://github.com/suno-ai/bark) is now available for inference with unconstrained voice cloning. [Docs](https://tts.readthedocs.io/en/dev/models/bark.html) - 📣 You can use [~1100 Fairseq models](https://github.com/facebookresearch/fairseq/tree/main/examples/mms) with 🐸TTS. From c713a839da5b817ffc1d87d8a470c3126e551cbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Mon, 6 Nov 2023 15:51:56 +0100 Subject: [PATCH 05/67] Update VERSION --- TTS/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/VERSION b/TTS/VERSION index 41915c79..5a03fb73 100644 --- a/TTS/VERSION +++ b/TTS/VERSION @@ -1 +1 @@ -0.19.1 +0.20.0 From 9d54bd765548c63a6905f2db754c60d8b67efd5a Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Mon, 6 Nov 2023 18:13:58 +0100 Subject: [PATCH 06/67] Fixup XTTS --- TTS/tts/configs/xtts_config.py | 2 +- TTS/tts/models/xtts.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/TTS/tts/configs/xtts_config.py b/TTS/tts/configs/xtts_config.py index 1865a3fd..ea95faf5 100644 --- a/TTS/tts/configs/xtts_config.py +++ b/TTS/tts/configs/xtts_config.py @@ -30,7 +30,7 @@ class XttsConfig(BaseTTSConfig): which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. 
negative), length_penalty > 0.0 promotes longer sequences, while length_penalty < 0.0 encourages shorter sequences. - reperation_penalty (float): + repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty. Defaults to `2.0`. top_p (float): diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 58f8542b..3e413114 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -916,7 +916,7 @@ class Xtts(BaseTTS): if hasattr(self, "hifigan_decoder"): self.hifigan_decoder.eval() if hasattr(self, "ne_hifigan_decoder"): - self.hifigan_decoder.eval() + self.ne_hifigan_decoder.eval() if hasattr(self, "diffusion_decoder"): self.diffusion_decoder.eval() if hasattr(self, "vocoder"): From 9bbf6eb8dd5145dd4f5f72cc92f20337017745b9 Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Mon, 6 Nov 2023 18:43:38 +0100 Subject: [PATCH 07/67] Drop use_ne_hifigan --- TTS/tts/models/xtts.py | 38 ++++++-------------------------------- 1 file changed, 6 insertions(+), 32 deletions(-) diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 3e413114..de38f7c8 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -190,7 +190,6 @@ class XttsArgs(Coqpit): decoder_checkpoint (str, optional): The checkpoint for the DiffTTS model. Defaults to None. num_chars (int, optional): The maximum number of characters to generate. Defaults to 255. use_hifigan (bool, optional): Whether to use hifigan with implicit enhancement or diffusion + univnet as a decoder. Defaults to True. - use_ne_hifigan (bool, optional): Whether to use regular hifigan or diffusion + univnet as a decoder. Defaults to False. For GPT model: gpt_max_audio_tokens (int, optional): The maximum mel tokens for the autoregressive model. Defaults to 604. @@ -229,7 +228,6 @@ class XttsArgs(Coqpit): decoder_checkpoint: str = None num_chars: int = 255 use_hifigan: bool = True - use_ne_hifigan: bool = False # XTTS GPT Encoder params tokenizer_file: str = "" @@ -337,18 +335,7 @@ class Xtts(BaseTTS): cond_d_vector_in_each_upsampling_layer=self.args.cond_d_vector_in_each_upsampling_layer, ) - if self.args.use_ne_hifigan: - self.ne_hifigan_decoder = HifiDecoder( - input_sample_rate=self.args.input_sample_rate, - output_sample_rate=self.args.output_sample_rate, - output_hop_length=self.args.output_hop_length, - ar_mel_length_compression=self.args.gpt_code_stride_len, - decoder_input_dim=self.args.decoder_input_dim, - d_vector_dim=self.args.d_vector_dim, - cond_d_vector_in_each_upsampling_layer=self.args.cond_d_vector_in_each_upsampling_layer, - ) - - if not (self.args.use_hifigan or self.args.use_ne_hifigan): + if not self.args.use_hifigan: self.diffusion_decoder = DiffusionTts( model_channels=self.args.diff_model_channels, num_layers=self.args.diff_num_layers, @@ -454,7 +441,7 @@ class Xtts(BaseTTS): if librosa_trim_db is not None: audio = librosa.effects.trim(audio, top_db=librosa_trim_db)[0] - if self.args.use_hifigan or self.args.use_ne_hifigan: + if self.args.use_hifigan or self.args.use_hifigan: speaker_embedding = self.get_speaker_embedding(audio, sr) else: diffusion_cond_latents = self.get_diffusion_cond_latents(audio, sr) @@ -706,19 +693,14 @@ class Xtts(BaseTTS): break if decoder == "hifigan": - assert hasattr( - self, "hifigan_decoder" + assert ( + hasattr(self, "hifigan_decoder") and self.hifigan_decoder is not None ), "You must enable hifigan decoder to use it by setting config `use_hifigan: true`" wav = self.hifigan_decoder(gpt_latents, g=speaker_embedding) - elif decoder == "ne_hifigan": - 
assert hasattr( - self, "ne_hifigan_decoder" - ), "You must enable ne_hifigan decoder to use it by setting config `use_ne_hifigan: true`" - wav = self.ne_hifigan_decoder(gpt_latents, g=speaker_embedding) else: assert hasattr( self, "diffusion_decoder" - ), "You must disable hifigan decoders to use difffusion by setting config `use_ne_hifigan: false` and `use_hifigan: false`" + ), "You must disable hifigan decoders to use difffusion by setting `use_hifigan: false`" mel = do_spectrogram_diffusion( self.diffusion_decoder, diffuser, @@ -816,11 +798,6 @@ class Xtts(BaseTTS): self, "hifigan_decoder" ), "You must enable hifigan decoder to use it by setting config `use_hifigan: true`" wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) - elif decoder == "ne_hifigan": - assert hasattr( - self, "ne_hifigan_decoder" - ), "You must enable ne_hifigan decoder to use it by setting config `use_ne_hifigan: true`" - wav_gen = self.ne_hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) else: raise NotImplementedError("Diffusion for streaming inference not implemented.") wav_chunk, wav_gen_prev, wav_overlap = self.handle_chunks( @@ -850,9 +827,8 @@ class Xtts(BaseTTS): def get_compatible_checkpoint_state_dict(self, model_path): checkpoint = load_fsspec(model_path, map_location=torch.device("cpu"))["model"] - ignore_keys = ["diffusion_decoder", "vocoder"] if self.args.use_hifigan or self.args.use_ne_hifigan else [] + ignore_keys = ["diffusion_decoder", "vocoder"] if self.args.use_hifigan else [] ignore_keys += [] if self.args.use_hifigan else ["hifigan_decoder"] - ignore_keys += [] if self.args.use_ne_hifigan else ["ne_hifigan_decoder"] # remove xtts gpt trainer extra keys ignore_keys += ["torch_mel_spectrogram_style_encoder", "torch_mel_spectrogram_dvae", "dvae"] for key in list(checkpoint.keys()): @@ -915,8 +891,6 @@ class Xtts(BaseTTS): if eval: if hasattr(self, "hifigan_decoder"): self.hifigan_decoder.eval() - if hasattr(self, "ne_hifigan_decoder"): - self.ne_hifigan_decoder.eval() if hasattr(self, "diffusion_decoder"): self.diffusion_decoder.eval() if hasattr(self, "vocoder"): From 5d418bb84a892d30530d8c8069a934375db5906c Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Mon, 6 Nov 2023 18:48:41 +0100 Subject: [PATCH 08/67] Update docs --- TTS/tts/models/xtts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index de38f7c8..477f31bf 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -568,7 +568,7 @@ class Xtts(BaseTTS): Values at 0 re the "mean" prediction of the diffusion network and will sound bland and smeared. Defaults to 1.0. 
- decoder: (str) Selects the decoder to use between ("hifigan", "ne_hifigan" and "diffusion") + decoder: (str) Selects the decoder to use between ("hifigan", "diffusion") Defaults to hifigan hf_generate_kwargs: (**kwargs) The huggingface Transformers generate API is used for the autoregressive From f0cb19ecca45f7ccb238cdf0b84ea8e0093e9c37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Mon, 6 Nov 2023 20:15:49 +0100 Subject: [PATCH 09/67] Drop diffusion from XTTS (#3150) * Drop diffusion for XTTS * Make style * Drop diffusion deps in code * Restore thrashed --- TTS/tts/layers/tortoise/dpm_solver.py | 2 +- TTS/tts/layers/xtts/diffusion.py | 1319 ------------------ TTS/tts/layers/xtts/vocoder.py | 385 ----- TTS/tts/models/base_tacotron.py | 7 +- TTS/tts/models/tortoise.py | 7 +- TTS/tts/models/xtts.py | 103 +- recipes/ljspeech/xtts_v1/train_gpt_xtts.py | 5 +- recipes/ljspeech/xtts_v2/train_gpt_xtts.py | 5 +- tests/xtts_tests/test_xtts_gpt_train.py | 5 +- tests/xtts_tests/test_xtts_v2-0_gpt_train.py | 5 +- 10 files changed, 33 insertions(+), 1810 deletions(-) delete mode 100644 TTS/tts/layers/xtts/diffusion.py delete mode 100644 TTS/tts/layers/xtts/vocoder.py diff --git a/TTS/tts/layers/tortoise/dpm_solver.py b/TTS/tts/layers/tortoise/dpm_solver.py index cb540577..2166eebb 100644 --- a/TTS/tts/layers/tortoise/dpm_solver.py +++ b/TTS/tts/layers/tortoise/dpm_solver.py @@ -1548,4 +1548,4 @@ def expand_dims(v, dims): Returns: a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. """ - return v[(...,) + (None,) * (dims - 1)] + return v[(...,) + (None,) * (dims - 1)] \ No newline at end of file diff --git a/TTS/tts/layers/xtts/diffusion.py b/TTS/tts/layers/xtts/diffusion.py deleted file mode 100644 index 37665bc6..00000000 --- a/TTS/tts/layers/xtts/diffusion.py +++ /dev/null @@ -1,1319 +0,0 @@ -import enum -import math - -import numpy as np -import torch -import torch as th -from k_diffusion.sampling import sample_dpmpp_2m, sample_euler_ancestral -from tqdm import tqdm - -from TTS.tts.layers.tortoise.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper - -K_DIFFUSION_SAMPLERS = {"k_euler_a": sample_euler_ancestral, "dpm++2m": sample_dpmpp_2m} -SAMPLERS = ["dpm++2m", "p", "ddim"] - - -def normal_kl(mean1, logvar1, mean2, logvar2): - """ - Compute the KL divergence between two gaussians. - - Shapes are automatically broadcasted, so batches can be compared to - scalars, among other use cases. - """ - tensor = None - for obj in (mean1, logvar1, mean2, logvar2): - if isinstance(obj, th.Tensor): - tensor = obj - break - assert tensor is not None, "at least one argument must be a Tensor" - - # Force variances to be Tensors. Broadcasting helps convert scalars to - # Tensors, but it does not work for th.exp(). - logvar1, logvar2 = [x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor) for x in (logvar1, logvar2)] - - return 0.5 * (-1.0 + logvar2 - logvar1 + th.exp(logvar1 - logvar2) + ((mean1 - mean2) ** 2) * th.exp(-logvar2)) - - -def approx_standard_normal_cdf(x): - """ - A fast approximation of the cumulative distribution function of the - standard normal. - """ - return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3)))) - - -def discretized_gaussian_log_likelihood(x, *, means, log_scales): - """ - Compute the log-likelihood of a Gaussian distribution discretizing to a - given image. - - :param x: the target images. It is assumed that this was uint8 values, - rescaled to the range [-1, 1]. 
- :param means: the Gaussian mean Tensor. - :param log_scales: the Gaussian log stddev Tensor. - :return: a tensor like x of log probabilities (in nats). - """ - assert x.shape == means.shape == log_scales.shape - centered_x = x - means - inv_stdv = th.exp(-log_scales) - plus_in = inv_stdv * (centered_x + 1.0 / 255.0) - cdf_plus = approx_standard_normal_cdf(plus_in) - min_in = inv_stdv * (centered_x - 1.0 / 255.0) - cdf_min = approx_standard_normal_cdf(min_in) - log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12)) - log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12)) - cdf_delta = cdf_plus - cdf_min - log_probs = th.where( - x < -0.999, - log_cdf_plus, - th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))), - ) - assert log_probs.shape == x.shape - return log_probs - - -def mean_flat(tensor): - """ - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): - """ - Get a pre-defined beta schedule for the given name. - - The beta schedule library consists of beta schedules which remain similar - in the limit of num_diffusion_timesteps. - Beta schedules may be added, but should not be removed or changed once - they are committed to maintain backwards compatibility. - """ - if schedule_name == "linear": - # Linear schedule from Ho et al, extended to work for any number of - # diffusion steps. - scale = 1000 / num_diffusion_timesteps - beta_start = scale * 0.0001 - beta_end = scale * 0.02 - return np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64) - elif schedule_name == "cosine": - return betas_for_alpha_bar( - num_diffusion_timesteps, - lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2, - ) - else: - raise NotImplementedError(f"unknown beta schedule: {schedule_name}") - - -def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, - which defines the cumulative product of (1-beta) over time from t = [0,1]. - - :param num_diffusion_timesteps: the number of betas to produce. - :param alpha_bar: a lambda that takes an argument t from 0 to 1 and - produces the cumulative product of (1-beta) up to that - part of the diffusion process. - :param max_beta: the maximum beta to use; use values lower than 1 to - prevent singularities. - """ - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return np.array(betas) - - -class ModelMeanType(enum.Enum): - """ - Which type of output the model predicts. - """ - - PREVIOUS_X = "previous_x" # the model predicts x_{t-1} - START_X = "start_x" # the model predicts x_0 - EPSILON = "epsilon" # the model predicts epsilon - - -class ModelVarType(enum.Enum): - """ - What is used as the model's output variance. - - The LEARNED_RANGE option has been added to allow the model to predict - values between FIXED_SMALL and FIXED_LARGE, making its job easier. 
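
The two beta schedules defined above can be sanity-checked numerically; a minimal sketch for the usual T = 1000 setting (illustrative only, and the 4e-5 figure is approximate):

import numpy as np

betas_lin = get_named_beta_schedule("linear", 1000)    # 0.0001 ... 0.02
betas_cos = get_named_beta_schedule("cosine", 1000)    # each step capped at max_beta = 0.999
assert betas_lin.shape == betas_cos.shape == (1000,)
alpha_bar = np.cumprod(1.0 - betas_lin)
# alpha_bar falls monotonically from 0.9999 at t=0 to roughly 4e-5 at t=999,
# which is what makes x_T essentially indistinguishable from pure noise.
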
- """ - - LEARNED = "learned" - FIXED_SMALL = "fixed_small" - FIXED_LARGE = "fixed_large" - LEARNED_RANGE = "learned_range" - - -class LossType(enum.Enum): - MSE = "mse" # use raw MSE loss (and KL when learning variances) - RESCALED_MSE = "rescaled_mse" # use raw MSE loss (with RESCALED_KL when learning variances) - KL = "kl" # use the variational lower-bound - RESCALED_KL = "rescaled_kl" # like KL, but rescale to estimate the full VLB - - def is_vb(self): - return self == LossType.KL or self == LossType.RESCALED_KL - - -class GaussianDiffusion: - """ - Utilities for training and sampling diffusion models. - - Ported directly from here, and then adapted over time to further experimentation. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42 - - :param betas: a 1-D numpy array of betas for each diffusion timestep, - starting at T and going to 1. - :param model_mean_type: a ModelMeanType determining what the model outputs. - :param model_var_type: a ModelVarType determining how variance is output. - :param loss_type: a LossType determining the loss function to use. - :param rescale_timesteps: if True, pass floating point timesteps into the - model so that they are always scaled like in the - original paper (0 to 1000). - """ - - def __init__( - self, - *, - betas, - model_mean_type, - model_var_type, - loss_type, - rescale_timesteps=False, # this is generally False - conditioning_free=False, - conditioning_free_k=1, - ramp_conditioning_free=True, - sampler="ddim", - ): - self.sampler = sampler - self.model_mean_type = ModelMeanType(model_mean_type) - self.model_var_type = ModelVarType(model_var_type) - self.loss_type = LossType(loss_type) - self.rescale_timesteps = rescale_timesteps - self.conditioning_free = conditioning_free - self.conditioning_free_k = conditioning_free_k - self.ramp_conditioning_free = ramp_conditioning_free - - # Use float64 for accuracy. - betas = np.array(betas, dtype=np.float64) - self.betas = betas - assert len(betas.shape) == 1, "betas must be 1-D" - assert (betas > 0).all() and (betas <= 1).all() - - self.num_timesteps = int(betas.shape[0]) - - alphas = 1.0 - betas - self.alphas_cumprod = np.cumprod(alphas, axis=0) - self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1]) - self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0) - assert self.alphas_cumprod_prev.shape == (self.num_timesteps,) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod) - self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod) - self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod) - self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod) - self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - self.posterior_variance = betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod) - # log calculation clipped because the posterior variance is 0 at the - # beginning of the diffusion chain. - self.posterior_log_variance_clipped = np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:])) - self.posterior_mean_coef1 = betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod) - self.posterior_mean_coef2 = (1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod) - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). 
- - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :return: A tuple (mean, variance, log_variance), all of x_start's shape. - """ - mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def q_sample(self, x_start, t, noise=None): - """ - Diffuse the data for a given number of diffusion steps. - - In other words, sample from q(x_t | x_0). - - :param x_start: the initial data batch. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :param noise: if specified, the split-out normal noise. - :return: A noisy version of x_start. - """ - if noise is None: - noise = th.randn_like(x_start) - assert noise.shape == x_start.shape - return ( - _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise - ) - - def q_posterior_mean_variance(self, x_start, x_t, t): - """ - Compute the mean and variance of the diffusion posterior: - - q(x_{t-1} | x_t, x_0) - - """ - assert x_start.shape == x_t.shape - posterior_mean = ( - _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start - + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = _extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) - assert ( - posterior_mean.shape[0] - == posterior_variance.shape[0] - == posterior_log_variance_clipped.shape[0] - == x_start.shape[0] - ) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None): - """ - Apply the model to get p(x_{t-1} | x_t), as well as a prediction of - the initial x, x_0. - - :param model: the model, which takes a signal and a batch of timesteps - as input. - :param x: the [N x C x ...] tensor at time t. - :param t: a 1-D Tensor of timesteps. - :param clip_denoised: if True, clip the denoised signal into [-1, 1]. - :param denoised_fn: if not None, a function which applies to the - x_start prediction before it is used to sample. Applies before - clip_denoised. - :param model_kwargs: if not None, a dict of extra keyword arguments to - pass to the model. This can be used for conditioning. - :return: a dict with the following keys: - - 'mean': the model mean output. - - 'variance': the model variance output. - - 'log_variance': the log of 'variance'. - - 'pred_xstart': the prediction for x_0. 
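
q_sample above is the closed form of the forward process; a minimal NumPy sketch of the same identity with toy shapes (all names below are illustrative):

import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)
alpha_bar = np.cumprod(1.0 - betas)
x0 = np.random.randn(8)          # toy "clean" signal
eps = np.random.randn(8)
t = 500
# q(x_t | x_0): x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
x_t = np.sqrt(alpha_bar[t]) * x0 + np.sqrt(1.0 - alpha_bar[t]) * eps
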
- """ - if model_kwargs is None: - model_kwargs = {} - - assert self.model_var_type == ModelVarType.LEARNED_RANGE - assert self.model_mean_type == ModelMeanType.EPSILON - assert denoised_fn is None - assert clip_denoised is True - B, C = x.shape[:2] - assert t.shape == (B,) - model_output = model(x, self._scale_timesteps(t), **model_kwargs) - if self.conditioning_free: - model_output_no_conditioning = model(x, self._scale_timesteps(t), conditioning_free=True, **model_kwargs) - - if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]: - assert model_output.shape == (B, C * 2, *x.shape[2:]) - model_output, model_var_values = th.split(model_output, C, dim=1) - if self.conditioning_free: - model_output_no_conditioning, _ = th.split(model_output_no_conditioning, C, dim=1) - if self.model_var_type == ModelVarType.LEARNED: - assert False - model_log_variance = model_var_values - model_variance = th.exp(model_log_variance) - else: - min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape) - max_log = _extract_into_tensor(np.log(self.betas), t, x.shape) - # The model_var_values is [-1, 1] for [min_var, max_var]. - frac = (model_var_values + 1) / 2 - model_log_variance = frac * max_log + (1 - frac) * min_log - model_variance = th.exp(model_log_variance) - else: - assert False - model_variance, model_log_variance = { - # for fixedlarge, we set the initial (log-)variance like so - # to get a better decoder log likelihood. - ModelVarType.FIXED_LARGE: ( - np.append(self.posterior_variance[1], self.betas[1:]), - np.log(np.append(self.posterior_variance[1], self.betas[1:])), - ), - ModelVarType.FIXED_SMALL: ( - self.posterior_variance, - self.posterior_log_variance_clipped, - ), - }[self.model_var_type] - model_variance = _extract_into_tensor(model_variance, t, x.shape) - model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape) - - if self.conditioning_free: - if self.ramp_conditioning_free: - assert t.shape[0] == 1 # This should only be used in inference. 
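
The blend applied just below is standard classifier-free guidance; a self-contained sketch of the same arithmetic (tensor shapes here are arbitrary):

import torch

def cfg_blend(eps_cond, eps_uncond, k):
    # k = 0 keeps the conditional prediction; larger k pushes the output
    # further away from the unconditional prediction.
    return (1 + k) * eps_cond - k * eps_uncond

eps_cond = torch.randn(1, 100, 200)
eps_uncond = torch.randn(1, 100, 200)
guided = cfg_blend(eps_cond, eps_uncond, 2.0)
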
- cfk = self.conditioning_free_k * (1 - self._scale_timesteps(t)[0].item() / self.num_timesteps) - else: - cfk = self.conditioning_free_k - model_output = (1 + cfk) * model_output - cfk * model_output_no_conditioning - - def process_xstart(x): - if denoised_fn is not None: - assert False - x = denoised_fn(x) - if clip_denoised: - return x.clamp(-1, 1) - assert False - return x - - if self.model_mean_type == ModelMeanType.PREVIOUS_X: - assert False - pred_xstart = process_xstart(self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)) - model_mean = model_output - elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: - if self.model_mean_type == ModelMeanType.START_X: - assert False - pred_xstart = process_xstart(model_output) - else: - pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)) - model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t) - else: - raise NotImplementedError(self.model_mean_type) - - assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape - return { - "mean": model_mean, - "variance": model_variance, - "log_variance": model_log_variance, - "pred_xstart": pred_xstart, - } - - def _predict_xstart_from_eps(self, x_t, t, eps): - assert x_t.shape == eps.shape - return ( - _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps - ) - - def _predict_xstart_from_xprev(self, x_t, t, xprev): - assert x_t.shape == xprev.shape - return ( # (xprev - coef2*x_t) / coef1 - _extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev - - _extract_into_tensor(self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape) * x_t - ) - - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): - return ( - _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart - ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - - def _scale_timesteps(self, t): - if self.rescale_timesteps: - return t.float() * (1000.0 / self.num_timesteps) - return t - - def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None): - """ - Compute the mean for the previous step, given a function cond_fn that - computes the gradient of a conditional log probability with respect to - x. In particular, cond_fn computes grad(log(p(y|x))), and we want to - condition on y. - - This uses the conditioning strategy from Sohl-Dickstein et al. (2015). - """ - gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs) - new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float() - return new_mean - - def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None): - """ - Compute what the p_mean_variance output would have been, should the - model's score function be conditioned by cond_fn. - - See condition_mean() for details on cond_fn. - - Unlike condition_mean(), this instead uses the conditioning strategy - from Song et al (2020). 
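
The _predict_xstart_from_eps / _predict_eps_from_xstart helpers above invert the forward noising step exactly; a NumPy round-trip check (toy sizes, illustrative names):

import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)
alpha_bar = np.cumprod(1.0 - betas)
t = 321
x0 = np.random.randn(4)
eps = np.random.randn(4)
x_t = np.sqrt(alpha_bar[t]) * x0 + np.sqrt(1.0 - alpha_bar[t]) * eps
x0_rec = np.sqrt(1.0 / alpha_bar[t]) * x_t - np.sqrt(1.0 / alpha_bar[t] - 1.0) * eps
assert np.allclose(x0, x0_rec)
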
- """ - alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) - - eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"]) - eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, self._scale_timesteps(t), **model_kwargs) - - out = p_mean_var.copy() - out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps) - out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t) - return out - - def p_sample( - self, - model, - x, - t, - clip_denoised=True, - denoised_fn=None, - cond_fn=None, - model_kwargs=None, - ): - """ - Sample x_{t-1} from the model at the given timestep. - - :param model: the model to sample from. - :param x: the current tensor at x_{t-1}. - :param t: the value of t, starting at 0 for the first diffusion step. - :param clip_denoised: if True, clip the x_start prediction to [-1, 1]. - :param denoised_fn: if not None, a function which applies to the - x_start prediction before it is used to sample. - :param cond_fn: if not None, this is a gradient function that acts - similarly to the model. - :param model_kwargs: if not None, a dict of extra keyword arguments to - pass to the model. This can be used for conditioning. - :return: a dict containing the following keys: - - 'sample': a random sample from the model. - - 'pred_xstart': a prediction of x_0. - """ - out = self.p_mean_variance( - model, - x, - t, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, - model_kwargs=model_kwargs, - ) - noise = th.randn_like(x) - nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) # no noise when t == 0 - if cond_fn is not None: - out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs) - sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise - return {"sample": sample, "pred_xstart": out["pred_xstart"]} - - def k_diffusion_sample_loop( - self, - k_sampler, - pbar, - model, - shape, - noise=None, # all given - clip_denoised=True, - denoised_fn=None, - cond_fn=None, - device=None, # ALL UNUSED - model_kwargs=None, # {'precomputed_aligned_embeddings': precomputed_embeddings}, - progress=False, # unused as well - ): - assert isinstance(model_kwargs, dict) - if device is None: - device = next(model.parameters()).device - s_in = noise.new_ones([noise.shape[0]]) - - def model_split(*args, **kwargs): - model_output = model(*args, **kwargs) - model_epsilon, model_var = th.split(model_output, model_output.shape[1] // 2, dim=1) - return model_epsilon, model_var - - # - """ - print(self.betas) - print(th.tensor(self.betas)) - noise_schedule = NoiseScheduleVP(schedule='discrete', betas=th.tensor(self.betas)) - """ - noise_schedule = NoiseScheduleVP(schedule="linear", continuous_beta_0=0.1 / 4, continuous_beta_1=20.0 / 4) - - def model_fn_prewrap(x, t, *args, **kwargs): - """ - x_in = torch.cat([x] * 2) - t_in = torch.cat([t_continuous] * 2) - c_in = torch.cat([unconditional_condition, condition]) - noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) - print(t) - print(self.timestep_map) - exit() - """ - """ - model_output = model(x, self._scale_timesteps(t*4000), **model_kwargs) - out = self.p_mean_variance(model, x, t*4000, model_kwargs=model_kwargs) - return out['pred_xstart'] - """ - x, _ = x.chunk(2) - t, _ = (t * 1000).chunk(2) - res = torch.cat( - [ - model_split(x, t, conditioning_free=True, **model_kwargs)[0], - model_split(x, t, **model_kwargs)[0], - ] - ) - pbar.update(1) - return res - - model_fn = model_wrapper( - model_fn_prewrap, - noise_schedule, - 
model_type="noise", # "noise" or "x_start" or "v" or "score" - model_kwargs=model_kwargs, - guidance_type="classifier-free", - condition=th.Tensor(1), - unconditional_condition=th.Tensor(1), - guidance_scale=self.conditioning_free_k, - ) - """ - model_fn = model_wrapper( - model_fn_prewrap, - noise_schedule, - model_type='x_start', - model_kwargs={} - ) - # - dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver") - x_sample = dpm_solver.sample( - noise, - steps=20, - order=3, - skip_type="time_uniform", - method="singlestep", - ) - """ - dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++") - x_sample = dpm_solver.sample( - noise, - steps=self.num_timesteps, - order=2, - skip_type="time_uniform", - method="multistep", - ) - #''' - return x_sample - - # HF DIFFUSION ATTEMPT - """ - from .hf_diffusion import EulerAncestralDiscreteScheduler - Scheduler = EulerAncestralDiscreteScheduler() - Scheduler.set_timesteps(100) - for timestep in Scheduler.timesteps: - noise_input = Scheduler.scale_model_input(noise, timestep) - ts = s_in * timestep - model_output = model(noise_input, ts, **model_kwargs) - model_epsilon, _model_var = th.split(model_output, model_output.shape[1]//2, dim=1) - noise, _x0 = Scheduler.step(model_epsilon, timestep, noise) - return noise - """ - - # KARRAS DIFFUSION ATTEMPT - """ - TRAINED_DIFFUSION_STEPS = 4000 # HARDCODED - ratio = TRAINED_DIFFUSION_STEPS/14.5 - def call_model(*args, **kwargs): - model_output = model(*args, **kwargs) - model_output, model_var_values = th.split(model_output, model_output.shape[1]//2, dim=1) - return model_output - print(get_sigmas_karras(self.num_timesteps, sigma_min=0.0, sigma_max=4000, device=device)) - exit() - sigmas = get_sigmas_karras(self.num_timesteps, sigma_min=0.03, sigma_max=14.5, device=device) - return k_sampler(call_model, noise, sigmas, extra_args=model_kwargs, disable=not progress) - ''' - sigmas = get_sigmas_karras(self.num_timesteps, sigma_min=0.03, sigma_max=14.5, device=device) - step = 0 # LMAO - global_sigmas = None - # - def fakemodel(x, t, **model_kwargs): - print(t,global_sigmas*ratio) - return model(x, t, **model_kwargs) - def denoised(x, sigmas, **extra_args): - t = th.tensor([self.num_timesteps-step-1] * shape[0], device=device) - nonlocal global_sigmas - global_sigmas = sigmas - with th.no_grad(): - out = self.p_sample( - fakemodel, - x, - t, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, - cond_fn=cond_fn, - model_kwargs=model_kwargs, - ) - return out["sample"] - def callback(d): - nonlocal step - step += 1 - - return k_sampler(denoised, noise, sigmas, extra_args=model_kwargs, callback=callback, disable=not progress) - ''' - """ - - def sample_loop(self, *args, **kwargs): - s = self.sampler - if s == "p": - return self.p_sample_loop(*args, **kwargs) - elif s == "ddim": - return self.ddim_sample_loop(*args, **kwargs) - elif s == "dpm++2m": - if self.conditioning_free is not True: - raise RuntimeError("cond_free must be true") - with tqdm(total=self.num_timesteps) as pbar: - return self.k_diffusion_sample_loop(K_DIFFUSION_SAMPLERS[s], pbar, *args, **kwargs) - else: - raise RuntimeError("sampler not impl") - - def p_sample_loop( - self, - model, - shape, - noise=None, - clip_denoised=True, - denoised_fn=None, - cond_fn=None, - model_kwargs=None, - device=None, - progress=False, - ): - """ - Generate samples from the model. - - :param model: the model module. - :param shape: the shape of the samples, (N, C, H, W). 
- :param noise: if specified, the noise from the encoder to sample. - Should be of the same shape as `shape`. - :param clip_denoised: if True, clip x_start predictions to [-1, 1]. - :param denoised_fn: if not None, a function which applies to the - x_start prediction before it is used to sample. - :param cond_fn: if not None, this is a gradient function that acts - similarly to the model. - :param model_kwargs: if not None, a dict of extra keyword arguments to - pass to the model. This can be used for conditioning. - :param device: if specified, the device to create the samples on. - If not specified, use a model parameter's device. - :param progress: if True, show a tqdm progress bar. - :return: a non-differentiable batch of samples. - """ - final = None - for sample in self.p_sample_loop_progressive( - model, - shape, - noise=noise, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, - cond_fn=cond_fn, - model_kwargs=model_kwargs, - device=device, - progress=progress, - ): - final = sample - return final["sample"] - - def p_sample_loop_progressive( - self, - model, - shape, - noise=None, - clip_denoised=True, - denoised_fn=None, - cond_fn=None, - model_kwargs=None, - device=None, - progress=False, - ): - """ - Generate samples from the model and yield intermediate samples from - each timestep of diffusion. - - Arguments are the same as p_sample_loop(). - Returns a generator over dicts, where each dict is the return value of - p_sample(). - """ - if device is None: - device = next(model.parameters()).device - assert isinstance(shape, (tuple, list)) - if noise is not None: - img = noise - else: - img = th.randn(*shape, device=device) - indices = list(range(self.num_timesteps))[::-1] - - for i in tqdm(indices, disable=not progress): - t = th.tensor([i] * shape[0], device=device) - with th.no_grad(): - out = self.p_sample( - model, - img, - t, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, - cond_fn=cond_fn, - model_kwargs=model_kwargs, - ) - yield out - img = out["sample"] - - def ddim_sample( - self, - model, - x, - t, - clip_denoised=True, - denoised_fn=None, - cond_fn=None, - model_kwargs=None, - eta=0.0, - ): - """ - Sample x_{t-1} from the model using DDIM. - - Same usage as p_sample(). - """ - out = self.p_mean_variance( - model, - x, - t, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, - model_kwargs=model_kwargs, - ) - if cond_fn is not None: - out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs) - - # Usually our model outputs epsilon, but we re-derive it - # in case we used x_start or x_prev prediction. - eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) - - alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) - alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) - sigma = eta * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) * th.sqrt(1 - alpha_bar / alpha_bar_prev) - # Equation 12. - noise = th.randn_like(x) - mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev - sigma**2) * eps - nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) # no noise when t == 0 - sample = mean_pred + nonzero_mask * sigma * noise - return {"sample": sample, "pred_xstart": out["pred_xstart"]} - - def ddim_reverse_sample( - self, - model, - x, - t, - clip_denoised=True, - denoised_fn=None, - model_kwargs=None, - eta=0.0, - ): - """ - Sample x_{t+1} from the model using DDIM reverse ODE. 
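
ddim_sample above implements "Equation 12" of the DDIM paper; the per-step update, stripped of the tensor bookkeeping, looks like this (eta = 0 gives the deterministic path; the t == 0 masking is omitted):

import numpy as np

def ddim_step(pred_x0, eps, a_bar_t, a_bar_prev, eta=0.0):
    sigma = eta * np.sqrt((1 - a_bar_prev) / (1 - a_bar_t)) * np.sqrt(1 - a_bar_t / a_bar_prev)
    mean = pred_x0 * np.sqrt(a_bar_prev) + np.sqrt(1 - a_bar_prev - sigma**2) * eps
    return mean + sigma * np.random.randn(*np.shape(pred_x0))
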
- """ - assert eta == 0.0, "Reverse ODE only for deterministic path" - out = self.p_mean_variance( - model, - x, - t, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, - model_kwargs=model_kwargs, - ) - # Usually our model outputs epsilon, but we re-derive it - # in case we used x_start or x_prev prediction. - eps = ( - _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x - out["pred_xstart"] - ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape) - alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape) - - # Equation 12. reversed - mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps - - return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]} - - def ddim_sample_loop( - self, - model, - shape, - noise=None, - clip_denoised=True, - denoised_fn=None, - cond_fn=None, - model_kwargs=None, - device=None, - progress=False, - eta=0.0, - ): - """ - Generate samples from the model using DDIM. - - Same usage as p_sample_loop(). - """ - final = None - for sample in self.ddim_sample_loop_progressive( - model, - shape, - noise=noise, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, - cond_fn=cond_fn, - model_kwargs=model_kwargs, - device=device, - progress=progress, - eta=eta, - ): - final = sample - return final["sample"] - - def ddim_sample_loop_progressive( - self, - model, - shape, - noise=None, - clip_denoised=True, - denoised_fn=None, - cond_fn=None, - model_kwargs=None, - device=None, - progress=False, - eta=0.0, - ): - """ - Use DDIM to sample from the model and yield intermediate samples from - each timestep of DDIM. - - Same usage as p_sample_loop_progressive(). - """ - if device is None: - device = next(model.parameters()).device - assert isinstance(shape, (tuple, list)) - if noise is not None: - img = noise - else: - img = th.randn(*shape, device=device) - indices = list(range(self.num_timesteps))[::-1] - - if progress: - # Lazy import so that we don't depend on tqdm. - from tqdm.auto import tqdm - - indices = tqdm(indices, disable=not progress) - - for i in indices: - t = th.tensor([i] * shape[0], device=device) - with th.no_grad(): - out = self.ddim_sample( - model, - img, - t, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, - cond_fn=cond_fn, - model_kwargs=model_kwargs, - eta=eta, - ) - yield out - img = out["sample"] - - def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None): - """ - Get a term for the variational lower-bound. - - The resulting units are bits (rather than nats, as one might expect). - This allows for comparison to other papers. - - :return: a dict with the following keys: - - 'output': a shape [N] tensor of NLLs or KLs. - - 'pred_xstart': the x_0 predictions. 
- """ - true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t) - out = self.p_mean_variance(model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs) - kl = normal_kl(true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]) - kl = mean_flat(kl) / np.log(2.0) - - decoder_nll = -discretized_gaussian_log_likelihood( - x_start, means=out["mean"], log_scales=0.5 * out["log_variance"] - ) - assert decoder_nll.shape == x_start.shape - decoder_nll = mean_flat(decoder_nll) / np.log(2.0) - - # At the first timestep return the decoder NLL, - # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t)) - output = th.where((t == 0), decoder_nll, kl) - return {"output": output, "pred_xstart": out["pred_xstart"]} - - def training_losses(self, model, x_start, t, model_kwargs=None, noise=None): - """ - Compute training losses for a single timestep. - - :param model: the model to evaluate loss on. - :param x_start: the [N x C x ...] tensor of inputs. - :param t: a batch of timestep indices. - :param model_kwargs: if not None, a dict of extra keyword arguments to - pass to the model. This can be used for conditioning. - :param noise: if specified, the specific Gaussian noise to try to remove. - :return: a dict with the key "loss" containing a tensor of shape [N]. - Some mean or variance settings may also have other keys. - """ - if model_kwargs is None: - model_kwargs = {} - if noise is None: - noise = th.randn_like(x_start) - x_t = self.q_sample(x_start, t, noise=noise) - - terms = {} - - if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL: - # TODO: support multiple model outputs for this mode. - terms["loss"] = self._vb_terms_bpd( - model=model, - x_start=x_start, - x_t=x_t, - t=t, - clip_denoised=False, - model_kwargs=model_kwargs, - )["output"] - if self.loss_type == LossType.RESCALED_KL: - terms["loss"] *= self.num_timesteps - elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE: - model_outputs = model(x_t, self._scale_timesteps(t), **model_kwargs) - if isinstance(model_outputs, tuple): - model_output = model_outputs[0] - terms["extra_outputs"] = model_outputs[1:] - else: - model_output = model_outputs - - if self.model_var_type in [ - ModelVarType.LEARNED, - ModelVarType.LEARNED_RANGE, - ]: - B, C = x_t.shape[:2] - assert model_output.shape == (B, C * 2, *x_t.shape[2:]) - model_output, model_var_values = th.split(model_output, C, dim=1) - # Learn the variance using the variational bound, but don't let - # it affect our mean prediction. - frozen_out = th.cat([model_output.detach(), model_var_values], dim=1) - terms["vb"] = self._vb_terms_bpd( - model=lambda *args, r=frozen_out: r, - x_start=x_start, - x_t=x_t, - t=t, - clip_denoised=False, - )["output"] - if self.loss_type == LossType.RESCALED_MSE: - # Divide by 1000 for equivalence with initial implementation. - # Without a factor of 1/1000, the VB term hurts the MSE term. - terms["vb"] *= self.num_timesteps / 1000.0 - - if self.model_mean_type == ModelMeanType.PREVIOUS_X: - target = self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)[0] - x_start_pred = torch.zeros(x_start) # Not supported. 
- elif self.model_mean_type == ModelMeanType.START_X: - target = x_start - x_start_pred = model_output - elif self.model_mean_type == ModelMeanType.EPSILON: - target = noise - x_start_pred = self._predict_xstart_from_eps(x_t, t, model_output) - else: - raise NotImplementedError(self.model_mean_type) - assert model_output.shape == target.shape == x_start.shape - terms["mse"] = mean_flat((target - model_output) ** 2) - terms["x_start_predicted"] = x_start_pred - if "vb" in terms: - terms["loss"] = terms["mse"] + terms["vb"] - else: - terms["loss"] = terms["mse"] - else: - raise NotImplementedError(self.loss_type) - - return terms - - def autoregressive_training_losses( - self, - model, - x_start, - t, - model_output_keys, - gd_out_key, - model_kwargs=None, - noise=None, - ): - """ - Compute training losses for a single timestep. - - :param model: the model to evaluate loss on. - :param x_start: the [N x C x ...] tensor of inputs. - :param t: a batch of timestep indices. - :param model_kwargs: if not None, a dict of extra keyword arguments to - pass to the model. This can be used for conditioning. - :param noise: if specified, the specific Gaussian noise to try to remove. - :return: a dict with the key "loss" containing a tensor of shape [N]. - Some mean or variance settings may also have other keys. - """ - if model_kwargs is None: - model_kwargs = {} - if noise is None: - noise = th.randn_like(x_start) - x_t = self.q_sample(x_start, t, noise=noise) - terms = {} - if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL: - assert False # not currently supported for this type of diffusion. - elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE: - model_outputs = model(x_t, x_start, self._scale_timesteps(t), **model_kwargs) - terms.update({k: o for k, o in zip(model_output_keys, model_outputs)}) - model_output = terms[gd_out_key] - if self.model_var_type in [ - ModelVarType.LEARNED, - ModelVarType.LEARNED_RANGE, - ]: - B, C = x_t.shape[:2] - assert model_output.shape == (B, C, 2, *x_t.shape[2:]) - model_output, model_var_values = ( - model_output[:, :, 0], - model_output[:, :, 1], - ) - # Learn the variance using the variational bound, but don't let - # it affect our mean prediction. - frozen_out = th.cat([model_output.detach(), model_var_values], dim=1) - terms["vb"] = self._vb_terms_bpd( - model=lambda *args, r=frozen_out: r, - x_start=x_start, - x_t=x_t, - t=t, - clip_denoised=False, - )["output"] - if self.loss_type == LossType.RESCALED_MSE: - # Divide by 1000 for equivalence with initial implementation. - # Without a factor of 1/1000, the VB term hurts the MSE term. - terms["vb"] *= self.num_timesteps / 1000.0 - - if self.model_mean_type == ModelMeanType.PREVIOUS_X: - target = self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)[0] - x_start_pred = torch.zeros(x_start) # Not supported. 
- elif self.model_mean_type == ModelMeanType.START_X: - target = x_start - x_start_pred = model_output - elif self.model_mean_type == ModelMeanType.EPSILON: - target = noise - x_start_pred = self._predict_xstart_from_eps(x_t, t, model_output) - else: - raise NotImplementedError(self.model_mean_type) - assert model_output.shape == target.shape == x_start.shape - terms["mse"] = mean_flat((target - model_output) ** 2) - terms["x_start_predicted"] = x_start_pred - if "vb" in terms: - terms["loss"] = terms["mse"] + terms["vb"] - else: - terms["loss"] = terms["mse"] - else: - raise NotImplementedError(self.loss_type) - - return terms - - def _prior_bpd(self, x_start): - """ - Get the prior KL term for the variational lower-bound, measured in - bits-per-dim. - - This term can't be optimized, as it only depends on the encoder. - - :param x_start: the [N x C x ...] tensor of inputs. - :return: a batch of [N] KL values (in bits), one per batch element. - """ - batch_size = x_start.shape[0] - t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) - qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) - kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) - return mean_flat(kl_prior) / np.log(2.0) - - def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None): - """ - Compute the entire variational lower-bound, measured in bits-per-dim, - as well as other related quantities. - - :param model: the model to evaluate loss on. - :param x_start: the [N x C x ...] tensor of inputs. - :param clip_denoised: if True, clip denoised samples. - :param model_kwargs: if not None, a dict of extra keyword arguments to - pass to the model. This can be used for conditioning. - - :return: a dict containing the following keys: - - total_bpd: the total variational lower-bound, per batch element. - - prior_bpd: the prior term in the lower-bound. - - vb: an [N x T] tensor of terms in the lower-bound. - - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep. - - mse: an [N x T] tensor of epsilon MSEs for each timestep. - """ - device = x_start.device - batch_size = x_start.shape[0] - - vb = [] - xstart_mse = [] - mse = [] - for t in list(range(self.num_timesteps))[::-1]: - t_batch = th.tensor([t] * batch_size, device=device) - noise = th.randn_like(x_start) - x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise) - # Calculate VLB term at the current timestep - with th.no_grad(): - out = self._vb_terms_bpd( - model, - x_start=x_start, - x_t=x_t, - t=t_batch, - clip_denoised=clip_denoised, - model_kwargs=model_kwargs, - ) - vb.append(out["output"]) - xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2)) - eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"]) - mse.append(mean_flat((eps - noise) ** 2)) - - vb = th.stack(vb, dim=1) - xstart_mse = th.stack(xstart_mse, dim=1) - mse = th.stack(mse, dim=1) - - prior_bpd = self._prior_bpd(x_start) - total_bpd = vb.sum(dim=1) + prior_bpd - return { - "total_bpd": total_bpd, - "prior_bpd": prior_bpd, - "vb": vb, - "xstart_mse": xstart_mse, - "mse": mse, - } - - -class SpacedDiffusion(GaussianDiffusion): - """ - A diffusion process which can skip steps in a base diffusion process. - - :param use_timesteps: a collection (sequence or set) of timesteps from the - original diffusion process to retain. - :param kwargs: the kwargs to create the base diffusion process. 
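
The __init__ below re-derives betas so that the retained subsequence keeps the original cumulative alphas; a small standalone NumPy check of that telescoping identity (illustrative values):

import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)
alpha_bar = np.cumprod(1.0 - betas)
keep = sorted(range(0, 1000, 20))        # e.g. a 50-step DDIM-style subset
last, new_betas = 1.0, []
for i in keep:
    new_betas.append(1 - alpha_bar[i] / last)
    last = alpha_bar[i]
# The product of (1 - new_beta) over the kept steps reproduces alpha_bar there.
assert np.allclose(np.cumprod(1.0 - np.array(new_betas)), alpha_bar[keep])
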
- """ - - def __init__(self, use_timesteps, **kwargs): - self.use_timesteps = set(use_timesteps) - self.timestep_map = [] - self.original_num_steps = len(kwargs["betas"]) - - base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa - last_alpha_cumprod = 1.0 - new_betas = [] - for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod): - if i in self.use_timesteps: - new_betas.append(1 - alpha_cumprod / last_alpha_cumprod) - last_alpha_cumprod = alpha_cumprod - self.timestep_map.append(i) - kwargs["betas"] = np.array(new_betas) - super().__init__(**kwargs) - - def p_mean_variance(self, model, *args, **kwargs): # pylint: disable=signature-differs - return super().p_mean_variance(self._wrap_model(model), *args, **kwargs) - - def training_losses(self, model, *args, **kwargs): # pylint: disable=signature-differs - return super().training_losses(self._wrap_model(model), *args, **kwargs) - - def autoregressive_training_losses(self, model, *args, **kwargs): # pylint: disable=signature-differs - return super().autoregressive_training_losses(self._wrap_model(model, True), *args, **kwargs) - - def condition_mean(self, cond_fn, *args, **kwargs): - return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs) - - def condition_score(self, cond_fn, *args, **kwargs): - return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs) - - def _wrap_model(self, model, autoregressive=False): - if isinstance(model, _WrappedModel) or isinstance(model, _WrappedAutoregressiveModel): - return model - mod = _WrappedAutoregressiveModel if autoregressive else _WrappedModel - return mod(model, self.timestep_map, self.rescale_timesteps, self.original_num_steps) - - def _scale_timesteps(self, t): - # Scaling is done by the wrapped model. - return t - - -def space_timesteps(num_timesteps, section_counts): - """ - Create a list of timesteps to use from an original diffusion process, - given the number of timesteps we want to take from equally-sized portions - of the original process. - - For example, if there's 300 timesteps and the section counts are [10,15,20] - then the first 100 timesteps are strided to be 10 timesteps, the second 100 - are strided to be 15 timesteps, and the final 100 are strided to be 20. - - If the stride is a string starting with "ddim", then the fixed striding - from the DDIM paper is used, and only one section is allowed. - - :param num_timesteps: the number of diffusion steps in the original - process to divide up. - :param section_counts: either a list of numbers, or a string containing - comma-separated numbers, indicating the step count - per section. As a special case, use "ddimN" where N - is a number of steps to use the striding from the - DDIM paper. - :return: a set of diffusion steps from the original process to use. 
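
Concrete values for the striding described above; a short sketch assuming space_timesteps is importable, with results that can be verified against the implementation that follows:

steps = space_timesteps(1000, "ddim50")
assert steps == set(range(0, 1000, 20))      # 50 evenly strided DDIM-style steps
sections = space_timesteps(300, [10, 15, 20])
assert len(sections) == 10 + 15 + 20
# 10 of those steps fall in [0, 100), 15 in [100, 200) and 20 in [200, 300).
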
- """ - if isinstance(section_counts, str): - if section_counts.startswith("ddim"): - desired_count = int(section_counts[len("ddim") :]) - for i in range(1, num_timesteps): - if len(range(0, num_timesteps, i)) == desired_count: - return set(range(0, num_timesteps, i)) - raise ValueError(f"cannot create exactly {num_timesteps} steps with an integer stride") - section_counts = [int(x) for x in section_counts.split(",")] - size_per = num_timesteps // len(section_counts) - extra = num_timesteps % len(section_counts) - start_idx = 0 - all_steps = [] - for i, section_count in enumerate(section_counts): - size = size_per + (1 if i < extra else 0) - if size < section_count: - raise ValueError(f"cannot divide section of {size} steps into {section_count}") - if section_count <= 1: - frac_stride = 1 - else: - frac_stride = (size - 1) / (section_count - 1) - cur_idx = 0.0 - taken_steps = [] - for _ in range(section_count): - taken_steps.append(start_idx + round(cur_idx)) - cur_idx += frac_stride - all_steps += taken_steps - start_idx += size - return set(all_steps) - - -class _WrappedModel: - def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps): - self.model = model - self.timestep_map = timestep_map - self.rescale_timesteps = rescale_timesteps - self.original_num_steps = original_num_steps - - def __call__(self, x, ts, **kwargs): - map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype) - new_ts = map_tensor[ts] - if self.rescale_timesteps: - new_ts = new_ts.float() * (1000.0 / self.original_num_steps) - return self.model(x, new_ts, **kwargs) - - -class _WrappedAutoregressiveModel: - def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps): - self.model = model - self.timestep_map = timestep_map - self.rescale_timesteps = rescale_timesteps - self.original_num_steps = original_num_steps - - def __call__(self, x, x0, ts, **kwargs): - map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype) - new_ts = map_tensor[ts] - if self.rescale_timesteps: - new_ts = new_ts.float() * (1000.0 / self.original_num_steps) - return self.model(x, x0, new_ts, **kwargs) - - -def _extract_into_tensor(arr, timesteps, broadcast_shape): - """ - Extract values from a 1-D numpy array for a batch of indices. - - :param arr: the 1-D numpy array. - :param timesteps: a tensor of indices into the array to extract. - :param broadcast_shape: a larger shape of K dimensions with the batch - dimension equal to the length of timesteps. - :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims. 
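
Shape behaviour of _extract_into_tensor below, for reference (toy sizes, illustrative call):

import numpy as np
import torch

arr = np.linspace(0.0, 1.0, 1000)            # e.g. alphas_cumprod
timesteps = torch.tensor([3, 500, 999])
out = _extract_into_tensor(arr, timesteps, (3, 100, 200))
assert out.shape == (3, 100, 200)
# out[1] is a (100, 200) block filled with arr[500], broadcast for batch item 1.
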
- """ - res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float() - while len(res.shape) < len(broadcast_shape): - res = res[..., None] - return res.expand(broadcast_shape) diff --git a/TTS/tts/layers/xtts/vocoder.py b/TTS/tts/layers/xtts/vocoder.py deleted file mode 100644 index 0f4991b8..00000000 --- a/TTS/tts/layers/xtts/vocoder.py +++ /dev/null @@ -1,385 +0,0 @@ -import json -from dataclasses import dataclass -from enum import Enum -from typing import Callable, Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F - -MAX_WAV_VALUE = 32768.0 - - -class KernelPredictor(torch.nn.Module): - """Kernel predictor for the location-variable convolutions""" - - def __init__( - self, - cond_channels, - conv_in_channels, - conv_out_channels, - conv_layers, - conv_kernel_size=3, - kpnet_hidden_channels=64, - kpnet_conv_size=3, - kpnet_dropout=0.0, - kpnet_nonlinear_activation="LeakyReLU", - kpnet_nonlinear_activation_params={"negative_slope": 0.1}, - ): - """ - Args: - cond_channels (int): number of channel for the conditioning sequence, - conv_in_channels (int): number of channel for the input sequence, - conv_out_channels (int): number of channel for the output sequence, - conv_layers (int): number of layers - """ - super().__init__() - - self.conv_in_channels = conv_in_channels - self.conv_out_channels = conv_out_channels - self.conv_kernel_size = conv_kernel_size - self.conv_layers = conv_layers - - kpnet_kernel_channels = conv_in_channels * conv_out_channels * conv_kernel_size * conv_layers # l_w - kpnet_bias_channels = conv_out_channels * conv_layers # l_b - - self.input_conv = nn.Sequential( - nn.utils.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)), - getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), - ) - - self.residual_convs = nn.ModuleList() - padding = (kpnet_conv_size - 1) // 2 - for _ in range(3): - self.residual_convs.append( - nn.Sequential( - nn.Dropout(kpnet_dropout), - nn.utils.weight_norm( - nn.Conv1d( - kpnet_hidden_channels, - kpnet_hidden_channels, - kpnet_conv_size, - padding=padding, - bias=True, - ) - ), - getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), - nn.utils.weight_norm( - nn.Conv1d( - kpnet_hidden_channels, - kpnet_hidden_channels, - kpnet_conv_size, - padding=padding, - bias=True, - ) - ), - getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), - ) - ) - self.kernel_conv = nn.utils.weight_norm( - nn.Conv1d( - kpnet_hidden_channels, - kpnet_kernel_channels, - kpnet_conv_size, - padding=padding, - bias=True, - ) - ) - self.bias_conv = nn.utils.weight_norm( - nn.Conv1d( - kpnet_hidden_channels, - kpnet_bias_channels, - kpnet_conv_size, - padding=padding, - bias=True, - ) - ) - - def forward(self, c): - """ - Args: - c (Tensor): the conditioning sequence (batch, cond_channels, cond_length) - """ - batch, _, cond_length = c.shape - c = self.input_conv(c) - for residual_conv in self.residual_convs: - residual_conv.to(c.device) - c = c + residual_conv(c) - k = self.kernel_conv(c) - b = self.bias_conv(c) - kernels = k.contiguous().view( - batch, - self.conv_layers, - self.conv_in_channels, - self.conv_out_channels, - self.conv_kernel_size, - cond_length, - ) - bias = b.contiguous().view( - batch, - self.conv_layers, - self.conv_out_channels, - cond_length, - ) - - return kernels, bias - - def remove_weight_norm(self): - nn.utils.remove_weight_norm(self.input_conv[0]) - nn.utils.remove_weight_norm(self.kernel_conv) 
- nn.utils.remove_weight_norm(self.bias_conv) - for block in self.residual_convs: - nn.utils.remove_weight_norm(block[1]) - nn.utils.remove_weight_norm(block[3]) - - -class LVCBlock(torch.nn.Module): - """the location-variable convolutions""" - - def __init__( - self, - in_channels, - cond_channels, - stride, - dilations=[1, 3, 9, 27], - lReLU_slope=0.2, - conv_kernel_size=3, - cond_hop_length=256, - kpnet_hidden_channels=64, - kpnet_conv_size=3, - kpnet_dropout=0.0, - ): - super().__init__() - - self.cond_hop_length = cond_hop_length - self.conv_layers = len(dilations) - self.conv_kernel_size = conv_kernel_size - - self.kernel_predictor = KernelPredictor( - cond_channels=cond_channels, - conv_in_channels=in_channels, - conv_out_channels=2 * in_channels, - conv_layers=len(dilations), - conv_kernel_size=conv_kernel_size, - kpnet_hidden_channels=kpnet_hidden_channels, - kpnet_conv_size=kpnet_conv_size, - kpnet_dropout=kpnet_dropout, - kpnet_nonlinear_activation_params={"negative_slope": lReLU_slope}, - ) - - self.convt_pre = nn.Sequential( - nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm( - nn.ConvTranspose1d( - in_channels, - in_channels, - 2 * stride, - stride=stride, - padding=stride // 2 + stride % 2, - output_padding=stride % 2, - ) - ), - ) - - self.conv_blocks = nn.ModuleList() - for dilation in dilations: - self.conv_blocks.append( - nn.Sequential( - nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm( - nn.Conv1d( - in_channels, - in_channels, - conv_kernel_size, - padding=dilation * (conv_kernel_size - 1) // 2, - dilation=dilation, - ) - ), - nn.LeakyReLU(lReLU_slope), - ) - ) - - def forward(self, x, c): - """forward propagation of the location-variable convolutions. - Args: - x (Tensor): the input sequence (batch, in_channels, in_length) - c (Tensor): the conditioning sequence (batch, cond_channels, cond_length) - - Returns: - Tensor: the output sequence (batch, in_channels, in_length) - """ - _, in_channels, _ = x.shape # (B, c_g, L') - - x = self.convt_pre(x) # (B, c_g, stride * L') - kernels, bias = self.kernel_predictor(c) - - for i, conv in enumerate(self.conv_blocks): - output = conv(x) # (B, c_g, stride * L') - - k = kernels[:, i, :, :, :, :] # (B, 2 * c_g, c_g, kernel_size, cond_length) - b = bias[:, i, :, :] # (B, 2 * c_g, cond_length) - - output = self.location_variable_convolution( - output, k, b, hop_size=self.cond_hop_length - ) # (B, 2 * c_g, stride * L'): LVC - x = x + torch.sigmoid(output[:, :in_channels, :]) * torch.tanh( - output[:, in_channels:, :] - ) # (B, c_g, stride * L'): GAU - - return x - - def location_variable_convolution(self, x, kernel, bias, dilation=1, hop_size=256): - """perform location-variable convolution operation on the input sequence (x) using the local convolution kernl. - Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100. - Args: - x (Tensor): the input sequence (batch, in_channels, in_length). - kernel (Tensor): the local convolution kernel (batch, in_channel, out_channels, kernel_size, kernel_length) - bias (Tensor): the bias for the local convolution (batch, out_channels, kernel_length) - dilation (int): the dilation of convolution. - hop_size (int): the hop_size of the conditioning sequence. - Returns: - (Tensor): the output sequence after performing local convolution. (batch, out_channels, in_length). 
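
The gating used in LVCBlock.forward above is a standard gated activation unit: split the LVC output in two along channels, gate tanh by sigmoid, and add the result residually. A minimal sketch (names and sizes are illustrative):

import torch

def gau(residual, lvc_out):
    # lvc_out has 2 * C channels: the first half gates, the second half carries content.
    c = residual.shape[1]
    return residual + torch.sigmoid(lvc_out[:, :c]) * torch.tanh(lvc_out[:, c:])

x = torch.randn(2, 32, 160)
lvc_out = torch.randn(2, 64, 160)
y = gau(x, lvc_out)              # same shape as x
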
- """ - batch, _, in_length = x.shape - batch, _, out_channels, kernel_size, kernel_length = kernel.shape - assert in_length == (kernel_length * hop_size), "length of (x, kernel) is not matched" - - padding = dilation * int((kernel_size - 1) / 2) - x = F.pad(x, (padding, padding), "constant", 0) # (batch, in_channels, in_length + 2*padding) - x = x.unfold(2, hop_size + 2 * padding, hop_size) # (batch, in_channels, kernel_length, hop_size + 2*padding) - - if hop_size < dilation: - x = F.pad(x, (0, dilation), "constant", 0) - x = x.unfold( - 3, dilation, dilation - ) # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation) - x = x[:, :, :, :, :hop_size] - x = x.transpose(3, 4) # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation) - x = x.unfold(4, kernel_size, 1) # (batch, in_channels, kernel_length, dilation, _, kernel_size) - - o = torch.einsum("bildsk,biokl->bolsd", x, kernel) - o = o.to(memory_format=torch.channels_last_3d) - bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d) - o = o + bias - o = o.contiguous().view(batch, out_channels, -1) - - return o - - def remove_weight_norm(self): - self.kernel_predictor.remove_weight_norm() - nn.utils.remove_weight_norm(self.convt_pre[1]) - for block in self.conv_blocks: - nn.utils.remove_weight_norm(block[1]) - - -class UnivNetGenerator(nn.Module): - """ - UnivNet Generator - - Originally from https://github.com/mindslab-ai/univnet/blob/master/model/generator.py. - """ - - def __init__( - self, - noise_dim=64, - channel_size=32, - dilations=[1, 3, 9, 27], - strides=[8, 8, 4], - lReLU_slope=0.2, - kpnet_conv_size=3, - # Below are MEL configurations options that this generator requires. - hop_length=256, - n_mel_channels=100, - ): - super(UnivNetGenerator, self).__init__() - self.mel_channel = n_mel_channels - self.noise_dim = noise_dim - self.hop_length = hop_length - channel_size = channel_size - kpnet_conv_size = kpnet_conv_size - - self.res_stack = nn.ModuleList() - hop_length = 1 - for stride in strides: - hop_length = stride * hop_length - self.res_stack.append( - LVCBlock( - channel_size, - n_mel_channels, - stride=stride, - dilations=dilations, - lReLU_slope=lReLU_slope, - cond_hop_length=hop_length, - kpnet_conv_size=kpnet_conv_size, - ) - ) - - self.conv_pre = nn.utils.weight_norm(nn.Conv1d(noise_dim, channel_size, 7, padding=3, padding_mode="reflect")) - - self.conv_post = nn.Sequential( - nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm(nn.Conv1d(channel_size, 1, 7, padding=3, padding_mode="reflect")), - nn.Tanh(), - ) - - def forward(self, c, z): - """ - Args: - c (Tensor): the conditioning sequence of mel-spectrogram (batch, mel_channels, in_length) - z (Tensor): the noise sequence (batch, noise_dim, in_length) - - """ - z = self.conv_pre(z) # (B, c_g, L) - - for res_block in self.res_stack: - res_block.to(z.device) - z = res_block(z, c) # (B, c_g, L * s_0 * ... 
* s_i) - - z = self.conv_post(z) # (B, 1, L * 256) - - return z - - def eval(self, inference=False): - super(UnivNetGenerator, self).eval() - # don't remove weight norm while validation in training loop - if inference: - self.remove_weight_norm() - - def remove_weight_norm(self): - nn.utils.remove_weight_norm(self.conv_pre) - - for layer in self.conv_post: - if len(layer.state_dict()) != 0: - nn.utils.remove_weight_norm(layer) - - for res_block in self.res_stack: - res_block.remove_weight_norm() - - def inference(self, c, z=None): - # pad input mel with zeros to cut artifact - # see https://github.com/seungwonpark/melgan/issues/8 - zero = torch.full((c.shape[0], self.mel_channel, 10), -11.5129).to(c.device) - mel = torch.cat((c, zero), dim=2) - - if z is None: - z = torch.randn(c.shape[0], self.noise_dim, mel.size(2)).to(mel.device) - - audio = self.forward(mel, z) - audio = audio[:, :, : -(self.hop_length * 10)] - audio = audio.clamp(min=-1, max=1) - return audio - - -if __name__ == "__main__": - model = UnivNetGenerator() - - c = torch.randn(3, 100, 10) - z = torch.randn(3, 64, 10) - print(c.shape) - - y = model(c, z) - print(y.shape) - assert y.shape == torch.Size([3, 1, 2560]) - - pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) - print(pytorch_total_params) diff --git a/TTS/tts/models/base_tacotron.py b/TTS/tts/models/base_tacotron.py index 4aaf5261..f38dace2 100644 --- a/TTS/tts/models/base_tacotron.py +++ b/TTS/tts/models/base_tacotron.py @@ -252,7 +252,12 @@ class BaseTacotron(BaseTTS): def compute_capacitron_VAE_embedding(self, inputs, reference_mel_info, text_info=None, speaker_embedding=None): """Capacitron Variational Autoencoder""" - (VAE_outputs, posterior_distribution, prior_distribution, capacitron_beta,) = self.capacitron_vae_layer( + ( + VAE_outputs, + posterior_distribution, + prior_distribution, + capacitron_beta, + ) = self.capacitron_vae_layer( reference_mel_info, text_info, speaker_embedding, # pylint: disable=not-callable diff --git a/TTS/tts/models/tortoise.py b/TTS/tts/models/tortoise.py index c8cfcfdd..16644ff9 100644 --- a/TTS/tts/models/tortoise.py +++ b/TTS/tts/models/tortoise.py @@ -676,7 +676,12 @@ class Tortoise(BaseTTS): ), "Too much text provided. Break the text up into separate segments and re-try inference." 
if voice_samples is not None: - (auto_conditioning, diffusion_conditioning, _, _,) = self.get_conditioning_latents( + ( + auto_conditioning, + diffusion_conditioning, + _, + _, + ) = self.get_conditioning_latents( voice_samples, return_mels=True, latent_averaging_mode=latent_averaging_mode, diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 477f31bf..af94675b 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -9,13 +9,10 @@ import torchaudio from coqpit import Coqpit from TTS.tts.layers.tortoise.audio_utils import denormalize_tacotron_mel, wav_to_univnet_mel -from TTS.tts.layers.tortoise.diffusion_decoder import DiffusionTts -from TTS.tts.layers.xtts.diffusion import SpacedDiffusion, get_named_beta_schedule, space_timesteps from TTS.tts.layers.xtts.gpt import GPT from TTS.tts.layers.xtts.hifigan_decoder import HifiDecoder from TTS.tts.layers.xtts.stream_generator import init_stream_support from TTS.tts.layers.xtts.tokenizer import VoiceBpeTokenizer -from TTS.tts.layers.xtts.vocoder import UnivNetGenerator from TTS.tts.models.base_tts import BaseTTS from TTS.utils.io import load_fsspec @@ -168,12 +165,10 @@ class XttsAudioConfig(Coqpit): Args: sample_rate (int): The sample rate in which the GPT operates. - diffusion_sample_rate (int): The sample rate of the diffusion audio waveform. output_sample_rate (int): The sample rate of the output audio waveform. """ sample_rate: int = 22050 - diffusion_sample_rate: int = 24000 output_sample_rate: int = 24000 @@ -189,7 +184,6 @@ class XttsArgs(Coqpit): clvp_checkpoint (str, optional): The checkpoint for the ConditionalLatentVariablePerseq model. Defaults to None. decoder_checkpoint (str, optional): The checkpoint for the DiffTTS model. Defaults to None. num_chars (int, optional): The maximum number of characters to generate. Defaults to 255. - use_hifigan (bool, optional): Whether to use hifigan with implicit enhancement or diffusion + univnet as a decoder. Defaults to True. For GPT model: gpt_max_audio_tokens (int, optional): The maximum mel tokens for the autoregressive model. Defaults to 604. 
@@ -227,7 +221,6 @@ class XttsArgs(Coqpit): clvp_checkpoint: str = None decoder_checkpoint: str = None num_chars: int = 255 - use_hifigan: bool = True # XTTS GPT Encoder params tokenizer_file: str = "" @@ -324,32 +317,15 @@ class Xtts(BaseTTS): code_stride_len=self.args.gpt_code_stride_len, ) - if self.args.use_hifigan: - self.hifigan_decoder = HifiDecoder( - input_sample_rate=self.args.input_sample_rate, - output_sample_rate=self.args.output_sample_rate, - output_hop_length=self.args.output_hop_length, - ar_mel_length_compression=self.args.gpt_code_stride_len, - decoder_input_dim=self.args.decoder_input_dim, - d_vector_dim=self.args.d_vector_dim, - cond_d_vector_in_each_upsampling_layer=self.args.cond_d_vector_in_each_upsampling_layer, - ) - - if not self.args.use_hifigan: - self.diffusion_decoder = DiffusionTts( - model_channels=self.args.diff_model_channels, - num_layers=self.args.diff_num_layers, - in_channels=self.args.diff_in_channels, - out_channels=self.args.diff_out_channels, - in_latent_channels=self.args.diff_in_latent_channels, - in_tokens=self.args.diff_in_tokens, - dropout=self.args.diff_dropout, - use_fp16=self.args.diff_use_fp16, - num_heads=self.args.diff_num_heads, - layer_drop=self.args.diff_layer_drop, - unconditioned_percentage=self.args.diff_unconditioned_percentage, - ) - self.vocoder = UnivNetGenerator() + self.hifigan_decoder = HifiDecoder( + input_sample_rate=self.args.input_sample_rate, + output_sample_rate=self.args.output_sample_rate, + output_hop_length=self.args.output_hop_length, + ar_mel_length_compression=self.args.gpt_code_stride_len, + decoder_input_dim=self.args.decoder_input_dim, + d_vector_dim=self.args.d_vector_dim, + cond_d_vector_in_each_upsampling_layer=self.args.cond_d_vector_in_each_upsampling_layer, + ) @property def device(self): @@ -430,7 +406,6 @@ class Xtts(BaseTTS): sound_norm_refs=False, ): speaker_embedding = None - diffusion_cond_latents = None audio, sr = torchaudio.load(audio_path) audio = audio[:, : sr * max_ref_length].to(self.device) @@ -441,12 +416,9 @@ class Xtts(BaseTTS): if librosa_trim_db is not None: audio = librosa.effects.trim(audio, top_db=librosa_trim_db)[0] - if self.args.use_hifigan or self.args.use_hifigan: - speaker_embedding = self.get_speaker_embedding(audio, sr) - else: - diffusion_cond_latents = self.get_diffusion_cond_latents(audio, sr) + speaker_embedding = self.get_speaker_embedding(audio, sr) gpt_cond_latents = self.get_gpt_cond_latents(audio, sr, length=gpt_cond_len) # [1, 1024, T] - return gpt_cond_latents, diffusion_cond_latents, speaker_embedding + return gpt_cond_latents, speaker_embedding def synthesize(self, text, config, speaker_wav, language, **kwargs): """Synthesize speech with the given input text. @@ -579,7 +551,7 @@ class Xtts(BaseTTS): Generated audio clip(s) as a torch tensor. Shape 1,S if k=1 else, (k,1,S) where S is the sample length. Sample rate is 24kHz. 
""" - (gpt_cond_latent, diffusion_conditioning, speaker_embedding) = self.get_conditioning_latents( + (gpt_cond_latent, speaker_embedding) = self.get_conditioning_latents( audio_path=ref_audio_path, gpt_cond_len=gpt_cond_len, max_ref_length=max_ref_len, @@ -591,7 +563,6 @@ class Xtts(BaseTTS): language, gpt_cond_latent, speaker_embedding, - diffusion_conditioning, temperature=temperature, length_penalty=length_penalty, repetition_penalty=repetition_penalty, @@ -614,7 +585,6 @@ class Xtts(BaseTTS): language, gpt_cond_latent, speaker_embedding, - diffusion_conditioning, # GPT inference temperature=0.65, length_penalty=1, @@ -643,14 +613,6 @@ class Xtts(BaseTTS): text_tokens.shape[-1] < self.args.gpt_max_text_tokens ), " ❗ XTTS can only generate text with a maximum of 400 tokens." - if not self.args.use_hifigan: - diffuser = load_discrete_vocoder_diffuser( - desired_diffusion_steps=decoder_iterations, - cond_free=cond_free, - cond_free_k=cond_free_k, - sampler=decoder_sampler, - ) - with torch.no_grad(): gpt_codes = self.gpt.generate( cond_latents=gpt_cond_latent, @@ -692,29 +654,12 @@ class Xtts(BaseTTS): gpt_latents = gpt_latents[:, :k] break - if decoder == "hifigan": - assert ( - hasattr(self, "hifigan_decoder") and self.hifigan_decoder is not None - ), "You must enable hifigan decoder to use it by setting config `use_hifigan: true`" - wav = self.hifigan_decoder(gpt_latents, g=speaker_embedding) - else: - assert hasattr( - self, "diffusion_decoder" - ), "You must disable hifigan decoders to use difffusion by setting `use_hifigan: false`" - mel = do_spectrogram_diffusion( - self.diffusion_decoder, - diffuser, - gpt_latents, - diffusion_conditioning, - temperature=diffusion_temperature, - ) - wav = self.vocoder.inference(mel) + wav = self.hifigan_decoder(gpt_latents, g=speaker_embedding) return { "wav": wav.cpu().numpy().squeeze(), "gpt_latents": gpt_latents, "speaker_embedding": speaker_embedding, - "diffusion_conditioning": diffusion_conditioning, } def handle_chunks(self, wav_gen, wav_gen_prev, wav_overlap, overlap_len): @@ -752,9 +697,6 @@ class Xtts(BaseTTS): decoder="hifigan", **hf_generate_kwargs, ): - assert hasattr( - self, "hifigan_decoder" - ), "`inference_stream` requires use_hifigan to be set to true in the config.model_args, diffusion is too slow to stream." 
text = text.strip().lower() text_tokens = torch.IntTensor(self.tokenizer.encode(text, lang=language)).unsqueeze(0).to(self.device) @@ -793,13 +735,7 @@ class Xtts(BaseTTS): if is_end or (stream_chunk_size > 0 and len(last_tokens) >= stream_chunk_size): gpt_latents = torch.cat(all_latents, dim=0)[None, :] - if decoder == "hifigan": - assert hasattr( - self, "hifigan_decoder" - ), "You must enable hifigan decoder to use it by setting config `use_hifigan: true`" - wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) - else: - raise NotImplementedError("Diffusion for streaming inference not implemented.") + wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) wav_chunk, wav_gen_prev, wav_overlap = self.handle_chunks( wav_gen.squeeze(), wav_gen_prev, wav_overlap, overlap_wav_len ) @@ -827,10 +763,8 @@ class Xtts(BaseTTS): def get_compatible_checkpoint_state_dict(self, model_path): checkpoint = load_fsspec(model_path, map_location=torch.device("cpu"))["model"] - ignore_keys = ["diffusion_decoder", "vocoder"] if self.args.use_hifigan else [] - ignore_keys += [] if self.args.use_hifigan else ["hifigan_decoder"] # remove xtts gpt trainer extra keys - ignore_keys += ["torch_mel_spectrogram_style_encoder", "torch_mel_spectrogram_dvae", "dvae"] + ignore_keys = ["torch_mel_spectrogram_style_encoder", "torch_mel_spectrogram_dvae", "dvae"] for key in list(checkpoint.keys()): # check if it is from the coqui Trainer if so convert it if key.startswith("xtts."): @@ -889,12 +823,7 @@ class Xtts(BaseTTS): self.load_state_dict(checkpoint, strict=strict) if eval: - if hasattr(self, "hifigan_decoder"): - self.hifigan_decoder.eval() - if hasattr(self, "diffusion_decoder"): - self.diffusion_decoder.eval() - if hasattr(self, "vocoder"): - self.vocoder.eval() + self.hifigan_decoder.eval() self.gpt.init_gpt_for_inference(kv_cache=self.args.kv_cache, use_deepspeed=use_deepspeed) self.gpt.eval() diff --git a/recipes/ljspeech/xtts_v1/train_gpt_xtts.py b/recipes/ljspeech/xtts_v1/train_gpt_xtts.py index 9134be0d..268a0335 100644 --- a/recipes/ljspeech/xtts_v1/train_gpt_xtts.py +++ b/recipes/ljspeech/xtts_v1/train_gpt_xtts.py @@ -94,12 +94,9 @@ def main(): gpt_num_audio_tokens=8194, gpt_start_audio_token=8192, gpt_stop_audio_token=8193, - use_ne_hifigan=True, # if it is true it will keep the non-enhanced keys on the output checkpoint ) # define audio config - audio_config = XttsAudioConfig( - sample_rate=22050, dvae_sample_rate=22050, diffusion_sample_rate=24000, output_sample_rate=24000 - ) + audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000) # training parameters config config = GPTTrainerConfig( output_path=OUT_PATH, diff --git a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py index ee6b22be..d94204ca 100644 --- a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py +++ b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py @@ -93,14 +93,11 @@ def main(): gpt_num_audio_tokens=8194, gpt_start_audio_token=8192, gpt_stop_audio_token=8193, - use_ne_hifigan=True, # if it is true it will keep the non-enhanced keys on the output checkpoint gpt_use_masking_gt_prompt_approach=True, gpt_use_perceiver_resampler=True, ) # define audio config - audio_config = XttsAudioConfig( - sample_rate=22050, dvae_sample_rate=22050, diffusion_sample_rate=24000, output_sample_rate=24000 - ) + audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000) # training parameters config 
config = GPTTrainerConfig( output_path=OUT_PATH, diff --git a/tests/xtts_tests/test_xtts_gpt_train.py b/tests/xtts_tests/test_xtts_gpt_train.py index 47b1dd7d..03514daa 100644 --- a/tests/xtts_tests/test_xtts_gpt_train.py +++ b/tests/xtts_tests/test_xtts_gpt_train.py @@ -86,11 +86,8 @@ model_args = GPTArgs( gpt_num_audio_tokens=8194, gpt_start_audio_token=8192, gpt_stop_audio_token=8193, - use_ne_hifigan=True, -) -audio_config = XttsAudioConfig( - sample_rate=22050, dvae_sample_rate=22050, diffusion_sample_rate=24000, output_sample_rate=24000 ) +audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000) config = GPTTrainerConfig( epochs=1, output_path=OUT_PATH, diff --git a/tests/xtts_tests/test_xtts_v2-0_gpt_train.py b/tests/xtts_tests/test_xtts_v2-0_gpt_train.py index 6b6f1330..80995038 100644 --- a/tests/xtts_tests/test_xtts_v2-0_gpt_train.py +++ b/tests/xtts_tests/test_xtts_v2-0_gpt_train.py @@ -86,11 +86,8 @@ model_args = GPTArgs( gpt_stop_audio_token=8193, gpt_use_masking_gt_prompt_approach=True, gpt_use_perceiver_resampler=True, - use_ne_hifigan=True, -) -audio_config = XttsAudioConfig( - sample_rate=22050, dvae_sample_rate=22050, diffusion_sample_rate=24000, output_sample_rate=24000 ) +audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000) config = GPTTrainerConfig( epochs=1, output_path=OUT_PATH, From 9942000c50f232c602a03068e1428b9706e5c2da Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 12:12:40 -0300 Subject: [PATCH 10/67] Update XTTS v2 recipe model files --- recipes/ljspeech/xtts_v2/train_gpt_xtts.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py index d94204ca..47a52d57 100644 --- a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py +++ b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py @@ -53,15 +53,14 @@ if not os.path.isfile(DVAE_CHECKPOINT) or not os.path.isfile(MEL_NORM_FILE): print(" > Downloading DVAE files!") ModelManager._download_model_files([MEL_NORM_LINK, DVAE_CHECKPOINT_LINK], CHECKPOINTS_OUT_PATH, progress_bar=True) -# ToDo: Update links for XTTS v2.0 # Download XTTS v2.0 checkpoint if needed -TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v2.0/vocab.json" -XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v2.0/model.pth" +TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json" +XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/model.pth" # XTTS transfer learning parameters: You we need to provide the paths of XTTS model checkpoint that you want to do the fine tuning. 
-TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, TOKENIZER_FILE_LINK.split("/")[-1]) # vocab.json file -XTTS_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, XTTS_CHECKPOINT_LINK.split("/")[-1]) # model.pth file +TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(TOKENIZER_FILE_LINK)) # vocab.json file +XTTS_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(XTTS_CHECKPOINT_LINK)) # model.pth file # download XTTS v2.0 files if needed if not os.path.isfile(TOKENIZER_FILE) or not os.path.isfile(XTTS_CHECKPOINT): From 459ad70dc89802e26296ceb7c2b3cd62dc0348e4 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 13:45:53 -0300 Subject: [PATCH 11/67] Add support for multiples speaker references on XTTS inference --- TTS/tts/models/xtts.py | 43 +++++++++++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index af94675b..800ff612 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -405,19 +405,37 @@ class Xtts(BaseTTS): librosa_trim_db=None, sound_norm_refs=False, ): + # deal with multiples references + if not isinstance(audio_path, list): + audio_paths = list(audio_path) + else: + audio_paths = audio_path + + speaker_embeddings = [] + audios = [] speaker_embedding = None + for file_path in audio_paths: + audio, sr = torchaudio.load(file_path) + audio = audio[:, : sr * max_ref_length].to(self.device) + if audio.shape[0] > 1: + audio = audio.mean(0, keepdim=True) + if sound_norm_refs: + audio = (audio / torch.abs(audio).max()) * 0.75 + if librosa_trim_db is not None: + audio = librosa.effects.trim(audio, top_db=librosa_trim_db)[0] - audio, sr = torchaudio.load(audio_path) - audio = audio[:, : sr * max_ref_length].to(self.device) - if audio.shape[0] > 1: - audio = audio.mean(0, keepdim=True) - if sound_norm_refs: - audio = (audio / torch.abs(audio).max()) * 0.75 - if librosa_trim_db is not None: - audio = librosa.effects.trim(audio, top_db=librosa_trim_db)[0] + speaker_embedding = self.get_speaker_embedding(audio, sr) + speaker_embeddings.append(speaker_embedding) + audios.append(audio) + + # use a merge of all references for gpt cond latents + full_audio = torch.cat(audios, dim=-1) + gpt_cond_latents = self.get_gpt_cond_latents(full_audio, sr, length=gpt_cond_len) # [1, 1024, T] + + if speaker_embeddings: + speaker_embedding = torch.stack(speaker_embeddings) + speaker_embedding = speaker_embedding.mean(dim=0) - speaker_embedding = self.get_speaker_embedding(audio, sr) - gpt_cond_latents = self.get_gpt_cond_latents(audio, sr, length=gpt_cond_len) # [1, 1024, T] return gpt_cond_latents, speaker_embedding def synthesize(self, text, config, speaker_wav, language, **kwargs): @@ -436,11 +454,6 @@ class Xtts(BaseTTS): as latents used at inference. 
""" - - # Make the synthesizer happy 🥳 - if isinstance(speaker_wav, list): - speaker_wav = speaker_wav[0] - return self.inference_with_config(text, config, ref_audio_path=speaker_wav, language=language, **kwargs) def inference_with_config(self, text, config, ref_audio_path, language, **kwargs): From 00294ffdf6fd0ab3a0fd4e13e63923768de9b385 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 14:21:38 -0300 Subject: [PATCH 12/67] Update XTTS docs --- TTS/tts/models/xtts.py | 4 ++-- docs/source/models/xtts.md | 41 ++++++++++++++++++++++++++++++++++---- 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 800ff612..ce968053 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -336,7 +336,7 @@ class Xtts(BaseTTS): """Compute the conditioning latents for the GPT model from the given audio. Args: - audio_path (str): Path to the audio file. + audio (tensor): audio tensor. sr (int): Sample rate of the audio. length (int): Length of the audio in seconds. Defaults to 3. """ @@ -444,7 +444,7 @@ class Xtts(BaseTTS): Args: text (str): Input text. config (XttsConfig): Config with inference parameters. - speaker_wav (str): Path to the speaker audio file for cloning. + speaker_wav (list): List of paths to the speaker audio files to be used for cloning. language (str): Language ID of the speaker. **kwargs: Inference settings. See `inference()`. diff --git a/docs/source/models/xtts.md b/docs/source/models/xtts.md index 1d034aea..8167a1d1 100644 --- a/docs/source/models/xtts.md +++ b/docs/source/models/xtts.md @@ -39,6 +39,7 @@ You can also mail us at info@coqui.ai. ### Inference #### 🐸TTS API +##### Single reference ```python from TTS.api import TTS tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=True) @@ -46,12 +47,25 @@ tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=True) # generate speech by cloning a voice using default settings tts.tts_to_file(text="It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", file_path="output.wav", - speaker_wav="/path/to/target/speaker.wav", + speaker_wav=["/path/to/target/speaker.wav"], + language="en") +``` + +##### Multiple references +```python +from TTS.api import TTS +tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2", gpu=True) + +# generate speech by cloning a voice using default settings +tts.tts_to_file(text="It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.", + file_path="output.wav", + speaker_wav=["/path/to/target/speaker.wav", "/path/to/target/speaker_2.wav", "/path/to/target/speaker_3.wav"], language="en") ``` #### 🐸TTS Command line +##### Single reference ```console tts --model_name tts_models/multilingual/multi-dataset/xtts_v2 \ --text "Bugün okula gitmek istemiyorum." \ @@ -60,6 +74,25 @@ tts.tts_to_file(text="It took me quite a long time to develop a voice, and now t --use_cuda true ``` +##### Multiple references +```console + tts --model_name tts_models/multilingual/multi-dataset/xtts_v2 \ + --text "Bugün okula gitmek istemiyorum." \ + --speaker_wav /path/to/target/speaker.wav /path/to/target/speaker_2.wav /path/to/target/speaker_3.wav \ + --language_idx tr \ + --use_cuda true +``` +or for all wav files in a directory you can use: + +```console + tts --model_name tts_models/multilingual/multi-dataset/xtts_v2 \ + --text "Bugün okula gitmek istemiyorum." 
\ + --speaker_wav /path/to/target/*.wav \ + --language_idx tr \ + --use_cuda true +``` + + #### model directly If you want to be able to run with `use_deepspeed=True` and enjoy the speedup, you need to install deepspeed first. @@ -83,7 +116,7 @@ model.load_checkpoint(config, checkpoint_dir="/path/to/xtts/", use_deepspeed=Tru model.cuda() print("Computing speaker latents...") -gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path="reference.wav") +gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=["reference.wav"]) print("Inference...") out = model.inference( @@ -120,7 +153,7 @@ model.load_checkpoint(config, checkpoint_dir="/path/to/xtts/", use_deepspeed=Tru model.cuda() print("Computing speaker latents...") -gpt_cond_latent, _, speaker_embedding = model.get_conditioning_latents(audio_path="reference.wav") +gpt_cond_latent, _, speaker_embedding = model.get_conditioning_latents(audio_path=["reference.wav"]) print("Inference...") t0 = time.time() @@ -177,7 +210,7 @@ model.load_checkpoint(config, checkpoint_path=XTTS_CHECKPOINT, vocab_path=TOKENI model.cuda() print("Computing speaker latents...") -gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=SPEAKER_REFERENCE) +gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=[SPEAKER_REFERENCE]) print("Inference...") out = model.inference( From 72b2bac0f8f96a256c6f0c073a73db7c07588526 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 14:46:57 -0300 Subject: [PATCH 13/67] Load reference in 24khz to avoid issued with multiple sr references --- TTS/tts/layers/xtts/trainer/dataset.py | 30 +-------------------- TTS/tts/models/xtts.py | 36 +++++++++++++++++++++++--- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/TTS/tts/layers/xtts/trainer/dataset.py b/TTS/tts/layers/xtts/trainer/dataset.py index 5d8b2ae6..8cb90ad0 100644 --- a/TTS/tts/layers/xtts/trainer/dataset.py +++ b/TTS/tts/layers/xtts/trainer/dataset.py @@ -2,13 +2,10 @@ import os import random import sys -import numpy as np import torch import torch.nn.functional as F import torch.utils.data -import torchaudio -from torchaudio.backend.soundfile_backend import load as torchaudio_soundfile_load -from torchaudio.backend.sox_io_backend import load as torchaudio_sox_load +from TTS.tts.models.xtts import load_audio torch.set_num_threads(1) @@ -50,31 +47,6 @@ def get_prompt_slice(gt_path, max_sample_length, min_sample_length, sample_rate, return rel_clip, rel_clip.shape[-1], cond_idxs -def load_audio(audiopath, sampling_rate): - # better load setting following: https://github.com/faroit/python_audio_loading_benchmark - if audiopath[-4:] == ".mp3": - # it uses torchaudio with sox backend to load mp3 - audio, lsr = torchaudio_sox_load(audiopath) - else: - # it uses torchaudio soundfile backend to load all the others data type - audio, lsr = torchaudio_soundfile_load(audiopath) - - # stereo to mono if needed - if audio.size(0) != 1: - audio = torch.mean(audio, dim=0, keepdim=True) - - if lsr != sampling_rate: - audio = torchaudio.functional.resample(audio, lsr, sampling_rate) - - # Check some assumptions about audio range. This should be automatically fixed in load_wav_to_torch, but might not be in some edge cases, where we should squawk. - # '10' is arbitrarily chosen since it seems like audio will often "overdrive" the [-1,1] bounds. 
- if torch.any(audio > 10) or not torch.any(audio < 0): - print(f"Error with {audiopath}. Max={audio.max()} min={audio.min()}") - # clip audio invalid values - audio.clip_(-1, 1) - return audio - - class XTTSDataset(torch.utils.data.Dataset): def __init__(self, config, samples, tokenizer, sample_rate, is_eval=False): self.config = config diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index ce968053..fdaeb7de 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -67,6 +67,31 @@ def wav_to_mel_cloning( return mel +def load_audio(audiopath, sampling_rate): + # better load setting following: https://github.com/faroit/python_audio_loading_benchmark + if audiopath[-4:] == ".mp3": + # it uses torchaudio with sox backend to load mp3 + audio, lsr = torchaudio.backend.sox_io_backend.load(audiopath) + else: + # it uses torchaudio soundfile backend to load all the others data type + audio, lsr = torchaudio.backend.soundfile_backend.load(audiopath) + + # stereo to mono if needed + if audio.size(0) != 1: + audio = torch.mean(audio, dim=0, keepdim=True) + + if lsr != sampling_rate: + audio = torchaudio.functional.resample(audio, lsr, sampling_rate) + + # Check some assumptions about audio range. This should be automatically fixed in load_wav_to_torch, but might not be in some edge cases, where we should squawk. + # '10' is arbitrarily chosen since it seems like audio will often "overdrive" the [-1,1] bounds. + if torch.any(audio > 10) or not torch.any(audio < 0): + print(f"Error with {audiopath}. Max={audio.max()} min={audio.min()}") + # clip audio invalid values + audio.clip_(-1, 1) + return audio + + def pad_or_truncate(t, length): """ Ensure a given tensor t has a specified sequence length by either padding it with zeros or clipping it. 
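As an aside, a minimal usage sketch of the `load_audio` helper added in the hunk above; the reference file names are placeholders (not from this patch), and the 24 kHz target simply mirrors how `get_conditioning_latents` merges multiple references later in this same patch.

```python
import torch

from TTS.tts.models.xtts import load_audio  # same import path used by the trainer dataset in this patch

# Two hypothetical reference clips that may have different native sample rates.
# load_audio returns a mono (1, T) tensor resampled to the requested rate and
# clipped to [-1, 1], so the clips can be concatenated into one conditioning stream.
refs = ["speaker_a.wav", "speaker_b.flac"]
audios = [load_audio(path, 24000) for path in refs]
merged = torch.cat(audios, dim=-1)  # shape (1, T_a + T_b), ready for GPT conditioning
```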
@@ -404,6 +429,7 @@ class Xtts(BaseTTS): max_ref_length=10, librosa_trim_db=None, sound_norm_refs=False, + load_sr=24000, ): # deal with multiples references if not isinstance(audio_path, list): @@ -415,8 +441,9 @@ class Xtts(BaseTTS): audios = [] speaker_embedding = None for file_path in audio_paths: - audio, sr = torchaudio.load(file_path) - audio = audio[:, : sr * max_ref_length].to(self.device) + # load the audio in 24khz to avoid issued with multiple sr references + audio = load_audio(file_path, load_sr) + audio = audio[:, : load_sr * max_ref_length].to(self.device) if audio.shape[0] > 1: audio = audio.mean(0, keepdim=True) if sound_norm_refs: @@ -424,13 +451,14 @@ class Xtts(BaseTTS): if librosa_trim_db is not None: audio = librosa.effects.trim(audio, top_db=librosa_trim_db)[0] - speaker_embedding = self.get_speaker_embedding(audio, sr) + speaker_embedding = self.get_speaker_embedding(audio, load_sr) speaker_embeddings.append(speaker_embedding) + audios.append(audio) # use a merge of all references for gpt cond latents full_audio = torch.cat(audios, dim=-1) - gpt_cond_latents = self.get_gpt_cond_latents(full_audio, sr, length=gpt_cond_len) # [1, 1024, T] + gpt_cond_latents = self.get_gpt_cond_latents(full_audio, load_sr, length=gpt_cond_len) # [1, 1024, T] if speaker_embeddings: speaker_embedding = torch.stack(speaker_embeddings) From 1b6f8d0e46ca6a39a97dfd63af548e540a5377ca Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 14:52:09 -0300 Subject: [PATCH 14/67] Update unit tests and recipes --- TTS/tts/models/xtts.py | 2 +- recipes/ljspeech/xtts_v1/train_gpt_xtts.py | 4 ++-- recipes/ljspeech/xtts_v2/train_gpt_xtts.py | 4 ++-- tests/xtts_tests/test_xtts_gpt_train.py | 2 +- tests/xtts_tests/test_xtts_v2-0_gpt_train.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index fdaeb7de..e70799ec 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -433,7 +433,7 @@ class Xtts(BaseTTS): ): # deal with multiples references if not isinstance(audio_path, list): - audio_paths = list(audio_path) + audio_paths = [audio_path] else: audio_paths = audio_path diff --git a/recipes/ljspeech/xtts_v1/train_gpt_xtts.py b/recipes/ljspeech/xtts_v1/train_gpt_xtts.py index 268a0335..a7ae40e2 100644 --- a/recipes/ljspeech/xtts_v1/train_gpt_xtts.py +++ b/recipes/ljspeech/xtts_v1/train_gpt_xtts.py @@ -71,9 +71,9 @@ if not os.path.isfile(TOKENIZER_FILE) or not os.path.isfile(XTTS_CHECKPOINT): # Training sentences generations -SPEAKER_REFERENCE = ( +SPEAKER_REFERENCE = [ "./tests/data/ljspeech/wavs/LJ001-0002.wav" # speaker reference to be used in training test sentences -) +] LANGUAGE = config_dataset.language diff --git a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py index 47a52d57..989b1936 100644 --- a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py +++ b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py @@ -71,9 +71,9 @@ if not os.path.isfile(TOKENIZER_FILE) or not os.path.isfile(XTTS_CHECKPOINT): # Training sentences generations -SPEAKER_REFERENCE = ( +SPEAKER_REFERENCE = [ "./tests/data/ljspeech/wavs/LJ001-0002.wav" # speaker reference to be used in training test sentences -) +] LANGUAGE = config_dataset.language diff --git a/tests/xtts_tests/test_xtts_gpt_train.py b/tests/xtts_tests/test_xtts_gpt_train.py index 03514daa..12c547d6 100644 --- a/tests/xtts_tests/test_xtts_gpt_train.py +++ b/tests/xtts_tests/test_xtts_gpt_train.py @@ -60,7 +60,7 @@ XTTS_CHECKPOINT = None # 
"/raid/edresson/dev/Checkpoints/XTTS_evaluation/xtts_s # Training sentences generations -SPEAKER_REFERENCE = "tests/data/ljspeech/wavs/LJ001-0002.wav" # speaker reference to be used in training test sentences +SPEAKER_REFERENCE = ["tests/data/ljspeech/wavs/LJ001-0002.wav"] # speaker reference to be used in training test sentences LANGUAGE = config_dataset.language diff --git a/tests/xtts_tests/test_xtts_v2-0_gpt_train.py b/tests/xtts_tests/test_xtts_v2-0_gpt_train.py index 80995038..3d6ef60e 100644 --- a/tests/xtts_tests/test_xtts_v2-0_gpt_train.py +++ b/tests/xtts_tests/test_xtts_v2-0_gpt_train.py @@ -58,7 +58,7 @@ XTTS_CHECKPOINT = None # "/raid/edresson/dev/Checkpoints/XTTS_evaluation/xtts_s # Training sentences generations -SPEAKER_REFERENCE = "tests/data/ljspeech/wavs/LJ001-0002.wav" # speaker reference to be used in training test sentences +SPEAKER_REFERENCE = ["tests/data/ljspeech/wavs/LJ001-0002.wav"] # speaker reference to be used in training test sentences LANGUAGE = config_dataset.language From f444f296f2ccc6d9730d51c74f1ca6ffe4822517 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 15:09:33 -0300 Subject: [PATCH 15/67] Add multiples references on xtts inference tests --- tests/zoo_tests/test_models.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/zoo_tests/test_models.py b/tests/zoo_tests/test_models.py index 2f9399ad..79aef5cb 100644 --- a/tests/zoo_tests/test_models.py +++ b/tests/zoo_tests/test_models.py @@ -101,7 +101,9 @@ def test_xtts_streaming(): from TTS.tts.configs.xtts_config import XttsConfig from TTS.tts.models.xtts import Xtts - speaker_wav = os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0001.wav") + speaker_wav = [os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0001.wav")] + speaker_wav_2 = os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0002.wav") + speaker_wav.append(speaker_wav_2) model_path = os.path.join(get_user_data_dir("tts"), "tts_models--multilingual--multi-dataset--xtts_v1") config = XttsConfig() config.load_json(os.path.join(model_path, "config.json")) @@ -131,20 +133,21 @@ def test_xtts_v2(): """XTTS is too big to run on github actions. We need to test it locally""" output_path = os.path.join(get_tests_output_path(), "output.wav") speaker_wav = os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0001.wav") + speaker_wav_2 = os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0002.wav") use_gpu = torch.cuda.is_available() if use_gpu: run_cli( "yes | " f"tts --model_name tts_models/multilingual/multi-dataset/xtts_v2 " f'--text "This is an example." --out_path "{output_path}" --progress_bar False --use_cuda True ' - f'--speaker_wav "{speaker_wav}" --language_idx "en"' + f'--speaker_wav "{speaker_wav}" "{speaker_wav_2}" "--language_idx "en"' ) else: run_cli( "yes | " f"tts --model_name tts_models/multilingual/multi-dataset/xtts_v2 " f'--text "This is an example." 
--out_path "{output_path}" --progress_bar False ' - f'--speaker_wav "{speaker_wav}" --language_idx "en"' + f'--speaker_wav "{speaker_wav}" "{speaker_wav_2}" --language_idx "en"' ) @@ -153,7 +156,7 @@ def test_xtts_v2_streaming(): from TTS.tts.configs.xtts_config import XttsConfig from TTS.tts.models.xtts import Xtts - speaker_wav = os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0001.wav") + speaker_wav = [os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0001.wav")] model_path = os.path.join(get_user_data_dir("tts"), "tts_models--multilingual--multi-dataset--xtts_v2") config = XttsConfig() config.load_json(os.path.join(model_path, "config.json")) From b146de4ce8b6252eb0869f25dc9717b7cbc85765 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 15:22:18 -0300 Subject: [PATCH 16/67] Bug fix on XTTS v2.0 Trainer --- TTS/tts/layers/xtts/trainer/gpt_trainer.py | 3 +-- tests/xtts_tests/test_xtts_v2-0_gpt_train.py | 2 ++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/TTS/tts/layers/xtts/trainer/gpt_trainer.py b/TTS/tts/layers/xtts/trainer/gpt_trainer.py index ef32a4ab..80e06340 100644 --- a/TTS/tts/layers/xtts/trainer/gpt_trainer.py +++ b/TTS/tts/layers/xtts/trainer/gpt_trainer.py @@ -237,8 +237,7 @@ class GPTTrainer(BaseTTS): self.config, s_info["speaker_wav"], s_info["language"], - gpt_cond_len=3, - decoder="ne_hifigan", + gpt_cond_len=3 )["wav"] test_audios["{}-audio".format(idx)] = wav diff --git a/tests/xtts_tests/test_xtts_v2-0_gpt_train.py b/tests/xtts_tests/test_xtts_v2-0_gpt_train.py index 3d6ef60e..b19b7210 100644 --- a/tests/xtts_tests/test_xtts_v2-0_gpt_train.py +++ b/tests/xtts_tests/test_xtts_v2-0_gpt_train.py @@ -87,7 +87,9 @@ model_args = GPTArgs( gpt_use_masking_gt_prompt_approach=True, gpt_use_perceiver_resampler=True, ) + audio_config = XttsAudioConfig(sample_rate=22050, dvae_sample_rate=22050, output_sample_rate=24000) + config = GPTTrainerConfig( epochs=1, output_path=OUT_PATH, From 09fb317e6d5961d6d5934e02b475e84277b0ea66 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 17:36:32 -0300 Subject: [PATCH 17/67] Remove unused code --- TTS/tts/models/xtts.py | 108 ----------------------------------------- 1 file changed, 108 deletions(-) diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index e70799ec..4ab00270 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -111,78 +111,6 @@ def pad_or_truncate(t, length): return tp -def load_discrete_vocoder_diffuser( - trained_diffusion_steps=4000, - desired_diffusion_steps=200, - cond_free=True, - cond_free_k=1, - sampler="ddim", -): - """ - Load a GaussianDiffusion instance configured for use as a decoder. - - Args: - trained_diffusion_steps (int): The number of diffusion steps used during training. - desired_diffusion_steps (int): The number of diffusion steps to use during inference. - cond_free (bool): Whether to use a conditioning-free model. - cond_free_k (int): The number of samples to use for conditioning-free models. - sampler (str): The name of the sampler to use. - - Returns: - A SpacedDiffusion instance configured with the given parameters. 
- """ - return SpacedDiffusion( - use_timesteps=space_timesteps(trained_diffusion_steps, [desired_diffusion_steps]), - model_mean_type="epsilon", - model_var_type="learned_range", - loss_type="mse", - betas=get_named_beta_schedule("linear", trained_diffusion_steps), - conditioning_free=cond_free, - conditioning_free_k=cond_free_k, - sampler=sampler, - ) - - -def do_spectrogram_diffusion( - diffusion_model, - diffuser, - latents, - conditioning_latents, - temperature=1, -): - """ - Generate a mel-spectrogram using a diffusion model and a diffuser. - - Args: - diffusion_model (nn.Module): A diffusion model that converts from 22kHz spectrogram codes to a 24kHz spectrogram signal. - diffuser (Diffuser): A diffuser that generates a mel-spectrogram from noise. - latents (torch.Tensor): A tensor of shape (batch_size, seq_len, code_size) containing the input spectrogram codes. - conditioning_latents (torch.Tensor): A tensor of shape (batch_size, code_size) containing the conditioning codes. - temperature (float, optional): The temperature of the noise used by the diffuser. Defaults to 1. - - Returns: - torch.Tensor: A tensor of shape (batch_size, mel_channels, mel_seq_len) containing the generated mel-spectrogram. - """ - with torch.no_grad(): - output_seq_len = ( - latents.shape[1] * 4 * 24000 // 22050 - ) # This diffusion model converts from 22kHz spectrogram codes to a 24kHz spectrogram signal. - output_shape = (latents.shape[0], 100, output_seq_len) - precomputed_embeddings = diffusion_model.timestep_independent( - latents, conditioning_latents, output_seq_len, False - ) - - noise = torch.randn(output_shape, device=latents.device) * temperature - mel = diffuser.sample_loop( - diffusion_model, - output_shape, - noise=noise, - model_kwargs={"precomputed_aligned_embeddings": precomputed_embeddings}, - progress=False, - ) - return denormalize_tacotron_mel(mel)[:, :, :output_seq_len] - - @dataclass class XttsAudioConfig(Coqpit): """ @@ -563,27 +491,6 @@ class Xtts(BaseTTS): gpt_cond_len: (int) Length of the audio used for cloning. If audio is shorter, then audio length is used else the first `gpt_cond_len` secs is used. Defaults to 6 seconds. - decoder_iterations: (int) Number of diffusion steps to perform. [0,4000]. More steps means the network has - more chances to iteratively refine the output, which should theoretically mean a higher quality output. - Generally a value above 250 is not noticeably better, however. Defaults to 100. - - cond_free: (bool) Whether or not to perform conditioning-free diffusion. Conditioning-free diffusion - performs two forward passes for each diffusion step: one with the outputs of the autoregressive model - and one with no conditioning priors. The output of the two is blended according to the cond_free_k - value below. Conditioning-free diffusion is the real deal, and dramatically improves realism. - Defaults to True. - - cond_free_k: (float) Knob that determines how to balance the conditioning free signal with the - conditioning-present signal. [0,inf]. As cond_free_k increases, the output becomes dominated by the - conditioning-free signal. Defaults to 2.0. - - diffusion_temperature: (float) Controls the variance of the noise fed into the diffusion model. [0,1]. - Values at 0 re the "mean" prediction of the diffusion network and will sound bland and smeared. - Defaults to 1.0. 
- - decoder: (str) Selects the decoder to use between ("hifigan", "diffusion") - Defaults to hifigan - hf_generate_kwargs: (**kwargs) The huggingface Transformers generate API is used for the autoregressive transformer. Extra keyword args fed to this function get forwarded directly to that API. Documentation here: https://huggingface.co/docs/transformers/internal/generation_utils @@ -610,12 +517,6 @@ class Xtts(BaseTTS): top_k=top_k, top_p=top_p, do_sample=do_sample, - decoder_iterations=decoder_iterations, - cond_free=cond_free, - cond_free_k=cond_free_k, - diffusion_temperature=diffusion_temperature, - decoder_sampler=decoder_sampler, - decoder=decoder, **hf_generate_kwargs, ) @@ -633,13 +534,6 @@ class Xtts(BaseTTS): top_k=50, top_p=0.85, do_sample=True, - # Decoder inference - decoder_iterations=100, - cond_free=True, - cond_free_k=2, - diffusion_temperature=1.0, - decoder_sampler="ddim", - decoder="hifigan", num_beams=1, **hf_generate_kwargs, ): @@ -734,8 +628,6 @@ class Xtts(BaseTTS): top_k=50, top_p=0.85, do_sample=True, - # Decoder inference - decoder="hifigan", **hf_generate_kwargs, ): text = text.strip().lower() From cabff9f323d5e45acba629b33f1885d01adb85ed Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 17:47:14 -0300 Subject: [PATCH 18/67] Update XTTS v2.0 recipe --- recipes/ljspeech/xtts_v2/train_gpt_xtts.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py index 989b1936..fa421749 100644 --- a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py +++ b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py @@ -40,6 +40,7 @@ CHECKPOINTS_OUT_PATH = os.path.join(OUT_PATH, "XTTS_v2.0_original_model_files/") os.makedirs(CHECKPOINTS_OUT_PATH, exist_ok=True) +# ToDo: update DVAE checkpoint # DVAE files DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/dvae.pth" MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/mel_stats.pth" @@ -89,9 +90,9 @@ def main(): dvae_checkpoint=DVAE_CHECKPOINT, xtts_checkpoint=XTTS_CHECKPOINT, # checkpoint path of the model that you want to fine-tune tokenizer_file=TOKENIZER_FILE, - gpt_num_audio_tokens=8194, - gpt_start_audio_token=8192, - gpt_stop_audio_token=8193, + gpt_num_audio_tokens=1024, + gpt_start_audio_token=1025, + gpt_stop_audio_token=1026, gpt_use_masking_gt_prompt_approach=True, gpt_use_perceiver_resampler=True, ) From 13243df526287366632e667dda2718effa749195 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 19:10:21 -0300 Subject: [PATCH 19/67] Update XTTS v1.1 files --- TTS/.models.json | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/TTS/.models.json b/TTS/.models.json index b33e4fd3..7d7f68e0 100644 --- a/TTS/.models.json +++ b/TTS/.models.json @@ -10,6 +10,7 @@ "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json", "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/hash.md5" ], + "model_hash": "ae9e4b39e095fd5728fe7f7931eccoqui", "default_vocoder": null, "commit": "480a6cdf7", "license": "CPML", @@ -32,12 +33,12 @@ "xtts_v1.1": { "description": "XTTS-v1.1 by Coqui with 14 languages, cross-language voice cloning and reference leak fixed.", "hf_url": [ - "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/model.pth", - "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/config.json", - "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/vocab.json", - 
"https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/hash.md5" + "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/model.pth", + "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/config.json", + "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/vocab.json", + "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/hash.md5" ], - "model_hash": "ae9e4b39e095fd5728fe7f7931ec66ad", + "model_hash": "7c62beaf58d39b729de287330dc254e7b515677416839b649a50e7cf74c3df59", "default_vocoder": null, "commit": "82910a63", "license": "CPML", From 2470599d187e2d7972536a1815808f3a29a4bce4 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 19:12:04 -0300 Subject: [PATCH 20/67] Drop XTTS v1 --- TTS/.models.json | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/TTS/.models.json b/TTS/.models.json index 7d7f68e0..5b35d4e2 100644 --- a/TTS/.models.json +++ b/TTS/.models.json @@ -17,19 +17,6 @@ "contact": "info@coqui.ai", "tos_required": true }, - "xtts_v1": { - "description": "XTTS-v1 by Coqui with 13 languages and cross-language voice cloning.", - "hf_url": [ - "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/hifigan/model.pth", - "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/hifigan/config.json", - "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/hifigan/vocab.json" - ], - "default_vocoder": null, - "commit": "e5140314", - "license": "CPML", - "contact": "info@coqui.ai", - "tos_required": true - }, "xtts_v1.1": { "description": "XTTS-v1.1 by Coqui with 14 languages, cross-language voice cloning and reference leak fixed.", "hf_url": [ From 905900afc9566978a973ca4ef97d3461d01479d3 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 19:14:50 -0300 Subject: [PATCH 21/67] Update XTTS v1.1 recipe --- recipes/ljspeech/xtts_v1/train_gpt_xtts.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/recipes/ljspeech/xtts_v1/train_gpt_xtts.py b/recipes/ljspeech/xtts_v1/train_gpt_xtts.py index a7ae40e2..7d8f4064 100644 --- a/recipes/ljspeech/xtts_v1/train_gpt_xtts.py +++ b/recipes/ljspeech/xtts_v1/train_gpt_xtts.py @@ -41,8 +41,8 @@ os.makedirs(CHECKPOINTS_OUT_PATH, exist_ok=True) # DVAE files -DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/dvae.pth" -MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/mel_stats.pth" +DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/dvae.pth" +MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/mel_stats.pth" # Set the path to the downloaded files DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, DVAE_CHECKPOINT_LINK.split("/")[-1]) @@ -55,8 +55,8 @@ if not os.path.isfile(DVAE_CHECKPOINT) or not os.path.isfile(MEL_NORM_FILE): # Download XTTS v1.1 checkpoint if needed -TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/vocab.json" -XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/model.pth" +TOKENIZER_FILE_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/vocab.json" +XTTS_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.2/model.pth" # XTTS transfer learning parameters: You we need to provide the paths of XTTS model checkpoint that you want to do the fine tuning. 
TOKENIZER_FILE = os.path.join(CHECKPOINTS_OUT_PATH, TOKENIZER_FILE_LINK.split("/")[-1]) # vocab.json file From 5f9ab6cfaa119dfb29899c10f0b1b7f37ec8078f Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Mon, 6 Nov 2023 19:22:34 -0300 Subject: [PATCH 22/67] Fix style Co-authored-by: Aarni Koskela --- TTS/tts/layers/xtts/trainer/gpt_trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/tts/layers/xtts/trainer/gpt_trainer.py b/TTS/tts/layers/xtts/trainer/gpt_trainer.py index 80e06340..005b30be 100644 --- a/TTS/tts/layers/xtts/trainer/gpt_trainer.py +++ b/TTS/tts/layers/xtts/trainer/gpt_trainer.py @@ -237,7 +237,7 @@ class GPTTrainer(BaseTTS): self.config, s_info["speaker_wav"], s_info["language"], - gpt_cond_len=3 + gpt_cond_len=3, )["wav"] test_audios["{}-audio".format(idx)] = wav From cbdbc44e0f15752e439437d7078538f229e1ec99 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Tue, 7 Nov 2023 10:16:44 -0300 Subject: [PATCH 23/67] Fix XTTS v2.0 training recipe (#3154) * Fix XTTS v2.0 training recipe * Update XTTS v2 model hash --- TTS/.models.json | 2 +- recipes/ljspeech/xtts_v2/train_gpt_xtts.py | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/TTS/.models.json b/TTS/.models.json index 5b35d4e2..13da715b 100644 --- a/TTS/.models.json +++ b/TTS/.models.json @@ -10,7 +10,7 @@ "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json", "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/hash.md5" ], - "model_hash": "ae9e4b39e095fd5728fe7f7931eccoqui", + "model_hash": "6a09d1ad43896f06041ed8195956c9698f13b6189dc80f1c74bdc2b8e8d15324", "default_vocoder": null, "commit": "480a6cdf7", "license": "CPML", diff --git a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py index fa421749..62691738 100644 --- a/recipes/ljspeech/xtts_v2/train_gpt_xtts.py +++ b/recipes/ljspeech/xtts_v2/train_gpt_xtts.py @@ -40,14 +40,13 @@ CHECKPOINTS_OUT_PATH = os.path.join(OUT_PATH, "XTTS_v2.0_original_model_files/") os.makedirs(CHECKPOINTS_OUT_PATH, exist_ok=True) -# ToDo: update DVAE checkpoint # DVAE files -DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/dvae.pth" -MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v1/v1.1.1/mel_stats.pth" +DVAE_CHECKPOINT_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/dvae.pth" +MEL_NORM_LINK = "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/mel_stats.pth" # Set the path to the downloaded files -DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, DVAE_CHECKPOINT_LINK.split("/")[-1]) -MEL_NORM_FILE = os.path.join(CHECKPOINTS_OUT_PATH, MEL_NORM_LINK.split("/")[-1]) +DVAE_CHECKPOINT = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(DVAE_CHECKPOINT_LINK)) +MEL_NORM_FILE = os.path.join(CHECKPOINTS_OUT_PATH, os.path.basename(MEL_NORM_LINK)) # download DVAE files if needed if not os.path.isfile(DVAE_CHECKPOINT) or not os.path.isfile(MEL_NORM_FILE): @@ -90,9 +89,9 @@ def main(): dvae_checkpoint=DVAE_CHECKPOINT, xtts_checkpoint=XTTS_CHECKPOINT, # checkpoint path of the model that you want to fine-tune tokenizer_file=TOKENIZER_FILE, - gpt_num_audio_tokens=1024, - gpt_start_audio_token=1025, - gpt_stop_audio_token=1026, + gpt_num_audio_tokens=1026, + gpt_start_audio_token=1024, + gpt_stop_audio_token=1025, gpt_use_masking_gt_prompt_approach=True, gpt_use_perceiver_resampler=True, ) From f846a9f3007ab69e5e6664b6ab23675a49f2e058 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Tue, 7 Nov 2023 14:17:36 
+0100 Subject: [PATCH 24/67] Update to v0.20.1 --- TTS/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/VERSION b/TTS/VERSION index 5a03fb73..847e9aef 100644 --- a/TTS/VERSION +++ b/TTS/VERSION @@ -1 +1 @@ -0.20.0 +0.20.1 From ce1a39a9a4d106c68320c3cb00954fbf69b17a87 Mon Sep 17 00:00:00 2001 From: Julian Weber Date: Wed, 8 Nov 2023 10:24:23 +0100 Subject: [PATCH 25/67] Add char limit warn (#3130) * Add char limit warning * Adding v2 langs * cached_property for cutlet * Fix import --- TTS/tts/layers/xtts/tokenizer.py | 42 +++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py index 4f2da02d..4c7ae6e3 100644 --- a/TTS/tts/layers/xtts/tokenizer.py +++ b/TTS/tts/layers/xtts/tokenizer.py @@ -8,6 +8,7 @@ from hangul_romanize import Transliter from hangul_romanize.rule import academic from num2words import num2words from tokenizers import Tokenizer +from functools import cached_property from TTS.tts.layers.xtts.zh_num2words import TextNorm as zh_num2words @@ -535,11 +536,50 @@ DEFAULT_VOCAB_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), " class VoiceBpeTokenizer: def __init__(self, vocab_file=None): self.tokenizer = None - self.katsu = None if vocab_file is not None: self.tokenizer = Tokenizer.from_file(vocab_file) + self.char_limits = { + "en": 250, + "de": 253, + "fr": 273, + "es": 239, + "it": 213, + "pt": 203, + "pl": 224, + "zh-cn": 82, + "ar": 166, + "cs": 186, + "ru": 182, + "nl": 251, + "tr": 226, + "ja": 71, + "hu": 224, + "ko": 95, + } + + @cached_property + def katsu(self): + import cutlet + return cutlet.Cutlet() + + def check_input_length(self, txt, lang): + limit = self.char_limits.get(lang, 250) + if len(txt) > limit: + print(f"[!] Warning: The text length exceeds the character limit of {limit} for language '{lang}', this might cause truncated audio.") + + def preprocess_text(self, txt, lang): + if lang in ["en", "es", "fr", "de", "pt", "it", "pl", "ar", "cs", "ru", "nl", "tr", "zh-cn"]: + txt = multilingual_cleaners(txt, lang) + if lang == "zh-cn": + txt = chinese_transliterate(txt) + elif lang == "ja": + txt = japanese_cleaners(txt, self.katsu) + else: + raise NotImplementedError() + return txt def encode(self, txt, lang): + self.check_input_length(txt, lang) txt = self.preprocess_text(txt, lang) txt = f"[{lang}]{txt}" txt = txt.replace(" ", "[SPACE]") From a24ebcd8a6be0a233cf3bb3dfd23916b276dd591 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 8 Nov 2023 10:51:23 +0100 Subject: [PATCH 26/67] Fix coqui api (#3168) --- README.md | 10 ++------ TTS/api.py | 6 ++--- TTS/bin/synthesize.py | 4 +-- TTS/cs_api.py | 54 ++++++++++------------------------------ docs/source/inference.md | 11 ++------ 5 files changed, 22 insertions(+), 63 deletions(-) diff --git a/README.md b/README.md index 1a9285eb..353db7cf 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ## 🐸Coqui.ai News - 📣 ⓍTTSv2 is here with 16 languages and better performance across the board. - 📣 ⓍTTS fine-tuning code is out. Check the [example recipes](https://github.com/coqui-ai/TTS/tree/dev/recipes/ljspeech). -- 📣 ⓍTTS can now stream with <200ms latency. +- 📣 ⓍTTS can now stream with <200ms latency. 
- 📣 ⓍTTS, our production TTS model that can speak 13 languages, is released [Blog Post](https://coqui.ai/blog/tts/open_xtts), [Demo](https://huggingface.co/spaces/coqui/xtts), [Docs](https://tts.readthedocs.io/en/dev/models/xtts.html) - 📣 [🐶Bark](https://github.com/suno-ai/bark) is now available for inference with unconstrained voice cloning. [Docs](https://tts.readthedocs.io/en/dev/models/bark.html) - 📣 You can use [~1100 Fairseq models](https://github.com/facebookresearch/fairseq/tree/main/examples/mms) with 🐸TTS. @@ -267,19 +267,13 @@ models = TTS(cs_api_model="XTTS").list_models() # Init TTS with the target studio speaker tts = TTS(model_name="coqui_studio/en/Torcull Diarmuid/coqui_studio", progress_bar=False) # Run TTS -tts.tts_to_file(text="This is a test.", file_path=OUTPUT_PATH) +tts.tts_to_file(text="This is a test.", language="en", file_path=OUTPUT_PATH) # V1 model models = TTS(cs_api_model="V1").list_models() # Run TTS with emotion and speed control # Emotion control only works with V1 model tts.tts_to_file(text="This is a test.", file_path=OUTPUT_PATH, emotion="Happy", speed=1.5) - -# XTTS-multilingual -models = TTS(cs_api_model="XTTS-multilingual").list_models() -# Run TTS with emotion and speed control -# Emotion control only works with V1 model -tts.tts_to_file(text="Das ist ein Test.", file_path=OUTPUT_PATH, language="de", speed=1.0) ``` #### Example text to speech using **Fairseq models in ~1100 languages** 🤯. diff --git a/TTS/api.py b/TTS/api.py index 5d1fbb5a..c8600dcd 100644 --- a/TTS/api.py +++ b/TTS/api.py @@ -60,7 +60,7 @@ class TTS(nn.Module): vocoder_config_path (str, optional): Path to the vocoder config. Defaults to None. progress_bar (bool, optional): Whether to pring a progress bar while downloading a model. Defaults to True. cs_api_model (str, optional): Name of the model to use for the Coqui Studio API. Available models are - "XTTS", "XTTS-multilingual", "V1". You can also use `TTS.cs_api.CS_API" for more control. + "XTTS", "V1". You can also use `TTS.cs_api.CS_API" for more control. Defaults to "XTTS". gpu (bool, optional): Enable/disable GPU. Some models might be too slow on CPU. Defaults to False. """ @@ -275,7 +275,7 @@ class TTS(nn.Module): speaker_name (str, optional): Speaker name from Coqui Studio. Defaults to None. language (str): Language of the text. If None, the default language of the speaker is used. Language is only - supported by `XTTS-multilang` model. Currently supports en, de, es, fr, it, pt, pl. Defaults to "en". + supported by `XTTS` model. emotion (str, optional): Emotion of the speaker. One of "Neutral", "Happy", "Sad", "Angry", "Dull". Emotions are only available with "V1" model. Defaults to None. @@ -321,7 +321,7 @@ class TTS(nn.Module): Speaker name for multi-speaker. You can check whether loaded model is multi-speaker by `tts.is_multi_speaker` and list speakers by `tts.speakers`. Defaults to None. language (str): Language of the text. If None, the default language of the speaker is used. Language is only - supported by `XTTS-multilang` model. Currently supports en, de, es, fr, it, pt, pl. Defaults to "en". + supported by `XTTS` model. speaker_wav (str, optional): Path to a reference wav file to use for voice cloning with supporting models like YourTTS. Defaults to None. diff --git a/TTS/bin/synthesize.py b/TTS/bin/synthesize.py index ef41c8e1..ddfe35d2 100755 --- a/TTS/bin/synthesize.py +++ b/TTS/bin/synthesize.py @@ -227,7 +227,7 @@ def main(): parser.add_argument( "--cs_model", type=str, - help="Name of the 🐸Coqui Studio model. 
Available models are `XTTS`, `XTTS-multilingual`, `V1`.", + help="Name of the 🐸Coqui Studio model. Available models are `XTTS`, `V1`.", ) parser.add_argument( "--emotion", @@ -238,7 +238,7 @@ def main(): parser.add_argument( "--language", type=str, - help="Language to condition the model with. Only available for 🐸Coqui Studio `XTTS-multilingual` model.", + help="Language to condition the model with. Only available for 🐸Coqui Studio `XTTS` model.", default=None, ) parser.add_argument( diff --git a/TTS/cs_api.py b/TTS/cs_api.py index 4a44b535..c45f9d08 100644 --- a/TTS/cs_api.py +++ b/TTS/cs_api.py @@ -43,7 +43,7 @@ class CS_API: Args: api_token (str): 🐸Coqui Studio API token. If not provided, it will be read from the environment variable `COQUI_STUDIO_TOKEN`. - model (str): 🐸Coqui Studio model. It can be either `V1`, `XTTS`, or `XTTS-multilang`. Default is `XTTS`. + model (str): 🐸Coqui Studio model. It can be either `V1`, `XTTS`. Default is `XTTS`. Example listing all available speakers: @@ -65,7 +65,7 @@ class CS_API: Example with multi-language model: >>> from TTS.api import CS_API - >>> tts = CS_API(model="XTTS-multilang") + >>> tts = CS_API(model="XTTS") >>> wav, sr = api.tts("Hello world", speaker_name=tts.speakers[0].name, language="en") """ @@ -78,16 +78,12 @@ class CS_API: "XTTS": { "list_speakers": "https://app.coqui.ai/api/v2/speakers", "synthesize": "https://app.coqui.ai/api/v2/samples/xtts/render/", - "list_voices": "https://app.coqui.ai/api/v2/voices/xtts/", - }, - "XTTS-multilang": { - "list_speakers": "https://app.coqui.ai/api/v2/speakers", - "synthesize": "https://app.coqui.ai/api/v2/samples/multilingual/render/", - "list_voices": "https://app.coqui.ai/api/v2/voices/xtts/", + "list_voices": "https://app.coqui.ai/api/v2/voices/xtts", }, } - SUPPORTED_LANGUAGES = ["en", "es", "de", "fr", "it", "pt", "pl"] + + SUPPORTED_LANGUAGES = ["en", "es", "de", "fr", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh-cn", "ja"] def __init__(self, api_token=None, model="XTTS"): self.api_token = api_token @@ -139,7 +135,7 @@ class CS_API: self._check_token() conn = http.client.HTTPSConnection("app.coqui.ai") url = self.MODEL_ENDPOINTS[self.model]["list_speakers"] - conn.request("GET", f"{url}?per_page=100", headers=self.headers) + conn.request("GET", f"{url}?page=1&per_page=100", headers=self.headers) res = conn.getresponse() data = res.read() return [Speaker(s) for s in json.loads(data)["result"]] @@ -148,7 +144,7 @@ class CS_API: """List custom voices created by the user.""" conn = http.client.HTTPSConnection("app.coqui.ai") url = self.MODEL_ENDPOINTS[self.model]["list_voices"] - conn.request("GET", f"{url}", headers=self.headers) + conn.request("GET", f"{url}?page=1&per_page=100", headers=self.headers) res = conn.getresponse() data = res.read() return [Speaker(s, True) for s in json.loads(data)["result"]] @@ -197,14 +193,6 @@ class CS_API: } ) elif model == "XTTS": - payload.update( - { - "name": speaker.name, - "text": text, - "speed": speed, - } - ) - elif model == "XTTS-multilang": payload.update( { "name": speaker.name, @@ -226,13 +214,10 @@ class CS_API: assert language is None, "❗ language is not supported for V1 model." elif self.model == "XTTS": assert emotion is None, f"❗ Emotions are not supported for XTTS model. Use V1 model." - assert language is None, "❗ Language is not supported for XTTS model. Use XTTS-multilang model." - elif self.model == "XTTS-multilang": - assert emotion is None, f"❗ Emotions are not supported for XTTS-multilang model. Use V1 model." 
- assert language is not None, "❗ Language is required for XTTS-multilang model." + assert language is not None, "❗ Language is required for XTTS model." assert ( language in self.SUPPORTED_LANGUAGES - ), f"❗ Language {language} is not yet supported. Use one of: en, es, de, fr, it, pt, pl" + ), f"❗ Language {language} is not yet supported. Check https://docs.coqui.ai/reference/samples_xtts_create." return text, speaker_name, speaker_id, emotion, speed, language def tts( @@ -255,7 +240,7 @@ class CS_API: supported by `V1` model. Defaults to None. speed (float): Speed of the speech. 1.0 is normal speed. language (str): Language of the text. If None, the default language of the speaker is used. Language is only - supported by `XTTS-multilang` model. Currently supports en, de, es, fr, it, pt, pl. Defaults to "en". + supported by `XTTS` model. See https://docs.coqui.ai/reference/samples_xtts_create for supported languages. """ self._check_token() self.ping_api() @@ -305,7 +290,7 @@ class CS_API: speed (float): Speed of the speech. 1.0 is normal speed. pipe_out (BytesIO, optional): Flag to stdout the generated TTS wav file for shell pipe. language (str): Language of the text. If None, the default language of the speaker is used. Language is only - supported by `XTTS-multilang` model. Currently supports en, de, es, fr, it, pt, pl. Defaults to "en". + supported by `XTTS` model. Currently supports en, de, es, fr, it, pt, pl. Defaults to "en". file_path (str): Path to save the file. If None, a temporary file is created. """ if file_path is None: @@ -323,20 +308,7 @@ if __name__ == "__main__": print(api.list_speakers_as_tts_models()) ts = time.time() - wav, sr = api.tts("It took me quite a long time to develop a voice.", speaker_name=api.speakers[0].name) + wav, sr = api.tts("It took me quite a long time to develop a voice.", language="en", speaker_name=api.speakers[0].name) print(f" [i] XTTS took {time.time() - ts:.2f}s") - filepath = api.tts_to_file(text="Hello world!", speaker_name=api.speakers[0].name, file_path="output.wav") - - api = CS_API(model="XTTS-multilang") - print(api.speakers) - - ts = time.time() - wav, sr = api.tts( - "It took me quite a long time to develop a voice.", speaker_name=api.speakers[0].name, language="en" - ) - print(f" [i] XTTS took {time.time() - ts:.2f}s") - - filepath = api.tts_to_file( - text="Hello world!", speaker_name=api.speakers[0].name, file_path="output.wav", language="en" - ) + filepath = api.tts_to_file(text="Hello world!", speaker_name=api.speakers[0].name, language="en", file_path="output.wav") diff --git a/docs/source/inference.md b/docs/source/inference.md index 4de9ecdd..b40445ae 100644 --- a/docs/source/inference.md +++ b/docs/source/inference.md @@ -198,19 +198,12 @@ from TTS.api import CS_API # Init 🐸 Coqui Studio API # you can either set the API token as an environment variable `COQUI_STUDIO_TOKEN` or pass it as an argument. -# XTTS - Best quality and life-like speech in EN +# XTTS - Best quality and life-like speech in multiple languages. See https://docs.coqui.ai/reference/samples_xtts_create for supported languages. api = CS_API(api_token=, model="XTTS") api.speakers # all the speakers are available with all the models. api.list_speakers() api.list_voices() -wav, sample_rate = api.tts(text="This is a test.", speaker=api.speakers[0].name, emotion="Happy", speed=1.5) - -# XTTS-multilingual - Multilingual XTTS with [en, de, es, fr, it, pt, ...] 
(more langs coming soon) -api = CS_API(api_token=, model="XTTS-multilingual") -api.speakers -api.list_speakers() -api.list_voices() -wav, sample_rate = api.tts(text="This is a test.", speaker=api.speakers[0].name, emotion="Happy", speed=1.5) +wav, sample_rate = api.tts(text="This is a test.", speaker=api.speakers[0].name, emotion="Happy", language="en", speed=1.5) # V1 - Fast and lightweight TTS in EN with emotion control. api = CS_API(api_token=, model="V1") From cc6e9fcaa72a6ba7255a1a39c77ffdb5b7bc7e4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 8 Nov 2023 11:13:58 +0100 Subject: [PATCH 27/67] Fix #3153 (#3169) --- TTS/vocoder/layers/losses.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/TTS/vocoder/layers/losses.py b/TTS/vocoder/layers/losses.py index befc43cc..74cfc726 100644 --- a/TTS/vocoder/layers/losses.py +++ b/TTS/vocoder/layers/losses.py @@ -195,10 +195,10 @@ def _apply_D_loss(scores_fake, scores_real, loss_func): if isinstance(scores_fake, list): # multi-scale loss for score_fake, score_real in zip(scores_fake, scores_real): - total_loss, real_loss, fake_loss = loss_func(score_fake=score_fake, score_real=score_real) + total_loss, real_loss_, fake_loss_ = loss_func(score_fake=score_fake, score_real=score_real) loss += total_loss - real_loss += real_loss - fake_loss += fake_loss + real_loss += real_loss_ + fake_loss += fake_loss_ # normalize loss values with number of scales (discriminators) loss /= len(scores_fake) real_loss /= len(scores_real) From 99edd6daa38f929c703e29f6dfcdcdd8f5260b0d Mon Sep 17 00:00:00 2001 From: Enno Hermann Date: Wed, 8 Nov 2023 11:29:01 +0100 Subject: [PATCH 28/67] Fix ModelManager.list_models() (#3128) * fix(utils.manage): remove hard-coded model_type variable * refactor(utils.manage): address lint issues, fix typos Addressed the following: TTS/utils/manage.py:307:12: R1705: Unnecessary "else" after "return" (no-else-return) TTS/utils/manage.py:308:21: W1514: Using open without explicitly specifying an encoding (unspecified-encoding) TTS/utils/manage.py:299:4: R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements) TTS/utils/manage.py:299:4: R0201: Method could be a function (no-self-use) TTS/utils/manage.py:314:4: R0201: Method could be a function (no-self-use) --- TTS/utils/manage.py | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/TTS/utils/manage.py b/TTS/utils/manage.py index c732e1f5..1cd437e6 100644 --- a/TTS/utils/manage.py +++ b/TTS/utils/manage.py @@ -109,7 +109,6 @@ class ModelManager(object): def _list_for_model_type(self, model_type): models_name_list = [] model_count = 1 - model_type = "tts_models" models_name_list.extend(self._list_models(model_type, model_count)) return models_name_list @@ -298,22 +297,22 @@ class ModelManager(object): model_item = self.set_model_url(model_item) return model_item, model_full_name, model, md5hash - def ask_tos(self, model_full_path): + @staticmethod + def ask_tos(model_full_path): """Ask the user to agree to the terms of service""" tos_path = os.path.join(model_full_path, "tos_agreed.txt") - if not os.path.exists(tos_path): - print(" > You must agree to the terms of service to use this model.") - print(" | > Please see the terms of service at https://coqui.ai/cpml.txt") - print(' | > "I have read, understood and agreed the Terms and Conditions." 
- [y/n]') - answer = input(" | | > ") - if answer.lower() == "y": - with open(tos_path, "w") as f: - f.write("I have read, understood ad agree the Terms and Conditions.") - return True - else: - return False + print(" > You must agree to the terms of service to use this model.") + print(" | > Please see the terms of service at https://coqui.ai/cpml.txt") + print(' | > "I have read, understood and agreed to the Terms and Conditions." - [y/n]') + answer = input(" | | > ") + if answer.lower() == "y": + with open(tos_path, "w", encoding="utf-8") as f: + f.write("I have read, understood and agreed to the Terms and Conditions.") + return True + return False - def tos_agreed(self, model_item, model_full_path): + @staticmethod + def tos_agreed(model_item, model_full_path): """Check if the user has agreed to the terms of service""" if "tos_required" in model_item and model_item["tos_required"]: tos_path = os.path.join(model_full_path, "tos_agreed.txt") From 78a596618a4deb21ef1058e911c15334b81b0669 Mon Sep 17 00:00:00 2001 From: Gorkem Date: Wed, 8 Nov 2023 13:32:02 +0300 Subject: [PATCH 29/67] Fix for exception on streaming if last chunk empty (#3160) --- TTS/tts/models/xtts.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 4ab00270..a8a574c0 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -603,10 +603,21 @@ class Xtts(BaseTTS): if wav_gen_prev is not None: wav_chunk = wav_gen[(wav_gen_prev.shape[0] - overlap_len) : -overlap_len] if wav_overlap is not None: - crossfade_wav = wav_chunk[:overlap_len] - crossfade_wav = crossfade_wav * torch.linspace(0.0, 1.0, overlap_len).to(crossfade_wav.device) - wav_chunk[:overlap_len] = wav_overlap * torch.linspace(1.0, 0.0, overlap_len).to(wav_overlap.device) - wav_chunk[:overlap_len] += crossfade_wav + # cross fade the overlap section + if overlap_len > len(wav_chunk): + # wav_chunk is smaller than overlap_len, pass on last wav_gen + if wav_gen_prev is not None: + wav_chunk = wav_gen[(wav_gen_prev.shape[0] - overlap_len):] + else: + # not expecting will hit here as problem happens on last chunk + wav_chunk = wav_gen[-overlap_len:] + return wav_chunk, wav_gen, None + else: + crossfade_wav = wav_chunk[:overlap_len] + crossfade_wav = crossfade_wav * torch.linspace(0.0, 1.0, overlap_len).to(crossfade_wav.device) + wav_chunk[:overlap_len] = wav_overlap * torch.linspace(1.0, 0.0, overlap_len).to(wav_overlap.device) + wav_chunk[:overlap_len] += crossfade_wav + wav_overlap = wav_gen[-overlap_len:] wav_gen_prev = wav_gen return wav_chunk, wav_gen_prev, wav_overlap From 03ad90135bb70d1ca6b46b3b7f3e89563aa65af6 Mon Sep 17 00:00:00 2001 From: Julian Weber Date: Wed, 8 Nov 2023 13:47:33 +0100 Subject: [PATCH 30/67] Add lang code in XTTS doc (#3158) * Add lang code in XTTS doc * Remove ununsed config and args * update docs * woops --- TTS/tts/configs/xtts_config.py | 22 -------------------- TTS/tts/models/xtts.py | 37 ---------------------------------- docs/source/models/xtts.md | 11 ++++------ 3 files changed, 4 insertions(+), 66 deletions(-) diff --git a/TTS/tts/configs/xtts_config.py b/TTS/tts/configs/xtts_config.py index ea95faf5..2d3edaf4 100644 --- a/TTS/tts/configs/xtts_config.py +++ b/TTS/tts/configs/xtts_config.py @@ -37,29 +37,11 @@ class XttsConfig(BaseTTSConfig): If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. Defaults to `0.8`. 
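# Aside: the `top_p` setting documented above is standard nucleus sampling. Below is a
# minimal 1-D sketch of the filtering step, for illustration only; the model's actual
# sampling is performed by the underlying generate() call, not by this code.
import torch

def top_p_filter(logits: torch.Tensor, top_p: float) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    sorted_probs, sorted_idx = torch.sort(probs, descending=True)
    cumulative = torch.cumsum(sorted_probs, dim=-1)
    keep = cumulative - sorted_probs < top_p      # smallest prefix whose mass reaches top_p
    kept_ids = sorted_idx[keep]
    filtered = torch.full_like(logits, float("-inf"))
    filtered[kept_ids] = logits[kept_ids]         # all other tokens are masked out before sampling
    return filtered

# e.g. keeps only the two most probable tokens here (0.61 + 0.22 >= 0.8):
print(top_p_filter(torch.tensor([2.0, 1.0, 0.5, -1.0]), top_p=0.8))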
- cond_free_k (float): - Knob that determines how to balance the conditioning free signal with the conditioning-present signal. [0,inf]. - As cond_free_k increases, the output becomes dominated by the conditioning-free signal. - Formula is: output=cond_present_output*(cond_free_k+1)-cond_absenct_output*cond_free_k. Defaults to `2.0`. - - diffusion_temperature (float): - Controls the variance of the noise fed into the diffusion model. [0,1]. Values at 0 - are the "mean" prediction of the diffusion network and will sound bland and smeared. - Defaults to `1.0`. - num_gpt_outputs (int): Number of samples taken from the autoregressive model, all of which are filtered using CLVP. As XTTS is a probabilistic model, more samples means a higher probability of creating something "great". Defaults to `16`. - decoder_iterations (int): - Number of diffusion steps to perform. [0,4000]. More steps means the network has more chances to iteratively refine - the output, which should theoretically mean a higher quality output. Generally a value above 250 is not noticeably better, - however. Defaults to `30`. - - decoder_sampler (str): - Diffusion sampler to be used. `ddim` or `dpm++2m`. Defaults to `ddim`. - gpt_cond_len (int): Secs audio to be used as conditioning for the autoregressive model. Defaults to `3`. @@ -110,11 +92,7 @@ class XttsConfig(BaseTTSConfig): repetition_penalty: float = 2.0 top_k: int = 50 top_p: float = 0.85 - cond_free_k: float = 2.0 - diffusion_temperature: float = 1.0 num_gpt_outputs: int = 1 - decoder_iterations: int = 30 - decoder_sampler: str = "ddim" # cloning gpt_cond_len: int = 3 diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index a8a574c0..7cc9836a 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -152,19 +152,6 @@ class XttsArgs(Coqpit): gpt_code_stride_len (int, optional): The hop_size of dvae and consequently of the gpt output. Defaults to 1024. gpt_use_masking_gt_prompt_approach (bool, optional): If True, it will use ground truth as prompt and it will mask the loss to avoid repetition. Defaults to True. gpt_use_perceiver_resampler (bool, optional): If True, it will use perceiver resampler from flamingo paper - https://arxiv.org/abs/2204.14198. Defaults to False. - - For DiffTTS model: - diff_model_channels (int, optional): The number of channels for the DiffTTS model. Defaults to 1024. - diff_num_layers (int, optional): The number of layers for the DiffTTS model. Defaults to 10. - diff_in_channels (int, optional): The input channels for the DiffTTS model. Defaults to 100. - diff_out_channels (int, optional): The output channels for the DiffTTS model. Defaults to 200. - diff_in_latent_channels (int, optional): The input latent channels for the DiffTTS model. Defaults to 1024. - diff_in_tokens (int, optional): The input tokens for the DiffTTS model. Defaults to 8193. - diff_dropout (int, optional): The dropout percentage for the DiffTTS model. Defaults to 0. - diff_use_fp16 (bool, optional): Whether to use fp16 for the DiffTTS model. Defaults to False. - diff_num_heads (int, optional): The number of heads for the DiffTTS model. Defaults to 16. - diff_layer_drop (int, optional): The layer dropout percentage for the DiffTTS model. Defaults to 0. - diff_unconditioned_percentage (int, optional): The percentage of unconditioned inputs for the DiffTTS model. Defaults to 0. 
""" gpt_batch_size: int = 1 @@ -193,19 +180,6 @@ class XttsArgs(Coqpit): gpt_use_masking_gt_prompt_approach: bool = True gpt_use_perceiver_resampler: bool = False - # Diffusion Decoder params - diff_model_channels: int = 1024 - diff_num_layers: int = 10 - diff_in_channels: int = 100 - diff_out_channels: int = 200 - diff_in_latent_channels: int = 1024 - diff_in_tokens: int = 8193 - diff_dropout: int = 0 - diff_use_fp16: bool = False - diff_num_heads: int = 16 - diff_layer_drop: int = 0 - diff_unconditioned_percentage: int = 0 - # HifiGAN Decoder params input_sample_rate: int = 22050 output_sample_rate: int = 24000 @@ -426,10 +400,6 @@ class Xtts(BaseTTS): "repetition_penalty": config.repetition_penalty, "top_k": config.top_k, "top_p": config.top_p, - "cond_free_k": config.cond_free_k, - "diffusion_temperature": config.diffusion_temperature, - "decoder_iterations": config.decoder_iterations, - "decoder_sampler": config.decoder_sampler, "gpt_cond_len": config.gpt_cond_len, "max_ref_len": config.max_ref_len, "sound_norm_refs": config.sound_norm_refs, @@ -454,13 +424,6 @@ class Xtts(BaseTTS): gpt_cond_len=6, max_ref_len=10, sound_norm_refs=False, - # Decoder inference - decoder_iterations=100, - cond_free=True, - cond_free_k=2, - diffusion_temperature=1.0, - decoder_sampler="ddim", - decoder="hifigan", **hf_generate_kwargs, ): """ diff --git a/docs/source/models/xtts.md b/docs/source/models/xtts.md index 8167a1d1..03e44af1 100644 --- a/docs/source/models/xtts.md +++ b/docs/source/models/xtts.md @@ -24,8 +24,7 @@ a few tricks to make it faster and support streaming inference. Current implementation only supports inference. ### Languages -As of now, XTTS-v2 supports 16 languages: English, Spanish, French, German, Italian, Portuguese, -Polish, Turkish, Russian, Dutch, Czech, Arabic, Chinese (Simplified), Japanese, Hungarian, Korean +As of now, XTTS-v2 supports 16 languages: English (en), Spanish (es), French (fr), German (de), Italian (it), Portuguese (pt), Polish (pl), Turkish (tr), Russian (ru), Dutch (nl), Czech (cs), Arabic (ar), Chinese (zh-cn), Japanese (ja), Hungarian (hu) and Korean (ko). Stay tuned as we continue to add support for more languages. If you have any language requests, please feel free to reach out. 
@@ -116,7 +115,7 @@ model.load_checkpoint(config, checkpoint_dir="/path/to/xtts/", use_deepspeed=Tru model.cuda() print("Computing speaker latents...") -gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=["reference.wav"]) +gpt_cond_latent, speaker_embedding = model.get_conditioning_latents(audio_path=["reference.wav"]) print("Inference...") out = model.inference( @@ -124,7 +123,6 @@ out = model.inference( "en", gpt_cond_latent, speaker_embedding, - diffusion_conditioning, temperature=0.7, # Add custom parameters here ) torchaudio.save("xtts.wav", torch.tensor(out["wav"]).unsqueeze(0), 24000) @@ -153,7 +151,7 @@ model.load_checkpoint(config, checkpoint_dir="/path/to/xtts/", use_deepspeed=Tru model.cuda() print("Computing speaker latents...") -gpt_cond_latent, _, speaker_embedding = model.get_conditioning_latents(audio_path=["reference.wav"]) +gpt_cond_latent, speaker_embedding = model.get_conditioning_latents(audio_path=["reference.wav"]) print("Inference...") t0 = time.time() @@ -210,7 +208,7 @@ model.load_checkpoint(config, checkpoint_path=XTTS_CHECKPOINT, vocab_path=TOKENI model.cuda() print("Computing speaker latents...") -gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=[SPEAKER_REFERENCE]) +gpt_cond_latent, speaker_embedding = model.get_conditioning_latents(audio_path=[SPEAKER_REFERENCE]) print("Inference...") out = model.inference( @@ -218,7 +216,6 @@ out = model.inference( "en", gpt_cond_latent, speaker_embedding, - diffusion_conditioning, temperature=0.7, # Add custom parameters here ) torchaudio.save(OUTPUT_WAV_PATH, torch.tensor(out["wav"]).unsqueeze(0), 24000) From 58cb0d8dd0a67e0d599d264987e518d823a78f46 Mon Sep 17 00:00:00 2001 From: Julian Weber Date: Wed, 8 Nov 2023 14:51:42 +0100 Subject: [PATCH 31/67] Remove v1 doc and tests (#3172) * remove v1 in inference.md * remove v1 in README.md * Update test_models.py --- README.md | 2 +- docs/source/inference.md | 4 ++-- tests/zoo_tests/test_models.py | 7 +++---- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 353db7cf..935627e5 100644 --- a/README.md +++ b/README.md @@ -205,7 +205,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu" print(TTS().list_models()) # Init TTS -tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1").to(device) +tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to(device) # Run TTS # ❗ Since this model is multi-lingual voice cloning model, we must set the target speaker_wav and language diff --git a/docs/source/inference.md b/docs/source/inference.md index b40445ae..611a2445 100644 --- a/docs/source/inference.md +++ b/docs/source/inference.md @@ -124,7 +124,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu" print(TTS().list_models()) # Init TTS -tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1").to(device) +tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to(device) # Run TTS # ❗ Since this model is multi-lingual voice cloning model, we must set the target speaker_wav and language @@ -231,4 +231,4 @@ api.tts_with_vc_to_file( speaker_wav="target/speaker.wav", file_path="ouptut.wav" ) -``` \ No newline at end of file +``` diff --git a/tests/zoo_tests/test_models.py b/tests/zoo_tests/test_models.py index 79aef5cb..d1c6b67c 100644 --- a/tests/zoo_tests/test_models.py +++ b/tests/zoo_tests/test_models.py @@ -14,7 +14,6 @@ from TTS.utils.manage import ModelManager MODELS_WITH_SEP_TESTS = [ 
"tts_models/multilingual/multi-dataset/bark", "tts_models/en/multi-dataset/tortoise-v2", - "tts_models/multilingual/multi-dataset/xtts_v1", "tts_models/multilingual/multi-dataset/xtts_v1.1", "tts_models/multilingual/multi-dataset/xtts_v2", ] @@ -83,14 +82,14 @@ def test_xtts(): if use_gpu: run_cli( "yes | " - f"tts --model_name tts_models/multilingual/multi-dataset/xtts_v1 " + f"tts --model_name tts_models/multilingual/multi-dataset/xtts_v1.1 " f'--text "This is an example." --out_path "{output_path}" --progress_bar False --use_cuda True ' f'--speaker_wav "{speaker_wav}" --language_idx "en"' ) else: run_cli( "yes | " - f"tts --model_name tts_models/multilingual/multi-dataset/xtts_v1 " + f"tts --model_name tts_models/multilingual/multi-dataset/xtts_v1.1 " f'--text "This is an example." --out_path "{output_path}" --progress_bar False ' f'--speaker_wav "{speaker_wav}" --language_idx "en"' ) @@ -104,7 +103,7 @@ def test_xtts_streaming(): speaker_wav = [os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0001.wav")] speaker_wav_2 = os.path.join(get_tests_data_path(), "ljspeech", "wavs", "LJ001-0002.wav") speaker_wav.append(speaker_wav_2) - model_path = os.path.join(get_user_data_dir("tts"), "tts_models--multilingual--multi-dataset--xtts_v1") + model_path = os.path.join(get_user_data_dir("tts"), "tts_models--multilingual--multi-dataset--xtts_v1.1") config = XttsConfig() config.load_json(os.path.join(model_path, "config.json")) model = Xtts.init_from_config(config) From 46d9c27212939aa54b22f9df842c753de67b1f34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 8 Nov 2023 16:07:56 +0100 Subject: [PATCH 32/67] Update to v0.20.2 --- TTS/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/VERSION b/TTS/VERSION index 847e9aef..727d97b9 100644 --- a/TTS/VERSION +++ b/TTS/VERSION @@ -1 +1 @@ -0.20.1 +0.20.2 From 66a1e248d03f2cd3cc2ae27b46ebcc9add91a223 Mon Sep 17 00:00:00 2001 From: Gorkem Date: Thu, 9 Nov 2023 18:28:39 +0300 Subject: [PATCH 33/67] torchaudio should use proper backend to load audio (#3179) --- TTS/tts/models/xtts.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 7cc9836a..656a80bc 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -69,12 +69,9 @@ def wav_to_mel_cloning( def load_audio(audiopath, sampling_rate): # better load setting following: https://github.com/faroit/python_audio_loading_benchmark - if audiopath[-4:] == ".mp3": - # it uses torchaudio with sox backend to load mp3 - audio, lsr = torchaudio.backend.sox_io_backend.load(audiopath) - else: - # it uses torchaudio soundfile backend to load all the others data type - audio, lsr = torchaudio.backend.soundfile_backend.load(audiopath) + + # torchaudio should chose proper backend to load audio depending on platform + audio, lsr = torchaudio.load(audiopath) # stereo to mono if needed if audio.size(0) != 1: From 1b9c400bca7bd92d8f2bcee853c8008b7b16834d Mon Sep 17 00:00:00 2001 From: Matthew Boakes Date: Thu, 9 Nov 2023 15:31:03 +0000 Subject: [PATCH 34/67] PyTorch 2.1 Updates (Weight Norm and TorchAudio I/O) (#3176) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Replaced PyTorch weight_norm With parametrizations.weight_norm * TorchAudio: Migrating The I/O Functions To Use The Dispatcher Mechanism * Corrected Code Style --------- Co-authored-by: Eren Gölge --- TTS/tts/layers/delightful_tts/conv_layers.py | 13 +++--- 
.../layers/delightful_tts/kernel_predictor.py | 23 +++++----- TTS/tts/layers/generic/wavenet.py | 13 +++--- TTS/tts/layers/glow_tts/glow.py | 2 +- TTS/tts/layers/tortoise/vocoder.py | 42 ++++++++++--------- TTS/tts/layers/vits/discriminator.py | 2 +- TTS/tts/layers/xtts/hifigan_decoder.py | 19 +++++---- TTS/tts/models/xtts.py | 3 +- TTS/vc/models/freevc.py | 10 +++-- TTS/vc/modules/freevc/modules.py | 28 ++++++------- TTS/vc/modules/freevc/wavlm/wavlm.py | 2 +- TTS/vocoder/layers/hifigan.py | 23 +++++----- TTS/vocoder/layers/melgan.py | 11 ++--- TTS/vocoder/layers/wavegrad.py | 17 ++++---- TTS/vocoder/models/hifigan_discriminator.py | 4 +- TTS/vocoder/models/hifigan_generator.py | 19 +++++---- TTS/vocoder/models/melgan_discriminator.py | 2 +- TTS/vocoder/models/melgan_generator.py | 4 +- .../models/parallel_wavegan_discriminator.py | 9 ++-- .../models/parallel_wavegan_generator.py | 5 ++- TTS/vocoder/models/univnet_discriminator.py | 3 +- TTS/vocoder/models/univnet_generator.py | 5 ++- TTS/vocoder/models/wavegrad.py | 15 +++---- requirements.txt | 2 +- 24 files changed, 147 insertions(+), 129 deletions(-) diff --git a/TTS/tts/layers/delightful_tts/conv_layers.py b/TTS/tts/layers/delightful_tts/conv_layers.py index 354a0336..fb9aa449 100644 --- a/TTS/tts/layers/delightful_tts/conv_layers.py +++ b/TTS/tts/layers/delightful_tts/conv_layers.py @@ -3,6 +3,7 @@ from typing import Tuple import torch import torch.nn as nn # pylint: disable=consider-using-from-import import torch.nn.functional as F +from torch.nn.utils import parametrize from TTS.tts.layers.delightful_tts.kernel_predictor import KernelPredictor @@ -73,7 +74,7 @@ class ConvNorm(nn.Module): ) nn.init.xavier_uniform_(self.conv.weight, gain=nn.init.calculate_gain(w_init_gain)) if self.use_weight_norm: - self.conv = nn.utils.weight_norm(self.conv) + self.conv = nn.utils.parametrizations.weight_norm(self.conv) def forward(self, signal, mask=None): conv_signal = self.conv(signal) @@ -113,7 +114,7 @@ class ConvLSTMLinear(nn.Module): dilation=1, w_init_gain="relu", ) - conv_layer = nn.utils.weight_norm(conv_layer.conv, name="weight") + conv_layer = nn.utils.parametrizations.weight_norm(conv_layer.conv, name="weight") convolutions.append(conv_layer) self.convolutions = nn.ModuleList(convolutions) @@ -567,7 +568,7 @@ class LVCBlock(torch.nn.Module): self.convt_pre = nn.Sequential( nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm( + nn.utils.parametrizations.weight_norm( nn.ConvTranspose1d( in_channels, in_channels, @@ -584,7 +585,7 @@ class LVCBlock(torch.nn.Module): self.conv_blocks.append( nn.Sequential( nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm( + nn.utils.parametrizations.weight_norm( nn.Conv1d( in_channels, in_channels, @@ -665,6 +666,6 @@ class LVCBlock(torch.nn.Module): def remove_weight_norm(self): self.kernel_predictor.remove_weight_norm() - nn.utils.remove_weight_norm(self.convt_pre[1]) + parametrize.remove_parametrizations(self.convt_pre[1], "weight") for block in self.conv_blocks: - nn.utils.remove_weight_norm(block[1]) + parametrize.remove_parametrizations(block[1], "weight") diff --git a/TTS/tts/layers/delightful_tts/kernel_predictor.py b/TTS/tts/layers/delightful_tts/kernel_predictor.py index 19dfd57e..96c550b6 100644 --- a/TTS/tts/layers/delightful_tts/kernel_predictor.py +++ b/TTS/tts/layers/delightful_tts/kernel_predictor.py @@ -1,4 +1,5 @@ import torch.nn as nn # pylint: disable=consider-using-from-import +from torch.nn.utils import parametrize class KernelPredictor(nn.Module): @@ -36,7 +37,9 @@ class 
KernelPredictor(nn.Module): kpnet_bias_channels = conv_out_channels * conv_layers # l_b self.input_conv = nn.Sequential( - nn.utils.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)), + nn.utils.parametrizations.weight_norm( + nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True) + ), getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), ) @@ -46,7 +49,7 @@ class KernelPredictor(nn.Module): self.residual_convs.append( nn.Sequential( nn.Dropout(kpnet_dropout), - nn.utils.weight_norm( + nn.utils.parametrizations.weight_norm( nn.Conv1d( kpnet_hidden_channels, kpnet_hidden_channels, @@ -56,7 +59,7 @@ class KernelPredictor(nn.Module): ) ), getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), - nn.utils.weight_norm( + nn.utils.parametrizations.weight_norm( nn.Conv1d( kpnet_hidden_channels, kpnet_hidden_channels, @@ -68,7 +71,7 @@ class KernelPredictor(nn.Module): getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), ) ) - self.kernel_conv = nn.utils.weight_norm( + self.kernel_conv = nn.utils.parametrizations.weight_norm( nn.Conv1d( kpnet_hidden_channels, kpnet_kernel_channels, @@ -77,7 +80,7 @@ class KernelPredictor(nn.Module): bias=True, ) ) - self.bias_conv = nn.utils.weight_norm( + self.bias_conv = nn.utils.parametrizations.weight_norm( nn.Conv1d( kpnet_hidden_channels, kpnet_bias_channels, @@ -117,9 +120,9 @@ class KernelPredictor(nn.Module): return kernels, bias def remove_weight_norm(self): - nn.utils.remove_weight_norm(self.input_conv[0]) - nn.utils.remove_weight_norm(self.kernel_conv) - nn.utils.remove_weight_norm(self.bias_conv) + parametrize.remove_parametrizations(self.input_conv[0], "weight") + parametrize.remove_parametrizations(self.kernel_conv, "weight") + parametrize.remove_parametrizations(self.bias_conv, "weight") for block in self.residual_convs: - nn.utils.remove_weight_norm(block[1]) - nn.utils.remove_weight_norm(block[3]) + parametrize.remove_parametrizations(block[1], "weight") + parametrize.remove_parametrizations(block[3], "weight") diff --git a/TTS/tts/layers/generic/wavenet.py b/TTS/tts/layers/generic/wavenet.py index bc89da4f..f8de63b4 100644 --- a/TTS/tts/layers/generic/wavenet.py +++ b/TTS/tts/layers/generic/wavenet.py @@ -1,5 +1,6 @@ import torch from torch import nn +from torch.nn.utils import parametrize @torch.jit.script @@ -62,7 +63,7 @@ class WN(torch.nn.Module): # init conditioning layer if c_in_channels > 0: cond_layer = torch.nn.Conv1d(c_in_channels, 2 * hidden_channels * num_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") + self.cond_layer = torch.nn.utils.parametrizations.weight_norm(cond_layer, name="weight") # intermediate layers for i in range(num_layers): dilation = dilation_rate**i @@ -75,7 +76,7 @@ class WN(torch.nn.Module): in_layer = torch.nn.Conv1d( hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") + in_layer = torch.nn.utils.parametrizations.weight_norm(in_layer, name="weight") self.in_layers.append(in_layer) if i < num_layers - 1: @@ -84,7 +85,7 @@ class WN(torch.nn.Module): res_skip_channels = hidden_channels res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") + res_skip_layer = torch.nn.utils.parametrizations.weight_norm(res_skip_layer, name="weight") 
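# Aside: the migration pattern applied throughout this patch, shown on a standalone
# layer. This is an illustrative sketch, not code from the repository; it only assumes
# the public PyTorch 2.1 APIs that the patch itself switches to.
import torch
from torch import nn
from torch.nn.utils.parametrizations import weight_norm
from torch.nn.utils.parametrize import remove_parametrizations

conv = weight_norm(nn.Conv1d(1, 32, kernel_size=3, padding=1))  # new API: registers a weight parametrization
y = conv(torch.randn(4, 1, 100))                                # the forward pass is unchanged
remove_parametrizations(conv, "weight")                         # replaces the deprecated nn.utils.remove_weight_norm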
self.res_skip_layers.append(res_skip_layer) # setup weight norm if not weight_norm: @@ -115,11 +116,11 @@ class WN(torch.nn.Module): def remove_weight_norm(self): if self.c_in_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) + parametrize.remove_parametrizations(self.cond_layer, "weight") for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) + parametrize.remove_parametrizations(l, "weight") for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) + parametrize.remove_parametrizations(l, "weight") class WNBlocks(nn.Module): diff --git a/TTS/tts/layers/glow_tts/glow.py b/TTS/tts/layers/glow_tts/glow.py index 273c62a5..b02c3118 100644 --- a/TTS/tts/layers/glow_tts/glow.py +++ b/TTS/tts/layers/glow_tts/glow.py @@ -186,7 +186,7 @@ class CouplingBlock(nn.Module): self.sigmoid_scale = sigmoid_scale # input layer start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1) - start = torch.nn.utils.weight_norm(start) + start = torch.nn.utils.parametrizations.weight_norm(start) self.start = start # output layer # Initializing last layer to 0 makes the affine coupling layers diff --git a/TTS/tts/layers/tortoise/vocoder.py b/TTS/tts/layers/tortoise/vocoder.py index 47365eb5..a5200c26 100644 --- a/TTS/tts/layers/tortoise/vocoder.py +++ b/TTS/tts/layers/tortoise/vocoder.py @@ -1,4 +1,3 @@ -import json from dataclasses import dataclass from enum import Enum from typing import Callable, Optional @@ -6,6 +5,7 @@ from typing import Callable, Optional import torch import torch.nn as nn import torch.nn.functional as F +import torch.nn.utils.parametrize as parametrize MAX_WAV_VALUE = 32768.0 @@ -44,7 +44,9 @@ class KernelPredictor(torch.nn.Module): kpnet_bias_channels = conv_out_channels * conv_layers # l_b self.input_conv = nn.Sequential( - nn.utils.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)), + nn.utils.parametrizations.weight_norm( + nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True) + ), getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), ) @@ -54,7 +56,7 @@ class KernelPredictor(torch.nn.Module): self.residual_convs.append( nn.Sequential( nn.Dropout(kpnet_dropout), - nn.utils.weight_norm( + nn.utils.parametrizations.weight_norm( nn.Conv1d( kpnet_hidden_channels, kpnet_hidden_channels, @@ -64,7 +66,7 @@ class KernelPredictor(torch.nn.Module): ) ), getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), - nn.utils.weight_norm( + nn.utils.parametrizations.weight_norm( nn.Conv1d( kpnet_hidden_channels, kpnet_hidden_channels, @@ -76,7 +78,7 @@ class KernelPredictor(torch.nn.Module): getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), ) ) - self.kernel_conv = nn.utils.weight_norm( + self.kernel_conv = nn.utils.parametrizations.weight_norm( nn.Conv1d( kpnet_hidden_channels, kpnet_kernel_channels, @@ -85,7 +87,7 @@ class KernelPredictor(torch.nn.Module): bias=True, ) ) - self.bias_conv = nn.utils.weight_norm( + self.bias_conv = nn.utils.parametrizations.weight_norm( nn.Conv1d( kpnet_hidden_channels, kpnet_bias_channels, @@ -125,12 +127,12 @@ class KernelPredictor(torch.nn.Module): return kernels, bias def remove_weight_norm(self): - nn.utils.remove_weight_norm(self.input_conv[0]) - nn.utils.remove_weight_norm(self.kernel_conv) - nn.utils.remove_weight_norm(self.bias_conv) + parametrize.remove_parametrizations(self.input_conv[0], "weight") + parametrize.remove_parametrizations(self.kernel_conv, "weight") + 
parametrize.remove_parametrizations(self.bias_conv) for block in self.residual_convs: - nn.utils.remove_weight_norm(block[1]) - nn.utils.remove_weight_norm(block[3]) + parametrize.remove_parametrizations(block[1], "weight") + parametrize.remove_parametrizations(block[3], "weight") class LVCBlock(torch.nn.Module): @@ -169,7 +171,7 @@ class LVCBlock(torch.nn.Module): self.convt_pre = nn.Sequential( nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm( + nn.utils.parametrizations.weight_norm( nn.ConvTranspose1d( in_channels, in_channels, @@ -186,7 +188,7 @@ class LVCBlock(torch.nn.Module): self.conv_blocks.append( nn.Sequential( nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm( + nn.utils.parametrizations.weight_norm( nn.Conv1d( in_channels, in_channels, @@ -267,9 +269,9 @@ class LVCBlock(torch.nn.Module): def remove_weight_norm(self): self.kernel_predictor.remove_weight_norm() - nn.utils.remove_weight_norm(self.convt_pre[1]) + parametrize.remove_parametrizations(self.convt_pre[1], "weight") for block in self.conv_blocks: - nn.utils.remove_weight_norm(block[1]) + parametrize.remove_parametrizations(block[1], "weight") class UnivNetGenerator(nn.Module): @@ -314,11 +316,13 @@ class UnivNetGenerator(nn.Module): ) ) - self.conv_pre = nn.utils.weight_norm(nn.Conv1d(noise_dim, channel_size, 7, padding=3, padding_mode="reflect")) + self.conv_pre = nn.utils.parametrizations.weight_norm( + nn.Conv1d(noise_dim, channel_size, 7, padding=3, padding_mode="reflect") + ) self.conv_post = nn.Sequential( nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm(nn.Conv1d(channel_size, 1, 7, padding=3, padding_mode="reflect")), + nn.utils.parametrizations.weight_norm(nn.Conv1d(channel_size, 1, 7, padding=3, padding_mode="reflect")), nn.Tanh(), ) @@ -346,11 +350,11 @@ class UnivNetGenerator(nn.Module): self.remove_weight_norm() def remove_weight_norm(self): - nn.utils.remove_weight_norm(self.conv_pre) + parametrize.remove_parametrizations(self.conv_pre, "weight") for layer in self.conv_post: if len(layer.state_dict()) != 0: - nn.utils.remove_weight_norm(layer) + parametrize.remove_parametrizations(layer, "weight") for res_block in self.res_stack: res_block.remove_weight_norm() diff --git a/TTS/tts/layers/vits/discriminator.py b/TTS/tts/layers/vits/discriminator.py index 148f283c..c27d11be 100644 --- a/TTS/tts/layers/vits/discriminator.py +++ b/TTS/tts/layers/vits/discriminator.py @@ -14,7 +14,7 @@ class DiscriminatorS(torch.nn.Module): def __init__(self, use_spectral_norm=False): super().__init__() - norm_f = nn.utils.spectral_norm if use_spectral_norm else nn.utils.weight_norm + norm_f = nn.utils.spectral_norm if use_spectral_norm else nn.utils.parametrizations.weight_norm self.convs = nn.ModuleList( [ norm_f(Conv1d(1, 16, 15, 1, padding=7)), diff --git a/TTS/tts/layers/xtts/hifigan_decoder.py b/TTS/tts/layers/xtts/hifigan_decoder.py index 5fcff870..9add7826 100644 --- a/TTS/tts/layers/xtts/hifigan_decoder.py +++ b/TTS/tts/layers/xtts/hifigan_decoder.py @@ -3,7 +3,8 @@ import torchaudio from torch import nn from torch.nn import Conv1d, ConvTranspose1d from torch.nn import functional as F -from torch.nn.utils import remove_weight_norm, weight_norm +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations from TTS.utils.io import load_fsspec @@ -120,9 +121,9 @@ class ResBlock1(torch.nn.Module): def remove_weight_norm(self): for l in self.convs1: - remove_weight_norm(l) + remove_parametrizations(l, "weight") for l in self.convs2: - remove_weight_norm(l) + 
remove_parametrizations(l, "weight") class ResBlock2(torch.nn.Module): @@ -176,7 +177,7 @@ class ResBlock2(torch.nn.Module): def remove_weight_norm(self): for l in self.convs: - remove_weight_norm(l) + remove_parametrizations(l, "weight") class HifiganGenerator(torch.nn.Module): @@ -251,10 +252,10 @@ class HifiganGenerator(torch.nn.Module): self.cond_layer = nn.Conv1d(cond_channels, upsample_initial_channel, 1) if not conv_pre_weight_norm: - remove_weight_norm(self.conv_pre) + remove_parametrizations(self.conv_pre, "weight") if not conv_post_weight_norm: - remove_weight_norm(self.conv_post) + remove_parametrizations(self.conv_post, "weight") if self.cond_in_each_up_layer: self.conds = nn.ModuleList() @@ -317,11 +318,11 @@ class HifiganGenerator(torch.nn.Module): def remove_weight_norm(self): print("Removing weight norm...") for l in self.ups: - remove_weight_norm(l) + remove_parametrizations(l, "weight") for l in self.resblocks: l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) + remove_parametrizations(self.conv_pre, "weight") + remove_parametrizations(self.conv_post, "weight") def load_checkpoint( self, config, checkpoint_path, eval=False, cache=False diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 656a80bc..f41bcfb9 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -1,5 +1,4 @@ import os -from contextlib import contextmanager from dataclasses import dataclass import librosa @@ -8,7 +7,7 @@ import torch.nn.functional as F import torchaudio from coqpit import Coqpit -from TTS.tts.layers.tortoise.audio_utils import denormalize_tacotron_mel, wav_to_univnet_mel +from TTS.tts.layers.tortoise.audio_utils import wav_to_univnet_mel from TTS.tts.layers.xtts.gpt import GPT from TTS.tts.layers.xtts.hifigan_decoder import HifiDecoder from TTS.tts.layers.xtts.stream_generator import init_stream_support diff --git a/TTS/vc/models/freevc.py b/TTS/vc/models/freevc.py index fd53a77f..8bb99892 100644 --- a/TTS/vc/models/freevc.py +++ b/TTS/vc/models/freevc.py @@ -5,9 +5,11 @@ import numpy as np import torch from coqpit import Coqpit from torch import nn -from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d +from torch.nn import Conv1d, Conv2d, ConvTranspose1d from torch.nn import functional as F -from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm +from torch.nn.utils import spectral_norm +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations import TTS.vc.modules.freevc.commons as commons import TTS.vc.modules.freevc.modules as modules @@ -152,9 +154,9 @@ class Generator(torch.nn.Module): def remove_weight_norm(self): print("Removing weight norm...") for l in self.ups: - remove_weight_norm(l) + remove_parametrizations(l, "weight") for l in self.resblocks: - l.remove_weight_norm() + remove_parametrizations(l, "weight") class DiscriminatorP(torch.nn.Module): diff --git a/TTS/vc/modules/freevc/modules.py b/TTS/vc/modules/freevc/modules.py index 0503a13c..9bb54990 100644 --- a/TTS/vc/modules/freevc/modules.py +++ b/TTS/vc/modules/freevc/modules.py @@ -1,13 +1,9 @@ -import copy -import math - -import numpy as np -import scipy import torch from torch import nn -from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d +from torch.nn import Conv1d from torch.nn import functional as F -from torch.nn.utils import remove_weight_norm, weight_norm +from torch.nn.utils.parametrizations import weight_norm +from 
torch.nn.utils.parametrize import remove_parametrizations import TTS.vc.modules.freevc.commons as commons from TTS.vc.modules.freevc.commons import get_padding, init_weights @@ -122,7 +118,7 @@ class WN(torch.nn.Module): if gin_channels != 0: cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") + self.cond_layer = torch.nn.utils.parametrizations.weight_norm(cond_layer, name="weight") for i in range(n_layers): dilation = dilation_rate**i @@ -130,7 +126,7 @@ class WN(torch.nn.Module): in_layer = torch.nn.Conv1d( hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") + in_layer = torch.nn.utils.parametrizations.weight_norm(in_layer, name="weight") self.in_layers.append(in_layer) # last one is not necessary @@ -140,7 +136,7 @@ class WN(torch.nn.Module): res_skip_channels = hidden_channels res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") + res_skip_layer = torch.nn.utils.parametrizations.weight_norm(res_skip_layer, name="weight") self.res_skip_layers.append(res_skip_layer) def forward(self, x, x_mask, g=None, **kwargs): @@ -172,11 +168,11 @@ class WN(torch.nn.Module): def remove_weight_norm(self): if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) + remove_parametrizations(self.cond_layer, "weight") for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) + remove_parametrizations(l, "weight") for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) + remove_parametrizations(l, "weight") class ResBlock1(torch.nn.Module): @@ -250,9 +246,9 @@ class ResBlock1(torch.nn.Module): def remove_weight_norm(self): for l in self.convs1: - remove_weight_norm(l) + remove_parametrizations(l, "weight") for l in self.convs2: - remove_weight_norm(l) + remove_parametrizations(l, "weight") class ResBlock2(torch.nn.Module): @@ -297,7 +293,7 @@ class ResBlock2(torch.nn.Module): def remove_weight_norm(self): for l in self.convs: - remove_weight_norm(l) + remove_parametrizations(l, "weight") class Log(nn.Module): diff --git a/TTS/vc/modules/freevc/wavlm/wavlm.py b/TTS/vc/modules/freevc/wavlm/wavlm.py index 7efb11bf..fc93bd4f 100644 --- a/TTS/vc/modules/freevc/wavlm/wavlm.py +++ b/TTS/vc/modules/freevc/wavlm/wavlm.py @@ -497,7 +497,7 @@ class TransformerEncoder(nn.Module): nn.init.normal_(self.pos_conv.weight, mean=0, std=std) nn.init.constant_(self.pos_conv.bias, 0) - self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2) + self.pos_conv = nn.utils.parametrizations.weight_norm(self.pos_conv, name="weight", dim=2) self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU()) if hasattr(args, "relative_position_embedding"): diff --git a/TTS/vocoder/layers/hifigan.py b/TTS/vocoder/layers/hifigan.py index f5120072..8dd75133 100644 --- a/TTS/vocoder/layers/hifigan.py +++ b/TTS/vocoder/layers/hifigan.py @@ -1,4 +1,5 @@ from torch import nn +from torch.nn.utils.parametrize import remove_parametrizations # pylint: disable=dangerous-default-value @@ -10,14 +11,16 @@ class ResStack(nn.Module): resstack += [ nn.LeakyReLU(0.2), nn.ReflectionPad1d(dilation), - nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=kernel, dilation=dilation)), + nn.utils.parametrizations.weight_norm( + nn.Conv1d(channel, channel, kernel_size=kernel, 
dilation=dilation) + ), nn.LeakyReLU(0.2), nn.ReflectionPad1d(padding), - nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=1)), + nn.utils.parametrizations.weight_norm(nn.Conv1d(channel, channel, kernel_size=1)), ] self.resstack = nn.Sequential(*resstack) - self.shortcut = nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=1)) + self.shortcut = nn.utils.parametrizations.weight_norm(nn.Conv1d(channel, channel, kernel_size=1)) def forward(self, x): x1 = self.shortcut(x) @@ -25,13 +28,13 @@ class ResStack(nn.Module): return x1 + x2 def remove_weight_norm(self): - nn.utils.remove_weight_norm(self.shortcut) - nn.utils.remove_weight_norm(self.resstack[2]) - nn.utils.remove_weight_norm(self.resstack[5]) - nn.utils.remove_weight_norm(self.resstack[8]) - nn.utils.remove_weight_norm(self.resstack[11]) - nn.utils.remove_weight_norm(self.resstack[14]) - nn.utils.remove_weight_norm(self.resstack[17]) + remove_parametrizations(self.shortcut, "weight") + remove_parametrizations(self.resstack[2], "weight") + remove_parametrizations(self.resstack[5], "weight") + remove_parametrizations(self.resstack[8], "weight") + remove_parametrizations(self.resstack[11], "weight") + remove_parametrizations(self.resstack[14], "weight") + remove_parametrizations(self.resstack[17], "weight") class MRF(nn.Module): diff --git a/TTS/vocoder/layers/melgan.py b/TTS/vocoder/layers/melgan.py index 4bb328e9..7ad41a0f 100644 --- a/TTS/vocoder/layers/melgan.py +++ b/TTS/vocoder/layers/melgan.py @@ -1,5 +1,6 @@ from torch import nn -from torch.nn.utils import weight_norm +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations class ResidualStack(nn.Module): @@ -27,7 +28,7 @@ class ResidualStack(nn.Module): ] self.shortcuts = nn.ModuleList( - [weight_norm(nn.Conv1d(channels, channels, kernel_size=1, bias=True)) for i in range(num_res_blocks)] + [weight_norm(nn.Conv1d(channels, channels, kernel_size=1, bias=True)) for _ in range(num_res_blocks)] ) def forward(self, x): @@ -37,6 +38,6 @@ class ResidualStack(nn.Module): def remove_weight_norm(self): for block, shortcut in zip(self.blocks, self.shortcuts): - nn.utils.remove_weight_norm(block[2]) - nn.utils.remove_weight_norm(block[4]) - nn.utils.remove_weight_norm(shortcut) + remove_parametrizations(block[2], "weight") + remove_parametrizations(block[4], "weight") + remove_parametrizations(shortcut, "weight") diff --git a/TTS/vocoder/layers/wavegrad.py b/TTS/vocoder/layers/wavegrad.py index 24b905f9..9f1512c6 100644 --- a/TTS/vocoder/layers/wavegrad.py +++ b/TTS/vocoder/layers/wavegrad.py @@ -1,7 +1,8 @@ import torch import torch.nn.functional as F from torch import nn -from torch.nn.utils import weight_norm +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations class Conv1d(nn.Conv1d): @@ -56,8 +57,8 @@ class FiLM(nn.Module): return shift, scale def remove_weight_norm(self): - nn.utils.remove_weight_norm(self.input_conv) - nn.utils.remove_weight_norm(self.output_conv) + remove_parametrizations(self.input_conv, "weight") + remove_parametrizations(self.output_conv, "weight") def apply_weight_norm(self): self.input_conv = weight_norm(self.input_conv) @@ -111,13 +112,13 @@ class UBlock(nn.Module): return o def remove_weight_norm(self): - nn.utils.remove_weight_norm(self.res_block) + remove_parametrizations(self.res_block, "weight") for _, layer in enumerate(self.main_block): if len(layer.state_dict()) != 0: - 
nn.utils.remove_weight_norm(layer) + remove_parametrizations(layer, "weight") for _, layer in enumerate(self.out_block): if len(layer.state_dict()) != 0: - nn.utils.remove_weight_norm(layer) + remove_parametrizations(layer, "weight") def apply_weight_norm(self): self.res_block = weight_norm(self.res_block) @@ -153,10 +154,10 @@ class DBlock(nn.Module): return o + res def remove_weight_norm(self): - nn.utils.remove_weight_norm(self.res_block) + remove_parametrizations(self.res_block, "weight") for _, layer in enumerate(self.main_block): if len(layer.state_dict()) != 0: - nn.utils.remove_weight_norm(layer) + remove_parametrizations(layer, "weight") def apply_weight_norm(self): self.res_block = weight_norm(self.res_block) diff --git a/TTS/vocoder/models/hifigan_discriminator.py b/TTS/vocoder/models/hifigan_discriminator.py index ca5eaf40..7447a5fb 100644 --- a/TTS/vocoder/models/hifigan_discriminator.py +++ b/TTS/vocoder/models/hifigan_discriminator.py @@ -30,7 +30,7 @@ class DiscriminatorP(torch.nn.Module): super().__init__() self.period = period get_padding = lambda k, d: int((k * d - d) / 2) - norm_f = nn.utils.spectral_norm if use_spectral_norm else nn.utils.weight_norm + norm_f = nn.utils.spectral_norm if use_spectral_norm else nn.utils.parametrizations.weight_norm self.convs = nn.ModuleList( [ norm_f(nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), @@ -125,7 +125,7 @@ class DiscriminatorS(torch.nn.Module): def __init__(self, use_spectral_norm=False): super().__init__() - norm_f = nn.utils.spectral_norm if use_spectral_norm else nn.utils.weight_norm + norm_f = nn.utils.spectral_norm if use_spectral_norm else nn.utils.parametrizations.weight_norm self.convs = nn.ModuleList( [ norm_f(nn.Conv1d(1, 128, 15, 1, padding=7)), diff --git a/TTS/vocoder/models/hifigan_generator.py b/TTS/vocoder/models/hifigan_generator.py index 4916d1e6..92475322 100644 --- a/TTS/vocoder/models/hifigan_generator.py +++ b/TTS/vocoder/models/hifigan_generator.py @@ -3,7 +3,8 @@ import torch from torch import nn from torch.nn import Conv1d, ConvTranspose1d from torch.nn import functional as F -from torch.nn.utils import remove_weight_norm, weight_norm +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations from TTS.utils.io import load_fsspec @@ -99,9 +100,9 @@ class ResBlock1(torch.nn.Module): def remove_weight_norm(self): for l in self.convs1: - remove_weight_norm(l) + remove_parametrizations(l, "weight") for l in self.convs2: - remove_weight_norm(l) + remove_parametrizations(l, "weight") class ResBlock2(torch.nn.Module): @@ -155,7 +156,7 @@ class ResBlock2(torch.nn.Module): def remove_weight_norm(self): for l in self.convs: - remove_weight_norm(l) + remove_parametrizations(l, "weight") class HifiganGenerator(torch.nn.Module): @@ -227,10 +228,10 @@ class HifiganGenerator(torch.nn.Module): self.cond_layer = nn.Conv1d(cond_channels, upsample_initial_channel, 1) if not conv_pre_weight_norm: - remove_weight_norm(self.conv_pre) + remove_parametrizations(self.conv_pre, "weight") if not conv_post_weight_norm: - remove_weight_norm(self.conv_post) + remove_parametrizations(self.conv_post, "weight") def forward(self, x, g=None): """ @@ -283,11 +284,11 @@ class HifiganGenerator(torch.nn.Module): def remove_weight_norm(self): print("Removing weight norm...") for l in self.ups: - remove_weight_norm(l) + remove_parametrizations(l, "weight") for l in self.resblocks: l.remove_weight_norm() - remove_weight_norm(self.conv_pre) 
- remove_weight_norm(self.conv_post) + remove_parametrizations(self.conv_pre, "weight") + remove_parametrizations(self.conv_post, "weight") def load_checkpoint( self, config, checkpoint_path, eval=False, cache=False diff --git a/TTS/vocoder/models/melgan_discriminator.py b/TTS/vocoder/models/melgan_discriminator.py index 14f00c59..e41467da 100644 --- a/TTS/vocoder/models/melgan_discriminator.py +++ b/TTS/vocoder/models/melgan_discriminator.py @@ -1,6 +1,6 @@ import numpy as np from torch import nn -from torch.nn.utils import weight_norm +from torch.nn.utils.parametrizations import weight_norm class MelganDiscriminator(nn.Module): diff --git a/TTS/vocoder/models/melgan_generator.py b/TTS/vocoder/models/melgan_generator.py index 989797f0..bb3fee78 100644 --- a/TTS/vocoder/models/melgan_generator.py +++ b/TTS/vocoder/models/melgan_generator.py @@ -1,6 +1,6 @@ import torch from torch import nn -from torch.nn.utils import weight_norm +from torch.nn.utils.parametrizations import weight_norm from TTS.utils.io import load_fsspec from TTS.vocoder.layers.melgan import ResidualStack @@ -80,7 +80,7 @@ class MelganGenerator(nn.Module): for _, layer in enumerate(self.layers): if len(layer.state_dict()) != 0: try: - nn.utils.remove_weight_norm(layer) + nn.utils.parametrize.remove_parametrizations(layer, "weight") except ValueError: layer.remove_weight_norm() diff --git a/TTS/vocoder/models/parallel_wavegan_discriminator.py b/TTS/vocoder/models/parallel_wavegan_discriminator.py index adf1bdae..d02af75f 100644 --- a/TTS/vocoder/models/parallel_wavegan_discriminator.py +++ b/TTS/vocoder/models/parallel_wavegan_discriminator.py @@ -2,6 +2,7 @@ import math import torch from torch import nn +from torch.nn.utils.parametrize import remove_parametrizations from TTS.vocoder.layers.parallel_wavegan import ResidualBlock @@ -68,7 +69,7 @@ class ParallelWaveganDiscriminator(nn.Module): def apply_weight_norm(self): def _apply_weight_norm(m): if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)): - torch.nn.utils.weight_norm(m) + torch.nn.utils.parametrizations.weight_norm(m) self.apply(_apply_weight_norm) @@ -76,7 +77,7 @@ class ParallelWaveganDiscriminator(nn.Module): def _remove_weight_norm(m): try: # print(f"Weight norm is removed from {m}.") - nn.utils.remove_weight_norm(m) + remove_parametrizations(m, "weight") except ValueError: # this module didn't have weight norm return @@ -171,7 +172,7 @@ class ResidualParallelWaveganDiscriminator(nn.Module): def apply_weight_norm(self): def _apply_weight_norm(m): if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)): - torch.nn.utils.weight_norm(m) + torch.nn.utils.parametrizations.weight_norm(m) self.apply(_apply_weight_norm) @@ -179,7 +180,7 @@ class ResidualParallelWaveganDiscriminator(nn.Module): def _remove_weight_norm(m): try: print(f"Weight norm is removed from {m}.") - nn.utils.remove_weight_norm(m) + remove_parametrizations(m, "weight") except ValueError: # this module didn't have weight norm return diff --git a/TTS/vocoder/models/parallel_wavegan_generator.py b/TTS/vocoder/models/parallel_wavegan_generator.py index 5587fb72..8338d946 100644 --- a/TTS/vocoder/models/parallel_wavegan_generator.py +++ b/TTS/vocoder/models/parallel_wavegan_generator.py @@ -2,6 +2,7 @@ import math import numpy as np import torch +from torch.nn.utils.parametrize import remove_parametrizations from TTS.utils.io import load_fsspec from TTS.vocoder.layers.parallel_wavegan import ResidualBlock @@ -126,7 +127,7 @@ class ParallelWaveganGenerator(torch.nn.Module): def _remove_weight_norm(m): try: 
# print(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) + remove_parametrizations(m, "weight") except ValueError: # this module didn't have weight norm return @@ -135,7 +136,7 @@ class ParallelWaveganGenerator(torch.nn.Module): def apply_weight_norm(self): def _apply_weight_norm(m): if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)): - torch.nn.utils.weight_norm(m) + torch.nn.utils.parametrizations.weight_norm(m) # print(f"Weight norm is applied to {m}.") self.apply(_apply_weight_norm) diff --git a/TTS/vocoder/models/univnet_discriminator.py b/TTS/vocoder/models/univnet_discriminator.py index 4c09520c..497d67ac 100644 --- a/TTS/vocoder/models/univnet_discriminator.py +++ b/TTS/vocoder/models/univnet_discriminator.py @@ -1,7 +1,8 @@ import torch import torch.nn.functional as F from torch import nn -from torch.nn.utils import spectral_norm, weight_norm +from torch.nn.utils import spectral_norm +from torch.nn.utils.parametrizations import weight_norm from TTS.utils.audio.torch_transforms import TorchSTFT from TTS.vocoder.models.hifigan_discriminator import MultiPeriodDiscriminator diff --git a/TTS/vocoder/models/univnet_generator.py b/TTS/vocoder/models/univnet_generator.py index 2ee28c7b..5e66b70d 100644 --- a/TTS/vocoder/models/univnet_generator.py +++ b/TTS/vocoder/models/univnet_generator.py @@ -3,6 +3,7 @@ from typing import List import numpy as np import torch import torch.nn.functional as F +from torch.nn.utils import parametrize from TTS.vocoder.layers.lvc_block import LVCBlock @@ -113,7 +114,7 @@ class UnivnetGenerator(torch.nn.Module): def _remove_weight_norm(m): try: # print(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) + parametrize.remove_parametrizations(m, "weight") except ValueError: # this module didn't have weight norm return @@ -124,7 +125,7 @@ class UnivnetGenerator(torch.nn.Module): def _apply_weight_norm(m): if isinstance(m, (torch.nn.Conv1d, torch.nn.Conv2d)): - torch.nn.utils.weight_norm(m) + torch.nn.utils.parametrizations.weight_norm(m) # print(f"Weight norm is applied to {m}.") self.apply(_apply_weight_norm) diff --git a/TTS/vocoder/models/wavegrad.py b/TTS/vocoder/models/wavegrad.py index a0f9221a..c1166e09 100644 --- a/TTS/vocoder/models/wavegrad.py +++ b/TTS/vocoder/models/wavegrad.py @@ -5,7 +5,8 @@ import numpy as np import torch from coqpit import Coqpit from torch import nn -from torch.nn.utils import weight_norm +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from trainer.trainer_utils import get_optimizer, get_scheduler @@ -178,27 +179,27 @@ class Wavegrad(BaseVocoder): for _, layer in enumerate(self.dblocks): if len(layer.state_dict()) != 0: try: - nn.utils.remove_weight_norm(layer) + remove_parametrizations(layer, "weight") except ValueError: layer.remove_weight_norm() for _, layer in enumerate(self.film): if len(layer.state_dict()) != 0: try: - nn.utils.remove_weight_norm(layer) + remove_parametrizations(layer, "weight") except ValueError: layer.remove_weight_norm() for _, layer in enumerate(self.ublocks): if len(layer.state_dict()) != 0: try: - nn.utils.remove_weight_norm(layer) + remove_parametrizations(layer, "weight") except ValueError: layer.remove_weight_norm() - nn.utils.remove_weight_norm(self.x_conv) - nn.utils.remove_weight_norm(self.out_conv) - nn.utils.remove_weight_norm(self.y_conv) + 
remove_parametrizations(self.x_conv, "weight") + remove_parametrizations(self.out_conv, "weight") + remove_parametrizations(self.y_conv, "weight") def apply_weight_norm(self): for _, layer in enumerate(self.dblocks): diff --git a/requirements.txt b/requirements.txt index 04343c84..53e8af59 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ numpy==1.22.0;python_version<="3.10" numpy==1.24.3;python_version>"3.10" cython==0.29.30 scipy>=1.11.2 -torch>=1.7 +torch>=2.1 torchaudio soundfile==0.12.* librosa==0.10.* From a8e9163fb30324624765e4dcb3244cd7e2bbeb05 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 9 Nov 2023 17:32:12 +0200 Subject: [PATCH 35/67] xtts/tokenizer: merge duplicate implementations of preprocess_text (#3170) This was found via ruff: > F811 Redefinition of unused `preprocess_text` from line 570 --- TTS/tts/layers/xtts/tokenizer.py | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py index 4c7ae6e3..edb09042 100644 --- a/TTS/tts/layers/xtts/tokenizer.py +++ b/TTS/tts/layers/xtts/tokenizer.py @@ -568,14 +568,16 @@ class VoiceBpeTokenizer: print(f"[!] Warning: The text length exceeds the character limit of {limit} for language '{lang}', this might cause truncated audio.") def preprocess_text(self, txt, lang): - if lang in ["en", "es", "fr", "de", "pt", "it", "pl", "ar", "cs", "ru", "nl", "tr", "zh-cn"]: + if lang in {"ar", "cs", "de", "en", "es", "fr", "hu", "it", "nl", "pl", "pt", "ru", "tr", "zh", "zh-cn"}: txt = multilingual_cleaners(txt, lang) - if lang == "zh-cn": + if lang in {"zh", "zh-cn"}: txt = chinese_transliterate(txt) elif lang == "ja": txt = japanese_cleaners(txt, self.katsu) + elif lang == "ko": + txt = korean_cleaners(txt) else: - raise NotImplementedError() + raise NotImplementedError(f"Language '{lang}' is not supported.") return txt def encode(self, txt, lang): @@ -594,23 +596,6 @@ class VoiceBpeTokenizer: txt = txt.replace("[UNK]", "") return txt - def preprocess_text(self, txt, lang): - if lang in ["en", "es", "fr", "de", "pt", "it", "pl", "zh", "ar", "cs", "ru", "nl", "tr", "hu"]: - txt = multilingual_cleaners(txt, lang) - elif lang == "ja": - if self.katsu is None: - import cutlet - - self.katsu = cutlet.Cutlet() - txt = japanese_cleaners(txt, self.katsu) - elif lang == "zh-cn" or lang == "zh": - txt = chinese_transliterate(txt) - elif lang == "ko": - txt = korean_cleaners(txt) - else: - raise NotImplementedError() - return txt - def __len__(self): return self.tokenizer.get_vocab_size() From 3b1e7038bc36abfa62ae4e4299c9df85bcfa5fdd Mon Sep 17 00:00:00 2001 From: Enno Hermann Date: Thu, 9 Nov 2023 16:49:52 +0100 Subject: [PATCH 36/67] fix(formatters): set missing root_path attribute (#3182) Fixes #2778 --- TTS/tts/datasets/formatters.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TTS/tts/datasets/formatters.py b/TTS/tts/datasets/formatters.py index fbf6881f..053444b0 100644 --- a/TTS/tts/datasets/formatters.py +++ b/TTS/tts/datasets/formatters.py @@ -280,7 +280,7 @@ def css10(root_path, meta_file, **kwargs): # pylint: disable=unused-argument cols = line.split("|") wav_file = os.path.join(root_path, cols[0]) text = cols[1] - items.append({"text": text, "audio_file": wav_file, "speaker_name": speaker_name}) + items.append({"text": text, "audio_file": wav_file, "speaker_name": speaker_name, "root_path": root_path}) return items @@ -294,7 +294,7 @@ def nancy(root_path, meta_file, **kwargs): # pylint: 
disable=unused-argument utt_id = line.split()[1] text = line[line.find('"') + 1 : line.rfind('"') - 1] wav_file = os.path.join(root_path, "wavn", utt_id + ".wav") - items.append({"text": text, "audio_file": wav_file, "speaker_name": speaker_name}) + items.append({"text": text, "audio_file": wav_file, "speaker_name": speaker_name, "root_path": root_path}) return items From 6f1cba2f81de6c97e81c0b2030b631e588047968 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 9 Nov 2023 17:41:37 +0100 Subject: [PATCH 37/67] Update to v0.20.3 --- TTS/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/VERSION b/TTS/VERSION index 727d97b9..144996ed 100644 --- a/TTS/VERSION +++ b/TTS/VERSION @@ -1 +1 @@ -0.20.2 +0.20.3 From a16360af859732eedcb6c0faaa1a57081c33c9be Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Mon, 13 Nov 2023 13:00:08 +0100 Subject: [PATCH 38/67] Implement chunking gpt_cond --- TTS/tts/configs/xtts_config.py | 10 +++- TTS/tts/models/xtts.py | 101 +++++++++++++++++++++++---------- 2 files changed, 78 insertions(+), 33 deletions(-) diff --git a/TTS/tts/configs/xtts_config.py b/TTS/tts/configs/xtts_config.py index 2d3edaf4..e8ab07da 100644 --- a/TTS/tts/configs/xtts_config.py +++ b/TTS/tts/configs/xtts_config.py @@ -43,7 +43,12 @@ class XttsConfig(BaseTTSConfig): Defaults to `16`. gpt_cond_len (int): - Secs audio to be used as conditioning for the autoregressive model. Defaults to `3`. + Secs audio to be used as conditioning for the autoregressive model. Defaults to `12`. + + gpt_cond_chunk_len (int): + Audio chunk size in secs. Audio is split into chunks and latents are extracted for each chunk. Then the + latents are averaged. Chunking improves the stability. It must be <= gpt_cond_len. + If gpt_cond_len == gpt_cond_chunk_len, no chunking. Defaults to `4`. max_ref_len (int): Maximum number of seconds of audio to be used as conditioning for the decoder. Defaults to `10`. @@ -95,6 +100,7 @@ class XttsConfig(BaseTTSConfig): num_gpt_outputs: int = 1 # cloning - gpt_cond_len: int = 3 + gpt_cond_len: int = 12 + gpt_cond_chunk_len: int = 4 max_ref_len: int = 10 sound_norm_refs: bool = False diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index f41bcfb9..0f79ad69 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -255,39 +255,57 @@ class Xtts(BaseTTS): return next(self.parameters()).device @torch.inference_mode() - def get_gpt_cond_latents(self, audio, sr, length: int = 3): + def get_gpt_cond_latents(self, audio, sr, length: int = 30, chunk_length: int = 6): """Compute the conditioning latents for the GPT model from the given audio. Args: audio (tensor): audio tensor. sr (int): Sample rate of the audio. - length (int): Length of the audio in seconds. Defaults to 3. + length (int): Length of the audio in seconds. If < 0, use the whole audio. Defaults to 30. + chunk_length (int): Length of the audio chunks in seconds. When `length == chunk_length`, the whole audio + is being used without chunking. It must be < `length`. Defaults to 6. 
""" if sr != 22050: audio = torchaudio.functional.resample(audio, sr, 22050) - audio = audio[:, : 22050 * length] + if length > 0: + audio = audio[:, : 22050 * length] if self.args.gpt_use_perceiver_resampler: - n_fft = 2048 - hop_length = 256 - win_length = 1024 + style_embs = [] + for i in range(0, audio.shape[1], 22050 * chunk_length): + audio_chunk = audio[:, i : i + 22050 * chunk_length] + mel_chunk = wav_to_mel_cloning( + audio_chunk, + mel_norms=self.mel_stats.cpu(), + n_fft=2048, + hop_length=256, + win_length=1024, + power=2, + normalized=False, + sample_rate=22050, + f_min=0, + f_max=8000, + n_mels=80, + ) + style_emb = self.gpt.get_style_emb(mel_chunk.to(self.device), None) + style_embs.append(style_emb) + + # mean style embedding + cond_latent = torch.stack(style_embs).mean(dim=0) else: - n_fft = 4096 - hop_length = 1024 - win_length = 4096 - mel = wav_to_mel_cloning( - audio, - mel_norms=self.mel_stats.cpu(), - n_fft=n_fft, - hop_length=hop_length, - win_length=win_length, - power=2, - normalized=False, - sample_rate=22050, - f_min=0, - f_max=8000, - n_mels=80, - ) - cond_latent = self.gpt.get_style_emb(mel.to(self.device)) + mel = wav_to_mel_cloning( + audio, + mel_norms=self.mel_stats.cpu(), + n_fft=4096, + hop_length=1024, + win_length=4096, + power=2, + normalized=False, + sample_rate=22050, + f_min=0, + f_max=8000, + n_mels=80, + ) + cond_latent = self.gpt.get_style_emb(mel.to(self.device)) return cond_latent.transpose(1, 2) @torch.inference_mode() @@ -323,12 +341,24 @@ class Xtts(BaseTTS): def get_conditioning_latents( self, audio_path, + max_ref_length=30, gpt_cond_len=6, - max_ref_length=10, + gpt_cond_chunk_len=6, librosa_trim_db=None, sound_norm_refs=False, - load_sr=24000, + load_sr=22050, ): + """Get the conditioning latents for the GPT model from the given audio. + + Args: + audio_path (str or List[str]): Path to reference audio file(s). + max_ref_length (int): Maximum length of each reference audio in seconds. Defaults to 30. + gpt_cond_len (int): Length of the audio used for gpt latents. Defaults to 6. + gpt_cond_chunk_len (int): Chunk length used for gpt latents. It must be <= gpt_conf_len. Defaults to 6. + librosa_trim_db (int, optional): Trim the audio using this value. If None, not trimming. Defaults to None. + sound_norm_refs (bool, optional): Whether to normalize the audio. Defaults to False. + load_sr (int, optional): Sample rate to load the audio. Defaults to 24000. 
+ """ # deal with multiples references if not isinstance(audio_path, list): audio_paths = [audio_path] @@ -349,14 +379,17 @@ class Xtts(BaseTTS): if librosa_trim_db is not None: audio = librosa.effects.trim(audio, top_db=librosa_trim_db)[0] + # compute latents for the decoder speaker_embedding = self.get_speaker_embedding(audio, load_sr) speaker_embeddings.append(speaker_embedding) audios.append(audio) - # use a merge of all references for gpt cond latents + # merge all the audios and compute the latents for the gpt full_audio = torch.cat(audios, dim=-1) - gpt_cond_latents = self.get_gpt_cond_latents(full_audio, load_sr, length=gpt_cond_len) # [1, 1024, T] + gpt_cond_latents = self.get_gpt_cond_latents( + full_audio, load_sr, length=gpt_cond_len, chunk_length=gpt_cond_chunk_len + ) # [1, 1024, T] if speaker_embeddings: speaker_embedding = torch.stack(speaker_embeddings) @@ -397,6 +430,7 @@ class Xtts(BaseTTS): "top_k": config.top_k, "top_p": config.top_p, "gpt_cond_len": config.gpt_cond_len, + "gpt_cond_chunk_len": config.gpt_cond_chunk_len, "max_ref_len": config.max_ref_len, "sound_norm_refs": config.sound_norm_refs, } @@ -417,7 +451,8 @@ class Xtts(BaseTTS): top_p=0.85, do_sample=True, # Cloning - gpt_cond_len=6, + gpt_cond_len=30, + gpt_cond_chunk_len=6, max_ref_len=10, sound_norm_refs=False, **hf_generate_kwargs, @@ -448,7 +483,10 @@ class Xtts(BaseTTS): (aka boring) outputs. Defaults to 0.8. gpt_cond_len: (int) Length of the audio used for cloning. If audio is shorter, then audio length is used - else the first `gpt_cond_len` secs is used. Defaults to 6 seconds. + else the first `gpt_cond_len` secs is used. Defaults to 30 seconds. + + gpt_cond_chunk_len: (int) Chunk length used for cloning. It must be <= `gpt_cond_len`. + If gpt_cond_len == gpt_cond_chunk_len, no chunking. Defaults to 6 seconds. hf_generate_kwargs: (**kwargs) The huggingface Transformers generate API is used for the autoregressive transformer. Extra keyword args fed to this function get forwarded directly to that API. 
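A hedged usage sketch of the new cloning arguments (assumes an already loaded XTTS model; the entry-point name `full_inference` comes from the surrounding class and is not shown in this hunk):

    outputs = model.full_inference(
        "Hello, this is a cloning test.",
        ref_audio_path="reference.wav",   # a single file or a list of reference files
        language="en",
        gpt_cond_len=30,                  # seconds of reference audio used for GPT conditioning
        gpt_cond_chunk_len=6,             # per-chunk length; chunk latents are averaged
    )
    wav = outputs["wav"]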
Documentation @@ -461,6 +499,7 @@ class Xtts(BaseTTS): (gpt_cond_latent, speaker_embedding) = self.get_conditioning_latents( audio_path=ref_audio_path, gpt_cond_len=gpt_cond_len, + gpt_cond_chunk_len=gpt_cond_chunk_len, max_ref_length=max_ref_len, sound_norm_refs=sound_norm_refs, ) @@ -566,7 +605,7 @@ class Xtts(BaseTTS): if overlap_len > len(wav_chunk): # wav_chunk is smaller than overlap_len, pass on last wav_gen if wav_gen_prev is not None: - wav_chunk = wav_gen[(wav_gen_prev.shape[0] - overlap_len):] + wav_chunk = wav_gen[(wav_gen_prev.shape[0] - overlap_len) :] else: # not expecting will hit here as problem happens on last chunk wav_chunk = wav_gen[-overlap_len:] @@ -576,7 +615,7 @@ class Xtts(BaseTTS): crossfade_wav = crossfade_wav * torch.linspace(0.0, 1.0, overlap_len).to(crossfade_wav.device) wav_chunk[:overlap_len] = wav_overlap * torch.linspace(1.0, 0.0, overlap_len).to(wav_overlap.device) wav_chunk[:overlap_len] += crossfade_wav - + wav_overlap = wav_gen[-overlap_len:] wav_gen_prev = wav_gen return wav_chunk, wav_gen_prev, wav_overlap From b2682d39c5dd584bb30f6650e3a5b18e27cccf5b Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Mon, 13 Nov 2023 13:01:01 +0100 Subject: [PATCH 39/67] Make style --- TTS/cs_api.py | 9 +++++--- TTS/tts/layers/tortoise/dpm_solver.py | 23 +++++++++++++++----- TTS/tts/layers/xtts/tokenizer.py | 11 ++++++---- TTS/tts/layers/xtts/trainer/dataset.py | 1 + tests/xtts_tests/test_xtts_gpt_train.py | 4 +++- tests/xtts_tests/test_xtts_v2-0_gpt_train.py | 4 +++- 6 files changed, 37 insertions(+), 15 deletions(-) diff --git a/TTS/cs_api.py b/TTS/cs_api.py index c45f9d08..9dc6c30d 100644 --- a/TTS/cs_api.py +++ b/TTS/cs_api.py @@ -82,7 +82,6 @@ class CS_API: }, } - SUPPORTED_LANGUAGES = ["en", "es", "de", "fr", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh-cn", "ja"] def __init__(self, api_token=None, model="XTTS"): @@ -308,7 +307,11 @@ if __name__ == "__main__": print(api.list_speakers_as_tts_models()) ts = time.time() - wav, sr = api.tts("It took me quite a long time to develop a voice.", language="en", speaker_name=api.speakers[0].name) + wav, sr = api.tts( + "It took me quite a long time to develop a voice.", language="en", speaker_name=api.speakers[0].name + ) print(f" [i] XTTS took {time.time() - ts:.2f}s") - filepath = api.tts_to_file(text="Hello world!", speaker_name=api.speakers[0].name, language="en", file_path="output.wav") + filepath = api.tts_to_file( + text="Hello world!", speaker_name=api.speakers[0].name, language="en", file_path="output.wav" + ) diff --git a/TTS/tts/layers/tortoise/dpm_solver.py b/TTS/tts/layers/tortoise/dpm_solver.py index 2166eebb..c70888df 100644 --- a/TTS/tts/layers/tortoise/dpm_solver.py +++ b/TTS/tts/layers/tortoise/dpm_solver.py @@ -562,15 +562,21 @@ class DPM_Solver: if order == 3: K = steps // 3 + 1 if steps % 3 == 0: - orders = [3,] * ( + orders = [ + 3, + ] * ( K - 2 ) + [2, 1] elif steps % 3 == 1: - orders = [3,] * ( + orders = [ + 3, + ] * ( K - 1 ) + [1] else: - orders = [3,] * ( + orders = [ + 3, + ] * ( K - 1 ) + [2] elif order == 2: @@ -581,7 +587,9 @@ class DPM_Solver: ] * K else: K = steps // 2 + 1 - orders = [2,] * ( + orders = [ + 2, + ] * ( K - 1 ) + [1] elif order == 1: @@ -1440,7 +1448,10 @@ class DPM_Solver: model_prev_list[-1] = self.model_fn(x, t) elif method in ["singlestep", "singlestep_fixed"]: if method == "singlestep": - (timesteps_outer, orders,) = self.get_orders_and_timesteps_for_singlestep_solver( + ( + timesteps_outer, + orders, + ) = self.get_orders_and_timesteps_for_singlestep_solver( 
steps=steps, order=order, skip_type=skip_type, @@ -1548,4 +1559,4 @@ def expand_dims(v, dims): Returns: a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. """ - return v[(...,) + (None,) * (dims - 1)] \ No newline at end of file + return v[(...,) + (None,) * (dims - 1)] diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py index edb09042..211d0a93 100644 --- a/TTS/tts/layers/xtts/tokenizer.py +++ b/TTS/tts/layers/xtts/tokenizer.py @@ -1,6 +1,7 @@ import json import os import re +from functools import cached_property import pypinyin import torch @@ -8,7 +9,6 @@ from hangul_romanize import Transliter from hangul_romanize.rule import academic from num2words import num2words from tokenizers import Tokenizer -from functools import cached_property from TTS.tts.layers.xtts.zh_num2words import TextNorm as zh_num2words @@ -560,19 +560,22 @@ class VoiceBpeTokenizer: @cached_property def katsu(self): import cutlet + return cutlet.Cutlet() - + def check_input_length(self, txt, lang): limit = self.char_limits.get(lang, 250) if len(txt) > limit: - print(f"[!] Warning: The text length exceeds the character limit of {limit} for language '{lang}', this might cause truncated audio.") + print( + f"[!] Warning: The text length exceeds the character limit of {limit} for language '{lang}', this might cause truncated audio." + ) def preprocess_text(self, txt, lang): if lang in {"ar", "cs", "de", "en", "es", "fr", "hu", "it", "nl", "pl", "pt", "ru", "tr", "zh", "zh-cn"}: txt = multilingual_cleaners(txt, lang) if lang in {"zh", "zh-cn"}: txt = chinese_transliterate(txt) - elif lang == "ja": + elif lang == "ja": txt = japanese_cleaners(txt, self.katsu) elif lang == "ko": txt = korean_cleaners(txt) diff --git a/TTS/tts/layers/xtts/trainer/dataset.py b/TTS/tts/layers/xtts/trainer/dataset.py index 8cb90ad0..2f958cb5 100644 --- a/TTS/tts/layers/xtts/trainer/dataset.py +++ b/TTS/tts/layers/xtts/trainer/dataset.py @@ -5,6 +5,7 @@ import sys import torch import torch.nn.functional as F import torch.utils.data + from TTS.tts.models.xtts import load_audio torch.set_num_threads(1) diff --git a/tests/xtts_tests/test_xtts_gpt_train.py b/tests/xtts_tests/test_xtts_gpt_train.py index 12c547d6..b8b9a4e3 100644 --- a/tests/xtts_tests/test_xtts_gpt_train.py +++ b/tests/xtts_tests/test_xtts_gpt_train.py @@ -60,7 +60,9 @@ XTTS_CHECKPOINT = None # "/raid/edresson/dev/Checkpoints/XTTS_evaluation/xtts_s # Training sentences generations -SPEAKER_REFERENCE = ["tests/data/ljspeech/wavs/LJ001-0002.wav"] # speaker reference to be used in training test sentences +SPEAKER_REFERENCE = [ + "tests/data/ljspeech/wavs/LJ001-0002.wav" +] # speaker reference to be used in training test sentences LANGUAGE = config_dataset.language diff --git a/tests/xtts_tests/test_xtts_v2-0_gpt_train.py b/tests/xtts_tests/test_xtts_v2-0_gpt_train.py index b19b7210..6663433c 100644 --- a/tests/xtts_tests/test_xtts_v2-0_gpt_train.py +++ b/tests/xtts_tests/test_xtts_v2-0_gpt_train.py @@ -58,7 +58,9 @@ XTTS_CHECKPOINT = None # "/raid/edresson/dev/Checkpoints/XTTS_evaluation/xtts_s # Training sentences generations -SPEAKER_REFERENCE = ["tests/data/ljspeech/wavs/LJ001-0002.wav"] # speaker reference to be used in training test sentences +SPEAKER_REFERENCE = [ + "tests/data/ljspeech/wavs/LJ001-0002.wav" +] # speaker reference to be used in training test sentences LANGUAGE = config_dataset.language From b85536b23f2c70488736044a2fea3ec9fd59cff4 Mon Sep 17 00:00:00 2001 From: WeberJulian Date: Mon, 13 Nov 2023 13:18:45 
+0100 Subject: [PATCH 40/67] fix max generation length --- TTS/tts/layers/xtts/gpt.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/TTS/tts/layers/xtts/gpt.py b/TTS/tts/layers/xtts/gpt.py index 683104d8..612da260 100644 --- a/TTS/tts/layers/xtts/gpt.py +++ b/TTS/tts/layers/xtts/gpt.py @@ -128,6 +128,7 @@ class GPT(nn.Module): self.heads = heads self.model_dim = model_dim self.max_conditioning_inputs = max_conditioning_inputs + self.max_gen_mel_tokens = max_mel_tokens - self.max_conditioning_inputs - 2 self.max_mel_tokens = -1 if max_mel_tokens == -1 else max_mel_tokens + 2 + self.max_conditioning_inputs self.max_text_tokens = -1 if max_text_tokens == -1 else max_text_tokens + 2 self.max_prompt_tokens = max_prompt_tokens @@ -598,7 +599,7 @@ class GPT(nn.Module): bos_token_id=self.start_audio_token, pad_token_id=self.stop_audio_token, eos_token_id=self.stop_audio_token, - max_length=self.max_mel_tokens, + max_length=self.max_gen_mel_tokens + gpt_inputs.shape[-1], **hf_generate_kwargs, ) if "return_dict_in_generate" in hf_generate_kwargs: @@ -611,7 +612,7 @@ class GPT(nn.Module): bos_token_id=self.start_audio_token, pad_token_id=self.stop_audio_token, eos_token_id=self.stop_audio_token, - max_length=self.max_mel_tokens, + max_length=self.max_gen_mel_tokens + fake_inputs.shape[-1], do_stream=True, **hf_generate_kwargs, ) From 92fa988aecc2937ac11927e7f0758bc94ee79ded Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Mon, 13 Nov 2023 13:44:06 +0100 Subject: [PATCH 41/67] Fixup --- TTS/tts/models/xtts.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 0f79ad69..b277c3ac 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -369,11 +369,8 @@ class Xtts(BaseTTS): audios = [] speaker_embedding = None for file_path in audio_paths: - # load the audio in 24khz to avoid issued with multiple sr references audio = load_audio(file_path, load_sr) audio = audio[:, : load_sr * max_ref_length].to(self.device) - if audio.shape[0] > 1: - audio = audio.mean(0, keepdim=True) if sound_norm_refs: audio = (audio / torch.abs(audio).max()) * 0.75 if librosa_trim_db is not None: From d96f3885d5486591cb0a00a8b74bd25bb21ee8d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Mon, 13 Nov 2023 17:07:25 +0100 Subject: [PATCH 42/67] Update to v0.20.4 --- TTS/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/VERSION b/TTS/VERSION index 144996ed..6dd46024 100644 --- a/TTS/VERSION +++ b/TTS/VERSION @@ -1 +1 @@ -0.20.3 +0.20.4 From 04901fb2e4b74953bb5733205adcb7cb13655a06 Mon Sep 17 00:00:00 2001 From: Julian Weber Date: Tue, 14 Nov 2023 16:07:17 +0100 Subject: [PATCH 43/67] Add speed control for inference (#3214) * Add speed control for inference * Fix XTTS tests * Add speed control tests --- TTS/tts/models/xtts.py | 17 +++++++++++++++++ tests/zoo_tests/test_models.py | 34 +++++++++++++++++++++++++++++++--- 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index b277c3ac..91985912 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -530,8 +530,10 @@ class Xtts(BaseTTS): top_p=0.85, do_sample=True, num_beams=1, + speed=1.0, **hf_generate_kwargs, ): + length_scale = 1.0 / max(speed, 0.05) text = text.strip().lower() text_tokens = torch.IntTensor(self.tokenizer.encode(text, lang=language)).unsqueeze(0).to(self.device) @@ -584,6 +586,13 @@ class Xtts(BaseTTS): gpt_latents = gpt_latents[:, :k] break + if length_scale != 
1.0: + gpt_latents = F.interpolate( + gpt_latents.transpose(1, 2), + scale_factor=length_scale, + mode="linear" + ).transpose(1, 2) + wav = self.hifigan_decoder(gpt_latents, g=speaker_embedding) return { @@ -634,8 +643,10 @@ class Xtts(BaseTTS): top_k=50, top_p=0.85, do_sample=True, + speed=1.0, **hf_generate_kwargs, ): + length_scale = 1.0 / max(speed, 0.05) text = text.strip().lower() text_tokens = torch.IntTensor(self.tokenizer.encode(text, lang=language)).unsqueeze(0).to(self.device) @@ -674,6 +685,12 @@ class Xtts(BaseTTS): if is_end or (stream_chunk_size > 0 and len(last_tokens) >= stream_chunk_size): gpt_latents = torch.cat(all_latents, dim=0)[None, :] + if length_scale != 1.0: + gpt_latents = F.interpolate( + gpt_latents.transpose(1, 2), + scale_factor=length_scale, + mode="linear" + ).transpose(1, 2) wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) wav_chunk, wav_gen_prev, wav_overlap = self.handle_chunks( wav_gen.squeeze(), wav_gen_prev, wav_overlap, overlap_wav_len diff --git a/tests/zoo_tests/test_models.py b/tests/zoo_tests/test_models.py index d1c6b67c..a5aad5c1 100644 --- a/tests/zoo_tests/test_models.py +++ b/tests/zoo_tests/test_models.py @@ -111,7 +111,7 @@ def test_xtts_streaming(): model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu")) print("Computing speaker latents...") - gpt_cond_latent, _, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav) + gpt_cond_latent, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav) print("Inference...") chunks = model.inference_stream( @@ -139,7 +139,7 @@ def test_xtts_v2(): "yes | " f"tts --model_name tts_models/multilingual/multi-dataset/xtts_v2 " f'--text "This is an example." --out_path "{output_path}" --progress_bar False --use_cuda True ' - f'--speaker_wav "{speaker_wav}" "{speaker_wav_2}" "--language_idx "en"' + f'--speaker_wav "{speaker_wav}" "{speaker_wav_2}" --language_idx "en"' ) else: run_cli( @@ -164,7 +164,7 @@ def test_xtts_v2_streaming(): model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu")) print("Computing speaker latents...") - gpt_cond_latent, _, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav) + gpt_cond_latent, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav) print("Inference...") chunks = model.inference_stream( @@ -179,6 +179,34 @@ def test_xtts_v2_streaming(): assert chunk.shape[-1] > 5000 wav_chuncks.append(chunk) assert len(wav_chuncks) > 1 + normal_len = sum([len(chunk) for chunk in wav_chuncks]) + + chunks = model.inference_stream( + "It took me quite a long time to develop a voice and now that I have it I am not going to be silent.", + "en", + gpt_cond_latent, + speaker_embedding, + speed=1.5 + ) + wav_chuncks = [] + for i, chunk in enumerate(chunks): + wav_chuncks.append(chunk) + fast_len = sum([len(chunk) for chunk in wav_chuncks]) + + chunks = model.inference_stream( + "It took me quite a long time to develop a voice and now that I have it I am not going to be silent.", + "en", + gpt_cond_latent, + speaker_embedding, + speed=0.66 + ) + wav_chuncks = [] + for i, chunk in enumerate(chunks): + wav_chuncks.append(chunk) + slow_len = sum([len(chunk) for chunk in wav_chuncks]) + + assert slow_len > normal_len + assert normal_len > fast_len def test_tortoise(): From 15f0ac57d6786e3b171df7a21ccce78d2000d31f Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Wed, 15 Nov 2023 21:59:56 +0900 Subject: [PATCH 44/67] Update README.md (#3215) Dicord 
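A standalone sketch of the speed control added in PATCH 43 above: the GPT latent sequence is time-stretched with linear interpolation before the HiFi-GAN decoder (tensor shapes are dummies).

    import torch
    import torch.nn.functional as F

    speed = 1.5                               # >1.0 speaks faster, <1.0 slower
    length_scale = 1.0 / max(speed, 0.05)
    gpt_latents = torch.randn(1, 120, 1024)   # [batch, frames, channels]
    stretched = F.interpolate(
        gpt_latents.transpose(1, 2),          # interpolate along the frame axis
        scale_factor=length_scale,
        mode="linear",
    ).transpose(1, 2)
    print(gpt_latents.shape, "->", stretched.shape)  # 120 frames -> 80 frames at speed=1.5

Stretching the latents changes the duration of the decoded audio without re-running the GPT, which is why the same interpolation appears in both the one-shot and streaming paths.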
-> Discord --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 935627e5..4e5855f9 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ 📚 Utilities for dataset analysis and curation. ______________________________________________________________________ -[![Dicord](https://img.shields.io/discord/1037326658807533628?color=%239B59B6&label=chat%20on%20discord)](https://discord.gg/5eXr5seRrv) +[![Discord](https://img.shields.io/discord/1037326658807533628?color=%239B59B6&label=chat%20on%20discord)](https://discord.gg/5eXr5seRrv) [![License]()](https://opensource.org/licenses/MPL-2.0) [![PyPI version](https://badge.fury.io/py/TTS.svg)](https://badge.fury.io/py/TTS) [![Covenant](https://camo.githubusercontent.com/7d620efaa3eac1c5b060ece5d6aacfcc8b81a74a04d05cd0398689c01c4463bb/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f436f6e7472696275746f72253230436f76656e616e742d76322e3025323061646f707465642d6666363962342e737667)](https://github.com/coqui-ai/TTS/blob/master/CODE_OF_CONDUCT.md) From 73a5bd08c0593feb135ad229ad628d6f85898ec0 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Wed, 15 Nov 2023 10:02:05 -0300 Subject: [PATCH 45/67] Fix XTTS GPT padding and inference issues (#3216) * Fix end artifact for fine tuning models * Bug fix on zh-cn inference * Remove ununsed code --- TTS/tts/layers/xtts/gpt.py | 11 +---------- TTS/tts/layers/xtts/tokenizer.py | 12 ++++++------ TTS/tts/models/xtts.py | 31 ------------------------------- 3 files changed, 7 insertions(+), 47 deletions(-) diff --git a/TTS/tts/layers/xtts/gpt.py b/TTS/tts/layers/xtts/gpt.py index 612da260..d914ebf9 100644 --- a/TTS/tts/layers/xtts/gpt.py +++ b/TTS/tts/layers/xtts/gpt.py @@ -426,15 +426,6 @@ class GPT(nn.Module): if max_mel_len > audio_codes.shape[-1]: audio_codes = F.pad(audio_codes, (0, max_mel_len - audio_codes.shape[-1])) - silence = True - for idx, l in enumerate(code_lengths): - length = l.item() - while silence: - if audio_codes[idx, length - 1] != 83: - break - length -= 1 - code_lengths[idx] = length - # 💖 Lovely assertions assert ( max_mel_len <= audio_codes.shape[-1] @@ -450,7 +441,7 @@ class GPT(nn.Module): audio_codes = F.pad(audio_codes[:, :max_mel_len], (0, 1), value=self.stop_audio_token) # Pad mel codes with stop_audio_token - audio_codes = self.set_mel_padding(audio_codes, code_lengths) + audio_codes = self.set_mel_padding(audio_codes, code_lengths - 3) # -3 to get the real code lengths without consider start and stop tokens that was not added yet # Build input and target tensors # Prepend start token to inputs and append stop token to targets diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py index 211d0a93..7726d829 100644 --- a/TTS/tts/layers/xtts/tokenizer.py +++ b/TTS/tts/layers/xtts/tokenizer.py @@ -115,7 +115,7 @@ _abbreviations = { # There are not many common abbreviations in Arabic as in English. ] ], - "zh": [ + "zh-cn": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ # Chinese doesn't typically use abbreviations in the same way as Latin-based scripts. 
@@ -280,7 +280,7 @@ _symbols_multilingual = { ("°", " درجة "), ] ], - "zh": [ + "zh-cn": [ # Chinese (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ @@ -571,7 +571,7 @@ class VoiceBpeTokenizer: ) def preprocess_text(self, txt, lang): - if lang in {"ar", "cs", "de", "en", "es", "fr", "hu", "it", "nl", "pl", "pt", "ru", "tr", "zh", "zh-cn"}: + if lang in {"ar", "cs", "de", "en", "es", "fr", "hu", "it", "nl", "pl", "pt", "ru", "tr", "zh-cn", "zh-cn"}: txt = multilingual_cleaners(txt, lang) if lang in {"zh", "zh-cn"}: txt = chinese_transliterate(txt) @@ -682,8 +682,8 @@ def test_expand_numbers_multilingual(): ("Dat wordt dan $20 meneer.", "Dat wordt dan twintig dollar meneer.", "nl"), ("Dat wordt dan 20€ meneer.", "Dat wordt dan twintig euro meneer.", "nl"), # Chinese (Simplified) - ("在12.5秒内", "在十二点五秒内", "zh"), - ("有50名士兵", "有五十名士兵", "zh"), + ("在12.5秒内", "在十二点五秒内", "zh-cn"), + ("有50名士兵", "有五十名士兵", "zh-cn"), # ("那将是$20先生", '那将是二十美元先生', 'zh'), currency doesn't work # ("那将是20€先生", '那将是二十欧元先生', 'zh'), # Turkish @@ -764,7 +764,7 @@ def test_symbols_multilingual(): ("Ik heb 14% batterij", "Ik heb 14 procent batterij", "nl"), ("Ik zie je @ het feest", "Ik zie je bij het feest", "nl"), ("لدي 14% في البطارية", "لدي 14 في المئة في البطارية", "ar"), - ("我的电量为 14%", "我的电量为 14 百分之", "zh"), + ("我的电量为 14%", "我的电量为 14 百分之", "zh-cn"), ("Pilim %14 dolu.", "Pilim yüzde 14 dolu.", "tr"), ("Az akkumulátorom töltöttsége 14%", "Az akkumulátorom töltöttsége 14 százalék", "hu"), ("배터리 잔량이 14%입니다.", "배터리 잔량이 14 퍼센트입니다.", "ko"), diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 91985912..f37f0844 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -7,7 +7,6 @@ import torch.nn.functional as F import torchaudio from coqpit import Coqpit -from TTS.tts.layers.tortoise.audio_utils import wav_to_univnet_mel from TTS.tts.layers.xtts.gpt import GPT from TTS.tts.layers.xtts.hifigan_decoder import HifiDecoder from TTS.tts.layers.xtts.stream_generator import init_stream_support @@ -308,26 +307,6 @@ class Xtts(BaseTTS): cond_latent = self.gpt.get_style_emb(mel.to(self.device)) return cond_latent.transpose(1, 2) - @torch.inference_mode() - def get_diffusion_cond_latents(self, audio, sr): - from math import ceil - - diffusion_conds = [] - CHUNK_SIZE = 102400 - audio_24k = torchaudio.functional.resample(audio, sr, 24000) - for chunk in range(ceil(audio_24k.shape[1] / CHUNK_SIZE)): - current_sample = audio_24k[:, chunk * CHUNK_SIZE : (chunk + 1) * CHUNK_SIZE] - current_sample = pad_or_truncate(current_sample, CHUNK_SIZE) - cond_mel = wav_to_univnet_mel( - current_sample.to(self.device), - do_normalization=False, - device=self.device, - ) - diffusion_conds.append(cond_mel) - diffusion_conds = torch.stack(diffusion_conds, dim=1) - diffusion_latent = self.diffusion_decoder.get_conditioning(diffusion_conds) - return diffusion_latent - @torch.inference_mode() def get_speaker_embedding(self, audio, sr): audio_16k = torchaudio.functional.resample(audio, sr, 16000) @@ -575,16 +554,6 @@ class Xtts(BaseTTS): return_attentions=False, return_latent=True, ) - silence_token = 83 - ctokens = 0 - for k in range(gpt_codes.shape[-1]): - if gpt_codes[0, k] == silence_token: - ctokens += 1 - else: - ctokens = 0 - if ctokens > 8: - gpt_latents = gpt_latents[:, :k] - break if length_scale != 1.0: gpt_latents = F.interpolate( From 88630c60e5c35be65f7e75b7a2a27a2b63ac87ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 15 Nov 2023 14:02:51 +0100 Subject: [PATCH 46/67] Update to 
v0.20.5 --- TTS/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/VERSION b/TTS/VERSION index 6dd46024..1b619f34 100644 --- a/TTS/VERSION +++ b/TTS/VERSION @@ -1 +1 @@ -0.20.4 +0.20.5 From 3c2d5a9e03040e081732a5e917464ddd74049c43 Mon Sep 17 00:00:00 2001 From: Enno Hermann Date: Thu, 16 Nov 2023 10:57:06 +0100 Subject: [PATCH 47/67] Remove duplicate AudioProcessor code and fix ExtractTTSpectrogram.ipynb (#3230) * chore: remove unused argument * refactor(audio.processor): remove duplicate stft+griffin_lim * chore(audio.processor): remove unused compute_stft_paddings Same function available in numpy_transforms * refactor(audio.processor): remove duplicate db_to_amp * refactor(audio.processor): remove duplicate amp_to_db * refactor(audio.processor): remove duplicate linear_to_mel * refactor(audio.processor): remove duplicate mel_to_linear * refactor(audio.processor): remove duplicate build_mel_basis * refactor(audio.processor): remove duplicate stft_parameters * refactor(audio.processor): use pre-/deemphasis from numpy_transforms * refactor(audio.processor): use rms_volume_norm from numpy_transforms * chore(audio.processor): remove duplicate assert Already checked in numpy_transforms.compute_f0 * refactor(audio.processor): use find_endpoint from numpy_transforms * refactor(audio.processor): use trim_silence from numpy_transforms * refactor(audio.processor): use volume_norm from numpy_transforms * refactor(audio.processor): use load_wav from numpy_transforms * fix(bin.extract_tts_spectrograms): set quantization bits * fix(ExtractTTSpectrogram.ipynb): adapt to current TTS code Fixes #2447, #2574 * refactor(audio.processor): remove duplicate quantization methods --- TTS/bin/extract_tts_spectrograms.py | 11 +- TTS/utils/audio/numpy_transforms.py | 1 - TTS/utils/audio/processor.py | 314 ++++++--------------- TTS/vocoder/datasets/preprocess.py | 7 +- TTS/vocoder/datasets/wavernn_dataset.py | 6 +- TTS/vocoder/models/wavernn.py | 3 +- notebooks/ExtractTTSpectrogram.ipynb | 182 +++++------- tests/vocoder_tests/test_vocoder_losses.py | 3 +- 8 files changed, 177 insertions(+), 350 deletions(-) diff --git a/TTS/bin/extract_tts_spectrograms.py b/TTS/bin/extract_tts_spectrograms.py index 9eadee07..c6048626 100755 --- a/TTS/bin/extract_tts_spectrograms.py +++ b/TTS/bin/extract_tts_spectrograms.py @@ -15,6 +15,7 @@ from TTS.tts.models import setup_model from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.text.tokenizer import TTSTokenizer from TTS.utils.audio import AudioProcessor +from TTS.utils.audio.numpy_transforms import quantize from TTS.utils.generic_utils import count_parameters use_cuda = torch.cuda.is_available() @@ -159,7 +160,7 @@ def inference( def extract_spectrograms( - data_loader, model, ap, output_path, quantized_wav=False, save_audio=False, debug=False, metada_name="metada.txt" + data_loader, model, ap, output_path, quantize_bits=0, save_audio=False, debug=False, metada_name="metada.txt" ): model.eval() export_metadata = [] @@ -196,8 +197,8 @@ def extract_spectrograms( _, wavq_path, mel_path, wav_gl_path, wav_path = set_filename(wav_file_path, output_path) # quantize and save wav - if quantized_wav: - wavq = ap.quantize(wav) + if quantize_bits > 0: + wavq = quantize(wav, quantize_bits) np.save(wavq_path, wavq) # save TTS mel @@ -263,7 +264,7 @@ def main(args): # pylint: disable=redefined-outer-name model, ap, args.output_path, - quantized_wav=args.quantized, + quantize_bits=args.quantize_bits, save_audio=args.save_audio, debug=args.debug, 
metada_name="metada.txt", @@ -277,7 +278,7 @@ if __name__ == "__main__": parser.add_argument("--output_path", type=str, help="Path to save mel specs", required=True) parser.add_argument("--debug", default=False, action="store_true", help="Save audio files for debug") parser.add_argument("--save_audio", default=False, action="store_true", help="Save audio files") - parser.add_argument("--quantized", action="store_true", help="Save quantized audio files") + parser.add_argument("--quantize_bits", type=int, default=0, help="Save quantized audio files if non-zero") parser.add_argument("--eval", type=bool, help="compute eval.", default=True) args = parser.parse_args() diff --git a/TTS/utils/audio/numpy_transforms.py b/TTS/utils/audio/numpy_transforms.py index b701e767..af88569f 100644 --- a/TTS/utils/audio/numpy_transforms.py +++ b/TTS/utils/audio/numpy_transforms.py @@ -201,7 +201,6 @@ def stft( def istft( *, y: np.ndarray = None, - fft_size: int = None, hop_length: int = None, win_length: int = None, window: str = "hann", diff --git a/TTS/utils/audio/processor.py b/TTS/utils/audio/processor.py index 4ceb7da4..c53bad56 100644 --- a/TTS/utils/audio/processor.py +++ b/TTS/utils/audio/processor.py @@ -5,10 +5,26 @@ import librosa import numpy as np import scipy.io.wavfile import scipy.signal -import soundfile as sf from TTS.tts.utils.helpers import StandardScaler -from TTS.utils.audio.numpy_transforms import compute_f0 +from TTS.utils.audio.numpy_transforms import ( + amp_to_db, + build_mel_basis, + compute_f0, + db_to_amp, + deemphasis, + find_endpoint, + griffin_lim, + load_wav, + mel_to_spec, + millisec_to_length, + preemphasis, + rms_volume_norm, + spec_to_mel, + stft, + trim_silence, + volume_norm, +) # pylint: disable=too-many-public-methods @@ -200,7 +216,9 @@ class AudioProcessor(object): # setup stft parameters if hop_length is None: # compute stft parameters from given time values - self.hop_length, self.win_length = self._stft_parameters() + self.win_length, self.hop_length = millisec_to_length( + frame_length_ms=self.frame_length_ms, frame_shift_ms=self.frame_shift_ms, sample_rate=self.sample_rate + ) else: # use stft parameters from config file self.hop_length = hop_length @@ -215,8 +233,13 @@ class AudioProcessor(object): for key, value in members.items(): print(" | > {}:{}".format(key, value)) # create spectrogram utils - self.mel_basis = self._build_mel_basis() - self.inv_mel_basis = np.linalg.pinv(self._build_mel_basis()) + self.mel_basis = build_mel_basis( + sample_rate=self.sample_rate, + fft_size=self.fft_size, + num_mels=self.num_mels, + mel_fmax=self.mel_fmax, + mel_fmin=self.mel_fmin, + ) # setup scaler if stats_path and signal_norm: mel_mean, mel_std, linear_mean, linear_std, _ = self.load_stats(stats_path) @@ -232,35 +255,6 @@ class AudioProcessor(object): return AudioProcessor(verbose=verbose, **config.audio) return AudioProcessor(verbose=verbose, **config) - ### setting up the parameters ### - def _build_mel_basis( - self, - ) -> np.ndarray: - """Build melspectrogram basis. - - Returns: - np.ndarray: melspectrogram basis. - """ - if self.mel_fmax is not None: - assert self.mel_fmax <= self.sample_rate // 2 - return librosa.filters.mel( - sr=self.sample_rate, n_fft=self.fft_size, n_mels=self.num_mels, fmin=self.mel_fmin, fmax=self.mel_fmax - ) - - def _stft_parameters( - self, - ) -> Tuple[int, int]: - """Compute the real STFT parameters from the time values. - - Returns: - Tuple[int, int]: hop length and window length for STFT. 
- """ - factor = self.frame_length_ms / self.frame_shift_ms - assert (factor).is_integer(), " [!] frame_shift_ms should divide frame_length_ms" - hop_length = int(self.frame_shift_ms / 1000.0 * self.sample_rate) - win_length = int(hop_length * factor) - return hop_length, win_length - ### normalization ### def normalize(self, S: np.ndarray) -> np.ndarray: """Normalize values into `[0, self.max_norm]` or `[-self.max_norm, self.max_norm]` @@ -386,31 +380,6 @@ class AudioProcessor(object): self.linear_scaler = StandardScaler() self.linear_scaler.set_stats(linear_mean, linear_std) - ### DB and AMP conversion ### - # pylint: disable=no-self-use - def _amp_to_db(self, x: np.ndarray) -> np.ndarray: - """Convert amplitude values to decibels. - - Args: - x (np.ndarray): Amplitude spectrogram. - - Returns: - np.ndarray: Decibels spectrogram. - """ - return self.spec_gain * _log(np.maximum(1e-5, x), self.base) - - # pylint: disable=no-self-use - def _db_to_amp(self, x: np.ndarray) -> np.ndarray: - """Convert decibels spectrogram to amplitude spectrogram. - - Args: - x (np.ndarray): Decibels spectrogram. - - Returns: - np.ndarray: Amplitude spectrogram. - """ - return _exp(x / self.spec_gain, self.base) - ### Preemphasis ### def apply_preemphasis(self, x: np.ndarray) -> np.ndarray: """Apply pre-emphasis to the audio signal. Useful to reduce the correlation between neighbouring signal values. @@ -424,32 +393,13 @@ class AudioProcessor(object): Returns: np.ndarray: Decorrelated audio signal. """ - if self.preemphasis == 0: - raise RuntimeError(" [!] Preemphasis is set 0.0.") - return scipy.signal.lfilter([1, -self.preemphasis], [1], x) + return preemphasis(x=x, coef=self.preemphasis) def apply_inv_preemphasis(self, x: np.ndarray) -> np.ndarray: """Reverse pre-emphasis.""" - if self.preemphasis == 0: - raise RuntimeError(" [!] Preemphasis is set 0.0.") - return scipy.signal.lfilter([1], [1, -self.preemphasis], x) + return deemphasis(x=x, coef=self.preemphasis) ### SPECTROGRAMs ### - def _linear_to_mel(self, spectrogram: np.ndarray) -> np.ndarray: - """Project a full scale spectrogram to a melspectrogram. - - Args: - spectrogram (np.ndarray): Full scale spectrogram. - - Returns: - np.ndarray: Melspectrogram - """ - return np.dot(self.mel_basis, spectrogram) - - def _mel_to_linear(self, mel_spec: np.ndarray) -> np.ndarray: - """Convert a melspectrogram to full scale spectrogram.""" - return np.maximum(1e-10, np.dot(self.inv_mel_basis, mel_spec)) - def spectrogram(self, y: np.ndarray) -> np.ndarray: """Compute a spectrogram from a waveform. @@ -460,11 +410,16 @@ class AudioProcessor(object): np.ndarray: Spectrogram. 
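A quick round-trip check of the pre-emphasis helpers the processor now delegates to (coefficient and signal are illustrative):

    import numpy as np
    from TTS.utils.audio.numpy_transforms import deemphasis, preemphasis

    x = np.random.uniform(-0.5, 0.5, size=1000).astype(np.float32)
    y = preemphasis(x=x, coef=0.97)      # decorrelate neighbouring samples
    x_rec = deemphasis(x=y, coef=0.97)   # inverse filter
    print(np.abs(x - x_rec).max())       # ~0: the two filters invert each other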
""" if self.preemphasis != 0: - D = self._stft(self.apply_preemphasis(y)) - else: - D = self._stft(y) + y = self.apply_preemphasis(y) + D = stft( + y=y, + fft_size=self.fft_size, + hop_length=self.hop_length, + win_length=self.win_length, + pad_mode=self.stft_pad_mode, + ) if self.do_amp_to_db_linear: - S = self._amp_to_db(np.abs(D)) + S = amp_to_db(x=np.abs(D), gain=self.spec_gain, base=self.base) else: S = np.abs(D) return self.normalize(S).astype(np.float32) @@ -472,32 +427,35 @@ class AudioProcessor(object): def melspectrogram(self, y: np.ndarray) -> np.ndarray: """Compute a melspectrogram from a waveform.""" if self.preemphasis != 0: - D = self._stft(self.apply_preemphasis(y)) - else: - D = self._stft(y) + y = self.apply_preemphasis(y) + D = stft( + y=y, + fft_size=self.fft_size, + hop_length=self.hop_length, + win_length=self.win_length, + pad_mode=self.stft_pad_mode, + ) + S = spec_to_mel(spec=np.abs(D), mel_basis=self.mel_basis) if self.do_amp_to_db_mel: - S = self._amp_to_db(self._linear_to_mel(np.abs(D))) - else: - S = self._linear_to_mel(np.abs(D)) + S = amp_to_db(x=S, gain=self.spec_gain, base=self.base) + return self.normalize(S).astype(np.float32) def inv_spectrogram(self, spectrogram: np.ndarray) -> np.ndarray: """Convert a spectrogram to a waveform using Griffi-Lim vocoder.""" S = self.denormalize(spectrogram) - S = self._db_to_amp(S) + S = db_to_amp(x=S, gain=self.spec_gain, base=self.base) # Reconstruct phase - if self.preemphasis != 0: - return self.apply_inv_preemphasis(self._griffin_lim(S**self.power)) - return self._griffin_lim(S**self.power) + W = self._griffin_lim(S**self.power) + return self.apply_inv_preemphasis(W) if self.preemphasis != 0 else W def inv_melspectrogram(self, mel_spectrogram: np.ndarray) -> np.ndarray: """Convert a melspectrogram to a waveform using Griffi-Lim vocoder.""" D = self.denormalize(mel_spectrogram) - S = self._db_to_amp(D) - S = self._mel_to_linear(S) # Convert back to linear - if self.preemphasis != 0: - return self.apply_inv_preemphasis(self._griffin_lim(S**self.power)) - return self._griffin_lim(S**self.power) + S = db_to_amp(x=D, gain=self.spec_gain, base=self.base) + S = mel_to_spec(mel=S, mel_basis=self.mel_basis) # Convert back to linear + W = self._griffin_lim(S**self.power) + return self.apply_inv_preemphasis(W) if self.preemphasis != 0 else W def out_linear_to_mel(self, linear_spec: np.ndarray) -> np.ndarray: """Convert a full scale linear spectrogram output of a network to a melspectrogram. @@ -509,60 +467,22 @@ class AudioProcessor(object): np.ndarray: Normalized melspectrogram. """ S = self.denormalize(linear_spec) - S = self._db_to_amp(S) - S = self._linear_to_mel(np.abs(S)) - S = self._amp_to_db(S) + S = db_to_amp(x=S, gain=self.spec_gain, base=self.base) + S = spec_to_mel(spec=np.abs(S), mel_basis=self.mel_basis) + S = amp_to_db(x=S, gain=self.spec_gain, base=self.base) mel = self.normalize(S) return mel - ### STFT and ISTFT ### - def _stft(self, y: np.ndarray) -> np.ndarray: - """Librosa STFT wrapper. - - Args: - y (np.ndarray): Audio signal. - - Returns: - np.ndarray: Complex number array. 
- """ - return librosa.stft( - y=y, - n_fft=self.fft_size, + def _griffin_lim(self, S): + return griffin_lim( + spec=S, + num_iter=self.griffin_lim_iters, hop_length=self.hop_length, win_length=self.win_length, + fft_size=self.fft_size, pad_mode=self.stft_pad_mode, - window="hann", - center=True, ) - def _istft(self, y: np.ndarray) -> np.ndarray: - """Librosa iSTFT wrapper.""" - return librosa.istft(y, hop_length=self.hop_length, win_length=self.win_length) - - def _griffin_lim(self, S): - angles = np.exp(2j * np.pi * np.random.rand(*S.shape)) - try: - S_complex = np.abs(S).astype(np.complex) - except AttributeError: # np.complex is deprecated since numpy 1.20.0 - S_complex = np.abs(S).astype(complex) - y = self._istft(S_complex * angles) - if not np.isfinite(y).all(): - print(" [!] Waveform is not finite everywhere. Skipping the GL.") - return np.array([0.0]) - for _ in range(self.griffin_lim_iters): - angles = np.exp(1j * np.angle(self._stft(y))) - y = self._istft(S_complex * angles) - return y - - def compute_stft_paddings(self, x, pad_sides=1): - """Compute paddings used by Librosa's STFT. Compute right padding (final frame) or both sides padding - (first and final frames)""" - assert pad_sides in (1, 2) - pad = (x.shape[0] // self.hop_length + 1) * self.hop_length - x.shape[0] - if pad_sides == 1: - return 0, pad - return pad // 2, pad // 2 + pad % 2 - def compute_f0(self, x: np.ndarray) -> np.ndarray: """Compute pitch (f0) of a waveform using the same parameters used for computing melspectrogram. @@ -581,8 +501,6 @@ class AudioProcessor(object): >>> wav = ap.load_wav(WAV_FILE, sr=ap.sample_rate)[:5 * ap.sample_rate] >>> pitch = ap.compute_f0(wav) """ - assert self.pitch_fmax is not None, " [!] Set `pitch_fmax` before caling `compute_f0`." - assert self.pitch_fmin is not None, " [!] Set `pitch_fmin` before caling `compute_f0`." # align F0 length to the spectrogram length if len(x) % self.hop_length == 0: x = np.pad(x, (0, self.hop_length // 2), mode=self.stft_pad_mode) @@ -612,21 +530,24 @@ class AudioProcessor(object): Returns: int: Last point without silence. """ - window_length = int(self.sample_rate * min_silence_sec) - hop_length = int(window_length / 4) - threshold = self._db_to_amp(-self.trim_db) - for x in range(hop_length, len(wav) - window_length, hop_length): - if np.max(wav[x : x + window_length]) < threshold: - return x + hop_length - return len(wav) + return find_endpoint( + wav=wav, + trim_db=self.trim_db, + sample_rate=self.sample_rate, + min_silence_sec=min_silence_sec, + gain=self.spec_gain, + base=self.base, + ) def trim_silence(self, wav): """Trim silent parts with a threshold and 0.01 sec margin""" - margin = int(self.sample_rate * 0.01) - wav = wav[margin:-margin] - return librosa.effects.trim(wav, top_db=self.trim_db, frame_length=self.win_length, hop_length=self.hop_length)[ - 0 - ] + return trim_silence( + wav=wav, + sample_rate=self.sample_rate, + trim_db=self.trim_db, + win_length=self.win_length, + hop_length=self.hop_length, + ) @staticmethod def sound_norm(x: np.ndarray) -> np.ndarray: @@ -638,13 +559,7 @@ class AudioProcessor(object): Returns: np.ndarray: Volume normalized waveform. """ - return x / abs(x).max() * 0.95 - - @staticmethod - def _rms_norm(wav, db_level=-27): - r = 10 ** (db_level / 20) - a = np.sqrt((len(wav) * (r**2)) / np.sum(wav**2)) - return wav * a + return volume_norm(x=x) def rms_volume_norm(self, x: np.ndarray, db_level: float = None) -> np.ndarray: """Normalize the volume based on RMS of the signal. 
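A hedged sketch of the functional numpy_transforms pipeline these methods now delegate to; the parameter values below are illustrative stand-ins for the processor's own config.

    import numpy as np
    from TTS.utils.audio.numpy_transforms import amp_to_db, build_mel_basis, spec_to_mel, stft

    sample_rate, fft_size, hop_length, win_length, num_mels = 22050, 1024, 256, 1024, 80
    y = np.random.uniform(-0.5, 0.5, size=sample_rate).astype(np.float32)  # stand-in waveform

    mel_basis = build_mel_basis(
        sample_rate=sample_rate, fft_size=fft_size, num_mels=num_mels, mel_fmin=0, mel_fmax=8000
    )
    D = stft(y=y, fft_size=fft_size, hop_length=hop_length, win_length=win_length, pad_mode="reflect")
    mel_db = amp_to_db(x=spec_to_mel(spec=np.abs(D), mel_basis=mel_basis), gain=1.0, base=10)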
@@ -657,9 +572,7 @@ class AudioProcessor(object): """ if db_level is None: db_level = self.db_level - assert -99 <= db_level <= 0, " [!] db_level should be between -99 and 0" - wav = self._rms_norm(x, db_level) - return wav + return rms_volume_norm(x=x, db_level=db_level) ### save and load ### def load_wav(self, filename: str, sr: int = None) -> np.ndarray: @@ -674,15 +587,10 @@ class AudioProcessor(object): Returns: np.ndarray: Loaded waveform. """ - if self.resample: - # loading with resampling. It is significantly slower. - x, sr = librosa.load(filename, sr=self.sample_rate) - elif sr is None: - # SF is faster than librosa for loading files - x, sr = sf.read(filename) - assert self.sample_rate == sr, "%s vs %s" % (self.sample_rate, sr) + if sr is not None: + x = load_wav(filename=filename, sample_rate=sr, resample=True) else: - x, sr = librosa.load(filename, sr=sr) + x = load_wav(filename=filename, sample_rate=self.sample_rate, resample=self.resample) if self.do_trim_silence: try: x = self.trim_silence(x) @@ -723,55 +631,3 @@ class AudioProcessor(object): filename (str): Path to the wav file. """ return librosa.get_duration(filename=filename) - - @staticmethod - def mulaw_encode(wav: np.ndarray, qc: int) -> np.ndarray: - mu = 2**qc - 1 - # wav_abs = np.minimum(np.abs(wav), 1.0) - signal = np.sign(wav) * np.log(1 + mu * np.abs(wav)) / np.log(1.0 + mu) - # Quantize signal to the specified number of levels. - signal = (signal + 1) / 2 * mu + 0.5 - return np.floor( - signal, - ) - - @staticmethod - def mulaw_decode(wav, qc): - """Recovers waveform from quantized values.""" - mu = 2**qc - 1 - x = np.sign(wav) / mu * ((1 + mu) ** np.abs(wav) - 1) - return x - - @staticmethod - def encode_16bits(x): - return np.clip(x * 2**15, -(2**15), 2**15 - 1).astype(np.int16) - - @staticmethod - def quantize(x: np.ndarray, bits: int) -> np.ndarray: - """Quantize a waveform to a given number of bits. - - Args: - x (np.ndarray): Waveform to quantize. Must be normalized into the range `[-1, 1]`. - bits (int): Number of quantization bits. - - Returns: - np.ndarray: Quantized waveform. 
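A short sketch of the quantization helpers that replace the removed static methods, using the keyword-only call style of the updated callers; the waveform is a random stand-in.

    import numpy as np
    from TTS.utils.audio.numpy_transforms import mulaw_encode, quantize

    y = np.random.uniform(-1.0, 1.0, size=22050).astype(np.float32)  # waveform in [-1, 1]
    labels_mulaw = mulaw_encode(wav=y, mulaw_qc=10)   # mu-law companded 10-bit labels
    labels_linear = quantize(x=y, quantize_bits=10)   # plain linear quantization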
- """ - return (x + 1.0) * (2**bits - 1) / 2 - - @staticmethod - def dequantize(x, bits): - """Dequantize a waveform from the given number of bits.""" - return 2 * x / (2**bits - 1) - 1 - - -def _log(x, base): - if base == 10: - return np.log10(x) - return np.log(x) - - -def _exp(x, base): - if base == 10: - return np.power(10, x) - return np.exp(x) diff --git a/TTS/vocoder/datasets/preprocess.py b/TTS/vocoder/datasets/preprocess.py index 0f69b812..503bb04b 100644 --- a/TTS/vocoder/datasets/preprocess.py +++ b/TTS/vocoder/datasets/preprocess.py @@ -7,6 +7,7 @@ from coqpit import Coqpit from tqdm import tqdm from TTS.utils.audio import AudioProcessor +from TTS.utils.audio.numpy_transforms import mulaw_encode, quantize def preprocess_wav_files(out_path: str, config: Coqpit, ap: AudioProcessor): @@ -29,7 +30,11 @@ def preprocess_wav_files(out_path: str, config: Coqpit, ap: AudioProcessor): mel = ap.melspectrogram(y) np.save(mel_path, mel) if isinstance(config.mode, int): - quant = ap.mulaw_encode(y, qc=config.mode) if config.model_args.mulaw else ap.quantize(y, bits=config.mode) + quant = ( + mulaw_encode(wav=y, mulaw_qc=config.mode) + if config.model_args.mulaw + else quantize(x=y, quantize_bits=config.mode) + ) np.save(quant_path, quant) diff --git a/TTS/vocoder/datasets/wavernn_dataset.py b/TTS/vocoder/datasets/wavernn_dataset.py index c3907964..a67c5b31 100644 --- a/TTS/vocoder/datasets/wavernn_dataset.py +++ b/TTS/vocoder/datasets/wavernn_dataset.py @@ -2,6 +2,8 @@ import numpy as np import torch from torch.utils.data import Dataset +from TTS.utils.audio.numpy_transforms import mulaw_encode, quantize + class WaveRNNDataset(Dataset): """ @@ -66,7 +68,9 @@ class WaveRNNDataset(Dataset): x_input = audio elif isinstance(self.mode, int): x_input = ( - self.ap.mulaw_encode(audio, qc=self.mode) if self.mulaw else self.ap.quantize(audio, bits=self.mode) + mulaw_encode(wav=audio, mulaw_qc=self.mode) + if self.mulaw + else quantize(x=audio, quantize_bits=self.mode) ) else: raise RuntimeError("Unknown dataset mode - ", self.mode) diff --git a/TTS/vocoder/models/wavernn.py b/TTS/vocoder/models/wavernn.py index 903f4b7e..7f74ba3e 100644 --- a/TTS/vocoder/models/wavernn.py +++ b/TTS/vocoder/models/wavernn.py @@ -13,6 +13,7 @@ from torch.utils.data.distributed import DistributedSampler from TTS.tts.utils.visual import plot_spectrogram from TTS.utils.audio import AudioProcessor +from TTS.utils.audio.numpy_transforms import mulaw_decode from TTS.utils.io import load_fsspec from TTS.vocoder.datasets.wavernn_dataset import WaveRNNDataset from TTS.vocoder.layers.losses import WaveRNNLoss @@ -399,7 +400,7 @@ class Wavernn(BaseVocoder): output = output[0] if self.args.mulaw and isinstance(self.args.mode, int): - output = AudioProcessor.mulaw_decode(output, self.args.mode) + output = mulaw_decode(wav=output, mulaw_qc=self.args.mode) # Fade-out at the end to avoid signal cutting out suddenly fade_out = np.linspace(1, 0, 20 * self.config.audio.hop_length) diff --git a/notebooks/ExtractTTSpectrogram.ipynb b/notebooks/ExtractTTSpectrogram.ipynb index 9acc9929..0ec5f167 100644 --- a/notebooks/ExtractTTSpectrogram.ipynb +++ b/notebooks/ExtractTTSpectrogram.ipynb @@ -13,23 +13,28 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", - "import sys\n", - "import torch\n", "import importlib\n", - "import numpy as np\n", - "from tqdm import tqdm\n", - "from torch.utils.data import DataLoader\n", - "import soundfile as sf\n", + "import os\n", "import pickle\n", + "\n", + "import numpy as np\n", + "import 
soundfile as sf\n", + "import torch\n", + "from matplotlib import pylab as plt\n", + "from torch.utils.data import DataLoader\n", + "from tqdm import tqdm\n", + "\n", + "from TTS.config import load_config\n", + "from TTS.tts.configs.shared_configs import BaseDatasetConfig\n", + "from TTS.tts.datasets import load_tts_samples\n", "from TTS.tts.datasets.dataset import TTSDataset\n", "from TTS.tts.layers.losses import L1LossMasked\n", - "from TTS.utils.audio import AudioProcessor\n", - "from TTS.config import load_config\n", - "from TTS.tts.utils.visual import plot_spectrogram\n", - "from TTS.tts.utils.helpers import sequence_mask\n", "from TTS.tts.models import setup_model\n", - "from TTS.tts.utils.text.symbols import make_symbols, symbols, phonemes\n", + "from TTS.tts.utils.helpers import sequence_mask\n", + "from TTS.tts.utils.text.tokenizer import TTSTokenizer\n", + "from TTS.tts.utils.visual import plot_spectrogram\n", + "from TTS.utils.audio import AudioProcessor\n", + "from TTS.utils.audio.numpy_transforms import quantize\n", "\n", "%matplotlib inline\n", "\n", @@ -49,11 +54,9 @@ " file_name = wav_file.split('.')[0]\n", " os.makedirs(os.path.join(out_path, \"quant\"), exist_ok=True)\n", " os.makedirs(os.path.join(out_path, \"mel\"), exist_ok=True)\n", - " os.makedirs(os.path.join(out_path, \"wav_gl\"), exist_ok=True)\n", " wavq_path = os.path.join(out_path, \"quant\", file_name)\n", " mel_path = os.path.join(out_path, \"mel\", file_name)\n", - " wav_path = os.path.join(out_path, \"wav_gl\", file_name)\n", - " return file_name, wavq_path, mel_path, wav_path" + " return file_name, wavq_path, mel_path" ] }, { @@ -65,14 +68,14 @@ "# Paths and configurations\n", "OUT_PATH = \"/home/ubuntu/TTS/recipes/ljspeech/LJSpeech-1.1/specs2/\"\n", "DATA_PATH = \"/home/ubuntu/TTS/recipes/ljspeech/LJSpeech-1.1/\"\n", + "PHONEME_CACHE_PATH = \"/home/ubuntu/TTS/recipes/ljspeech/LJSpeech-1.1/phoneme_cache\"\n", "DATASET = \"ljspeech\"\n", "METADATA_FILE = \"metadata.csv\"\n", "CONFIG_PATH = \"/home/ubuntu/.local/share/tts/tts_models--en--ljspeech--tacotron2-DDC_ph/config.json\"\n", "MODEL_FILE = \"/home/ubuntu/.local/share/tts/tts_models--en--ljspeech--tacotron2-DDC_ph/model_file.pth\"\n", "BATCH_SIZE = 32\n", "\n", - "QUANTIZED_WAV = False\n", - "QUANTIZE_BIT = None\n", + "QUANTIZE_BITS = 0 # if non-zero, quantize wav files with the given number of bits\n", "DRY_RUN = False # if False, does not generate output files, only computes loss and visuals.\n", "\n", "# Check CUDA availability\n", @@ -80,10 +83,10 @@ "print(\" > CUDA enabled: \", use_cuda)\n", "\n", "# Load the configuration\n", + "dataset_config = BaseDatasetConfig(formatter=DATASET, meta_file_train=METADATA_FILE, path=DATA_PATH)\n", "C = load_config(CONFIG_PATH)\n", "C.audio['do_trim_silence'] = False # IMPORTANT!!!!!!!!!!!!!!! 
disable to align mel specs with the wav files\n", - "ap = AudioProcessor(bits=QUANTIZE_BIT, **C.audio)\n", - "print(C['r'])" + "ap = AudioProcessor(**C.audio)" ] }, { @@ -92,12 +95,10 @@ "metadata": {}, "outputs": [], "source": [ - "# If the vocabulary was passed, replace the default\n", - "if 'characters' in C and C['characters']:\n", - " symbols, phonemes = make_symbols(**C.characters)\n", + "# Initialize the tokenizer\n", + "tokenizer, C = TTSTokenizer.init_from_config(C)\n", "\n", "# Load the model\n", - "num_chars = len(phonemes) if C.use_phonemes else len(symbols)\n", "# TODO: multiple speakers\n", "model = setup_model(C)\n", "model.load_checkpoint(C, MODEL_FILE, eval=True)" @@ -109,42 +110,21 @@ "metadata": {}, "outputs": [], "source": [ - "# Load the preprocessor based on the dataset\n", - "preprocessor = importlib.import_module(\"TTS.tts.datasets.formatters\")\n", - "preprocessor = getattr(preprocessor, DATASET.lower())\n", - "meta_data = preprocessor(DATA_PATH, METADATA_FILE)\n", + "# Load data instances\n", + "meta_data_train, meta_data_eval = load_tts_samples(dataset_config)\n", + "meta_data = meta_data_train + meta_data_eval\n", + "\n", "dataset = TTSDataset(\n", - " C,\n", - " C.text_cleaner,\n", - " False,\n", - " ap,\n", - " meta_data,\n", - " characters=C.get('characters', None),\n", - " use_phonemes=C.use_phonemes,\n", - " phoneme_cache_path=C.phoneme_cache_path,\n", - " enable_eos_bos=C.enable_eos_bos_chars,\n", + " outputs_per_step=C[\"r\"],\n", + " compute_linear_spec=False,\n", + " ap=ap,\n", + " samples=meta_data,\n", + " tokenizer=tokenizer,\n", + " phoneme_cache_path=PHONEME_CACHE_PATH,\n", ")\n", "loader = DataLoader(\n", " dataset, batch_size=BATCH_SIZE, num_workers=4, collate_fn=dataset.collate_fn, shuffle=False, drop_last=False\n", - ")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Initialize lists for storing results\n", - "file_idxs = []\n", - "metadata = []\n", - "losses = []\n", - "postnet_losses = []\n", - "criterion = L1LossMasked(seq_len_norm=C.seq_len_norm)\n", - "\n", - "# Create log file\n", - "log_file_path = os.path.join(OUT_PATH, \"log.txt\")\n", - "log_file = open(log_file_path, \"w\")" + ")" ] }, { @@ -160,26 +140,33 @@ "metadata": {}, "outputs": [], "source": [ + "# Initialize lists for storing results\n", + "file_idxs = []\n", + "metadata = []\n", + "losses = []\n", + "postnet_losses = []\n", + "criterion = L1LossMasked(seq_len_norm=C.seq_len_norm)\n", + "\n", "# Start processing with a progress bar\n", - "with torch.no_grad():\n", + "log_file_path = os.path.join(OUT_PATH, \"log.txt\")\n", + "with torch.no_grad() and open(log_file_path, \"w\") as log_file:\n", " for data in tqdm(loader, desc=\"Processing\"):\n", " try:\n", - " # setup input data\n", - " text_input, text_lengths, _, linear_input, mel_input, mel_lengths, stop_targets, item_idx = data\n", - "\n", " # dispatch data to GPU\n", " if use_cuda:\n", - " text_input = text_input.cuda()\n", - " text_lengths = text_lengths.cuda()\n", - " mel_input = mel_input.cuda()\n", - " mel_lengths = mel_lengths.cuda()\n", + " data[\"token_id\"] = data[\"token_id\"].cuda()\n", + " data[\"token_id_lengths\"] = data[\"token_id_lengths\"].cuda()\n", + " data[\"mel\"] = data[\"mel\"].cuda()\n", + " data[\"mel_lengths\"] = data[\"mel_lengths\"].cuda()\n", "\n", - " mask = sequence_mask(text_lengths)\n", - " mel_outputs, postnet_outputs, alignments, stop_tokens = model.forward(text_input, text_lengths, mel_input)\n", + " mask = 
sequence_mask(data[\"token_id_lengths\"])\n", + " outputs = model.forward(data[\"token_id\"], data[\"token_id_lengths\"], data[\"mel\"])\n", + " mel_outputs = outputs[\"decoder_outputs\"]\n", + " postnet_outputs = outputs[\"model_outputs\"]\n", "\n", " # compute loss\n", - " loss = criterion(mel_outputs, mel_input, mel_lengths)\n", - " loss_postnet = criterion(postnet_outputs, mel_input, mel_lengths)\n", + " loss = criterion(mel_outputs, data[\"mel\"], data[\"mel_lengths\"])\n", + " loss_postnet = criterion(postnet_outputs, data[\"mel\"], data[\"mel_lengths\"])\n", " losses.append(loss.item())\n", " postnet_losses.append(loss_postnet.item())\n", "\n", @@ -193,28 +180,27 @@ " postnet_outputs = torch.stack(mel_specs)\n", " elif C.model == \"Tacotron2\":\n", " postnet_outputs = postnet_outputs.detach().cpu().numpy()\n", - " alignments = alignments.detach().cpu().numpy()\n", + " alignments = outputs[\"alignments\"].detach().cpu().numpy()\n", "\n", " if not DRY_RUN:\n", - " for idx in range(text_input.shape[0]):\n", - " wav_file_path = item_idx[idx]\n", + " for idx in range(data[\"token_id\"].shape[0]):\n", + " wav_file_path = data[\"item_idxs\"][idx]\n", " wav = ap.load_wav(wav_file_path)\n", - " file_name, wavq_path, mel_path, wav_path = set_filename(wav_file_path, OUT_PATH)\n", + " file_name, wavq_path, mel_path = set_filename(wav_file_path, OUT_PATH)\n", " file_idxs.append(file_name)\n", "\n", " # quantize and save wav\n", - " if QUANTIZED_WAV:\n", - " wavq = ap.quantize(wav)\n", + " if QUANTIZE_BITS > 0:\n", + " wavq = quantize(wav, QUANTIZE_BITS)\n", " np.save(wavq_path, wavq)\n", "\n", " # save TTS mel\n", " mel = postnet_outputs[idx]\n", - " mel_length = mel_lengths[idx]\n", + " mel_length = data[\"mel_lengths\"][idx]\n", " mel = mel[:mel_length, :].T\n", " np.save(mel_path, mel)\n", "\n", " metadata.append([wav_file_path, mel_path])\n", - "\n", " except Exception as e:\n", " log_file.write(f\"Error processing data: {str(e)}\\n\")\n", "\n", @@ -224,35 +210,20 @@ " log_file.write(f\"Mean Loss: {mean_loss}\\n\")\n", " log_file.write(f\"Mean Postnet Loss: {mean_postnet_loss}\\n\")\n", "\n", - "# Close the log file\n", - "log_file.close()\n", - "\n", "# For wavernn\n", "if not DRY_RUN:\n", " pickle.dump(file_idxs, open(os.path.join(OUT_PATH, \"dataset_ids.pkl\"), \"wb\"))\n", "\n", "# For pwgan\n", "with open(os.path.join(OUT_PATH, \"metadata.txt\"), \"w\") as f:\n", - " for data in metadata:\n", - " f.write(f\"{data[0]}|{data[1]+'.npy'}\\n\")\n", + " for wav_file_path, mel_path in metadata:\n", + " f.write(f\"{wav_file_path[0]}|{mel_path[1]+'.npy'}\\n\")\n", "\n", "# Print mean losses\n", "print(f\"Mean Loss: {mean_loss}\")\n", "print(f\"Mean Postnet Loss: {mean_postnet_loss}\")" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# for pwgan\n", - "with open(os.path.join(OUT_PATH, \"metadata.txt\"), \"w\") as f:\n", - " for data in metadata:\n", - " f.write(f\"{data[0]}|{data[1]+'.npy'}\\n\")" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -267,7 +238,7 @@ "outputs": [], "source": [ "idx = 1\n", - "ap.melspectrogram(ap.load_wav(item_idx[idx])).shape" + "ap.melspectrogram(ap.load_wav(data[\"item_idxs\"][idx])).shape" ] }, { @@ -276,10 +247,9 @@ "metadata": {}, "outputs": [], "source": [ - "import soundfile as sf\n", - "wav, sr = sf.read(item_idx[idx])\n", - "mel_postnet = postnet_outputs[idx][:mel_lengths[idx], :]\n", - "mel_decoder = mel_outputs[idx][:mel_lengths[idx], :].detach().cpu().numpy()\n", + "wav, sr = 
sf.read(data[\"item_idxs\"][idx])\n", + "mel_postnet = postnet_outputs[idx][:data[\"mel_lengths\"][idx], :]\n", + "mel_decoder = mel_outputs[idx][:data[\"mel_lengths\"][idx], :].detach().cpu().numpy()\n", "mel_truth = ap.melspectrogram(wav)\n", "print(mel_truth.shape)" ] @@ -291,7 +261,7 @@ "outputs": [], "source": [ "# plot posnet output\n", - "print(mel_postnet[:mel_lengths[idx], :].shape)\n", + "print(mel_postnet[:data[\"mel_lengths\"][idx], :].shape)\n", "plot_spectrogram(mel_postnet, ap)" ] }, @@ -324,10 +294,9 @@ "outputs": [], "source": [ "# postnet, decoder diff\n", - "from matplotlib import pylab as plt\n", "mel_diff = mel_decoder - mel_postnet\n", "plt.figure(figsize=(16, 10))\n", - "plt.imshow(abs(mel_diff[:mel_lengths[idx],:]).T,aspect=\"auto\", origin=\"lower\");\n", + "plt.imshow(abs(mel_diff[:data[\"mel_lengths\"][idx],:]).T,aspect=\"auto\", origin=\"lower\")\n", "plt.colorbar()\n", "plt.tight_layout()" ] @@ -339,10 +308,9 @@ "outputs": [], "source": [ "# PLOT GT SPECTROGRAM diff\n", - "from matplotlib import pylab as plt\n", "mel_diff2 = mel_truth.T - mel_decoder\n", "plt.figure(figsize=(16, 10))\n", - "plt.imshow(abs(mel_diff2).T,aspect=\"auto\", origin=\"lower\");\n", + "plt.imshow(abs(mel_diff2).T,aspect=\"auto\", origin=\"lower\")\n", "plt.colorbar()\n", "plt.tight_layout()" ] @@ -354,21 +322,13 @@ "outputs": [], "source": [ "# PLOT GT SPECTROGRAM diff\n", - "from matplotlib import pylab as plt\n", "mel = postnet_outputs[idx]\n", "mel_diff2 = mel_truth.T - mel[:mel_truth.shape[1]]\n", "plt.figure(figsize=(16, 10))\n", - "plt.imshow(abs(mel_diff2).T,aspect=\"auto\", origin=\"lower\");\n", + "plt.imshow(abs(mel_diff2).T,aspect=\"auto\", origin=\"lower\")\n", "plt.colorbar()\n", "plt.tight_layout()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/tests/vocoder_tests/test_vocoder_losses.py b/tests/vocoder_tests/test_vocoder_losses.py index 2a35aa2e..95501c2d 100644 --- a/tests/vocoder_tests/test_vocoder_losses.py +++ b/tests/vocoder_tests/test_vocoder_losses.py @@ -5,6 +5,7 @@ import torch from tests import get_tests_input_path, get_tests_output_path, get_tests_path from TTS.config import BaseAudioConfig from TTS.utils.audio import AudioProcessor +from TTS.utils.audio.numpy_transforms import stft from TTS.vocoder.layers.losses import MelganFeatureLoss, MultiScaleSTFTLoss, STFTLoss, TorchSTFT TESTS_PATH = get_tests_path() @@ -21,7 +22,7 @@ def test_torch_stft(): torch_stft = TorchSTFT(ap.fft_size, ap.hop_length, ap.win_length) # librosa stft wav = ap.load_wav(WAV_FILE) - M_librosa = abs(ap._stft(wav)) # pylint: disable=protected-access + M_librosa = abs(stft(y=wav, fft_size=ap.fft_size, hop_length=ap.hop_length, win_length=ap.win_length)) # torch stft wav = torch.from_numpy(wav[None, :]).float() M_torch = torch_stft(wav) From 675f98355077310f4f0fbb88fdb135730d0426f2 Mon Sep 17 00:00:00 2001 From: Julian Weber Date: Thu, 16 Nov 2023 11:01:11 +0100 Subject: [PATCH 48/67] Add sentence splitting (#3227) * Add sentence spliting * update requirements * update default args v2 * Add spanish * Fix return gpt_latents * Update requirements * Fix requirements --- TTS/tts/layers/xtts/tokenizer.py | 78 +++++++++-- TTS/tts/models/xtts.py | 225 +++++++++++++++++-------------- requirements.txt | 1 + 3 files changed, 190 insertions(+), 114 deletions(-) diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py index 7726d829..56eb78ae 100644 --- 
a/TTS/tts/layers/xtts/tokenizer.py +++ b/TTS/tts/layers/xtts/tokenizer.py @@ -1,10 +1,10 @@ -import json import os import re -from functools import cached_property - -import pypinyin import torch +import pypinyin +import textwrap + +from functools import cached_property from hangul_romanize import Transliter from hangul_romanize.rule import academic from num2words import num2words @@ -12,6 +12,61 @@ from tokenizers import Tokenizer from TTS.tts.layers.xtts.zh_num2words import TextNorm as zh_num2words +from spacy.lang.en import English +from spacy.lang.zh import Chinese +from spacy.lang.ja import Japanese +from spacy.lang.ar import Arabic +from spacy.lang.es import Spanish + + +def get_spacy_lang(lang): + if lang == "zh": + return Chinese() + elif lang == "ja": + return Japanese() + elif lang == "ar": + return Arabic() + elif lang == "es": + return Spanish() + else: + # For most languages, Enlish does the job + return English() + +def split_sentence(text, lang, text_split_length=250): + """Preprocess the input text""" + text_splits = [] + if text_split_length is not None and len(text) >= text_split_length: + text_splits.append("") + nlp = get_spacy_lang(lang) + nlp.add_pipe("sentencizer") + doc = nlp(text) + for sentence in doc.sents: + if len(text_splits[-1]) + len(str(sentence)) <= text_split_length: + # if the last sentence + the current sentence is less than the text_split_length + # then add the current sentence to the last sentence + text_splits[-1] += " " + str(sentence) + text_splits[-1] = text_splits[-1].lstrip() + elif len(str(sentence)) > text_split_length: + # if the current sentence is greater than the text_split_length + for line in textwrap.wrap( + str(sentence), + width=text_split_length, + drop_whitespace=True, + break_on_hyphens=False, + tabsize=1, + ): + text_splits.append(str(line)) + else: + text_splits.append(str(sentence)) + + if len(text_splits) > 1: + if text_splits[0] == "": + del text_splits[0] + else: + text_splits = [text.lstrip()] + + return text_splits + _whitespace_re = re.compile(r"\s+") # List of (regular expression, replacement) pairs for abbreviations: @@ -464,7 +519,7 @@ def _expand_number(m, lang="en"): def expand_numbers_multilingual(text, lang="en"): - if lang == "zh" or lang == "zh-cn": + if lang == "zh": text = zh_num2words()(text) else: if lang in ["en", "ru"]: @@ -525,7 +580,7 @@ def japanese_cleaners(text, katsu): return text -def korean_cleaners(text): +def korean_transliterate(text): r = Transliter(academic) return r.translit(text) @@ -546,7 +601,7 @@ class VoiceBpeTokenizer: "it": 213, "pt": 203, "pl": 224, - "zh-cn": 82, + "zh": 82, "ar": 166, "cs": 186, "ru": 182, @@ -571,19 +626,20 @@ class VoiceBpeTokenizer: ) def preprocess_text(self, txt, lang): - if lang in {"ar", "cs", "de", "en", "es", "fr", "hu", "it", "nl", "pl", "pt", "ru", "tr", "zh-cn", "zh-cn"}: + if lang in {"ar", "cs", "de", "en", "es", "fr", "hu", "it", "nl", "pl", "pt", "ru", "tr", "zh-cn", "ko"}: txt = multilingual_cleaners(txt, lang) - if lang in {"zh", "zh-cn"}: + if lang == "zh": txt = chinese_transliterate(txt) + if lang == "ko": + txt = korean_transliterate(txt) elif lang == "ja": txt = japanese_cleaners(txt, self.katsu) - elif lang == "ko": - txt = korean_cleaners(txt) else: raise NotImplementedError(f"Language '{lang}' is not supported.") return txt def encode(self, txt, lang): + lang = lang.split("-")[0] # remove the region self.check_input_length(txt, lang) txt = self.preprocess_text(txt, lang) txt = f"[{lang}]{txt}" diff --git a/TTS/tts/models/xtts.py 
b/TTS/tts/models/xtts.py index f37f0844..5ccb26c3 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -10,7 +10,7 @@ from coqpit import Coqpit from TTS.tts.layers.xtts.gpt import GPT from TTS.tts.layers.xtts.hifigan_decoder import HifiDecoder from TTS.tts.layers.xtts.stream_generator import init_stream_support -from TTS.tts.layers.xtts.tokenizer import VoiceBpeTokenizer +from TTS.tts.layers.xtts.tokenizer import VoiceBpeTokenizer, split_sentence from TTS.tts.models.base_tts import BaseTTS from TTS.utils.io import load_fsspec @@ -420,9 +420,9 @@ class Xtts(BaseTTS): ref_audio_path, language, # GPT inference - temperature=0.65, - length_penalty=1, - repetition_penalty=2.0, + temperature=0.75, + length_penalty=1.0, + repetition_penalty=10.0, top_k=50, top_p=0.85, do_sample=True, @@ -502,71 +502,78 @@ class Xtts(BaseTTS): gpt_cond_latent, speaker_embedding, # GPT inference - temperature=0.65, - length_penalty=1, - repetition_penalty=2.0, + temperature=0.75, + length_penalty=1.0, + repetition_penalty=10.0, top_k=50, top_p=0.85, do_sample=True, num_beams=1, speed=1.0, + enable_text_splitting=False, **hf_generate_kwargs, ): + language = language.split("-")[0] # remove the country code length_scale = 1.0 / max(speed, 0.05) - text = text.strip().lower() - text_tokens = torch.IntTensor(self.tokenizer.encode(text, lang=language)).unsqueeze(0).to(self.device) + if enable_text_splitting: + text = split_sentence(text, language, self.tokenizer.char_limits[language]) + else: + text = [text] + + wavs = [] + gpt_latents_list = [] + for sent in text: + sent = sent.strip().lower() + text_tokens = torch.IntTensor(self.tokenizer.encode(sent, lang=language)).unsqueeze(0).to(self.device) - # print(" > Input text: ", text) - # print(" > Input text preprocessed: ",self.tokenizer.preprocess_text(text, language)) - # print(" > Input tokens: ", text_tokens) - # print(" > Decoded text: ", self.tokenizer.decode(text_tokens[0].cpu().numpy())) - assert ( - text_tokens.shape[-1] < self.args.gpt_max_text_tokens - ), " ❗ XTTS can only generate text with a maximum of 400 tokens." + assert ( + text_tokens.shape[-1] < self.args.gpt_max_text_tokens + ), " ❗ XTTS can only generate text with a maximum of 400 tokens." 
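A note on the sentence splitting wired into inference() above: the split_sentence helper introduced by this patch relies on spaCy's rule-based sentencizer (a blank language pipeline, so no pretrained model is downloaded) and falls back to textwrap for a single sentence that alone exceeds the character limit. The following is a minimal, self-contained sketch of that idea, assuming the 250-character default used in the patch; the function name and the exact packing details are illustrative, not the library's API.

    import textwrap
    from spacy.lang.en import English

    def split_long_text(text, max_len=250):
        # Rule-based sentence boundaries only; no statistical model required.
        nlp = English()
        nlp.add_pipe("sentencizer")
        chunks = [""]
        for sent in nlp(text).sents:
            sent = str(sent).strip()
            if len(chunks[-1]) + len(sent) + 1 <= max_len:
                # Greedily pack sentences until the next one would overflow the chunk.
                chunks[-1] = (chunks[-1] + " " + sent).strip()
            elif len(sent) > max_len:
                # A single over-long sentence is hard-wrapped on whitespace instead.
                chunks.extend(textwrap.wrap(sent, width=max_len, break_on_hyphens=False))
            else:
                chunks.append(sent)
        return [c for c in chunks if c]

Keeping the splitter rule-based is what lets the patch cover additional languages by importing only the blank spaCy language classes rather than full per-language models.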
- with torch.no_grad(): - gpt_codes = self.gpt.generate( - cond_latents=gpt_cond_latent, - text_inputs=text_tokens, - input_tokens=None, - do_sample=do_sample, - top_p=top_p, - top_k=top_k, - temperature=temperature, - num_return_sequences=self.gpt_batch_size, - num_beams=num_beams, - length_penalty=length_penalty, - repetition_penalty=repetition_penalty, - output_attentions=False, - **hf_generate_kwargs, - ) - expected_output_len = torch.tensor( - [gpt_codes.shape[-1] * self.gpt.code_stride_len], device=text_tokens.device - ) + with torch.no_grad(): + gpt_codes = self.gpt.generate( + cond_latents=gpt_cond_latent, + text_inputs=text_tokens, + input_tokens=None, + do_sample=do_sample, + top_p=top_p, + top_k=top_k, + temperature=temperature, + num_return_sequences=self.gpt_batch_size, + num_beams=num_beams, + length_penalty=length_penalty, + repetition_penalty=repetition_penalty, + output_attentions=False, + **hf_generate_kwargs, + ) + expected_output_len = torch.tensor( + [gpt_codes.shape[-1] * self.gpt.code_stride_len], device=text_tokens.device + ) - text_len = torch.tensor([text_tokens.shape[-1]], device=self.device) - gpt_latents = self.gpt( - text_tokens, - text_len, - gpt_codes, - expected_output_len, - cond_latents=gpt_cond_latent, - return_attentions=False, - return_latent=True, - ) + text_len = torch.tensor([text_tokens.shape[-1]], device=self.device) + gpt_latents = self.gpt( + text_tokens, + text_len, + gpt_codes, + expected_output_len, + cond_latents=gpt_cond_latent, + return_attentions=False, + return_latent=True, + ) - if length_scale != 1.0: - gpt_latents = F.interpolate( - gpt_latents.transpose(1, 2), - scale_factor=length_scale, - mode="linear" - ).transpose(1, 2) + if length_scale != 1.0: + gpt_latents = F.interpolate( + gpt_latents.transpose(1, 2), + scale_factor=length_scale, + mode="linear" + ).transpose(1, 2) - wav = self.hifigan_decoder(gpt_latents, g=speaker_embedding) + gpt_latents_list.append(gpt_latents.cpu()) + wavs.append(self.hifigan_decoder(gpt_latents, g=speaker_embedding).cpu().squeeze()) return { - "wav": wav.cpu().numpy().squeeze(), - "gpt_latents": gpt_latents, + "wav": torch.cat(wavs, dim=0).numpy(), + "gpt_latents": torch.cat(gpt_latents_list, dim=1).numpy(), "speaker_embedding": speaker_embedding, } @@ -606,66 +613,78 @@ class Xtts(BaseTTS): stream_chunk_size=20, overlap_wav_len=1024, # GPT inference - temperature=0.65, - length_penalty=1, - repetition_penalty=2.0, + temperature=0.75, + length_penalty=1.0, + repetition_penalty=10.0, top_k=50, top_p=0.85, do_sample=True, speed=1.0, + enable_text_splitting=False, **hf_generate_kwargs, ): + language = language.split("-")[0] # remove the country code length_scale = 1.0 / max(speed, 0.05) - text = text.strip().lower() - text_tokens = torch.IntTensor(self.tokenizer.encode(text, lang=language)).unsqueeze(0).to(self.device) + if enable_text_splitting: + text = split_sentence(text, language, self.tokenizer.char_limits[language]) + else: + text = [text] - fake_inputs = self.gpt.compute_embeddings( - gpt_cond_latent.to(self.device), - text_tokens, - ) - gpt_generator = self.gpt.get_generator( - fake_inputs=fake_inputs, - top_k=top_k, - top_p=top_p, - temperature=temperature, - do_sample=do_sample, - num_beams=1, - num_return_sequences=1, - length_penalty=float(length_penalty), - repetition_penalty=float(repetition_penalty), - output_attentions=False, - output_hidden_states=True, - **hf_generate_kwargs, - ) + for sent in text: + sent = sent.strip().lower() + text_tokens = 
torch.IntTensor(self.tokenizer.encode(sent, lang=language)).unsqueeze(0).to(self.device) - last_tokens = [] - all_latents = [] - wav_gen_prev = None - wav_overlap = None - is_end = False + assert ( + text_tokens.shape[-1] < self.args.gpt_max_text_tokens + ), " ❗ XTTS can only generate text with a maximum of 400 tokens." - while not is_end: - try: - x, latent = next(gpt_generator) - last_tokens += [x] - all_latents += [latent] - except StopIteration: - is_end = True + fake_inputs = self.gpt.compute_embeddings( + gpt_cond_latent.to(self.device), + text_tokens, + ) + gpt_generator = self.gpt.get_generator( + fake_inputs=fake_inputs, + top_k=top_k, + top_p=top_p, + temperature=temperature, + do_sample=do_sample, + num_beams=1, + num_return_sequences=1, + length_penalty=float(length_penalty), + repetition_penalty=float(repetition_penalty), + output_attentions=False, + output_hidden_states=True, + **hf_generate_kwargs, + ) - if is_end or (stream_chunk_size > 0 and len(last_tokens) >= stream_chunk_size): - gpt_latents = torch.cat(all_latents, dim=0)[None, :] - if length_scale != 1.0: - gpt_latents = F.interpolate( - gpt_latents.transpose(1, 2), - scale_factor=length_scale, - mode="linear" - ).transpose(1, 2) - wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) - wav_chunk, wav_gen_prev, wav_overlap = self.handle_chunks( - wav_gen.squeeze(), wav_gen_prev, wav_overlap, overlap_wav_len - ) - last_tokens = [] - yield wav_chunk + last_tokens = [] + all_latents = [] + wav_gen_prev = None + wav_overlap = None + is_end = False + + while not is_end: + try: + x, latent = next(gpt_generator) + last_tokens += [x] + all_latents += [latent] + except StopIteration: + is_end = True + + if is_end or (stream_chunk_size > 0 and len(last_tokens) >= stream_chunk_size): + gpt_latents = torch.cat(all_latents, dim=0)[None, :] + if length_scale != 1.0: + gpt_latents = F.interpolate( + gpt_latents.transpose(1, 2), + scale_factor=length_scale, + mode="linear" + ).transpose(1, 2) + wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) + wav_chunk, wav_gen_prev, wav_overlap = self.handle_chunks( + wav_gen.squeeze(), wav_gen_prev, wav_overlap, overlap_wav_len + ) + last_tokens = [] + yield wav_chunk def forward(self): raise NotImplementedError( diff --git a/requirements.txt b/requirements.txt index 53e8af59..836de40a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -54,3 +54,4 @@ encodec==0.1.* # deps for XTTS unidecode==1.3.* num2words +spacy[ja]>=3 \ No newline at end of file From fbc18b8c34e2a1129e8e573cbd4cf104983408ce Mon Sep 17 00:00:00 2001 From: Julian Weber Date: Thu, 16 Nov 2023 17:51:37 +0100 Subject: [PATCH 49/67] Fix zh bug (#3238) --- TTS/tts/layers/xtts/tokenizer.py | 14 ++++++++------ TTS/tts/models/xtts.py | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py index 56eb78ae..1ef655a3 100644 --- a/TTS/tts/layers/xtts/tokenizer.py +++ b/TTS/tts/layers/xtts/tokenizer.py @@ -170,7 +170,7 @@ _abbreviations = { # There are not many common abbreviations in Arabic as in English. ] ], - "zh-cn": [ + "zh": [ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1]) for x in [ # Chinese doesn't typically use abbreviations in the same way as Latin-based scripts. 
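For readers following the "zh-cn" to "zh" key rename in the _abbreviations table above: each language key maps to a list of (compiled pattern, expansion) pairs that the multilingual cleaners apply with a plain substitution loop. A toy illustration of how such a table is consumed; the two English entries are examples only, not the full list shipped with the tokenizer.

    import re

    # Same shape as the _abbreviations table: (compiled pattern, replacement) pairs.
    abbreviations = [
        (re.compile(r"\b%s\." % abbr, re.IGNORECASE), expansion)
        for abbr, expansion in [("mrs", "misess"), ("dr", "doctor")]
    ]

    def expand_abbreviations(text):
        for pattern, expansion in abbreviations:
            text = pattern.sub(expansion, text)
        return text

    print(expand_abbreviations("Dr. Smith greeted Mrs. Jones."))
    # -> "doctor Smith greeted misess Jones."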
@@ -335,7 +335,7 @@ _symbols_multilingual = { ("°", " درجة "), ] ], - "zh-cn": [ + "zh": [ # Chinese (re.compile(r"%s" % re.escape(x[0]), re.IGNORECASE), x[1]) for x in [ @@ -619,6 +619,7 @@ class VoiceBpeTokenizer: return cutlet.Cutlet() def check_input_length(self, txt, lang): + lang = lang.split("-")[0] # remove the region limit = self.char_limits.get(lang, 250) if len(txt) > limit: print( @@ -626,7 +627,7 @@ class VoiceBpeTokenizer: ) def preprocess_text(self, txt, lang): - if lang in {"ar", "cs", "de", "en", "es", "fr", "hu", "it", "nl", "pl", "pt", "ru", "tr", "zh-cn", "ko"}: + if lang in {"ar", "cs", "de", "en", "es", "fr", "hu", "it", "nl", "pl", "pt", "ru", "tr", "zh", "ko"}: txt = multilingual_cleaners(txt, lang) if lang == "zh": txt = chinese_transliterate(txt) @@ -642,6 +643,7 @@ class VoiceBpeTokenizer: lang = lang.split("-")[0] # remove the region self.check_input_length(txt, lang) txt = self.preprocess_text(txt, lang) + lang = "zh-cn" if lang == "zh" else lang txt = f"[{lang}]{txt}" txt = txt.replace(" ", "[SPACE]") return self.tokenizer.encode(txt).ids @@ -738,8 +740,8 @@ def test_expand_numbers_multilingual(): ("Dat wordt dan $20 meneer.", "Dat wordt dan twintig dollar meneer.", "nl"), ("Dat wordt dan 20€ meneer.", "Dat wordt dan twintig euro meneer.", "nl"), # Chinese (Simplified) - ("在12.5秒内", "在十二点五秒内", "zh-cn"), - ("有50名士兵", "有五十名士兵", "zh-cn"), + ("在12.5秒内", "在十二点五秒内", "zh"), + ("有50名士兵", "有五十名士兵", "zh"), # ("那将是$20先生", '那将是二十美元先生', 'zh'), currency doesn't work # ("那将是20€先生", '那将是二十欧元先生', 'zh'), # Turkish @@ -820,7 +822,7 @@ def test_symbols_multilingual(): ("Ik heb 14% batterij", "Ik heb 14 procent batterij", "nl"), ("Ik zie je @ het feest", "Ik zie je bij het feest", "nl"), ("لدي 14% في البطارية", "لدي 14 في المئة في البطارية", "ar"), - ("我的电量为 14%", "我的电量为 14 百分之", "zh-cn"), + ("我的电量为 14%", "我的电量为 14 百分之", "zh"), ("Pilim %14 dolu.", "Pilim yüzde 14 dolu.", "tr"), ("Az akkumulátorom töltöttsége 14%", "Az akkumulátorom töltöttsége 14 százalék", "hu"), ("배터리 잔량이 14%입니다.", "배터리 잔량이 14 퍼센트입니다.", "ko"), diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 5ccb26c3..3583591f 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -396,7 +396,7 @@ class Xtts(BaseTTS): inference with config """ assert ( - language in self.config.languages + "zh-cn" if language == "zh" else language in self.config.languages ), f" ❗ Language {language} is not supported. Supported languages are {self.config.languages}" # Use generally found best tuning knobs for generation. 
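The zh fix in this commit comes down to normalizing a region-tagged code to its bare language for cleaning and transliteration, while still emitting the [zh-cn] tag that the trained BPE vocabulary expects. A small sketch of that round-trip, mirroring the split("-") and the "zh" to "zh-cn" remapping in encode() above; the helper names here are placeholders for illustration.

    def normalize_lang(lang):
        return lang.split("-")[0]  # "zh-cn" -> "zh", "pt-br" -> "pt", "en" -> "en"

    def tokenizer_prefix(lang):
        lang = normalize_lang(lang)
        # The BPE vocabulary was trained with the legacy [zh-cn] tag, so map it back.
        return "[{}]".format("zh-cn" if lang == "zh" else lang)

    assert tokenizer_prefix("zh") == "[zh-cn]"
    assert tokenizer_prefix("zh-cn") == "[zh-cn]"
    assert tokenizer_prefix("en") == "[en]"

This is why the cleaner tables and the number-expansion tests switch to plain "zh" keys, while the prompt prefix keeps "zh-cn".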
settings = { From 7e4375da2bc463636384b0d3ac6d340747e531b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 16 Nov 2023 17:52:13 +0100 Subject: [PATCH 50/67] Update to v0.20.6 --- TTS/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/VERSION b/TTS/VERSION index 1b619f34..752e6303 100644 --- a/TTS/VERSION +++ b/TTS/VERSION @@ -1 +1 @@ -0.20.5 +0.20.6 From fdf0c8b10a00404b51bdd62bf62231c0dbf4e50f Mon Sep 17 00:00:00 2001 From: Enno Hermann Date: Thu, 16 Nov 2023 23:40:21 +0100 Subject: [PATCH 51/67] chore(encoder): remove unused code --- TTS/encoder/utils/generic_utils.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/TTS/encoder/utils/generic_utils.py b/TTS/encoder/utils/generic_utils.py index 1da02961..bbce6a8a 100644 --- a/TTS/encoder/utils/generic_utils.py +++ b/TTS/encoder/utils/generic_utils.py @@ -2,7 +2,6 @@ import datetime import glob import os import random -import re import numpy as np from scipy import signal @@ -118,11 +117,6 @@ class AugmentWAV(object): return self.additive_noise(noise_type, audio) -def to_camel(text): - text = text.capitalize() - return re.sub(r"(?!^)_([a-zA-Z])", lambda m: m.group(1).upper(), text) - - def setup_encoder_model(config: "Coqpit"): if config.model_params["model_name"].lower() == "lstm": model = LSTMSpeakerEncoder( From 39fe38bda4d6937336255d32e542d4f84dd0fe15 Mon Sep 17 00:00:00 2001 From: Enno Hermann Date: Thu, 16 Nov 2023 23:46:26 +0100 Subject: [PATCH 52/67] refactor: use save_fsspec() from Trainer --- TTS/encoder/utils/generic_utils.py | 2 +- TTS/encoder/utils/io.py | 2 +- TTS/utils/io.py | 13 +------------ 3 files changed, 3 insertions(+), 14 deletions(-) diff --git a/TTS/encoder/utils/generic_utils.py b/TTS/encoder/utils/generic_utils.py index bbce6a8a..2b003ac8 100644 --- a/TTS/encoder/utils/generic_utils.py +++ b/TTS/encoder/utils/generic_utils.py @@ -5,10 +5,10 @@ import random import numpy as np from scipy import signal +from trainer.io import save_fsspec from TTS.encoder.models.lstm import LSTMSpeakerEncoder from TTS.encoder.models.resnet import ResNetSpeakerEncoder -from TTS.utils.io import save_fsspec class AugmentWAV(object): diff --git a/TTS/encoder/utils/io.py b/TTS/encoder/utils/io.py index d1dad3e2..a8359be1 100644 --- a/TTS/encoder/utils/io.py +++ b/TTS/encoder/utils/io.py @@ -1,7 +1,7 @@ import datetime import os -from TTS.utils.io import save_fsspec +from trainer.io import save_fsspec def save_checkpoint(model, optimizer, model_loss, out_path, current_step): diff --git a/TTS/utils/io.py b/TTS/utils/io.py index e9bdf3e6..9ab1075c 100644 --- a/TTS/utils/io.py +++ b/TTS/utils/io.py @@ -8,6 +8,7 @@ from typing import Any, Callable, Dict, Union import fsspec import torch from coqpit import Coqpit +from trainer.io import save_fsspec from TTS.utils.generic_utils import get_user_data_dir @@ -102,18 +103,6 @@ def load_checkpoint( return model, state -def save_fsspec(state: Any, path: str, **kwargs): - """Like torch.save but can save to other locations (e.g. s3:// , gs://). - - Args: - state: State object to save - path: Any path or url supported by fsspec. - **kwargs: Keyword arguments forwarded to torch.save. 
- """ - with fsspec.open(path, "wb") as f: - torch.save(state, f, **kwargs) - - def save_model(config, model, optimizer, scaler, current_step, epoch, output_path, **kwargs): if hasattr(model, "module"): model_state = model.module.state_dict() From 5119e651a1dbccdc4e5fdb47dc386d33f378e621 Mon Sep 17 00:00:00 2001 From: Enno Hermann Date: Thu, 16 Nov 2023 23:52:28 +0100 Subject: [PATCH 53/67] chore(utils.io): remove unused code These are all available in Trainer. --- TTS/utils/io.py | 104 ------------------------------------------------ 1 file changed, 104 deletions(-) diff --git a/TTS/utils/io.py b/TTS/utils/io.py index 9ab1075c..7aaedbe2 100644 --- a/TTS/utils/io.py +++ b/TTS/utils/io.py @@ -1,4 +1,3 @@ -import datetime import json import os import pickle as pickle_tts @@ -8,7 +7,6 @@ from typing import Any, Callable, Dict, Union import fsspec import torch from coqpit import Coqpit -from trainer.io import save_fsspec from TTS.utils.generic_utils import get_user_data_dir @@ -101,105 +99,3 @@ def load_checkpoint( if eval: model.eval() return model, state - - -def save_model(config, model, optimizer, scaler, current_step, epoch, output_path, **kwargs): - if hasattr(model, "module"): - model_state = model.module.state_dict() - else: - model_state = model.state_dict() - if isinstance(optimizer, list): - optimizer_state = [optim.state_dict() for optim in optimizer] - elif optimizer.__class__.__name__ == "CapacitronOptimizer": - optimizer_state = [optimizer.primary_optimizer.state_dict(), optimizer.secondary_optimizer.state_dict()] - else: - optimizer_state = optimizer.state_dict() if optimizer is not None else None - - if isinstance(scaler, list): - scaler_state = [s.state_dict() for s in scaler] - else: - scaler_state = scaler.state_dict() if scaler is not None else None - - if isinstance(config, Coqpit): - config = config.to_dict() - - state = { - "config": config, - "model": model_state, - "optimizer": optimizer_state, - "scaler": scaler_state, - "step": current_step, - "epoch": epoch, - "date": datetime.date.today().strftime("%B %d, %Y"), - } - state.update(kwargs) - save_fsspec(state, output_path) - - -def save_checkpoint( - config, - model, - optimizer, - scaler, - current_step, - epoch, - output_folder, - **kwargs, -): - file_name = "checkpoint_{}.pth".format(current_step) - checkpoint_path = os.path.join(output_folder, file_name) - print("\n > CHECKPOINT : {}".format(checkpoint_path)) - save_model( - config, - model, - optimizer, - scaler, - current_step, - epoch, - checkpoint_path, - **kwargs, - ) - - -def save_best_model( - current_loss, - best_loss, - config, - model, - optimizer, - scaler, - current_step, - epoch, - out_path, - keep_all_best=False, - keep_after=10000, - **kwargs, -): - if current_loss < best_loss: - best_model_name = f"best_model_{current_step}.pth" - checkpoint_path = os.path.join(out_path, best_model_name) - print(" > BEST MODEL : {}".format(checkpoint_path)) - save_model( - config, - model, - optimizer, - scaler, - current_step, - epoch, - checkpoint_path, - model_loss=current_loss, - **kwargs, - ) - fs = fsspec.get_mapper(out_path).fs - # only delete previous if current is saved successfully - if not keep_all_best or (current_step < keep_after): - model_names = fs.glob(os.path.join(out_path, "best_model*.pth")) - for model_name in model_names: - if os.path.basename(model_name) != best_model_name: - fs.rm(model_name) - # create a shortcut which always points to the currently best model - shortcut_name = "best_model.pth" - shortcut_path = os.path.join(out_path, 
shortcut_name) - fs.copy(checkpoint_path, shortcut_path) - best_loss = current_loss - return best_loss From 96678c7ba227871d0929f2366d083219ccfa9262 Mon Sep 17 00:00:00 2001 From: Enno Hermann Date: Fri, 17 Nov 2023 00:12:09 +0100 Subject: [PATCH 54/67] refactor: use copy_model_files() from Trainer --- TTS/bin/train_encoder.py | 4 ++-- TTS/encoder/utils/training.py | 2 +- TTS/utils/io.py | 31 ------------------------------- 3 files changed, 3 insertions(+), 34 deletions(-) diff --git a/TTS/bin/train_encoder.py b/TTS/bin/train_encoder.py index f2e7779c..c4fb920f 100644 --- a/TTS/bin/train_encoder.py +++ b/TTS/bin/train_encoder.py @@ -8,6 +8,7 @@ import traceback import torch from torch.utils.data import DataLoader +from trainer.io import copy_model_files from trainer.torch import NoamLR from trainer.trainer_utils import get_optimizer @@ -18,7 +19,6 @@ from TTS.encoder.utils.visual import plot_embeddings from TTS.tts.datasets import load_tts_samples from TTS.utils.audio import AudioProcessor from TTS.utils.generic_utils import count_parameters, remove_experiment_folder -from TTS.utils.io import copy_model_files from TTS.utils.samplers import PerfectBatchSampler from TTS.utils.training import check_update @@ -276,7 +276,7 @@ def main(args): # pylint: disable=redefined-outer-name if c.loss == "softmaxproto" and c.model != "speaker_encoder": c.map_classid_to_classname = map_classid_to_classname - copy_model_files(c, OUT_PATH) + copy_model_files(c, OUT_PATH, new_fields={}) if args.restore_path: criterion, args.restore_step = model.load_checkpoint( diff --git a/TTS/encoder/utils/training.py b/TTS/encoder/utils/training.py index 7c58a232..ff8f271d 100644 --- a/TTS/encoder/utils/training.py +++ b/TTS/encoder/utils/training.py @@ -3,13 +3,13 @@ from dataclasses import dataclass, field from coqpit import Coqpit from trainer import TrainerArgs, get_last_checkpoint +from trainer.io import copy_model_files from trainer.logging import logger_factory from trainer.logging.console_logger import ConsoleLogger from TTS.config import load_config, register_config from TTS.tts.utils.text.characters import parse_symbols from TTS.utils.generic_utils import get_experiment_folder_path, get_git_branch -from TTS.utils.io import copy_model_files @dataclass diff --git a/TTS/utils/io.py b/TTS/utils/io.py index 7aaedbe2..3107ba66 100644 --- a/TTS/utils/io.py +++ b/TTS/utils/io.py @@ -1,12 +1,9 @@ -import json import os import pickle as pickle_tts -import shutil from typing import Any, Callable, Dict, Union import fsspec import torch -from coqpit import Coqpit from TTS.utils.generic_utils import get_user_data_dir @@ -27,34 +24,6 @@ class AttrDict(dict): self.__dict__ = self -def copy_model_files(config: Coqpit, out_path, new_fields=None): - """Copy config.json and other model files to training folder and add - new fields. - - Args: - config (Coqpit): Coqpit config defining the training run. - out_path (str): output path to copy the file. - new_fields (dict): new fileds to be added or edited - in the config file. - """ - copy_config_path = os.path.join(out_path, "config.json") - # add extra information fields - if new_fields: - config.update(new_fields, allow_new=True) - # TODO: Revert to config.save_json() once Coqpit supports arbitrary paths. 
- with fsspec.open(copy_config_path, "w", encoding="utf8") as f: - json.dump(config.to_dict(), f, indent=4) - - # copy model stats file if available - if config.audio.stats_path is not None: - copy_stats_path = os.path.join(out_path, "scale_stats.npy") - filesystem = fsspec.get_mapper(copy_stats_path).fs - if not filesystem.exists(copy_stats_path): - with fsspec.open(config.audio.stats_path, "rb") as source_file: - with fsspec.open(copy_stats_path, "wb") as target_file: - shutil.copyfileobj(source_file, target_file) - - def load_fsspec( path: str, map_location: Union[str, Callable, torch.device, Dict[Union[str, torch.device], Union[str, torch.device]]] = None, From 0fb0d67de7bd05ef4afd80f05e242217e9800c80 Mon Sep 17 00:00:00 2001 From: Enno Hermann Date: Fri, 17 Nov 2023 00:39:11 +0100 Subject: [PATCH 55/67] refactor: use save_checkpoint()/save_best_model() from Trainer --- TTS/bin/train_encoder.py | 21 +++++++++--- TTS/encoder/utils/generic_utils.py | 40 ----------------------- TTS/encoder/utils/io.py | 38 --------------------- tests/aux_tests/test_embedding_manager.py | 4 +-- tests/aux_tests/test_speaker_manager.py | 4 +-- tests/inference_tests/test_synthesizer.py | 3 +- 6 files changed, 23 insertions(+), 87 deletions(-) delete mode 100644 TTS/encoder/utils/io.py diff --git a/TTS/bin/train_encoder.py b/TTS/bin/train_encoder.py index c4fb920f..448fefc7 100644 --- a/TTS/bin/train_encoder.py +++ b/TTS/bin/train_encoder.py @@ -8,12 +8,12 @@ import traceback import torch from torch.utils.data import DataLoader -from trainer.io import copy_model_files +from trainer.io import copy_model_files, save_best_model, save_checkpoint from trainer.torch import NoamLR from trainer.trainer_utils import get_optimizer from TTS.encoder.dataset import EncoderDataset -from TTS.encoder.utils.generic_utils import save_best_model, save_checkpoint, setup_encoder_model +from TTS.encoder.utils.generic_utils import setup_encoder_model from TTS.encoder.utils.training import init_training from TTS.encoder.utils.visual import plot_embeddings from TTS.tts.datasets import load_tts_samples @@ -222,7 +222,9 @@ def train(model, optimizer, scheduler, criterion, data_loader, eval_data_loader, if global_step % c.save_step == 0: # save model - save_checkpoint(model, optimizer, criterion, loss.item(), OUT_PATH, global_step, epoch) + save_checkpoint( + c, model, optimizer, None, global_step, epoch, OUT_PATH, criterion=criterion.state_dict() + ) end_time = time.time() @@ -245,7 +247,18 @@ def train(model, optimizer, scheduler, criterion, data_loader, eval_data_loader, flush=True, ) # save the best checkpoint - best_loss = save_best_model(model, optimizer, criterion, eval_loss, best_loss, OUT_PATH, global_step, epoch) + best_loss = save_best_model( + eval_loss, + best_loss, + c, + model, + optimizer, + None, + global_step, + epoch, + OUT_PATH, + criterion=criterion.state_dict(), + ) model.train() return best_loss, global_step diff --git a/TTS/encoder/utils/generic_utils.py b/TTS/encoder/utils/generic_utils.py index 2b003ac8..236d6fe9 100644 --- a/TTS/encoder/utils/generic_utils.py +++ b/TTS/encoder/utils/generic_utils.py @@ -1,11 +1,9 @@ -import datetime import glob import os import random import numpy as np from scipy import signal -from trainer.io import save_fsspec from TTS.encoder.models.lstm import LSTMSpeakerEncoder from TTS.encoder.models.resnet import ResNetSpeakerEncoder @@ -136,41 +134,3 @@ def setup_encoder_model(config: "Coqpit"): audio_config=config.audio, ) return model - - -def save_checkpoint(model, optimizer, 
criterion, model_loss, out_path, current_step, epoch): - checkpoint_path = "checkpoint_{}.pth".format(current_step) - checkpoint_path = os.path.join(out_path, checkpoint_path) - print(" | | > Checkpoint saving : {}".format(checkpoint_path)) - - new_state_dict = model.state_dict() - state = { - "model": new_state_dict, - "optimizer": optimizer.state_dict() if optimizer is not None else None, - "criterion": criterion.state_dict(), - "step": current_step, - "epoch": epoch, - "loss": model_loss, - "date": datetime.date.today().strftime("%B %d, %Y"), - } - save_fsspec(state, checkpoint_path) - - -def save_best_model(model, optimizer, criterion, model_loss, best_loss, out_path, current_step, epoch): - if model_loss < best_loss: - new_state_dict = model.state_dict() - state = { - "model": new_state_dict, - "optimizer": optimizer.state_dict(), - "criterion": criterion.state_dict(), - "step": current_step, - "epoch": epoch, - "loss": model_loss, - "date": datetime.date.today().strftime("%B %d, %Y"), - } - best_loss = model_loss - bestmodel_path = "best_model.pth" - bestmodel_path = os.path.join(out_path, bestmodel_path) - print("\n > BEST MODEL ({0:.5f}) : {1:}".format(model_loss, bestmodel_path)) - save_fsspec(state, bestmodel_path) - return best_loss diff --git a/TTS/encoder/utils/io.py b/TTS/encoder/utils/io.py deleted file mode 100644 index a8359be1..00000000 --- a/TTS/encoder/utils/io.py +++ /dev/null @@ -1,38 +0,0 @@ -import datetime -import os - -from trainer.io import save_fsspec - - -def save_checkpoint(model, optimizer, model_loss, out_path, current_step): - checkpoint_path = "checkpoint_{}.pth".format(current_step) - checkpoint_path = os.path.join(out_path, checkpoint_path) - print(" | | > Checkpoint saving : {}".format(checkpoint_path)) - - new_state_dict = model.state_dict() - state = { - "model": new_state_dict, - "optimizer": optimizer.state_dict() if optimizer is not None else None, - "step": current_step, - "loss": model_loss, - "date": datetime.date.today().strftime("%B %d, %Y"), - } - save_fsspec(state, checkpoint_path) - - -def save_best_model(model, optimizer, model_loss, best_loss, out_path, current_step): - if model_loss < best_loss: - new_state_dict = model.state_dict() - state = { - "model": new_state_dict, - "optimizer": optimizer.state_dict(), - "step": current_step, - "loss": model_loss, - "date": datetime.date.today().strftime("%B %d, %Y"), - } - best_loss = model_loss - bestmodel_path = "best_model.pth" - bestmodel_path = os.path.join(out_path, bestmodel_path) - print("\n > BEST MODEL ({0:.5f}) : {1:}".format(model_loss, bestmodel_path)) - save_fsspec(state, bestmodel_path) - return best_loss diff --git a/tests/aux_tests/test_embedding_manager.py b/tests/aux_tests/test_embedding_manager.py index 73921501..e3acd62b 100644 --- a/tests/aux_tests/test_embedding_manager.py +++ b/tests/aux_tests/test_embedding_manager.py @@ -3,11 +3,11 @@ import unittest import numpy as np import torch +from trainer.io import save_checkpoint from tests import get_tests_input_path from TTS.config import load_config from TTS.encoder.utils.generic_utils import setup_encoder_model -from TTS.encoder.utils.io import save_checkpoint from TTS.tts.utils.managers import EmbeddingManager from TTS.utils.audio import AudioProcessor @@ -31,7 +31,7 @@ class EmbeddingManagerTest(unittest.TestCase): # create a dummy speaker encoder model = setup_encoder_model(config) - save_checkpoint(model, None, None, get_tests_input_path(), 0) + save_checkpoint(config, model, None, None, 0, 0, get_tests_input_path()) # 
load audio processor and speaker encoder manager = EmbeddingManager(encoder_model_path=encoder_model_path, encoder_config_path=encoder_config_path) diff --git a/tests/aux_tests/test_speaker_manager.py b/tests/aux_tests/test_speaker_manager.py index 397f9c81..402fbca4 100644 --- a/tests/aux_tests/test_speaker_manager.py +++ b/tests/aux_tests/test_speaker_manager.py @@ -3,11 +3,11 @@ import unittest import numpy as np import torch +from trainer.io import save_checkpoint from tests import get_tests_input_path from TTS.config import load_config from TTS.encoder.utils.generic_utils import setup_encoder_model -from TTS.encoder.utils.io import save_checkpoint from TTS.tts.utils.speakers import SpeakerManager from TTS.utils.audio import AudioProcessor @@ -30,7 +30,7 @@ class SpeakerManagerTest(unittest.TestCase): # create a dummy speaker encoder model = setup_encoder_model(config) - save_checkpoint(model, None, None, get_tests_input_path(), 0) + save_checkpoint(config, model, None, None, 0, 0, get_tests_input_path()) # load audio processor and speaker encoder ap = AudioProcessor(**config.audio) diff --git a/tests/inference_tests/test_synthesizer.py b/tests/inference_tests/test_synthesizer.py index 40e83017..ce4fc751 100644 --- a/tests/inference_tests/test_synthesizer.py +++ b/tests/inference_tests/test_synthesizer.py @@ -1,10 +1,11 @@ import os import unittest +from trainer.io import save_checkpoint + from tests import get_tests_input_path from TTS.config import load_config from TTS.tts.models import setup_model -from TTS.utils.io import save_checkpoint from TTS.utils.synthesizer import Synthesizer From 63d71456470651d122ace6b67135b3728bfde66b Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Fri, 17 Nov 2023 12:10:46 +0100 Subject: [PATCH 56/67] Update versions --- requirements.txt | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/requirements.txt b/requirements.txt index 836de40a..86421511 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,31 +1,31 @@ # core deps numpy==1.22.0;python_version<="3.10" -numpy==1.24.3;python_version>"3.10" -cython==0.29.30 +numpy>=1.24.3;python_version>"3.10" +cython>=0.29.30 scipy>=1.11.2 torch>=2.1 torchaudio -soundfile==0.12.* -librosa==0.10.* -scikit-learn==1.3.0 +soundfile>=0.12.0 +librosa>=0.10.0 +scikit-learn>=1.3.0 numba==0.55.1;python_version<"3.9" -numba==0.57.0;python_version>="3.9" -inflect==5.6.* -tqdm==4.64.* -anyascii==0.3.* -pyyaml==6.* -fsspec==2023.6.0 # <= 2023.9.1 makes aux tests fail -aiohttp==3.8.* -packaging==23.1 +numba>=0.57.0;python_version>="3.9" +inflect>=5.6.0 +tqdm>=4.64.1 +anyascii>=0.3.0 +pyyaml>=6.0 +fsspec>=2023.6.0 # <= 2023.9.1 makes aux tests fail +aiohttp>=3.8.1 +packaging>=23.1 # deps for examples -flask==2.* +flask>=2.0.1 # deps for inference -pysbd==0.3.4 +pysbd>=0.3.4 # deps for notebooks -umap-learn==0.5.* +umap-learn>=0.5.1 pandas>=1.4,<2.0 # deps for training -matplotlib==3.7.* +matplotlib>=3.7.0 # coqui stack trainer # config management @@ -47,11 +47,11 @@ bnnumerizer bnunicodenormalizer #deps for tortoise k_diffusion -einops==0.6.* -transformers==4.33.* +einops>=0.6.0 +transformers>=4.33.0 #deps for bark -encodec==0.1.* +encodec>=0.1.1 # deps for XTTS -unidecode==1.3.* +unidecode>=1.3.2 num2words spacy[ja]>=3 \ No newline at end of file From 08d11e91987e3af3c8d1e423715e1decfa99f756 Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Fri, 17 Nov 2023 13:01:32 +0100 Subject: [PATCH 57/67] Update CI version --- .github/workflows/pypi-release.yml | 4 ++-- 1 
file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pypi-release.yml b/.github/workflows/pypi-release.yml index 49a5b300..2bbcf3cd 100644 --- a/.github/workflows/pypi-release.yml +++ b/.github/workflows/pypi-release.yml @@ -10,7 +10,7 @@ jobs: build-sdist: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Verify tag matches version run: | set -ex @@ -38,7 +38,7 @@ jobs: matrix: python-version: ["3.9", "3.10", "3.11"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} From 26efdf6ee7feaed7a6b926d3237a393e97814754 Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Fri, 17 Nov 2023 13:42:33 +0100 Subject: [PATCH 58/67] Make k_diffusion optional --- TTS/tts/layers/tortoise/diffusion.py | 13 +++++++++++-- requirements.txt | 1 - 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/TTS/tts/layers/tortoise/diffusion.py b/TTS/tts/layers/tortoise/diffusion.py index cb350af7..fcdaa9d7 100644 --- a/TTS/tts/layers/tortoise/diffusion.py +++ b/TTS/tts/layers/tortoise/diffusion.py @@ -13,12 +13,19 @@ import math import numpy as np import torch import torch as th -from k_diffusion.sampling import sample_dpmpp_2m, sample_euler_ancestral from tqdm import tqdm from TTS.tts.layers.tortoise.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper -K_DIFFUSION_SAMPLERS = {"k_euler_a": sample_euler_ancestral, "dpm++2m": sample_dpmpp_2m} + +try: + from k_diffusion.sampling import sample_dpmpp_2m, sample_euler_ancestral + + K_DIFFUSION_SAMPLERS = {"k_euler_a": sample_euler_ancestral, "dpm++2m": sample_dpmpp_2m} +except ImportError: + K_DIFFUSION_SAMPLERS = None + + SAMPLERS = ["dpm++2m", "p", "ddim"] @@ -531,6 +538,8 @@ class GaussianDiffusion: if self.conditioning_free is not True: raise RuntimeError("cond_free must be true") with tqdm(total=self.num_timesteps) as pbar: + if K_DIFFUSION_SAMPLERS is None: + raise ModuleNotFoundError("Install k_diffusion for using k_diffusion samplers") return self.k_diffusion_sample_loop(K_DIFFUSION_SAMPLERS[s], pbar, *args, **kwargs) else: raise RuntimeError("sampler not impl") diff --git a/requirements.txt b/requirements.txt index 86421511..ce0e5d92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,7 +46,6 @@ bangla bnnumerizer bnunicodenormalizer #deps for tortoise -k_diffusion einops>=0.6.0 transformers>=4.33.0 #deps for bark From 44880f09ed6e4accfb9794a44cc5cf1c383ccc34 Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Fri, 17 Nov 2023 13:43:34 +0100 Subject: [PATCH 59/67] Make style --- TTS/tts/layers/tortoise/diffusion.py | 1 - TTS/tts/layers/xtts/gpt.py | 4 +++- TTS/tts/layers/xtts/tokenizer.py | 23 ++++++++++++----------- TTS/tts/models/xtts.py | 14 +++++--------- tests/zoo_tests/test_models.py | 4 ++-- 5 files changed, 22 insertions(+), 24 deletions(-) diff --git a/TTS/tts/layers/tortoise/diffusion.py b/TTS/tts/layers/tortoise/diffusion.py index fcdaa9d7..7bea02ca 100644 --- a/TTS/tts/layers/tortoise/diffusion.py +++ b/TTS/tts/layers/tortoise/diffusion.py @@ -17,7 +17,6 @@ from tqdm import tqdm from TTS.tts.layers.tortoise.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper - try: from k_diffusion.sampling import sample_dpmpp_2m, sample_euler_ancestral diff --git a/TTS/tts/layers/xtts/gpt.py b/TTS/tts/layers/xtts/gpt.py index d914ebf9..e7b186b8 100644 --- a/TTS/tts/layers/xtts/gpt.py +++ b/TTS/tts/layers/xtts/gpt.py @@ -441,7 +441,9 @@ class GPT(nn.Module): audio_codes = 
F.pad(audio_codes[:, :max_mel_len], (0, 1), value=self.stop_audio_token) # Pad mel codes with stop_audio_token - audio_codes = self.set_mel_padding(audio_codes, code_lengths - 3) # -3 to get the real code lengths without consider start and stop tokens that was not added yet + audio_codes = self.set_mel_padding( + audio_codes, code_lengths - 3 + ) # -3 to get the real code lengths without consider start and stop tokens that was not added yet # Build input and target tensors # Prepend start token to inputs and append stop token to targets diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py index 1ef655a3..52848743 100644 --- a/TTS/tts/layers/xtts/tokenizer.py +++ b/TTS/tts/layers/xtts/tokenizer.py @@ -1,23 +1,22 @@ import os import re -import torch -import pypinyin import textwrap - from functools import cached_property + +import pypinyin +import torch from hangul_romanize import Transliter from hangul_romanize.rule import academic from num2words import num2words +from spacy.lang.ar import Arabic +from spacy.lang.en import English +from spacy.lang.es import Spanish +from spacy.lang.ja import Japanese +from spacy.lang.zh import Chinese from tokenizers import Tokenizer from TTS.tts.layers.xtts.zh_num2words import TextNorm as zh_num2words -from spacy.lang.en import English -from spacy.lang.zh import Chinese -from spacy.lang.ja import Japanese -from spacy.lang.ar import Arabic -from spacy.lang.es import Spanish - def get_spacy_lang(lang): if lang == "zh": @@ -32,6 +31,7 @@ def get_spacy_lang(lang): # For most languages, Enlish does the job return English() + def split_sentence(text, lang, text_split_length=250): """Preprocess the input text""" text_splits = [] @@ -67,6 +67,7 @@ def split_sentence(text, lang, text_split_length=250): return text_splits + _whitespace_re = re.compile(r"\s+") # List of (regular expression, replacement) pairs for abbreviations: @@ -619,7 +620,7 @@ class VoiceBpeTokenizer: return cutlet.Cutlet() def check_input_length(self, txt, lang): - lang = lang.split("-")[0] # remove the region + lang = lang.split("-")[0] # remove the region limit = self.char_limits.get(lang, 250) if len(txt) > limit: print( @@ -640,7 +641,7 @@ class VoiceBpeTokenizer: return txt def encode(self, txt, lang): - lang = lang.split("-")[0] # remove the region + lang = lang.split("-")[0] # remove the region self.check_input_length(txt, lang) txt = self.preprocess_text(txt, lang) lang = "zh-cn" if lang == "zh" else lang diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 3583591f..208ec4d5 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -513,13 +513,13 @@ class Xtts(BaseTTS): enable_text_splitting=False, **hf_generate_kwargs, ): - language = language.split("-")[0] # remove the country code + language = language.split("-")[0] # remove the country code length_scale = 1.0 / max(speed, 0.05) if enable_text_splitting: text = split_sentence(text, language, self.tokenizer.char_limits[language]) else: text = [text] - + wavs = [] gpt_latents_list = [] for sent in text: @@ -563,9 +563,7 @@ class Xtts(BaseTTS): if length_scale != 1.0: gpt_latents = F.interpolate( - gpt_latents.transpose(1, 2), - scale_factor=length_scale, - mode="linear" + gpt_latents.transpose(1, 2), scale_factor=length_scale, mode="linear" ).transpose(1, 2) gpt_latents_list.append(gpt_latents.cpu()) @@ -623,7 +621,7 @@ class Xtts(BaseTTS): enable_text_splitting=False, **hf_generate_kwargs, ): - language = language.split("-")[0] # remove the country code + language = 
language.split("-")[0] # remove the country code length_scale = 1.0 / max(speed, 0.05) if enable_text_splitting: text = split_sentence(text, language, self.tokenizer.char_limits[language]) @@ -675,9 +673,7 @@ class Xtts(BaseTTS): gpt_latents = torch.cat(all_latents, dim=0)[None, :] if length_scale != 1.0: gpt_latents = F.interpolate( - gpt_latents.transpose(1, 2), - scale_factor=length_scale, - mode="linear" + gpt_latents.transpose(1, 2), scale_factor=length_scale, mode="linear" ).transpose(1, 2) wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) wav_chunk, wav_gen_prev, wav_overlap = self.handle_chunks( diff --git a/tests/zoo_tests/test_models.py b/tests/zoo_tests/test_models.py index a5aad5c1..8fa56e28 100644 --- a/tests/zoo_tests/test_models.py +++ b/tests/zoo_tests/test_models.py @@ -186,7 +186,7 @@ def test_xtts_v2_streaming(): "en", gpt_cond_latent, speaker_embedding, - speed=1.5 + speed=1.5, ) wav_chuncks = [] for i, chunk in enumerate(chunks): @@ -198,7 +198,7 @@ def test_xtts_v2_streaming(): "en", gpt_cond_latent, speaker_embedding, - speed=0.66 + speed=0.66, ) wav_chuncks = [] for i, chunk in enumerate(chunks): From 11283fce07fc3cc113bb4b207a086ce217fba0c0 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Fri, 17 Nov 2023 11:13:46 -0300 Subject: [PATCH 60/67] Ensures that only GPT model is in training mode during XTTS GPT training (#3241) * Ensures that only GPT model is in training mode during training * Fix parallel wavegan unit test --- TTS/tts/layers/xtts/trainer/gpt_trainer.py | 7 ++++--- TTS/vocoder/configs/parallel_wavegan_config.py | 1 + requirements.txt | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/TTS/tts/layers/xtts/trainer/gpt_trainer.py b/TTS/tts/layers/xtts/trainer/gpt_trainer.py index 005b30be..4789e1f4 100644 --- a/TTS/tts/layers/xtts/trainer/gpt_trainer.py +++ b/TTS/tts/layers/xtts/trainer/gpt_trainer.py @@ -318,9 +318,10 @@ class GPTTrainer(BaseTTS): batch["cond_idxs"] = None return self.train_step(batch, criterion) - def on_epoch_start(self, trainer): # pylint: disable=W0613 - # guarante that dvae will be in eval mode after .train() on evaluation end - self.dvae = self.dvae.eval() + def on_train_epoch_start(self, trainer): + trainer.model.eval() # the whole model to eval + # put gpt model in training mode + trainer.model.xtts.gpt.train() def on_init_end(self, trainer): # pylint: disable=W0613 # ignore similarities.pth on clearml save/upload diff --git a/TTS/vocoder/configs/parallel_wavegan_config.py b/TTS/vocoder/configs/parallel_wavegan_config.py index 7845dd6b..6059d7f0 100644 --- a/TTS/vocoder/configs/parallel_wavegan_config.py +++ b/TTS/vocoder/configs/parallel_wavegan_config.py @@ -94,6 +94,7 @@ class ParallelWaveganConfig(BaseGANVocoderConfig): use_noise_augment: bool = False use_cache: bool = True steps_to_start_discriminator: int = 200000 + target_loss: str = "loss_1" # LOSS PARAMETERS - overrides use_stft_loss: bool = True diff --git a/requirements.txt b/requirements.txt index ce0e5d92..1f7a44f6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,7 +27,7 @@ pandas>=1.4,<2.0 # deps for training matplotlib>=3.7.0 # coqui stack -trainer +trainer>=0.0.32 # config management coqpit>=0.0.16 # chinese g2p deps From c864acf2b7d280d553b01601a35954ba0a366ab3 Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Fri, 17 Nov 2023 12:10:46 +0100 Subject: [PATCH 61/67] Update versions --- requirements.txt | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff 
--git a/requirements.txt b/requirements.txt index 836de40a..86421511 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,31 +1,31 @@ # core deps numpy==1.22.0;python_version<="3.10" -numpy==1.24.3;python_version>"3.10" -cython==0.29.30 +numpy>=1.24.3;python_version>"3.10" +cython>=0.29.30 scipy>=1.11.2 torch>=2.1 torchaudio -soundfile==0.12.* -librosa==0.10.* -scikit-learn==1.3.0 +soundfile>=0.12.0 +librosa>=0.10.0 +scikit-learn>=1.3.0 numba==0.55.1;python_version<"3.9" -numba==0.57.0;python_version>="3.9" -inflect==5.6.* -tqdm==4.64.* -anyascii==0.3.* -pyyaml==6.* -fsspec==2023.6.0 # <= 2023.9.1 makes aux tests fail -aiohttp==3.8.* -packaging==23.1 +numba>=0.57.0;python_version>="3.9" +inflect>=5.6.0 +tqdm>=4.64.1 +anyascii>=0.3.0 +pyyaml>=6.0 +fsspec>=2023.6.0 # <= 2023.9.1 makes aux tests fail +aiohttp>=3.8.1 +packaging>=23.1 # deps for examples -flask==2.* +flask>=2.0.1 # deps for inference -pysbd==0.3.4 +pysbd>=0.3.4 # deps for notebooks -umap-learn==0.5.* +umap-learn>=0.5.1 pandas>=1.4,<2.0 # deps for training -matplotlib==3.7.* +matplotlib>=3.7.0 # coqui stack trainer # config management @@ -47,11 +47,11 @@ bnnumerizer bnunicodenormalizer #deps for tortoise k_diffusion -einops==0.6.* -transformers==4.33.* +einops>=0.6.0 +transformers>=4.33.0 #deps for bark -encodec==0.1.* +encodec>=0.1.1 # deps for XTTS -unidecode==1.3.* +unidecode>=1.3.2 num2words spacy[ja]>=3 \ No newline at end of file From 44494daa27c5b8e1abd424c4ad5f003e3556ec73 Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Fri, 17 Nov 2023 13:01:32 +0100 Subject: [PATCH 62/67] Update CI version --- .github/workflows/pypi-release.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pypi-release.yml b/.github/workflows/pypi-release.yml index 49a5b300..2bbcf3cd 100644 --- a/.github/workflows/pypi-release.yml +++ b/.github/workflows/pypi-release.yml @@ -10,7 +10,7 @@ jobs: build-sdist: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Verify tag matches version run: | set -ex @@ -38,7 +38,7 @@ jobs: matrix: python-version: ["3.9", "3.10", "3.11"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} From f21067a84a2236d1d8c7c1a8d91ef6704f2dce31 Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Fri, 17 Nov 2023 13:42:33 +0100 Subject: [PATCH 63/67] Make k_diffusion optional --- TTS/tts/layers/tortoise/diffusion.py | 13 +++++++++++-- requirements.txt | 1 - 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/TTS/tts/layers/tortoise/diffusion.py b/TTS/tts/layers/tortoise/diffusion.py index cb350af7..fcdaa9d7 100644 --- a/TTS/tts/layers/tortoise/diffusion.py +++ b/TTS/tts/layers/tortoise/diffusion.py @@ -13,12 +13,19 @@ import math import numpy as np import torch import torch as th -from k_diffusion.sampling import sample_dpmpp_2m, sample_euler_ancestral from tqdm import tqdm from TTS.tts.layers.tortoise.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper -K_DIFFUSION_SAMPLERS = {"k_euler_a": sample_euler_ancestral, "dpm++2m": sample_dpmpp_2m} + +try: + from k_diffusion.sampling import sample_dpmpp_2m, sample_euler_ancestral + + K_DIFFUSION_SAMPLERS = {"k_euler_a": sample_euler_ancestral, "dpm++2m": sample_dpmpp_2m} +except ImportError: + K_DIFFUSION_SAMPLERS = None + + SAMPLERS = ["dpm++2m", "p", "ddim"] @@ -531,6 +538,8 @@ class GaussianDiffusion: if self.conditioning_free is not True: raise RuntimeError("cond_free must be 
true") with tqdm(total=self.num_timesteps) as pbar: + if K_DIFFUSION_SAMPLERS is None: + raise ModuleNotFoundError("Install k_diffusion for using k_diffusion samplers") return self.k_diffusion_sample_loop(K_DIFFUSION_SAMPLERS[s], pbar, *args, **kwargs) else: raise RuntimeError("sampler not impl") diff --git a/requirements.txt b/requirements.txt index 86421511..ce0e5d92 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,7 +46,6 @@ bangla bnnumerizer bnunicodenormalizer #deps for tortoise -k_diffusion einops>=0.6.0 transformers>=4.33.0 #deps for bark From a3279f92942b2d2d0d7a628e79f2002ef5ea88eb Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Fri, 17 Nov 2023 13:43:34 +0100 Subject: [PATCH 64/67] Make style --- TTS/tts/layers/tortoise/diffusion.py | 1 - TTS/tts/layers/xtts/gpt.py | 4 +++- TTS/tts/layers/xtts/tokenizer.py | 23 ++++++++++++----------- TTS/tts/models/xtts.py | 14 +++++--------- tests/zoo_tests/test_models.py | 4 ++-- 5 files changed, 22 insertions(+), 24 deletions(-) diff --git a/TTS/tts/layers/tortoise/diffusion.py b/TTS/tts/layers/tortoise/diffusion.py index fcdaa9d7..7bea02ca 100644 --- a/TTS/tts/layers/tortoise/diffusion.py +++ b/TTS/tts/layers/tortoise/diffusion.py @@ -17,7 +17,6 @@ from tqdm import tqdm from TTS.tts.layers.tortoise.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper - try: from k_diffusion.sampling import sample_dpmpp_2m, sample_euler_ancestral diff --git a/TTS/tts/layers/xtts/gpt.py b/TTS/tts/layers/xtts/gpt.py index d914ebf9..e7b186b8 100644 --- a/TTS/tts/layers/xtts/gpt.py +++ b/TTS/tts/layers/xtts/gpt.py @@ -441,7 +441,9 @@ class GPT(nn.Module): audio_codes = F.pad(audio_codes[:, :max_mel_len], (0, 1), value=self.stop_audio_token) # Pad mel codes with stop_audio_token - audio_codes = self.set_mel_padding(audio_codes, code_lengths - 3) # -3 to get the real code lengths without consider start and stop tokens that was not added yet + audio_codes = self.set_mel_padding( + audio_codes, code_lengths - 3 + ) # -3 to get the real code lengths without consider start and stop tokens that was not added yet # Build input and target tensors # Prepend start token to inputs and append stop token to targets diff --git a/TTS/tts/layers/xtts/tokenizer.py b/TTS/tts/layers/xtts/tokenizer.py index 1ef655a3..52848743 100644 --- a/TTS/tts/layers/xtts/tokenizer.py +++ b/TTS/tts/layers/xtts/tokenizer.py @@ -1,23 +1,22 @@ import os import re -import torch -import pypinyin import textwrap - from functools import cached_property + +import pypinyin +import torch from hangul_romanize import Transliter from hangul_romanize.rule import academic from num2words import num2words +from spacy.lang.ar import Arabic +from spacy.lang.en import English +from spacy.lang.es import Spanish +from spacy.lang.ja import Japanese +from spacy.lang.zh import Chinese from tokenizers import Tokenizer from TTS.tts.layers.xtts.zh_num2words import TextNorm as zh_num2words -from spacy.lang.en import English -from spacy.lang.zh import Chinese -from spacy.lang.ja import Japanese -from spacy.lang.ar import Arabic -from spacy.lang.es import Spanish - def get_spacy_lang(lang): if lang == "zh": @@ -32,6 +31,7 @@ def get_spacy_lang(lang): # For most languages, Enlish does the job return English() + def split_sentence(text, lang, text_split_length=250): """Preprocess the input text""" text_splits = [] @@ -67,6 +67,7 @@ def split_sentence(text, lang, text_split_length=250): return text_splits + _whitespace_re = re.compile(r"\s+") # List of (regular expression, replacement) pairs for abbreviations: 
@@ -619,7 +620,7 @@ class VoiceBpeTokenizer: return cutlet.Cutlet() def check_input_length(self, txt, lang): - lang = lang.split("-")[0] # remove the region + lang = lang.split("-")[0] # remove the region limit = self.char_limits.get(lang, 250) if len(txt) > limit: print( @@ -640,7 +641,7 @@ class VoiceBpeTokenizer: return txt def encode(self, txt, lang): - lang = lang.split("-")[0] # remove the region + lang = lang.split("-")[0] # remove the region self.check_input_length(txt, lang) txt = self.preprocess_text(txt, lang) lang = "zh-cn" if lang == "zh" else lang diff --git a/TTS/tts/models/xtts.py b/TTS/tts/models/xtts.py index 3583591f..208ec4d5 100644 --- a/TTS/tts/models/xtts.py +++ b/TTS/tts/models/xtts.py @@ -513,13 +513,13 @@ class Xtts(BaseTTS): enable_text_splitting=False, **hf_generate_kwargs, ): - language = language.split("-")[0] # remove the country code + language = language.split("-")[0] # remove the country code length_scale = 1.0 / max(speed, 0.05) if enable_text_splitting: text = split_sentence(text, language, self.tokenizer.char_limits[language]) else: text = [text] - + wavs = [] gpt_latents_list = [] for sent in text: @@ -563,9 +563,7 @@ class Xtts(BaseTTS): if length_scale != 1.0: gpt_latents = F.interpolate( - gpt_latents.transpose(1, 2), - scale_factor=length_scale, - mode="linear" + gpt_latents.transpose(1, 2), scale_factor=length_scale, mode="linear" ).transpose(1, 2) gpt_latents_list.append(gpt_latents.cpu()) @@ -623,7 +621,7 @@ class Xtts(BaseTTS): enable_text_splitting=False, **hf_generate_kwargs, ): - language = language.split("-")[0] # remove the country code + language = language.split("-")[0] # remove the country code length_scale = 1.0 / max(speed, 0.05) if enable_text_splitting: text = split_sentence(text, language, self.tokenizer.char_limits[language]) @@ -675,9 +673,7 @@ class Xtts(BaseTTS): gpt_latents = torch.cat(all_latents, dim=0)[None, :] if length_scale != 1.0: gpt_latents = F.interpolate( - gpt_latents.transpose(1, 2), - scale_factor=length_scale, - mode="linear" + gpt_latents.transpose(1, 2), scale_factor=length_scale, mode="linear" ).transpose(1, 2) wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) wav_chunk, wav_gen_prev, wav_overlap = self.handle_chunks( diff --git a/tests/zoo_tests/test_models.py b/tests/zoo_tests/test_models.py index a5aad5c1..8fa56e28 100644 --- a/tests/zoo_tests/test_models.py +++ b/tests/zoo_tests/test_models.py @@ -186,7 +186,7 @@ def test_xtts_v2_streaming(): "en", gpt_cond_latent, speaker_embedding, - speed=1.5 + speed=1.5, ) wav_chuncks = [] for i, chunk in enumerate(chunks): @@ -198,7 +198,7 @@ def test_xtts_v2_streaming(): "en", gpt_cond_latent, speaker_embedding, - speed=0.66 + speed=0.66, ) wav_chuncks = [] for i, chunk in enumerate(chunks): From 6075fa208c4f508bd9b629d13b99800724899502 Mon Sep 17 00:00:00 2001 From: Edresson Casanova Date: Fri, 17 Nov 2023 11:13:46 -0300 Subject: [PATCH 65/67] Ensures that only GPT model is in training mode during XTTS GPT training (#3241) * Ensures that only GPT model is in training mode during training * Fix parallel wavegan unit test --- TTS/tts/layers/xtts/trainer/gpt_trainer.py | 7 ++++--- TTS/vocoder/configs/parallel_wavegan_config.py | 1 + requirements.txt | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/TTS/tts/layers/xtts/trainer/gpt_trainer.py b/TTS/tts/layers/xtts/trainer/gpt_trainer.py index 005b30be..4789e1f4 100644 --- a/TTS/tts/layers/xtts/trainer/gpt_trainer.py +++ b/TTS/tts/layers/xtts/trainer/gpt_trainer.py @@ 
-318,9 +318,10 @@ class GPTTrainer(BaseTTS): batch["cond_idxs"] = None return self.train_step(batch, criterion) - def on_epoch_start(self, trainer): # pylint: disable=W0613 - # guarante that dvae will be in eval mode after .train() on evaluation end - self.dvae = self.dvae.eval() + def on_train_epoch_start(self, trainer): + trainer.model.eval() # the whole model to eval + # put gpt model in training mode + trainer.model.xtts.gpt.train() def on_init_end(self, trainer): # pylint: disable=W0613 # ignore similarities.pth on clearml save/upload diff --git a/TTS/vocoder/configs/parallel_wavegan_config.py b/TTS/vocoder/configs/parallel_wavegan_config.py index 7845dd6b..6059d7f0 100644 --- a/TTS/vocoder/configs/parallel_wavegan_config.py +++ b/TTS/vocoder/configs/parallel_wavegan_config.py @@ -94,6 +94,7 @@ class ParallelWaveganConfig(BaseGANVocoderConfig): use_noise_augment: bool = False use_cache: bool = True steps_to_start_discriminator: int = 200000 + target_loss: str = "loss_1" # LOSS PARAMETERS - overrides use_stft_loss: bool = True diff --git a/requirements.txt b/requirements.txt index ce0e5d92..1f7a44f6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,7 +27,7 @@ pandas>=1.4,<2.0 # deps for training matplotlib>=3.7.0 # coqui stack -trainer +trainer>=0.0.32 # config management coqpit>=0.0.16 # chinese g2p deps From 52cb1e2f680a982bcc825e89690cf5d736b8044a Mon Sep 17 00:00:00 2001 From: Eren G??lge Date: Fri, 17 Nov 2023 15:16:08 +0100 Subject: [PATCH 66/67] Update model hash for v2.0.2 --- TTS/.models.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TTS/.models.json b/TTS/.models.json index 13da715b..5f4008fb 100644 --- a/TTS/.models.json +++ b/TTS/.models.json @@ -3,14 +3,14 @@ "multilingual": { "multi-dataset": { "xtts_v2": { - "description": "XTTS-v2 by Coqui with 16 languages.", + "description": "XTTS-v2.0.2 by Coqui with 16 languages.", "hf_url": [ "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/model.pth", "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/config.json", "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/vocab.json", "https://coqui.gateway.scarf.sh/hf-coqui/XTTS-v2/main/hash.md5" ], - "model_hash": "6a09d1ad43896f06041ed8195956c9698f13b6189dc80f1c74bdc2b8e8d15324", + "model_hash": "5ce0502bfe3bc88dc8d9312b12a7558c", "default_vocoder": null, "commit": "480a6cdf7", "license": "CPML", From c011ab7455875f42b795d7fef61e2ddb1bad2910 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 16 Nov 2023 17:52:13 +0100 Subject: [PATCH 67/67] Update to v0.20.6 --- TTS/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/VERSION b/TTS/VERSION index 1b619f34..752e6303 100644 --- a/TTS/VERSION +++ b/TTS/VERSION @@ -1 +1 @@ -0.20.5 +0.20.6
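For context on [PATCH 60/67] / [PATCH 65/67] (#3241): the new `on_train_epoch_start` hook first calls `eval()` on the whole model and then switches only the GPT sub-module back to train mode, which is what keeps the DVAE and HiFi-GAN decoder in eval-mode behaviour (dropout disabled, batch-norm statistics frozen) during XTTS GPT fine-tuning. The sketch below reproduces that pattern with toy stand-in modules; `ToyXtts`, `ToyGPTTrainer` and `_Trainer` are illustrative assumptions, not the upstream coqui-ai/TTS classes, and only the attribute path `trainer.model.xtts.gpt` mirrors the diff above.

import torch.nn as nn

# Minimal sketch of the hook added in gpt_trainer.py (PATCH 60/67 and 65/67, #3241).
# The toy classes below are stand-ins, not the real XTTS/GPTTrainer implementations.

class ToyXtts(nn.Module):
    def __init__(self):
        super().__init__()
        self.gpt = nn.Sequential(nn.Linear(16, 16), nn.Dropout(0.1))  # stands in for the GPT
        self.hifigan_decoder = nn.Conv1d(16, 1, kernel_size=3)        # stands in for the vocoder


class ToyGPTTrainer(nn.Module):
    def __init__(self):
        super().__init__()
        self.xtts = ToyXtts()
        self.dvae = nn.Linear(16, 16)                                 # stands in for the DVAE

    def on_train_epoch_start(self, trainer):
        trainer.model.eval()            # put the whole model in eval mode first ...
        trainer.model.xtts.gpt.train()  # ... then re-enable train mode only for the GPT


class _Trainer:                         # tiny stand-in for the Trainer object passed to the hook
    def __init__(self, model):
        self.model = model


model = ToyGPTTrainer()
model.on_train_epoch_start(_Trainer(model))
print(model.xtts.gpt.training, model.dvae.training, model.xtts.hifigan_decoder.training)
# expected output: True False False

Running the sketch prints `True False False`, i.e. after the hook only the GPT component reports `training=True`, which is the behaviour the patch is asserting for XTTS GPT training.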