From 8ada870a5776daf71bc71f7b3296ac1943912ec1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:16:34 +0000 Subject: [PATCH 01/64] Refactor `trainer.py` for v2 --- TTS/trainer.py | 539 ++++++++---------- .../datasets/{TTSDataset.py => dataset.py} | 0 TTS/tts/models/base_tts.py | 36 +- TTS/utils/trainer_utils.py | 68 +++ TTS/vocoder/models/base_vocoder.py | 36 +- 5 files changed, 388 insertions(+), 291 deletions(-) rename TTS/tts/datasets/{TTSDataset.py => dataset.py} (100%) diff --git a/TTS/trainer.py b/TTS/trainer.py index 8589ae5c..d75b8e14 100644 --- a/TTS/trainer.py +++ b/TTS/trainer.py @@ -4,16 +4,14 @@ import importlib import multiprocessing import os import platform -import re import sys import time import traceback from argparse import Namespace from dataclasses import dataclass, field -from typing import Dict, List, Tuple, Union -from urllib.parse import urlparse +from inspect import signature +from typing import Callable, Dict, List, Tuple, Union -import fsspec import torch import torch.distributed as dist from coqpit import Coqpit @@ -21,11 +19,7 @@ from torch import nn from torch.nn.parallel import DistributedDataParallel as DDP_th from torch.utils.data import DataLoader -from TTS.config import load_config, register_config -from TTS.tts.datasets import load_meta_data -from TTS.tts.models import setup_model as setup_tts_model -from TTS.tts.utils.text.symbols import parse_symbols -from TTS.utils.audio import AudioProcessor +from TTS.stt.datasets.tokenizer import Tokenizer from TTS.utils.callbacks import TrainerCallback from TTS.utils.distribute import init_distributed from TTS.utils.generic_utils import ( @@ -39,9 +33,13 @@ from TTS.utils.generic_utils import ( ) from TTS.utils.io import copy_model_files, load_fsspec, save_best_model, save_checkpoint from TTS.utils.logging import ConsoleLogger, TensorboardLogger, WandbLogger, init_dashboard_logger -from TTS.utils.trainer_utils import get_optimizer, get_scheduler, is_apex_available, setup_torch_training_env -from TTS.vocoder.datasets.preprocess import load_wav_data, load_wav_feat_data -from TTS.vocoder.models import setup_model as setup_vocoder_model +from TTS.utils.trainer_utils import ( + get_last_checkpoint, + get_optimizer, + get_scheduler, + is_apex_available, + setup_torch_training_env, +) multiprocessing.set_start_method("fork") @@ -80,6 +78,9 @@ class TrainingArgs(Coqpit): "help": "Best model file to be used for extracting the best loss. If not specified, the latest best model in continue path is used" }, ) + skip_train_epoch: bool = field( + default=False, metadata={"help": "Run only evaluation iteration. Useful for debugging."} + ) config_path: str = field(default="", metadata={"help": "Path to the configuration file."}) rank: int = field(default=0, metadata={"help": "Process rank in distributed training."}) group_id: str = field(default="", metadata={"help": "Process group id in distributed training."}) @@ -98,7 +99,14 @@ class Trainer: c_logger: ConsoleLogger = None, dashboard_logger: Union[TensorboardLogger, WandbLogger] = None, model: nn.Module = None, + get_model: Callable = None, + get_data_samples: Callable = None, + train_samples: List = None, + eval_samples: List = None, + tokenizer: Tokenizer = None, cudnn_benchmark: bool = False, + training_assets: Dict = {}, + parse_command_line_args: bool = True, ) -> None: """Simple yet powerful 🐸💬 TTS trainer for PyTorch. It can train all the available `tts` and `vocoder` models or easily be customized. 
@@ -127,24 +135,44 @@ class Trainer:
             model (nn.Module, optional):
                 Initialized and ready-to-train model. If it is not defined, `Trainer` initializes a model
                 from the provided config. Defaults to None.

+            get_model (Callable):
+                A function that returns a model. It is used to initialize the model when `model` is not
+                provided. It either takes the config as the only argument or takes no arguments.
+                Defaults to None.
+
+            get_data_samples (Callable):
+                A function that returns a list of training and evaluation samples. Used if `train_samples` and
+                `eval_samples` are None. Defaults to None.
+
+            train_samples (List):
+                A list of training samples used by the model's `get_data_loader` to init the `dataset` and the
+                `data_loader`. Defaults to None.
+
+            eval_samples (List):
+                A list of evaluation samples used by the model's `get_data_loader` to init the `dataset` and the
+                `data_loader`. Defaults to None.
+
             cudnn_benchmark (bool): enable/disable PyTorch cudnn benchmarking. It is better to disable it
                 if the model input length varies from batch to batch during training.

+            training_assets (Dict):
+                A dictionary of assets used at training time and passed to the model's ```train_log(), eval_log(), get_data_loader()```
+                during training. It can include an `AudioProcessor` and/or a `Tokenizer`. Defaults to {}.
+
+            parse_command_line_args (bool):
+                If True, parse command-line arguments and update `TrainingArgs` and model `config` values. Set it
+                to False if you parse the arguments yourself. Defaults to True.
+
         Examples:

-            Running trainer on a model.
+            Running trainer with a HifiGAN model.

             >>> args = TrainingArgs(...)
             >>> config = HifiganConfig(...)
             >>> model = GANModel(config)
-            >>> trainer = Trainer(args, config, output_path, model=model)
-            >>> trainer.fit()
-
-            Running trainer on a config.
-
-            >>> config = WavegradConfig(data_path="/home/erogol/nvme/gdrive/Datasets/LJSpeech-1.1/wavs/", output_path=output_path,)
-            >>> args, config, output_path, _, c_logger, dashboard_logger = init_training(TrainingArgs(), config)
-            >>> trainer = Trainer(args, config, output_path, c_logger, dashboard_logger)
+            >>> ap = AudioProcessor(**config.audio)
+            >>> assets = {"audio_processor": ap}
+            >>> trainer = Trainer(args, config, output_path, model=model, training_assets=assets)
             >>> trainer.fit()
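
+            Running trainer with model and data factories instead of ready-made objects (a sketch;
+            ``MyModel`` and ``load_samples`` are placeholders for your own model class and loading
+            code, not part of this codebase):
+
+            >>> def get_model(config):
+            ...     return MyModel(config)
+            >>> def get_data_samples():
+            ...     return load_samples("train"), load_samples("eval")
+            >>> trainer = Trainer(args, config, output_path, get_model=get_model, get_data_samples=get_data_samples)
+            >>> trainer.fit()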
""" + if parse_command_line_args: + # parse command-line arguments for TrainingArgs() + args, coqpit_overrides = self.parse_argv(args) - if config is None: - # parse config from console arguments - config, output_path, _, c_logger, dashboard_logger = process_args(args) + # get ready for training and parse command-line arguments for the model config + config = self.init_training(args, coqpit_overrides, config) + # define the experiment path and create the folder + output_path = get_experiment_folder_path(config.output_path, config.run_name) + os.makedirs(output_path, exist_ok=True) + + # copy training assets to the output folder + copy_model_files(config, output_path, new_fields=None) + + # init class members self.args = args self.config = config self.output_path = output_path self.config.output_log_path = output_path + self.training_assets = training_assets # setup logging log_file = os.path.join(self.output_path, f"trainer_{args.rank}_log.txt") self._setup_logger_config(log_file) + time.sleep(1.0) # wait for the logger to be ready # set and initialize Pytorch runtime self.use_cuda, self.num_gpus = setup_torch_training_env(True, cudnn_benchmark, args.use_ddp) @@ -196,33 +237,30 @@ class Trainer: self.use_apex = self._is_apex_available() self.use_amp_scaler = self.config.mixed_precision and self.use_cuda - # init audio processor - self.ap = AudioProcessor(**self.config.audio.to_dict()) + # init tokenizer + self.tokenizer = tokenizer # load data samples - # TODO: refactor this - if "datasets" in self.config: - # load data for `tts` models - self.data_train, self.data_eval = load_meta_data(self.config.datasets) - elif self.config.feature_path is not None: - # load pre-comnputed features for `vocoder`models - print(f" > Loading features from: {self.config.feature_path}") - self.data_eval, self.data_train = load_wav_feat_data( - self.config.data_path, self.config.feature_path, self.config.eval_split_size - ) + if train_samples is None and get_data_samples is None: + raise ValueError("[!] `train_samples` and `get_data_samples` cannot both be None.") + if train_samples is not None: + self.train_samples = train_samples + self.eval_samples = eval_samples else: - # load data for `vocoder`models - self.data_eval, self.data_train = load_wav_data(self.config.data_path, self.config.eval_split_size) + self.train_samples, self.eval_samples = self.run_get_data_samples(config, get_data_samples) # init TTS model + if model is None and get_model is None: + raise ValueError("[!] `model` and `get_model` cannot both be None.") if model is not None: self.model = model else: - self.model = self.get_model(self.config) + self.run_get_model(self.config, get_model) + # TODO: out! 
        # init multispeaker settings of the model
        if hasattr(self.model, "init_multispeaker"):
-            self.model.init_multispeaker(self.config, self.data_train + self.data_eval)
+            self.model.init_multispeaker(self.config, self.train_samples + self.eval_samples)

        # setup criterion
        self.criterion = self.get_criterion(self.model)
@@ -247,7 +285,7 @@ class Trainer:
         # setup optimizer
         self.optimizer = self.get_optimizer(self.model, self.config)

-        # callback
+        # CALLBACK
         self.callbacks = TrainerCallback(self)
         self.callbacks.on_init_start()
@@ -280,7 +318,7 @@ class Trainer:
             else:
                 self.scheduler.last_epoch = self.restore_step

-        # DISTRUBUTED
+        # DISTRIBUTED
         if self.num_gpus > 1:
             self.model = DDP_th(self.model, device_ids=[args.rank], output_device=args.rank)
@@ -291,8 +329,54 @@ class Trainer:
         self.callbacks.on_init_end()

     @staticmethod
-    def get_model(config: Coqpit) -> nn.Module:
-        """Initialize model from config.
+    def parse_argv(args: Union[Coqpit, List]):
+        """Parse command line arguments to init or override `TrainingArgs()`."""
+        if isinstance(args, Coqpit):
+            parser = args.init_argparse(arg_prefix="")
+        else:
+            train_config = TrainingArgs()
+            parser = train_config.init_argparse(arg_prefix="")
+        training_args, coqpit_overrides = parser.parse_known_args()
+        args.parse_args(training_args)
+        return args, coqpit_overrides
+
+    def init_training(self, args: TrainingArgs, coqpit_overrides: Dict, config: Coqpit = None):
+        """Initialize training and update model configs from command line arguments.
+
+        Args:
+            args (argparse.Namespace or dict like): Parsed input arguments.
+            coqpit_overrides (argparse.Namespace or dict like): Parsed config overriding arguments.
+            config (Coqpit): Model config. If none, it is generated from `args`. Defaults to None.
+
+        Returns:
+            config (Coqpit): Model config with the command-line overrides applied.
+        """
+        # set arguments for continuing training
+        if args.continue_path:
+            experiment_path = args.continue_path
+            args.config_path = os.path.join(args.continue_path, "config.json")
+            args.restore_path, best_model = get_last_checkpoint(args.continue_path)
+            if not args.best_path:
+                args.best_path = best_model
+
+        # override config values from command-line args
+        # TODO: Maybe it is better to do it outside
+        if len(coqpit_overrides) > 0:
+            config.parse_known_args(coqpit_overrides, relaxed_parser=True)
+        experiment_path = args.continue_path
+
+        # update the config.json fields and copy it to the output folder
+        if args.rank == 0:
+            new_fields = {}
+            if args.restore_path:
+                new_fields["restore_path"] = args.restore_path
+            new_fields["github_branch"] = get_git_branch()
+            copy_model_files(config, experiment_path, new_fields)
+        return config
+
+    @staticmethod
+    def run_get_model(config: Coqpit, get_model: Callable) -> nn.Module:
+        """Run the `get_model` function and return the model.

         Args:
             config (Coqpit): Model config.

         Returns:
             nn.Module: initialized model.
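+
+        Example:
+            A sketch of the arity-based dispatch (``MyModel`` is a placeholder class, not part of
+            this patch; a factory taking one argument receives the config, a zero-argument factory
+            is called as-is):
+
+            >>> def get_model(config):
+            ...     return MyModel(config)
+            >>> model = Trainer.run_get_model(config, get_model)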
""" - try: - model = setup_vocoder_model(config) - except ModuleNotFoundError: - model = setup_tts_model(config) + if len(signature(get_model).sig.parameters) == 1: + model = get_model(config) + else: + model = get_model() return model + @staticmethod + def run_get_data_samples(config: Coqpit, get_data_samples: Callable) -> nn.Module: + if isinstance(get_data_samples, Callable): + if len(signature(get_data_samples).sig.parameters) == 1: + train_samples, eval_samples = get_data_samples(config) + else: + train_samples, eval_samples = get_data_samples() + return train_samples, eval_samples + else: + return None, None + def restore_model( self, config: Coqpit, @@ -366,11 +461,15 @@ class Trainer: torch.cuda.empty_cache() return model, optimizer, scaler, restore_step + ######################### + # DATA LOADING FUNCTIONS + ######################### + def _get_loader( self, model: nn.Module, config: Coqpit, - ap: AudioProcessor, + assets: Dict, is_eval: bool, data_items: List, verbose: bool, @@ -379,14 +478,14 @@ class Trainer: if num_gpus > 1: if hasattr(model.module, "get_data_loader"): loader = model.module.get_data_loader( - config, ap, is_eval, data_items, verbose, num_gpus, self.args.rank + config, assets, is_eval, data_items, verbose, num_gpus, self.args.rank ) else: if hasattr(model, "get_data_loader"): - loader = model.get_data_loader(config, ap, is_eval, data_items, verbose, num_gpus) + loader = model.get_data_loader(config, assets, is_eval, data_items, verbose, num_gpus) return loader - def get_train_dataloader(self, ap: AudioProcessor, data_items: List, verbose: bool) -> DataLoader: + def get_train_dataloader(self, training_assets: Dict, data_items: List, verbose: bool) -> DataLoader: """Initialize and return a training data loader. Args: @@ -397,10 +496,10 @@ class Trainer: Returns: DataLoader: Initialized training data loader. """ - return self._get_loader(self.model, self.config, ap, False, data_items, verbose, self.num_gpus) + return self._get_loader(self.model, self.config, training_assets, False, data_items, verbose, self.num_gpus) - def get_eval_dataloader(self, ap: AudioProcessor, data_items: List, verbose: bool) -> DataLoader: - return self._get_loader(self.model, self.config, ap, True, data_items, verbose, self.num_gpus) + def get_eval_dataloader(self, training_assets: Dict, data_items: List, verbose: bool) -> DataLoader: + return self._get_loader(self.model, self.config, training_assets, True, data_items, verbose, self.num_gpus) def format_batch(self, batch: List) -> Dict: """Format the dataloader output and return a batch. @@ -420,6 +519,10 @@ class Trainer: batch[k] = to_cuda(v) return batch + ###################### + # TRAIN FUNCTIONS + ###################### + @staticmethod def master_params(optimizer: torch.optim.Optimizer): """Generator over parameters owned by the optimizer. @@ -567,24 +670,6 @@ class Trainer: loss_dict["grad_norm"] = grad_norm return outputs, loss_dict, step_time - @staticmethod - def _detach_loss_dict(loss_dict: Dict) -> Dict: - """Detach loss values from autograp. - - Args: - loss_dict (Dict): losses. - - Returns: - Dict: losses detached from autograph. 
- """ - loss_dict_detached = {} - for key, value in loss_dict.items(): - if isinstance(value, (int, float)): - loss_dict_detached[key] = value - else: - loss_dict_detached[key] = value.item() - return loss_dict_detached - def train_step(self, batch: Dict, batch_n_steps: int, step: int, loader_start_time: float) -> Tuple[Dict, Dict]: """Perform a training step on a batch of inputs and log the process. @@ -700,15 +785,14 @@ class Trainer: self.dashboard_logger.log_artifact(self.output_path, "checkpoint", "model", aliases) # training visualizations - figures, audios = None, None if hasattr(self.model, "module") and hasattr(self.model.module, "train_log"): - figures, audios = self.model.module.train_log(self.ap, batch, outputs) + self.model.module.train_log( + batch, outputs, self.dashboard_logger, self.training_assets, self.total_steps_done + ) elif hasattr(self.model, "train_log"): - figures, audios = self.model.train_log(self.ap, batch, outputs) - if figures is not None: - self.dashboard_logger.train_figures(self.total_steps_done, figures) - if audios is not None: - self.dashboard_logger.train_audios(self.total_steps_done, audios, self.ap.sample_rate) + self.model.train_log( + batch, outputs, self.dashboard_logger, self.training_assets, self.total_steps_done + ) self.dashboard_logger.flush() @@ -718,11 +802,13 @@ class Trainer: def train_epoch(self) -> None: """Main entry point for the training loop. Run training on the all training samples.""" + # initialize the data loader self.train_loader = self.get_train_dataloader( - self.ap, - self.data_train, + self.training_assets, + self.train_samples, verbose=True, ) + # set model to training mode if self.num_gpus > 1: self.model.module.train() else: @@ -734,11 +820,12 @@ class Trainer: batch_num_steps = int(len(self.train_loader.dataset) / self.config.batch_size) self.c_logger.print_train_start() loader_start_time = time.time() + # iterate over the training samples for cur_step, batch in enumerate(self.train_loader): _, _ = self.train_step(batch, batch_num_steps, cur_step, loader_start_time) loader_start_time = time.time() epoch_time = time.time() - epoch_start_time - # Plot self.epochs_done Stats + # plot self.epochs_done Stats if self.args.rank == 0: epoch_stats = {"epoch_time": epoch_time} epoch_stats.update(self.keep_avg_train.avg_values) @@ -754,6 +841,10 @@ class Trainer: else: self.scheduler.step() + ####################### + # EVAL FUNCTIONS + ####################### + @staticmethod def _model_eval_step( batch: Dict, model: nn.Module, criterion: nn.Module, optimizer_idx: int = None @@ -803,7 +894,7 @@ class Trainer: loss_dict_new[f"loss_{idx}"] = loss_dict_new.pop("loss") loss_dict.update(loss_dict_new) - loss_dict = self._detach_loss_dict(loss_dict) + loss_dict = self._detach_loss_dict(loss_dict) # update avg stats update_eval_values = {} @@ -819,8 +910,8 @@ class Trainer: """Main entry point for the evaluation loop. 

        Run evaluation on all the validation samples."""
        self.eval_loader = (
            self.get_eval_dataloader(
-                self.ap,
-                self.data_eval,
+                self.training_assets,
+                self.eval_samples,
                 verbose=True,
             )
             if self.config.run_eval
@@ -840,15 +931,12 @@ class Trainer:
             loader_start_time = time.time()
         # plot epoch stats, artifacts and figures
         if self.args.rank == 0:
-            figures, audios = None, None
             if hasattr(self.model, "module") and hasattr(self.model.module, "eval_log"):
-                figures, audios = self.model.module.eval_log(self.ap, batch, outputs)
+                self.model.module.eval_log(
+                    batch, outputs, self.dashboard_logger, self.training_assets, self.total_steps_done
+                )
             elif hasattr(self.model, "eval_log"):
-                figures, audios = self.model.eval_log(self.ap, batch, outputs)
-            if figures is not None:
-                self.dashboard_logger.eval_figures(self.total_steps_done, figures)
-            if audios is not None:
-                self.dashboard_logger.eval_audios(self.total_steps_done, audios, self.ap.sample_rate)
+                self.model.eval_log(batch, outputs, self.dashboard_logger, self.training_assets, self.total_steps_done)
             self.dashboard_logger.eval_stats(self.total_steps_done, self.keep_avg_eval.avg_values)

     def test_run(self) -> None:

         if hasattr(self.model, "test_run") or (self.num_gpus > 1 and hasattr(self.model.module, "test_run")):
             if self.eval_loader is None:
                 self.eval_loader = self.get_eval_dataloader(
-                    self.ap,
-                    self.data_eval,
+                    self.training_assets,
+                    self.eval_samples,
                     verbose=True,
                 )

             if hasattr(self.eval_loader.dataset, "load_test_samples"):
                 samples = self.eval_loader.dataset.load_test_samples(1)
                 if self.num_gpus > 1:
-                    figures, audios = self.model.module.test_run(self.ap, samples, None)
+                    figures, audios = self.model.module.test_run(self.training_assets, samples, None)
                 else:
-                    figures, audios = self.model.test_run(self.ap, samples, None)
+                    figures, audios = self.model.test_run(self.training_assets, samples, None)
             else:
                 if self.num_gpus > 1:
-                    figures, audios = self.model.module.test_run(self.ap)
+                    figures, audios = self.model.module.test_run(self.training_assets)
                 else:
-                    figures, audios = self.model.test_run(self.ap)
+                    figures, audios = self.model.test_run(self.training_assets)
             self.dashboard_logger.test_audios(self.total_steps_done, audios, self.config.audio["sample_rate"])
             self.dashboard_logger.test_figures(self.total_steps_done, figures)
@@ -886,6 +974,10 @@ class Trainer:
             self.best_loss = ch["model_loss"]
             print(f" > Starting with loaded last best loss {self.best_loss}.")

+    ###################################
+    # FIT FUNCTIONS
+    ###################################
+
     def _fit(self) -> None:
         """🏃 train -> evaluate -> test for the number of epochs."""
         self._restore_best_loss()
@@ -901,7 +993,8 @@ class Trainer:
             self.keep_avg_eval = KeepAverage() if self.config.run_eval else None
             self.epochs_done = epoch
             self.c_logger.print_epoch_start(epoch, self.config.epochs, self.output_path)
-            self.train_epoch()
+            if not self.args.skip_train_epoch:
+                self.train_epoch()
             if self.config.run_eval:
                 self.eval_epoch()
             if epoch >= self.config.test_delay_epochs and self.args.rank <= 0:
@@ -939,24 +1032,6 @@ class Trainer:
             traceback.print_exc()
             sys.exit(1)

-    def _pick_target_avg_loss(self, keep_avg_target: KeepAverage) -> Dict:
-        """Pick the target loss to compare models"""
-        target_avg_loss = None
-
-        # return if target loss defined in the model config
-        if "target_loss" in self.config and self.config.target_loss:
-            return keep_avg_target[f"avg_{self.config.target_loss}"]
-
-        # take the average of loss_{optimizer_idx} as the target loss when there are multiple optimizers
-        if isinstance(self.optimizer, list):
-            target_avg_loss = 0
-            for idx in range(len(self.optimizer)):
-                target_avg_loss += keep_avg_target[f"avg_loss_{idx}"]
-            target_avg_loss /= len(self.optimizer)
-        else:
-            target_avg_loss = keep_avg_target["avg_loss"]
-        return target_avg_loss
-
     def save_best_model(self) -> None:
         """Save the best model. It only saves if the current target loss is smaller than the previous."""

                keep_after=self.config.keep_after,
            )

-    def _setup_logger_config(self, log_file: str) -> None:
-        """Write log strings to a file and print logs to the terminal.
-        TODO: Causes formatting issues in pdb debugging."""
-
-        class Logger(object):
-            def __init__(self, print_to_terminal=True):
-                self.print_to_terminal = print_to_terminal
-                self.terminal = sys.stdout
-                self.log_file = log_file
-
-            def write(self, message):
-                if self.print_to_terminal:
-                    self.terminal.write(message)
-                with open(self.log_file, "a", encoding="utf-8") as f:
-                    f.write(message)
-
-            def flush(self):
-                # this flush method is needed for python 3 compatibility.
-                # this handles the flush command by doing nothing.
-                # you might want to specify some extra behavior here.
-                pass
-
-        # don't let processes rank > 0 write to the terminal
-        sys.stdout = Logger(self.args.rank == 0)
-
-    @staticmethod
-    def _is_apex_available() -> bool:
-        """Check if Nvidia's APEX is available."""
-        return importlib.util.find_spec("apex") is not None
+    #####################
+    # GET FUNCTIONS
+    #####################

     @staticmethod
     def get_optimizer(model: nn.Module, config: Coqpit) -> Union[torch.optim.Optimizer, List]:
@@ -1084,154 +1133,72 @@ class Trainer:
             criterion = model.get_criterion()
         return criterion

+    ####################
+    # HELPER FUNCTIONS
+    ####################

-def getarguments():
-    train_config = TrainingArgs()
-    parser = train_config.init_argparse(arg_prefix="")
-    return parser
+    @staticmethod
+    def _detach_loss_dict(loss_dict: Dict) -> Dict:
+        """Detach loss values from autograd.

+        Args:
+            loss_dict (Dict): losses.

-def get_last_checkpoint(path: str) -> Tuple[str, str]:
-    """Get latest checkpoint or/and best model in path.
+        Returns:
+            Dict: losses detached from autograd.
+        """
+        loss_dict_detached = {}
+        for key, value in loss_dict.items():
+            if isinstance(value, (int, float)):
+                loss_dict_detached[key] = value
+            else:
+                loss_dict_detached[key] = value.detach()
+        return loss_dict_detached

-    It is based on globbing for `*.pth.tar` and the RegEx
-    `(checkpoint|best_model)_([0-9]+)`.
+    def _pick_target_avg_loss(self, keep_avg_target: KeepAverage) -> Dict:
+        """Pick the target loss to compare models"""
+        target_avg_loss = None

-    Args:
-        path: Path to files to be compared.
+        # return if target loss defined in the model config
+        if "target_loss" in self.config and self.config.target_loss:
+            return keep_avg_target[f"avg_{self.config.target_loss}"]

-    Raises:
-        ValueError: If no checkpoint or best_model files are found.
-
-    Returns:
-        Path to the last checkpoint
-        Path to best checkpoint
-    """
-    fs = fsspec.get_mapper(path).fs
-    file_names = fs.glob(os.path.join(path, "*.pth.tar"))
-    scheme = urlparse(path).scheme
-    if scheme:  # scheme is not preserved in fs.glob, add it back
-        file_names = [scheme + "://" + file_name for file_name in file_names]
-    last_models = {}
-    last_model_nums = {}
-    for key in ["checkpoint", "best_model"]:
-        last_model_num = None
-        last_model = None
-        # pass all the checkpoint files and find
-        # the one with the largest model number suffix.
- for file_name in file_names: - match = re.search(f"{key}_([0-9]+)", file_name) - if match is not None: - model_num = int(match.groups()[0]) - if last_model_num is None or model_num > last_model_num: - last_model_num = model_num - last_model = file_name - - # if there is no checkpoint found above - # find the checkpoint with the latest - # modification date. - key_file_names = [fn for fn in file_names if key in fn] - if last_model is None and len(key_file_names) > 0: - last_model = max(key_file_names, key=os.path.getctime) - last_model_num = load_fsspec(last_model)["step"] - - if last_model is not None: - last_models[key] = last_model - last_model_nums[key] = last_model_num - - # check what models were found - if not last_models: - raise ValueError(f"No models found in continue path {path}!") - if "checkpoint" not in last_models: # no checkpoint just best model - last_models["checkpoint"] = last_models["best_model"] - elif "best_model" not in last_models: # no best model - # this shouldn't happen, but let's handle it just in case - last_models["best_model"] = last_models["checkpoint"] - # finally check if last best model is more recent than checkpoint - elif last_model_nums["best_model"] > last_model_nums["checkpoint"]: - last_models["checkpoint"] = last_models["best_model"] - - return last_models["checkpoint"], last_models["best_model"] - - -def process_args(args, config=None): - """Process parsed comand line arguments and initialize the config if not provided. - - Args: - args (argparse.Namespace or dict like): Parsed input arguments. - config (Coqpit): Model config. If none, it is generated from `args`. Defaults to None. - - Returns: - c (TTS.utils.io.AttrDict): Config paramaters. - out_path (str): Path to save models and logging. - audio_path (str): Path to save generated test audios. - c_logger (TTS.utils.console_logger.ConsoleLogger): Class that does - logging to the console. - - dashboard_logger (WandbLogger or TensorboardLogger): Class that does the dashboard Logging - - TODO: - - Interactive config definition. 
- """ - if isinstance(args, tuple): - args, coqpit_overrides = args - if args.continue_path: - # continue a previous training from its output folder - experiment_path = args.continue_path - args.config_path = os.path.join(args.continue_path, "config.json") - args.restore_path, best_model = get_last_checkpoint(args.continue_path) - if not args.best_path: - args.best_path = best_model - # init config if not already defined - if config is None: - if args.config_path: - # init from a file - config = load_config(args.config_path) + # take the average of loss_{optimizer_idx} as the target loss when there are multiple optimizers + if isinstance(self.optimizer, list): + target_avg_loss = 0 + for idx in range(len(self.optimizer)): + target_avg_loss += keep_avg_target[f"avg_loss_{idx}"] + target_avg_loss /= len(self.optimizer) else: - # init from console args - from TTS.config.shared_configs import BaseTrainingConfig # pylint: disable=import-outside-toplevel + target_avg_loss = keep_avg_target["avg_loss"] + return target_avg_loss - config_base = BaseTrainingConfig() - config_base.parse_known_args(coqpit_overrides) - config = register_config(config_base.model)() - # override values from command-line args - config.parse_known_args(coqpit_overrides, relaxed_parser=True) - experiment_path = args.continue_path - if not experiment_path: - experiment_path = get_experiment_folder_path(config.output_path, config.run_name) - audio_path = os.path.join(experiment_path, "test_audios") - config.output_log_path = experiment_path - # setup rank 0 process in distributed training - dashboard_logger = None - if args.rank == 0: - new_fields = {} - if args.restore_path: - new_fields["restore_path"] = args.restore_path - new_fields["github_branch"] = get_git_branch() - # if model characters are not set in the config file - # save the default set to the config file for future - # compatibility. - if config.has("characters") and config.characters is None: - used_characters = parse_symbols() - new_fields["characters"] = used_characters - copy_model_files(config, experiment_path, new_fields) - dashboard_logger = init_dashboard_logger(config) - c_logger = ConsoleLogger() - return config, experiment_path, audio_path, c_logger, dashboard_logger + def _setup_logger_config(self, log_file: str) -> None: + """Write log strings to a file and print logs to the terminal. + TODO: Causes formatting issues in pdb debugging.""" + class Logger(object): + def __init__(self, print_to_terminal=True): + self.print_to_terminal = print_to_terminal + self.terminal = sys.stdout + self.log_file = log_file -def init_arguments(): - train_config = TrainingArgs() - parser = train_config.init_argparse(arg_prefix="") - return parser + def write(self, message): + if self.print_to_terminal: + self.terminal.write(message) + with open(self.log_file, "a", encoding="utf-8") as f: + f.write(message) + def flush(self): + # this flush method is needed for python 3 compatibility. + # this handles the flush command by doing nothing. + # you might want to specify some extra behavior here. 
+                pass
+
+        # don't let processes rank > 0 write to the terminal
+        sys.stdout = Logger(self.args.rank == 0)
+
+    @staticmethod
+    def _is_apex_available() -> bool:
+        """Check if Nvidia's APEX is available."""
+        return importlib.util.find_spec("apex") is not None
diff --git a/TTS/tts/datasets/TTSDataset.py b/TTS/tts/datasets/dataset.py
similarity index 100%
rename from TTS/tts/datasets/TTSDataset.py
rename to TTS/tts/datasets/dataset.py
diff --git a/TTS/tts/models/base_tts.py b/TTS/tts/models/base_tts.py
index 06c7cb2b..0c9f60e8 100644
--- a/TTS/tts/models/base_tts.py
+++ b/TTS/tts/models/base_tts.py
@@ -9,7 +9,7 @@ from torch.utils.data import DataLoader
 from torch.utils.data.distributed import DistributedSampler

 from TTS.model import BaseModel
-from TTS.tts.datasets import TTSDataset
+from TTS.tts.datasets.dataset import TTSDataset
 from TTS.tts.utils.speakers import SpeakerManager, get_speaker_manager
 from TTS.tts.utils.synthesis import synthesis
 from TTS.tts.utils.text import make_symbols
@@ -32,6 +32,30 @@ class BaseTTS(BaseModel):
         - 1D tensors `batch x 1`
     """

+    def _set_model_args(self, config: Coqpit):
+        """Setup model args based on the config type.
+
+        If the config is for training with a name like "*Config", then the model args are embedded in
+        `config.model_args`.
+
+        If the config is for the model with a name like "*Args", then we assign it directly.
+        """
+        # don't use isinstance here to avoid a recursive import
+        if "Config" in config.__class__.__name__:
+            if "characters" in config:
+                _, self.config, num_chars = self.get_characters(config)
+                self.config.num_chars = num_chars
+                if hasattr(self.config, "model_args"):
+                    config.model_args.num_chars = num_chars
+                    self.args = self.config.model_args
+            else:
+                self.config = config
+                self.args = config.model_args
+        elif "Args" in config.__class__.__name__:
+            self.args = config
+        else:
+            raise ValueError("config must be either a *Config or *Args")
+
     @staticmethod
     def get_characters(config: Coqpit) -> str:
         # TODO: implement CharacterProcessor
@@ -169,7 +193,7 @@ class BaseTTS(BaseModel):
     def get_data_loader(
         self,
         config: Coqpit,
-        ap: AudioProcessor,
+        assets: Dict,
         is_eval: bool,
         data_items: List,
         verbose: bool,
@@ -179,6 +203,8 @@ class BaseTTS(BaseModel):
         if is_eval and not config.run_eval:
             loader = None
         else:
+            ap = assets["audio_processor"]
+
             # setup multi-speaker attributes
             if hasattr(self, "speaker_manager"):
                 speaker_id_mapping = self.speaker_manager.speaker_ids if config.use_speaker_embedding else None
@@ -280,14 +306,18 @@ class BaseTTS(BaseModel):
             )
         return loader

-    def test_run(self, ap) -> Tuple[Dict, Dict]:
+    def test_run(self, assets: Dict) -> Tuple[Dict, Dict]:
         """Generic test run for `tts` models used by `Trainer`.

         You can override this for a different behaviour.

+        Args:
+            assets (dict): A dict of training assets. For `tts` models, it must include `{'audio_processor': ap}`.
+
         Returns:
             Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.
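+
+        Example:
+            A sketch of the expected call, where ``ap`` stands for an initialized ``AudioProcessor``:
+
+            >>> figures, audios = model.test_run({"audio_processor": ap})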
""" + ap = assets["audio_processor"] print(" | > Synthesizing test sentences.") test_audios = {} test_figures = {} diff --git a/TTS/utils/trainer_utils.py b/TTS/utils/trainer_utils.py index 005114d1..dabb33cd 100644 --- a/TTS/utils/trainer_utils.py +++ b/TTS/utils/trainer_utils.py @@ -1,8 +1,13 @@ import importlib +import os +import re from typing import Dict, List, Tuple +from urllib.parse import urlparse +import fsspec import torch +from TTS.utils.io import load_fsspec from TTS.utils.training import NoamLR @@ -80,3 +85,66 @@ def get_optimizer( if model is not None: parameters = model.parameters() return optimizer(parameters, lr=lr, **optimizer_params) + + +def get_last_checkpoint(path: str) -> Tuple[str, str]: + """Get latest checkpoint or/and best model in path. + + It is based on globbing for `*.pth.tar` and the RegEx + `(checkpoint|best_model)_([0-9]+)`. + + Args: + path: Path to files to be compared. + + Raises: + ValueError: If no checkpoint or best_model files are found. + + Returns: + Path to the last checkpoint + Path to best checkpoint + """ + fs = fsspec.get_mapper(path).fs + file_names = fs.glob(os.path.join(path, "*.pth.tar")) + scheme = urlparse(path).scheme + if scheme: # scheme is not preserved in fs.glob, add it back + file_names = [scheme + "://" + file_name for file_name in file_names] + last_models = {} + last_model_nums = {} + for key in ["checkpoint", "best_model"]: + last_model_num = None + last_model = None + # pass all the checkpoint files and find + # the one with the largest model number suffix. + for file_name in file_names: + match = re.search(f"{key}_([0-9]+)", file_name) + if match is not None: + model_num = int(match.groups()[0]) + if last_model_num is None or model_num > last_model_num: + last_model_num = model_num + last_model = file_name + + # if there is no checkpoint found above + # find the checkpoint with the latest + # modification date. + key_file_names = [fn for fn in file_names if key in fn] + if last_model is None and len(key_file_names) > 0: + last_model = max(key_file_names, key=os.path.getctime) + last_model_num = load_fsspec(last_model)["step"] + + if last_model is not None: + last_models[key] = last_model + last_model_nums[key] = last_model_num + + # check what models were found + if not last_models: + raise ValueError(f"No models found in continue path {path}!") + if "checkpoint" not in last_models: # no checkpoint just best model + last_models["checkpoint"] = last_models["best_model"] + elif "best_model" not in last_models: # no best model + # this shouldn't happen, but let's handle it just in case + last_models["best_model"] = last_models["checkpoint"] + # finally check if last best model is more recent than checkpoint + elif last_model_nums["best_model"] > last_model_nums["checkpoint"]: + last_models["checkpoint"] = last_models["best_model"] + + return last_models["checkpoint"], last_models["best_model"] diff --git a/TTS/vocoder/models/base_vocoder.py b/TTS/vocoder/models/base_vocoder.py index f879cd42..9d6ef26f 100644 --- a/TTS/vocoder/models/base_vocoder.py +++ b/TTS/vocoder/models/base_vocoder.py @@ -1,3 +1,5 @@ +from coqpit import Coqpit + from TTS.model import BaseModel # pylint: skip-file @@ -16,5 +18,35 @@ class BaseVocoder(BaseModel): - 1D tensors `batch x 1` """ - def __init__(self): - super().__init__() + def __init__(self, config): + super().__init__(config) + + def _set_model_args(self, config: Coqpit): + """Setup model args based on the config type. 
+
+        If the config is for training with a name like "*Config", then the model args are embedded in
+        `config.model_args`.
+
+        If the config is for the model with a name like "*Args", then we assign it directly.
+        """
+        # don't use isinstance here to avoid a recursive import
+        if "Config" in config.__class__.__name__:
+            if "characters" in config:
+                _, self.config, num_chars = self.get_characters(config)
+                self.config.num_chars = num_chars
+                if hasattr(self.config, "model_args"):
+                    config.model_args.num_chars = num_chars
+                if "model_args" in config:
+                    self.args = self.config.model_args
+                # This is for backward compatibility
+                if "model_params" in config:
+                    self.args = self.config.model_params
+            else:
+                self.config = config
+                if "model_args" in config:
+                    self.args = self.config.model_args
+                # This is for backward compatibility
+                if "model_params" in config:
+                    self.args = self.config.model_params
+        else:
+            raise ValueError("config must be either a *Config or *Args")

From d9df33f8376c135b9dc21e75f7ceeebd31a36b63 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 14:18:10 +0000
Subject: [PATCH 02/64] Update `align_tts` for trainer_v2

---
 TTS/tts/models/align_tts.py                  | 21 ++++++++++----
 recipes/ljspeech/align_tts/train_aligntts.py | 29 ++++++++++++++++++--
 2 files changed, 41 insertions(+), 9 deletions(-)

diff --git a/TTS/tts/models/align_tts.py b/TTS/tts/models/align_tts.py
index 78fbaeab..3b0a848d 100644
--- a/TTS/tts/models/align_tts.py
+++ b/TTS/tts/models/align_tts.py
@@ -103,7 +103,7 @@ class AlignTTS(BaseTTS):

     def __init__(self, config: Coqpit):

-        super().__init__()
+        super().__init__(config)
         self.config = config
         self.phase = -1
         self.length_scale = (
@@ -360,9 +360,7 @@ class AlignTTS(BaseTTS):

         return outputs, loss_dict

-    def train_log(
-        self, ap: AudioProcessor, batch: dict, outputs: dict
-    ) -> Tuple[Dict, Dict]:  # pylint: disable=no-self-use
+    def _create_logs(self, batch, outputs, ap):
         model_outputs = outputs["model_outputs"]
         alignments = outputs["alignments"]
         mel_input = batch["mel_input"]
@@ -381,11 +379,22 @@ class AlignTTS(BaseTTS):
         train_audio = ap.inv_melspectrogram(pred_spec.T)
         return figures, {"audio": train_audio}

+    def train_log(
+        self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int
+    ) -> None:  # pylint: disable=no-self-use
+        ap = assets["audio_processor"]
+        figures, audios = self._create_logs(batch, outputs, ap)
+        logger.train_figures(steps, figures)
+        logger.train_audios(steps, audios, ap.sample_rate)
+
     def eval_step(self, batch: dict, criterion: nn.Module):
         return self.train_step(batch, criterion)

-    def eval_log(self, ap: AudioProcessor, batch: dict, outputs: dict):
-        return self.train_log(ap, batch, outputs)
+    def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None:
+        ap = assets["audio_processor"]
+        figures, audios = self._create_logs(batch, outputs, ap)
+        logger.eval_figures(steps, figures)
+        logger.eval_audios(steps, audios, ap.sample_rate)

     def load_checkpoint(
         self, config, checkpoint_path, eval=False
diff --git a/recipes/ljspeech/align_tts/train_aligntts.py b/recipes/ljspeech/align_tts/train_aligntts.py
index 4e214f92..76409374 100644
--- a/recipes/ljspeech/align_tts/train_aligntts.py
+++ b/recipes/ljspeech/align_tts/train_aligntts.py
@@ -1,9 +1,14 @@
 import os

-from TTS.trainer import Trainer, TrainingArgs, init_training
+from TTS.trainer import Trainer, TrainingArgs
 from TTS.tts.configs import AlignTTSConfig, BaseDatasetConfig
+from TTS.tts.datasets import load_tts_samples
+from TTS.tts.models.align_tts import AlignTTS +from TTS.utils.audio import AudioProcessor output_path = os.path.dirname(os.path.abspath(__file__)) + +# init configs dataset_config = BaseDatasetConfig( name="ljspeech", meta_file_train="metadata.csv", path=os.path.join(output_path, "../LJSpeech-1.1/") ) @@ -25,6 +30,24 @@ config = AlignTTSConfig( output_path=output_path, datasets=[dataset_config], ) -args, config, output_path, _, c_logger, dashboard_logger = init_training(TrainingArgs(), config) -trainer = Trainer(args, config, output_path, c_logger, dashboard_logger) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True) + +# init model +model = AlignTTS(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) trainer.fit() From a156a40b47229239423fabf0a30dd96e5ec23fb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:19:19 +0000 Subject: [PATCH 03/64] Update ForwardTTS for Trainer_v2 --- TTS/tts/models/forward_tts.py | 37 ++++---- .../ljspeech/fast_pitch/train_fast_pitch.py | 27 +++++- .../ljspeech/fast_speech/train_fast_speech.py | 88 +++++++++++++++++++ .../speedy_speech/train_speedy_speech.py | 50 +++++++---- 4 files changed, 159 insertions(+), 43 deletions(-) create mode 100644 recipes/ljspeech/fast_speech/train_fast_speech.py diff --git a/TTS/tts/models/forward_tts.py b/TTS/tts/models/forward_tts.py index 9dce36fa..6d0497a9 100644 --- a/TTS/tts/models/forward_tts.py +++ b/TTS/tts/models/forward_tts.py @@ -161,24 +161,7 @@ class ForwardTTS(BaseTTS): # pylint: disable=dangerous-default-value def __init__(self, config: Coqpit): - super().__init__() - - # don't use isintance not to import recursively - if "Config" in config.__class__.__name__: - if "characters" in config: - # loading from FasrPitchConfig - _, self.config, num_chars = self.get_characters(config) - config.model_args.num_chars = num_chars - self.args = self.config.model_args - else: - # loading from ForwardTTSArgs - self.config = config - self.args = config.model_args - elif isinstance(config, ForwardTTSArgs): - self.args = config - self.config = config - else: - raise ValueError("config must be either a *Config or ForwardTTSArgs") + super().__init__(config) self.max_duration = self.args.max_duration self.use_aligner = self.args.use_aligner @@ -634,7 +617,8 @@ class ForwardTTS(BaseTTS): return outputs, loss_dict - def train_log(self, ap: AudioProcessor, batch: dict, outputs: dict): # pylint: disable=no-self-use + def _create_logs(self, batch, outputs, ap): + """Create common logger outputs.""" model_outputs = outputs["model_outputs"] alignments = outputs["alignments"] mel_input = batch["mel_input"] @@ -674,11 +658,22 @@ class ForwardTTS(BaseTTS): train_audio = ap.inv_melspectrogram(pred_spec.T) return figures, {"audio": train_audio} + def train_log( + self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int + ) -> None: # pylint: disable=no-self-use + ap = assets["audio_processor"] + figures, audios = self._create_logs(batch, outputs, ap) + logger.train_figures(steps, figures) + logger.train_audios(steps, audios, ap.sample_rate) + def eval_step(self, batch: dict, criterion: nn.Module): return self.train_step(batch, criterion) - def eval_log(self, ap: AudioProcessor, batch: dict, 
outputs: dict): - return self.train_log(ap, batch, outputs) + def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None: + ap = assets["audio_processor"] + figures, audios = self._create_logs(batch, outputs, ap) + logger.eval_figures(steps, figures) + logger.eval_audios(steps, audios, ap.sample_rate) def load_checkpoint( self, config, checkpoint_path, eval=False diff --git a/recipes/ljspeech/fast_pitch/train_fast_pitch.py b/recipes/ljspeech/fast_pitch/train_fast_pitch.py index 614e42e0..fead67a0 100644 --- a/recipes/ljspeech/fast_pitch/train_fast_pitch.py +++ b/recipes/ljspeech/fast_pitch/train_fast_pitch.py @@ -1,8 +1,11 @@ import os from TTS.config import BaseAudioConfig, BaseDatasetConfig -from TTS.trainer import Trainer, TrainingArgs, init_training +from TTS.trainer import Trainer, TrainingArgs from TTS.tts.configs import FastPitchConfig +from TTS.tts.datasets import load_tts_samples +from TTS.tts.models.forward_tts import ForwardTTS +from TTS.utils.audio import AudioProcessor from TTS.utils.manage import ModelManager output_path = os.path.dirname(os.path.abspath(__file__)) @@ -64,7 +67,23 @@ if not config.model_args.use_aligner: f"python TTS/bin/compute_attention_masks.py --model_path {model_path} --config_path {config_path} --dataset ljspeech --dataset_metafile metadata.csv --data_path ./recipes/ljspeech/LJSpeech-1.1/ --use_cuda true" ) -# train the model -args, config, output_path, _, c_logger, tb_logger = init_training(TrainingArgs(), config) -trainer = Trainer(args, config, output_path, c_logger, tb_logger) +# init audio processor +ap = AudioProcessor(**config.audio) + +# load training samples +train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True) + +# init the model +model = ForwardTTS(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) trainer.fit() diff --git a/recipes/ljspeech/fast_speech/train_fast_speech.py b/recipes/ljspeech/fast_speech/train_fast_speech.py new file mode 100644 index 00000000..56557c26 --- /dev/null +++ b/recipes/ljspeech/fast_speech/train_fast_speech.py @@ -0,0 +1,88 @@ +import os + +from TTS.config import BaseAudioConfig, BaseDatasetConfig +from TTS.trainer import Trainer, TrainingArgs +from TTS.tts.configs import FastSpeechConfig +from TTS.tts.datasets import load_tts_samples +from TTS.tts.models.forward_tts import ForwardTTS +from TTS.utils.audio import AudioProcessor +from TTS.utils.manage import ModelManager + +output_path = os.path.dirname(os.path.abspath(__file__)) + +# init configs +dataset_config = BaseDatasetConfig( + name="ljspeech", + meta_file_train="metadata.csv", + # meta_file_attn_mask=os.path.join(output_path, "../LJSpeech-1.1/metadata_attn_mask.txt"), + path=os.path.join(output_path, "../LJSpeech-1.1/"), +) + +audio_config = BaseAudioConfig( + sample_rate=22050, + do_trim_silence=True, + trim_db=60.0, + signal_norm=False, + mel_fmin=0.0, + mel_fmax=8000, + spec_gain=1.0, + log_func="np.log", + ref_level_db=20, + preemphasis=0.0, +) + +config = FastSpeechConfig( + run_name="fast_speech_ljspeech", + audio=audio_config, + batch_size=32, + eval_batch_size=16, + num_loader_workers=8, + num_eval_loader_workers=4, + compute_input_seq_cache=True, + compute_f0=False, + run_eval=True, + test_delay_epochs=-1, + epochs=1000, + text_cleaner="english_cleaners", + use_phonemes=True, + use_espeak_phonemes=False, + 
phoneme_language="en-us", + phoneme_cache_path=os.path.join(output_path, "phoneme_cache"), + print_step=50, + print_eval=False, + mixed_precision=False, + sort_by_audio_len=True, + max_seq_len=500000, + output_path=output_path, + datasets=[dataset_config], +) + +# compute alignments +if not config.model_args.use_aligner: + manager = ModelManager() + model_path, config_path, _ = manager.download_model("tts_models/en/ljspeech/tacotron2-DCA") + # TODO: make compute_attention python callable + os.system( + f"python TTS/bin/compute_attention_masks.py --model_path {model_path} --config_path {config_path} --dataset ljspeech --dataset_metafile metadata.csv --data_path ./recipes/ljspeech/LJSpeech-1.1/ --use_cuda true" + ) + +# init audio processor +ap = AudioProcessor(**config.audio) + +# load training samples +train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True) + +# init the model +model = ForwardTTS(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) +trainer.fit() diff --git a/recipes/ljspeech/speedy_speech/train_speedy_speech.py b/recipes/ljspeech/speedy_speech/train_speedy_speech.py index 2882468f..27639e6b 100644 --- a/recipes/ljspeech/speedy_speech/train_speedy_speech.py +++ b/recipes/ljspeech/speedy_speech/train_speedy_speech.py @@ -1,18 +1,16 @@ import os from TTS.config import BaseAudioConfig, BaseDatasetConfig -from TTS.trainer import Trainer, TrainingArgs, init_training +from TTS.trainer import Trainer, TrainingArgs from TTS.tts.configs import SpeedySpeechConfig +from TTS.tts.datasets import load_tts_samples +from TTS.tts.models.forward_tts import ForwardTTS +from TTS.utils.audio import AudioProcessor from TTS.utils.manage import ModelManager output_path = os.path.dirname(os.path.abspath(__file__)) - -# init configs dataset_config = BaseDatasetConfig( - name="ljspeech", - meta_file_train="metadata.csv", - # meta_file_attn_mask=os.path.join(output_path, "../LJSpeech-1.1/metadata_attn_mask.txt"), - path=os.path.join(output_path, "../LJSpeech-1.1/"), + name="ljspeech", meta_file_train="metadata.csv", path=os.path.join(output_path, "../LJSpeech-1.1/") ) audio_config = BaseAudioConfig( @@ -53,16 +51,32 @@ config = SpeedySpeechConfig( datasets=[dataset_config], ) -# compute alignments -if not config.model_args.use_aligner: - manager = ModelManager() - model_path, config_path, _ = manager.download_model("tts_models/en/ljspeech/tacotron2-DCA") - # TODO: make compute_attention python callable - os.system( - f"python TTS/bin/compute_attention_masks.py --model_path {model_path} --config_path {config_path} --dataset ljspeech --dataset_metafile metadata.csv --data_path ./recipes/ljspeech/LJSpeech-1.1/ --use_cuda true" - ) +# # compute alignments +# if not config.model_args.use_aligner: +# manager = ModelManager() +# model_path, config_path, _ = manager.download_model("tts_models/en/ljspeech/tacotron2-DCA") +# # TODO: make compute_attention python callable +# os.system( +# f"python TTS/bin/compute_attention_masks.py --model_path {model_path} --config_path {config_path} --dataset ljspeech --dataset_metafile metadata.csv --data_path ./recipes/ljspeech/LJSpeech-1.1/ --use_cuda true" +# ) -# train the model -args, config, output_path, _, c_logger, tb_logger = init_training(TrainingArgs(), config) -trainer = Trainer(args, config, output_path, c_logger, tb_logger) +# init audio processor +ap = 
AudioProcessor(**config.audio.to_dict())
+
+# load training samples
+train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)
+
+# init model
+model = ForwardTTS(config)
+
+# init the trainer and 🚀
+trainer = Trainer(
+    TrainingArgs(),
+    config,
+    output_path,
+    model=model,
+    train_samples=train_samples,
+    eval_samples=eval_samples,
+    training_assets={"audio_processor": ap},
+)
 trainer.fit()

From 4baecdf92a6a1d75135573af404f7d19be8ed908 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 14:20:30 +0000
Subject: [PATCH 04/64] Update GAN for Trainer_v2

---
 TTS/vocoder/models/gan.py                     | 19 +++++++----
 recipes/ljspeech/hifigan/train_hifigan.py     | 32 ++++++++++++++++---
 .../train_multiband_melgan.py                 | 32 ++++++++++++++++---
 recipes/ljspeech/univnet/train.py             | 27 ++++++++++++++--
 4 files changed, 91 insertions(+), 19 deletions(-)

diff --git a/TTS/vocoder/models/gan.py b/TTS/vocoder/models/gan.py
index f203c533..81ba87c4 100644
--- a/TTS/vocoder/models/gan.py
+++ b/TTS/vocoder/models/gan.py
@@ -35,7 +35,7 @@ class GAN(BaseVocoder):
             >>> config = HifiganConfig()
             >>> model = GAN(config)
         """
-        super().__init__()
+        super().__init__(config)
         self.config = config
         self.model_g = setup_generator(config)
         self.model_d = setup_discriminator(config)
@@ -197,18 +197,24 @@ class GAN(BaseVocoder):
             audios = {f"{name}/audio": sample_voice}
         return figures, audios

-    def train_log(self, ap: AudioProcessor, batch: Dict, outputs: Dict) -> Tuple[Dict, np.ndarray]:
+    def train_log(
+        self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int  # pylint: disable=unused-argument
+    ) -> None:
         """Call `_log()` for training."""
-        return self._log("train", ap, batch, outputs)
+        ap = assets["audio_processor"]
+        self._log("train", ap, batch, outputs)

     @torch.no_grad()
     def eval_step(self, batch: Dict, criterion: nn.Module, optimizer_idx: int) -> Tuple[Dict, Dict]:
         """Call `train_step()` with `no_grad()`"""
         return self.train_step(batch, criterion, optimizer_idx)

-    def eval_log(self, ap: AudioProcessor, batch: Dict, outputs: Dict) -> Tuple[Dict, np.ndarray]:
+    def eval_log(
+        self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int  # pylint: disable=unused-argument
+    ) -> None:
         """Call `_log()` for evaluation."""
-        return self._log("eval", ap, batch, outputs)
+        ap = assets["audio_processor"]
+        self._log("eval", ap, batch, outputs)

     def load_checkpoint(
         self,
@@ -299,7 +305,7 @@ class GAN(BaseVocoder):
     def get_data_loader(  # pylint: disable=no-self-use
         self,
         config: Coqpit,
-        ap: AudioProcessor,
+        assets: Dict,
         is_eval: True,
         data_items: List,
         verbose: bool,
@@ -318,6 +324,7 @@ class GAN(BaseVocoder):

         Returns:
             DataLoader: Torch dataloader.
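+
+        Example:
+            A sketch of the expected call, assuming `assets` carries an initialized `AudioProcessor`
+            and `train_samples` is a list of wav paths prepared by your own loading code:
+
+            >>> loader = model.get_data_loader(config, {"audio_processor": ap}, False, train_samples, True, 1)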
""" + ap = assets["audio_processor"] dataset = GANDataset( ap=ap, items=data_items, diff --git a/recipes/ljspeech/hifigan/train_hifigan.py b/recipes/ljspeech/hifigan/train_hifigan.py index f50ef476..8d1c272a 100644 --- a/recipes/ljspeech/hifigan/train_hifigan.py +++ b/recipes/ljspeech/hifigan/train_hifigan.py @@ -1,29 +1,51 @@ import os -from TTS.trainer import Trainer, TrainingArgs, init_training +from TTS.trainer import Trainer, TrainingArgs +from TTS.utils.audio import AudioProcessor from TTS.vocoder.configs import HifiganConfig +from TTS.vocoder.datasets.preprocess import load_wav_data +from TTS.vocoder.models.gan import GAN output_path = os.path.dirname(os.path.abspath(__file__)) + config = HifiganConfig( batch_size=32, eval_batch_size=16, num_loader_workers=4, num_eval_loader_workers=4, run_eval=True, - test_delay_epochs=-1, + test_delay_epochs=5, epochs=1000, seq_len=8192, pad_short=2000, use_noise_augment=True, eval_split_size=10, print_step=25, - print_eval=True, + print_eval=False, mixed_precision=False, lr_gen=1e-4, lr_disc=1e-4, data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), output_path=output_path, ) -args, config, output_path, _, c_logger, dashboard_logger = init_training(TrainingArgs(), config) -trainer = Trainer(args, config, output_path, c_logger, dashboard_logger) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) + +# init model +model = GAN(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) trainer.fit() diff --git a/recipes/ljspeech/multiband_melgan/train_multiband_melgan.py b/recipes/ljspeech/multiband_melgan/train_multiband_melgan.py index 1473ec3c..90c52997 100644 --- a/recipes/ljspeech/multiband_melgan/train_multiband_melgan.py +++ b/recipes/ljspeech/multiband_melgan/train_multiband_melgan.py @@ -1,29 +1,51 @@ import os -from TTS.trainer import Trainer, TrainingArgs, init_training +from TTS.trainer import Trainer, TrainingArgs +from TTS.utils.audio import AudioProcessor from TTS.vocoder.configs import MultibandMelganConfig +from TTS.vocoder.datasets.preprocess import load_wav_data +from TTS.vocoder.models.gan import GAN output_path = os.path.dirname(os.path.abspath(__file__)) + config = MultibandMelganConfig( batch_size=32, eval_batch_size=16, num_loader_workers=4, num_eval_loader_workers=4, run_eval=True, - test_delay_epochs=-1, + test_delay_epochs=5, epochs=1000, seq_len=8192, pad_short=2000, use_noise_augment=True, eval_split_size=10, print_step=25, - print_eval=True, + print_eval=False, mixed_precision=False, lr_gen=1e-4, lr_disc=1e-4, data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), output_path=output_path, ) -args, config, output_path, _, c_logger, dashboard_logger = init_training(TrainingArgs(), config) -trainer = Trainer(args, config, output_path, c_logger, dashboard_logger) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) + +# init model +model = GAN(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) trainer.fit() diff --git 
a/recipes/ljspeech/univnet/train.py b/recipes/ljspeech/univnet/train.py index e8979c92..a4ab93bf 100644 --- a/recipes/ljspeech/univnet/train.py +++ b/recipes/ljspeech/univnet/train.py @@ -1,7 +1,10 @@ import os -from TTS.trainer import Trainer, TrainingArgs, init_training +from TTS.trainer import Trainer, TrainingArgs +from TTS.utils.audio import AudioProcessor from TTS.vocoder.configs import UnivnetConfig +from TTS.vocoder.datasets.preprocess import load_wav_data +from TTS.vocoder.models.gan import GAN output_path = os.path.dirname(os.path.abspath(__file__)) config = UnivnetConfig( @@ -24,6 +27,24 @@ config = UnivnetConfig( data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), output_path=output_path, ) -args, config, output_path, _, c_logger, dashboard_logger = init_training(TrainingArgs(), config) -trainer = Trainer(args, config, output_path, c_logger, dashboard_logger) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) + +# init model +model = GAN(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) trainer.fit() From fd95926009e763a84846f8da2ad813c421d0abc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:21:02 +0000 Subject: [PATCH 05/64] Update GlowTTS --- TTS/tts/layers/glow_tts/glow.py | 1 - TTS/tts/models/glow_tts.py | 23 ++++++++++++++---- recipes/ljspeech/glow_tts/train_glowtts.py | 27 +++++++++++++++++++--- 3 files changed, 42 insertions(+), 9 deletions(-) diff --git a/TTS/tts/layers/glow_tts/glow.py b/TTS/tts/layers/glow_tts/glow.py index 392447de..ff1b99e8 100644 --- a/TTS/tts/layers/glow_tts/glow.py +++ b/TTS/tts/layers/glow_tts/glow.py @@ -106,7 +106,6 @@ class InvConvNear(nn.Module): - x: :math:`[B, C, T]` - x_mask: :math:`[B, 1, T]` """ - b, c, t = x.size() assert c % self.num_splits == 0 if x_mask is None: diff --git a/TTS/tts/models/glow_tts.py b/TTS/tts/models/glow_tts.py index 2e94659e..bcc46cec 100644 --- a/TTS/tts/models/glow_tts.py +++ b/TTS/tts/models/glow_tts.py @@ -1,4 +1,5 @@ import math +from typing import Dict, Tuple import torch from torch import nn @@ -47,7 +48,7 @@ class GlowTTS(BaseTTS): def __init__(self, config: GlowTTSConfig): - super().__init__() + super().__init__(config) # pass all config fields to `self` # for fewer code change @@ -387,7 +388,7 @@ class GlowTTS(BaseTTS): ) return outputs, loss_dict - def train_log(self, ap: AudioProcessor, batch: dict, outputs: dict): # pylint: disable=no-self-use + def _create_logs(self, batch, outputs, ap): alignments = outputs["alignments"] text_input = batch["text_input"] text_lengths = batch["text_lengths"] @@ -416,15 +417,26 @@ class GlowTTS(BaseTTS): train_audio = ap.inv_melspectrogram(pred_spec.T) return figures, {"audio": train_audio} + def train_log( + self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int + ) -> None: # pylint: disable=no-self-use + ap = assets["audio_processor"] + figures, audios = self._create_logs(batch, outputs, ap) + logger.train_figures(steps, figures) + logger.train_audios(steps, audios, ap.sample_rate) + @torch.no_grad() def eval_step(self, batch: dict, criterion: nn.Module): return self.train_step(batch, criterion) - def eval_log(self, ap: AudioProcessor, batch: dict, outputs: dict): - return self.train_log(ap, 
batch, outputs) + def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None: + ap = assets["audio_processor"] + figures, audios = self._create_logs(batch, outputs, ap) + logger.eval_figures(steps, figures) + logger.eval_audios(steps, audios, ap.sample_rate) @torch.no_grad() - def test_run(self, ap): + def test_run(self, assets: Dict) -> Tuple[Dict, Dict]: """Generic test run for `tts` models used by `Trainer`. You can override this for a different behaviour. @@ -432,6 +444,7 @@ class GlowTTS(BaseTTS): Returns: Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard. """ + ap = assets["audio_processor"] print(" | > Synthesizing test sentences.") test_audios = {} test_figures = {} diff --git a/recipes/ljspeech/glow_tts/train_glowtts.py b/recipes/ljspeech/glow_tts/train_glowtts.py index 5d71f4ed..29077eeb 100644 --- a/recipes/ljspeech/glow_tts/train_glowtts.py +++ b/recipes/ljspeech/glow_tts/train_glowtts.py @@ -1,7 +1,10 @@ import os -from TTS.trainer import Trainer, TrainingArgs, init_training +from TTS.trainer import Trainer, TrainingArgs from TTS.tts.configs import BaseDatasetConfig, GlowTTSConfig +from TTS.tts.datasets import load_tts_samples +from TTS.tts.models.glow_tts import GlowTTS +from TTS.utils.audio import AudioProcessor output_path = os.path.dirname(os.path.abspath(__file__)) dataset_config = BaseDatasetConfig( @@ -25,6 +28,24 @@ config = GlowTTSConfig( output_path=output_path, datasets=[dataset_config], ) -args, config, output_path, _, c_logger, dashboard_logger = init_training(TrainingArgs(), config) -trainer = Trainer(args, config, output_path, c_logger, dashboard_logger) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True) + +# init model +model = GlowTTS(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) trainer.fit() From 3d5205d66ff84bdb82b0f18f3c47064226a43193 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:21:25 +0000 Subject: [PATCH 06/64] Update WaveGrad --- TTS/vocoder/models/wavegrad.py | 18 +++++++------ recipes/ljspeech/wavegrad/train_wavegrad.py | 28 ++++++++++++++++++--- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/TTS/vocoder/models/wavegrad.py b/TTS/vocoder/models/wavegrad.py index 8d95a063..5755a9a7 100644 --- a/TTS/vocoder/models/wavegrad.py +++ b/TTS/vocoder/models/wavegrad.py @@ -58,7 +58,7 @@ class Wavegrad(BaseVocoder): # pylint: disable=dangerous-default-value def __init__(self, config: Coqpit): - super().__init__() + super().__init__(config) self.config = config self.use_weight_norm = config.model_params.use_weight_norm self.hop_len = np.prod(config.model_params.upsample_factors) @@ -258,21 +258,22 @@ class Wavegrad(BaseVocoder): return {"model_output": noise_hat}, {"loss": loss} def train_log( # pylint: disable=no-self-use - self, ap: AudioProcessor, batch: Dict, outputs: Dict # pylint: disable=unused-argument + self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int # pylint: disable=unused-argument ) -> Tuple[Dict, np.ndarray]: - return None, None + pass @torch.no_grad() def eval_step(self, batch: Dict, criterion: nn.Module) -> Tuple[Dict, Dict]: return self.train_step(batch, criterion) def eval_log( # pylint: 
disable=no-self-use - self, ap: AudioProcessor, batch: Dict, outputs: Dict # pylint: disable=unused-argument - ) -> Tuple[Dict, np.ndarray]: - return None, None + self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int # pylint: disable=unused-argument + ) -> None: + pass - def test_run(self, ap: AudioProcessor, samples: List[Dict], ouputs: Dict): # pylint: disable=unused-argument + def test_run(self, assets: Dict, samples: List[Dict], outputs: Dict): # pylint: disable=unused-argument # setup noise schedule and inference + ap = assets["audio_processor"] noise_schedule = self.config["test_noise_schedule"] betas = np.linspace(noise_schedule["min_val"], noise_schedule["max_val"], noise_schedule["num_steps"]) self.compute_noise_level(betas) @@ -307,8 +308,9 @@ class Wavegrad(BaseVocoder): return {"input": m, "waveform": y} def get_data_loader( - self, config: Coqpit, ap: AudioProcessor, is_eval: True, data_items: List, verbose: bool, num_gpus: int + self, config: Coqpit, assets: Dict, is_eval: True, data_items: List, verbose: bool, num_gpus: int ): + ap = assets["audio_processor"] dataset = WaveGradDataset( ap=ap, items=data_items, diff --git a/recipes/ljspeech/wavegrad/train_wavegrad.py b/recipes/ljspeech/wavegrad/train_wavegrad.py index fe038915..aa873169 100644 --- a/recipes/ljspeech/wavegrad/train_wavegrad.py +++ b/recipes/ljspeech/wavegrad/train_wavegrad.py @@ -1,7 +1,11 @@ import os -from TTS.trainer import Trainer, TrainingArgs, init_training +from TTS.trainer import Trainer, TrainingArgs +from TTS.utils.audio import AudioProcessor from TTS.vocoder.configs import WavegradConfig +from TTS.vocoder.models.wavegrad import Wavegrad +from TTS.vocoder.datasets.preprocess import load_wav_data + output_path = os.path.dirname(os.path.abspath(__file__)) config = WavegradConfig( @@ -22,6 +26,24 @@ config = WavegradConfig( data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), output_path=output_path, ) -args, config, output_path, _, c_logger, dashboard_logger = init_training(TrainingArgs(), config) -trainer = Trainer(args, config, output_path, c_logger, dashboard_logger) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) + +# init model +model = Wavegrad(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) trainer.fit() From 4f94f91305119edd3b373b90912fc06ccea96b1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:21:35 +0000 Subject: [PATCH 07/64] Update WaveRNN --- TTS/vocoder/models/wavernn.py | 17 +++++++------- recipes/ljspeech/wavernn/train_wavernn.py | 28 ++++++++++++++++++++--- 2 files changed, 33 insertions(+), 12 deletions(-) diff --git a/TTS/vocoder/models/wavernn.py b/TTS/vocoder/models/wavernn.py index 9b0d6837..1977efb6 100644 --- a/TTS/vocoder/models/wavernn.py +++ b/TTS/vocoder/models/wavernn.py @@ -222,10 +222,7 @@ class Wavernn(BaseVocoder): samples at once. The Subscale WaveRNN produces 16 samples per step without loss of quality and offers an orthogonal method for increasing sampling efficiency. 
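+
+    Examples:
+        Init the model from its config (a minimal sketch; mirrors the WaveRNN recipe
+        later in this series):
+
+        >>> from TTS.vocoder.configs import WavernnConfig
+        >>> config = WavernnConfig()
+        >>> model = Wavernn(config)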
""" - super().__init__() - - self.args = config.model_params - self.config = config + super().__init__(config) if isinstance(self.args.mode, int): self.n_classes = 2 ** self.args.mode @@ -572,8 +569,9 @@ class Wavernn(BaseVocoder): @torch.no_grad() def test_run( - self, ap: AudioProcessor, samples: List[Dict], output: Dict # pylint: disable=unused-argument + self, assets: Dict, samples: List[Dict], output: Dict # pylint: disable=unused-argument ) -> Tuple[Dict, Dict]: + ap = assets["audio_processor"] figures = {} audios = {} for idx, sample in enumerate(samples): @@ -600,20 +598,21 @@ class Wavernn(BaseVocoder): def get_data_loader( # pylint: disable=no-self-use self, config: Coqpit, - ap: AudioProcessor, + assets: Dict, is_eval: True, data_items: List, verbose: bool, num_gpus: int, ): + ap = assets["audio_processor"] dataset = WaveRNNDataset( ap=ap, items=data_items, seq_len=config.seq_len, hop_len=ap.hop_length, - pad=config.model_params.pad, - mode=config.model_params.mode, - mulaw=config.model_params.mulaw, + pad=config.model_args.pad, + mode=config.model_args.mode, + mulaw=config.model_args.mulaw, is_training=not is_eval, verbose=verbose, ) diff --git a/recipes/ljspeech/wavernn/train_wavernn.py b/recipes/ljspeech/wavernn/train_wavernn.py index 8f138298..9777a985 100644 --- a/recipes/ljspeech/wavernn/train_wavernn.py +++ b/recipes/ljspeech/wavernn/train_wavernn.py @@ -1,7 +1,11 @@ import os -from TTS.trainer import Trainer, TrainingArgs, init_training + +from TTS.trainer import Trainer, TrainingArgs +from TTS.utils.audio import AudioProcessor from TTS.vocoder.configs import WavernnConfig +from TTS.vocoder.datasets.preprocess import load_wav_data +from TTS.vocoder.models.wavernn import Wavernn output_path = os.path.dirname(os.path.abspath(__file__)) config = WavernnConfig( @@ -24,6 +28,24 @@ config = WavernnConfig( data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"), output_path=output_path, ) -args, config, output_path, _, c_logger, dashboard_logger = init_training(TrainingArgs(), config) -trainer = Trainer(args, config, output_path, c_logger, dashboard_logger, cudnn_benchmark=True) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) + +# init model +model = Wavernn(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) trainer.fit() From 45889804c2e314bdfeccb7e405aefabf4b17424f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:22:11 +0000 Subject: [PATCH 08/64] Update VITS --- TTS/tts/models/vits.py | 43 ++++++++++++++----------- recipes/ljspeech/vits_tts/train_vits.py | 29 +++++++++++++++-- 2 files changed, 50 insertions(+), 22 deletions(-) diff --git a/TTS/tts/models/vits.py b/TTS/tts/models/vits.py index 87695774..0ede3d13 100644 --- a/TTS/tts/models/vits.py +++ b/TTS/tts/models/vits.py @@ -217,7 +217,7 @@ class Vits(BaseTTS): def __init__(self, config: Coqpit): - super().__init__() + super().__init__(config) self.END2END = True @@ -576,22 +576,7 @@ class Vits(BaseTTS): ) return outputs, loss_dict - def train_log( - self, ap: AudioProcessor, batch: Dict, outputs: List, name_prefix="train" - ): # pylint: disable=no-self-use - """Create visualizations and waveform examples. 
-
-        For example, here you can plot spectrograms and generate sample sample waveforms from these spectrograms to
-        be projected onto Tensorboard.
-
-        Args:
-            ap (AudioProcessor): audio processor used at training.
-            batch (Dict): Model inputs used at the previous training step.
-            outputs (Dict): Model outputs generated at the previoud training step.
-
-        Returns:
-            Tuple[Dict, np.ndarray]: training plots and output waveform.
-        """
+    def _log(self, ap, batch, outputs, name_prefix="train"):
         y_hat = outputs[0]["model_outputs"]
         y = outputs[0]["waveform_seg"]
         figures = plot_results(y_hat, y, ap, name_prefix)
@@ -609,12 +594,32 @@
 
         return figures, audios
 
+    def train_log(
+        self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int
+    ):  # pylint: disable=no-self-use
+        """Create visualizations and waveform examples.
+
+        For example, here you can plot spectrograms and generate sample waveforms from these spectrograms to
+        be projected onto Tensorboard.
+
+        Args:
+            batch (Dict): Model inputs used at the previous training step.
+            outputs (Dict): Model outputs generated at the previous training step.
+            logger (Logger): Logger instance used to plot the figures and audios.
+            assets (dict): Training assets; expected to include the `audio_processor`.
+            steps (int): Number of training steps taken so far.
+        """
+        ap = assets["audio_processor"]
+        self._log(ap, batch, outputs, "train")
+
     @torch.no_grad()
     def eval_step(self, batch: dict, criterion: nn.Module, optimizer_idx: int):
         return self.train_step(batch, criterion, optimizer_idx)
 
-    def eval_log(self, ap: AudioProcessor, batch: dict, outputs: dict):
-        return self.train_log(ap, batch, outputs, "eval")
+    def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None:
+        ap = assets["audio_processor"]
+        self._log(ap, batch, outputs, "eval")
 
     @torch.no_grad()
     def test_run(self, ap) -> Tuple[Dict, Dict]:
diff --git a/recipes/ljspeech/vits_tts/train_vits.py b/recipes/ljspeech/vits_tts/train_vits.py
index 7cf52f89..3a2b1ef1 100644
--- a/recipes/ljspeech/vits_tts/train_vits.py
+++ b/recipes/ljspeech/vits_tts/train_vits.py
@@ -1,8 +1,12 @@
 import os
 
 from TTS.config.shared_configs import BaseAudioConfig
-from TTS.trainer import Trainer, TrainingArgs, init_training
+from TTS.trainer import Trainer, TrainingArgs
 from TTS.tts.configs import BaseDatasetConfig, VitsConfig
+from TTS.tts.models.vits import Vits
+from TTS.utils.audio import AudioProcessor
+from TTS.tts.datasets import load_tts_samples
+
 
 output_path = os.path.dirname(os.path.abspath(__file__))
 dataset_config = BaseDatasetConfig(
@@ -24,6 +28,7 @@ audio_config = BaseAudioConfig(
     signal_norm=False,
     do_amp_to_db_linear=False,
 )
+
 config = VitsConfig(
     audio=audio_config,
     run_name="vits_ljspeech",
@@ -47,6 +52,24 @@ config = VitsConfig(
     output_path=output_path,
     datasets=[dataset_config],
 )
-args, config, output_path, _, c_logger, tb_logger = init_training(TrainingArgs(), config)
-trainer = Trainer(args, config, output_path, c_logger, tb_logger, cudnn_benchmark=True)
+
+# init audio processor
+ap = AudioProcessor(**config.audio.to_dict())
+
+# load training samples
+train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)
+
+# init model
+model = Vits(config)
+
+# init the trainer and 🚀
+trainer = Trainer(
+    TrainingArgs(),
+    config,
+    output_path,
+    model=model,
+    train_samples=train_samples,
+    eval_samples=eval_samples,
+    training_assets={"audio_processor": ap},
+)
 trainer.fit()

From e27feade38277e1f3bf1189d8c2ce0c64280987a Mon Sep 17 00:00:00 2001
From: 
=?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:22:32 +0000 Subject: [PATCH 09/64] Fixup wavernn --- TTS/vocoder/configs/wavernn_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/vocoder/configs/wavernn_config.py b/TTS/vocoder/configs/wavernn_config.py index 0afa1f43..f39400e5 100644 --- a/TTS/vocoder/configs/wavernn_config.py +++ b/TTS/vocoder/configs/wavernn_config.py @@ -75,7 +75,7 @@ class WavernnConfig(BaseVocoderConfig): model: str = "wavernn" # Model specific params - model_params: WavernnArgs = field(default_factory=WavernnArgs) + model_args: WavernnArgs = field(default_factory=WavernnArgs) target_loss: str = "loss" # Inference From 4163b4f2e480cfe6fb0b8f57d16a1a95cd9c6144 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:23:00 +0000 Subject: [PATCH 10/64] Update Tacotron models --- TTS/tts/models/base_tacotron.py | 51 +------------ TTS/tts/models/tacotron.py | 25 +++++-- TTS/tts/models/tacotron2.py | 25 +++++-- .../tacotron2-DCA/train_tacotron_dca.py | 75 +++++++++++++++++++ .../tacotron2-DDC/train_tacotron_ddc.py | 74 ++++++++++++++++++ 5 files changed, 187 insertions(+), 63 deletions(-) create mode 100644 recipes/ljspeech/tacotron2-DCA/train_tacotron_dca.py create mode 100644 recipes/ljspeech/tacotron2-DDC/train_tacotron_ddc.py diff --git a/TTS/tts/models/base_tacotron.py b/TTS/tts/models/base_tacotron.py index 8cfd750d..22dba586 100644 --- a/TTS/tts/models/base_tacotron.py +++ b/TTS/tts/models/base_tacotron.py @@ -17,43 +17,12 @@ from TTS.utils.io import load_fsspec from TTS.utils.training import gradual_training_scheduler -@dataclass -class BaseTacotronArgs(Coqpit): - """TODO: update Tacotron configs using it""" - - num_chars: int = MISSING - num_speakers: int = MISSING - r: int = MISSING - out_channels: int = 80 - decoder_output_dim: int = 80 - attn_type: str = "original" - attn_win: bool = False - attn_norm: str = "softmax" - prenet_type: str = "original" - prenet_dropout: bool = True - prenet_dropout_at_inference: bool = False - forward_attn: bool = False - trans_agent: bool = False - forward_attn_mask: bool = False - location_attn: bool = True - attn_K: int = 5 - separate_stopnet: bool = True - bidirectional_decoder: bool = False - double_decoder_consistency: bool = False - ddc_r: int = None - encoder_in_features: int = 512 - decoder_in_features: int = 512 - d_vector_dim: int = None - use_gst: bool = False - gst: bool = None - gradual_training: bool = None - - class BaseTacotron(BaseTTS): def __init__(self, config: Coqpit): """Abstract Tacotron class""" - super().__init__() + super().__init__(config) + # pass all config fields as class attributes for key in config: setattr(self, key, config[key]) @@ -133,22 +102,6 @@ class BaseTacotron(BaseTTS): def get_criterion(self) -> nn.Module: return TacotronLoss(self.config) - @staticmethod - def get_characters(config: Coqpit) -> str: - # TODO: implement CharacterProcessor - if config.characters is not None: - symbols, phonemes = make_symbols(**config.characters) - else: - from TTS.tts.utils.text.symbols import ( # pylint: disable=import-outside-toplevel - parse_symbols, - phonemes, - symbols, - ) - - config.characters = parse_symbols() - model_characters = phonemes if config.use_phonemes else symbols - return model_characters, config - @staticmethod def get_speaker_manager(config: Coqpit, restore_path: str, data: List, out_path: str = None) -> SpeakerManager: return get_speaker_manager(config, restore_path, data, out_path) diff --git 
a/TTS/tts/models/tacotron.py b/TTS/tts/models/tacotron.py index 84a256d5..3a7dd339 100644 --- a/TTS/tts/models/tacotron.py +++ b/TTS/tts/models/tacotron.py @@ -23,7 +23,7 @@ class Tacotron(BaseTacotron): def __init__(self, config: Coqpit): super().__init__(config) - chars, self.config = self.get_characters(config) + chars, self.config, _ = self.get_characters(config) config.num_chars = self.num_chars = len(chars) # pass all config fields to `self` @@ -264,7 +264,7 @@ class Tacotron(BaseTacotron): loss_dict["align_error"] = align_error return outputs, loss_dict - def train_log(self, ap: AudioProcessor, batch: dict, outputs: dict) -> Tuple[Dict, Dict]: + def _create_logs(self, batch, outputs, ap): postnet_outputs = outputs["model_outputs"] alignments = outputs["alignments"] alignments_backward = outputs["alignments_backward"] @@ -284,11 +284,22 @@ class Tacotron(BaseTacotron): figures["alignment_backward"] = plot_alignment(alignments_backward[0].data.cpu().numpy(), output_fig=False) # Sample audio - train_audio = ap.inv_spectrogram(pred_spec.T) - return figures, {"audio": train_audio} + audio = ap.inv_spectrogram(pred_spec.T) + return figures, {"audio": audio} - def eval_step(self, batch, criterion): + def train_log( + self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int + ) -> None: # pylint: disable=no-self-use + ap = assets["audio_processor"] + figures, audios = self._create_logs(batch, outputs, ap) + logger.train_figures(steps, figures) + logger.train_audios(steps, audios, ap.sample_rate) + + def eval_step(self, batch: dict, criterion: nn.Module): return self.train_step(batch, criterion) - def eval_log(self, ap, batch, outputs): - return self.train_log(ap, batch, outputs) + def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None: + ap = assets["audio_processor"] + figures, audios = self._create_logs(batch, outputs, ap) + logger.eval_figures(steps, figures) + logger.eval_audios(steps, audios, ap.sample_rate) diff --git a/TTS/tts/models/tacotron2.py b/TTS/tts/models/tacotron2.py index 39ef12a8..300cf903 100644 --- a/TTS/tts/models/tacotron2.py +++ b/TTS/tts/models/tacotron2.py @@ -22,7 +22,7 @@ class Tacotron2(BaseTacotron): def __init__(self, config: Coqpit): super().__init__(config) - chars, self.config = self.get_characters(config) + chars, self.config, _ = self.get_characters(config) config.num_chars = len(chars) self.decoder_output_dim = config.out_channels @@ -269,7 +269,7 @@ class Tacotron2(BaseTacotron): loss_dict["align_error"] = align_error return outputs, loss_dict - def train_log(self, ap: AudioProcessor, batch: dict, outputs: dict) -> Tuple[Dict, Dict]: + def _create_logs(self, batch, outputs, ap): postnet_outputs = outputs["model_outputs"] alignments = outputs["alignments"] alignments_backward = outputs["alignments_backward"] @@ -289,11 +289,22 @@ class Tacotron2(BaseTacotron): figures["alignment_backward"] = plot_alignment(alignments_backward[0].data.cpu().numpy(), output_fig=False) # Sample audio - train_audio = ap.inv_melspectrogram(pred_spec.T) - return figures, {"audio": train_audio} + audio = ap.inv_melspectrogram(pred_spec.T) + return figures, {"audio": audio} - def eval_step(self, batch, criterion): + def train_log( + self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int + ) -> None: # pylint: disable=no-self-use + ap = assets["audio_processor"] + figures, audios = self._create_logs(batch, outputs, ap) + logger.train_figures(steps, figures) + logger.train_audios(steps, audios, 
ap.sample_rate) + + def eval_step(self, batch: dict, criterion: nn.Module): return self.train_step(batch, criterion) - def eval_log(self, ap, batch, outputs): - return self.train_log(ap, batch, outputs) + def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None: + ap = assets["audio_processor"] + figures, audios = self._create_logs(batch, outputs, ap) + logger.eval_figures(steps, figures) + logger.eval_audios(steps, audios, ap.sample_rate) diff --git a/recipes/ljspeech/tacotron2-DCA/train_tacotron_dca.py b/recipes/ljspeech/tacotron2-DCA/train_tacotron_dca.py new file mode 100644 index 00000000..be32f989 --- /dev/null +++ b/recipes/ljspeech/tacotron2-DCA/train_tacotron_dca.py @@ -0,0 +1,75 @@ +import os + +from TTS.config.shared_configs import BaseAudioConfig +from TTS.trainer import Trainer, TrainingArgs +from TTS.tts.configs import BaseDatasetConfig, Tacotron2Config +from TTS.tts.datasets import load_tts_samples +from TTS.tts.models.tacotron2 import Tacotron2 +from TTS.utils.audio import AudioProcessor + +# from TTS.tts.datasets.tokenizer import Tokenizer + +output_path = os.path.dirname(os.path.abspath(__file__)) + +# init configs +dataset_config = BaseDatasetConfig( + name="ljspeech", meta_file_train="metadata.csv", path=os.path.join(output_path, "../LJSpeech-1.1/") +) + +audio_config = BaseAudioConfig( + sample_rate=22050, + do_trim_silence=True, + trim_db=60.0, + signal_norm=False, + mel_fmin=0.0, + mel_fmax=8000, + spec_gain=1.0, + log_func="np.log", + ref_level_db=20, + preemphasis=0.0, +) + +config = Tacotron2Config( # This is the config that is saved for the future use + audio=audio_config, + batch_size=64, + eval_batch_size=16, + num_loader_workers=4, + num_eval_loader_workers=4, + run_eval=True, + test_delay_epochs=-1, + ga_alpha=5.0, + r=2, + attention_type="dynamic_convolution", + double_decoder_consistency=True, + epochs=1000, + text_cleaner="phoneme_cleaners", + use_phonemes=True, + phoneme_language="en-us", + phoneme_cache_path=os.path.join(output_path, "phoneme_cache"), + print_step=25, + print_eval=True, + mixed_precision=False, + output_path=output_path, + datasets=[dataset_config], +) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True) + +# init model +model = Tacotron2(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) +trainer.fit() diff --git a/recipes/ljspeech/tacotron2-DDC/train_tacotron_ddc.py b/recipes/ljspeech/tacotron2-DDC/train_tacotron_ddc.py new file mode 100644 index 00000000..d72576d3 --- /dev/null +++ b/recipes/ljspeech/tacotron2-DDC/train_tacotron_ddc.py @@ -0,0 +1,74 @@ +import os + +from TTS.config.shared_configs import BaseAudioConfig +from TTS.trainer import Trainer, TrainingArgs +from TTS.tts.configs import BaseDatasetConfig, Tacotron2Config +from TTS.tts.datasets import load_tts_samples +from TTS.tts.models.tacotron2 import Tacotron2 +from TTS.utils.audio import AudioProcessor + +# from TTS.tts.datasets.tokenizer import Tokenizer + +output_path = os.path.dirname(os.path.abspath(__file__)) + +# init configs +dataset_config = BaseDatasetConfig( + name="ljspeech", meta_file_train="metadata.csv", path=os.path.join(output_path, "../LJSpeech-1.1/") +) + +audio_config = BaseAudioConfig( + sample_rate=22050, + 
do_trim_silence=True, + trim_db=60.0, + signal_norm=False, + mel_fmin=0.0, + mel_fmax=8000, + spec_gain=1.0, + log_func="np.log", + ref_level_db=20, + preemphasis=0.0, +) + +config = Tacotron2Config( # This is the config that is saved for the future use + audio=audio_config, + batch_size=64, + eval_batch_size=16, + num_loader_workers=4, + num_eval_loader_workers=4, + run_eval=True, + test_delay_epochs=-1, + r=6, + gradual_training=[[0, 6, 64], [10000, 4, 32], [50000, 3, 32], [100000, 2, 32]], + double_decoder_consistency=True, + epochs=1000, + text_cleaner="phoneme_cleaners", + use_phonemes=True, + phoneme_language="en-us", + phoneme_cache_path=os.path.join(output_path, "phoneme_cache"), + print_step=25, + print_eval=True, + mixed_precision=False, + output_path=output_path, + datasets=[dataset_config], +) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True) + +# init model +model = Tacotron2(config) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) +trainer.fit() From 9a0d8fa02737dc81bd503b89b3fd0b67dd87488e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:24:24 +0000 Subject: [PATCH 11/64] Update `copy_model_files()` --- TTS/utils/io.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/TTS/utils/io.py b/TTS/utils/io.py index dd4ffd60..a93f6118 100644 --- a/TTS/utils/io.py +++ b/TTS/utils/io.py @@ -38,7 +38,8 @@ def copy_model_files(config: Coqpit, out_path, new_fields): """ copy_config_path = os.path.join(out_path, "config.json") # add extra information fields - config.update(new_fields, allow_new=True) + if new_fields: + config.update(new_fields, allow_new=True) # TODO: Revert to config.save_json() once Coqpit supports arbitrary paths. with fsspec.open(copy_config_path, "w", encoding="utf8") as f: json.dump(config.to_dict(), f, indent=4) From 16b70be0dd1f0e15d3746f2fa7e51692e3a552f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:27:04 +0000 Subject: [PATCH 12/64] Add `_set_model_args` to BaseModel --- TTS/model.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/TTS/model.py b/TTS/model.py index cfd1ec62..e34846bb 100644 --- a/TTS/model.py +++ b/TTS/model.py @@ -22,6 +22,14 @@ class BaseModel(nn.Module, ABC): - 1D tensors `batch x 1` """ + def __init__(self, config: Coqpit): + super().__init__() + self._set_model_args(config) + + def _set_model_args(self, config: Coqpit): + """Set model arguments from the config. Override this.""" + pass + @abstractmethod def forward(self, input: torch.Tensor, *args, aux_input={}, **kwargs) -> Dict: """Forward pass for the model mainly used in training. @@ -73,7 +81,7 @@ class BaseModel(nn.Module, ABC): ... return outputs_dict, loss_dict - def train_log(self, ap: AudioProcessor, batch: Dict, outputs: Dict) -> Tuple[Dict, np.ndarray]: + def train_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int) -> None: """Create visualizations and waveform examples for training. For example, here you can plot spectrograms and generate sample sample waveforms from these spectrograms to @@ -87,7 +95,7 @@ class BaseModel(nn.Module, ABC): Returns: Tuple[Dict, np.ndarray]: training plots and output waveform. 
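+
+        Example:
+            A sketch of a typical override (an illustration, not part of the base API;
+            `_create_logs` is a helper that models such as `GlowTTS` in this codebase define):
+
+            >>> def train_log(self, batch, outputs, logger, assets, steps):
+            ...     ap = assets["audio_processor"]
+            ...     figures, audios = self._create_logs(batch, outputs, ap)
+            ...     logger.train_figures(steps, figures)
+            ...     logger.train_audios(steps, audios, ap.sample_rate)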
""" - return None, None + pass @abstractmethod def eval_step(self, batch: Dict, criterion: nn.Module) -> Tuple[Dict, Dict]: @@ -106,9 +114,9 @@ class BaseModel(nn.Module, ABC): ... return outputs_dict, loss_dict - def eval_log(self, ap: AudioProcessor, batch: Dict, outputs: Dict) -> Tuple[Dict, np.ndarray]: + def eval_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int) -> None: """The same as `train_log()`""" - return None, None + pass @abstractmethod def load_checkpoint(self, config: Coqpit, checkpoint_path: str, eval: bool = False) -> None: From 9f23ad6a0f650a243614404b08cdf58ce3c2b0f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:28:28 +0000 Subject: [PATCH 13/64] Fix imports --- TTS/model.py | 5 ++--- TTS/tts/datasets/formatters.py | 4 ++-- TTS/tts/utils/speakers.py | 6 +++--- TTS/vocoder/datasets/__init__.py | 1 + 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/TTS/model.py b/TTS/model.py index e34846bb..604a1ffa 100644 --- a/TTS/model.py +++ b/TTS/model.py @@ -6,7 +6,6 @@ import torch from coqpit import Coqpit from torch import nn -from TTS.utils.audio import AudioProcessor # pylint: skip-file @@ -81,7 +80,7 @@ class BaseModel(nn.Module, ABC): ... return outputs_dict, loss_dict - def train_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int) -> None: + def train_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets:Dict, steps:int) -> None: """Create visualizations and waveform examples for training. For example, here you can plot spectrograms and generate sample sample waveforms from these spectrograms to @@ -114,7 +113,7 @@ class BaseModel(nn.Module, ABC): ... return outputs_dict, loss_dict - def eval_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int) -> None: + def eval_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets:Dict, steps:int) -> None: """The same as `train_log()`""" pass diff --git a/TTS/tts/datasets/formatters.py b/TTS/tts/datasets/formatters.py index eee407a8..dcd18740 100644 --- a/TTS/tts/datasets/formatters.py +++ b/TTS/tts/datasets/formatters.py @@ -308,14 +308,14 @@ def mls(root_path, meta_files=None): # ======================================== VOX CELEB =========================================== def voxceleb2(root_path, meta_file=None): """ - :param meta_file Used only for consistency with load_meta_data api + :param meta_file Used only for consistency with load_tts_samples api """ return _voxcel_x(root_path, meta_file, voxcel_idx="2") def voxceleb1(root_path, meta_file=None): """ - :param meta_file Used only for consistency with load_meta_data api + :param meta_file Used only for consistency with load_tts_samples api """ return _voxcel_x(root_path, meta_file, voxcel_idx="1") diff --git a/TTS/tts/utils/speakers.py b/TTS/tts/utils/speakers.py index 1b9ab96f..e58f0cfb 100644 --- a/TTS/tts/utils/speakers.py +++ b/TTS/tts/utils/speakers.py @@ -110,10 +110,10 @@ class SpeakerManager: @staticmethod def parse_speakers_from_data(items: list) -> Tuple[Dict, int]: - """Parse speaker IDs from data samples retured by `load_meta_data()`. + """Parse speaker IDs from data samples retured by `load_tts_samples()`. Args: - items (list): Data sampled returned by `load_meta_data()`. + items (list): Data sampled returned by `load_tts_samples()`. Returns: Tuple[Dict, int]: speaker IDs and number of speakers. @@ -127,7 +127,7 @@ class SpeakerManager: """Set speaker IDs from data samples. 
         Args:
-            items (List): Data sampled returned by `load_meta_data()`.
+            items (List): Data samples returned by `load_tts_samples()`.
         """
         self.speaker_ids, _ = self.parse_speakers_from_data(items)
 
diff --git a/TTS/vocoder/datasets/__init__.py b/TTS/vocoder/datasets/__init__.py
index 86b059c3..871eb0d2 100644
--- a/TTS/vocoder/datasets/__init__.py
+++ b/TTS/vocoder/datasets/__init__.py
@@ -5,6 +5,7 @@ from torch.utils.data import Dataset
 from TTS.utils.audio import AudioProcessor
 from TTS.vocoder.datasets.gan_dataset import GANDataset
+from TTS.vocoder.datasets.preprocess import load_wav_data, load_wav_feat_data
 from TTS.vocoder.datasets.wavegrad_dataset import WaveGradDataset
 from TTS.vocoder.datasets.wavernn_dataset import WaveRNNDataset

From 043dca61b4aa6edbb3aede7c0ddc5bf16be15d7a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 14:28:53 +0000
Subject: [PATCH 14/64] Rename `load_meta_data` as `load_tts_samples`

---
 TTS/bin/compute_embeddings.py                    |  4 ++--
 TTS/bin/compute_statistics.py                    |  4 ++--
 TTS/bin/extract_tts_spectrograms.py              |  5 ++---
 TTS/bin/find_unique_chars.py                     |  4 ++--
 TTS/tts/datasets/__init__.py                     | 11 +++++++----
 notebooks/dataset_analysis/PhonemeCoverage.ipynb |  4 ++--
 6 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/TTS/bin/compute_embeddings.py b/TTS/bin/compute_embeddings.py
index 8c4d275f..83a5aeae 100644
--- a/TTS/bin/compute_embeddings.py
+++ b/TTS/bin/compute_embeddings.py
@@ -5,7 +5,7 @@ from argparse import RawTextHelpFormatter
 from tqdm import tqdm
 
 from TTS.config import load_config
-from TTS.tts.datasets import load_meta_data
+from TTS.tts.datasets import load_tts_samples
 from TTS.tts.utils.speakers import SpeakerManager
 
 parser = argparse.ArgumentParser(
@@ -36,7 +36,7 @@
 args = parser.parse_args()
 
 c_dataset = load_config(args.config_dataset_path)
 
-meta_data_train, meta_data_eval = load_meta_data(c_dataset.datasets, eval_split=args.eval)
+meta_data_train, meta_data_eval = load_tts_samples(c_dataset.datasets, eval_split=args.eval)
 wav_files = meta_data_train + meta_data_eval
 
 speaker_manager = SpeakerManager(
diff --git a/TTS/bin/compute_statistics.py b/TTS/bin/compute_statistics.py
index 6179dafc..e1974ae7 100755
--- a/TTS/bin/compute_statistics.py
+++ b/TTS/bin/compute_statistics.py
@@ -10,7 +10,7 @@ from tqdm import tqdm
 
 # from TTS.utils.io import load_config
 from TTS.config import load_config
-from TTS.tts.datasets import load_meta_data
+from TTS.tts.datasets import load_tts_samples
 from TTS.utils.audio import AudioProcessor
 
 
@@ -41,7 +41,7 @@ def main():
     if args.data_path:
         dataset_items = glob.glob(os.path.join(args.data_path, "**", "*.wav"), recursive=True)
    else:
-        dataset_items = load_meta_data(CONFIG.datasets)[0]  # take only train data
+        dataset_items = load_tts_samples(CONFIG.datasets)[0]  # take only train data
     print(f" > There are {len(dataset_items)} files.")
 
     mel_sum = 0
diff --git a/TTS/bin/extract_tts_spectrograms.py b/TTS/bin/extract_tts_spectrograms.py
index 681fcc36..0af98ff1 100755
--- a/TTS/bin/extract_tts_spectrograms.py
+++ b/TTS/bin/extract_tts_spectrograms.py
@@ -10,8 +10,7 @@ from torch.utils.data import DataLoader
 from tqdm import tqdm
 
 from TTS.config import load_config
-from TTS.tts.datasets import load_meta_data
-from TTS.tts.datasets.TTSDataset import TTSDataset
+from TTS.tts.datasets import TTSDataset, load_tts_samples
 from TTS.tts.models import setup_model
 from TTS.tts.utils.speakers import get_speaker_manager
 from TTS.utils.audio import AudioProcessor
@@ -230,7 +229,7 @@ def main(args):  # pylint: disable=redefined-outer-name
     ap = AudioProcessor(**c.audio)
 
     # load data instances
-    meta_data_train, meta_data_eval = load_meta_data(c.datasets, eval_split=args.eval)
+    meta_data_train, meta_data_eval = load_tts_samples(c.datasets, eval_split=args.eval)
 
     # use eval and training partitions
     meta_data = meta_data_train + meta_data_eval
diff --git a/TTS/bin/find_unique_chars.py b/TTS/bin/find_unique_chars.py
index 16768e43..437c2d60 100644
--- a/TTS/bin/find_unique_chars.py
+++ b/TTS/bin/find_unique_chars.py
@@ -3,7 +3,7 @@ import argparse
 from argparse import RawTextHelpFormatter
 
 from TTS.config import load_config
-from TTS.tts.datasets import load_meta_data
+from TTS.tts.datasets import load_tts_samples
 
 
 def main():
@@ -23,7 +23,7 @@ def main():
     c = load_config(args.config_path)
 
     # load all datasets
-    train_items, eval_items = load_meta_data(c.datasets, eval_split=True)
+    train_items, eval_items = load_tts_samples(c.datasets, eval_split=True)
     items = train_items + eval_items
 
     texts = "".join(item[0] for item in items)
diff --git a/TTS/tts/datasets/__init__.py b/TTS/tts/datasets/__init__.py
index c2e55038..c163a11d 100644
--- a/TTS/tts/datasets/__init__.py
+++ b/TTS/tts/datasets/__init__.py
@@ -1,12 +1,12 @@
 import sys
 from collections import Counter
 from pathlib import Path
-from typing import Dict, List, Tuple
+from typing import Dict, List, Tuple, Union
 
 import numpy as np
 
+from TTS.tts.datasets.dataset import *
 from TTS.tts.datasets.formatters import *
-from TTS.tts.datasets.TTSDataset import TTSDataset
 
 
 def split_dataset(items):
@@ -31,11 +31,12 @@ def split_dataset(items):
     return items[:eval_split_size], items[eval_split_size:]
 
 
-def load_meta_data(datasets: List[Dict], eval_split=True) -> Tuple[List[List], List[List]]:
+def load_tts_samples(datasets: Union[List[Dict], Dict], eval_split=True) -> Tuple[List[List], List[List]]:
     """Parse the dataset, load the samples as a list and load the attention alignments if provided.
 
     Args:
-        datasets (List[Dict]): A list of dataset dictionaries or dataset configs.
+        datasets (List[Dict], Dict): A list of datasets or a single dataset dictionary. If multiple datasets are
+            in the list, they are all merged.
 
-        eval_split (bool, optional): If true, create a evaluation split. If an eval split provided explicitly,
-           generate an eval split automatically. Defaults to True.
+        eval_split (bool, optional): If true, create an evaluation split. If an eval split is not provided
+            explicitly, one is generated automatically. Defaults to True.
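+
+        Example:
+            Typical usage, as in the recipes below (assuming `dataset_config` is a
+            `BaseDatasetConfig`):
+
+            >>> train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)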
@@ -44,6 +45,8 @@ def load_meta_data(datasets: List[Dict], eval_split=True) -> Tuple[List[List], L """ meta_data_train_all = [] meta_data_eval_all = [] if eval_split else None + if not isinstance(datasets, list): + datasets = [datasets] for dataset in datasets: name = dataset["name"] root_path = dataset["path"] diff --git a/notebooks/dataset_analysis/PhonemeCoverage.ipynb b/notebooks/dataset_analysis/PhonemeCoverage.ipynb index e659511a..2b7f5d67 100644 --- a/notebooks/dataset_analysis/PhonemeCoverage.ipynb +++ b/notebooks/dataset_analysis/PhonemeCoverage.ipynb @@ -50,7 +50,7 @@ "source": [ "# import stuff\n", "from TTS.utils.io import load_config\n", - "from TTS.tts.datasets.formatters import load_meta_data\n", + "from TTS.tts.datasets.formatters import load_tts_samples\n", "from TTS.tts.utils.text import phoneme_to_sequence, sequence_to_phoneme\n", "from tqdm import tqdm\n", "from matplotlib import pylab as plt\n", @@ -75,7 +75,7 @@ "CONFIG = load_config(CONFIG_FILE)\n", "\n", "# Load some properties from config.json\n", - "CONFIG_METADATA = sorted(load_meta_data(CONFIG.datasets)[0])\n", + "CONFIG_METADATA = sorted(load_tts_samples(CONFIG.datasets)[0])\n", "CONFIG_METADATA = CONFIG_METADATA\n", "CONFIG_DATASET = CONFIG.datasets[0]\n", "CONFIG_PHONEME_LANGUAGE = CONFIG.phoneme_language\n", From 2e9b6b4f90996a9cc23f85387c56dcd5459f4360 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:29:43 +0000 Subject: [PATCH 15/64] Refactor Speaker Encoder training --- TTS/bin/train_encoder.py | 8 +-- TTS/speaker_encoder/utils/training.py | 95 +++++++++++++++++++++++++++ 2 files changed, 99 insertions(+), 4 deletions(-) create mode 100644 TTS/speaker_encoder/utils/training.py diff --git a/TTS/bin/train_encoder.py b/TTS/bin/train_encoder.py index 7ff35486..ad6d95f7 100644 --- a/TTS/bin/train_encoder.py +++ b/TTS/bin/train_encoder.py @@ -12,9 +12,9 @@ from torch.utils.data import DataLoader from TTS.speaker_encoder.dataset import SpeakerEncoderDataset from TTS.speaker_encoder.losses import AngleProtoLoss, GE2ELoss, SoftmaxAngleProtoLoss from TTS.speaker_encoder.utils.generic_utils import save_best_model, setup_model +from TTS.speaker_encoder.utils.training import init_training from TTS.speaker_encoder.utils.visual import plot_embeddings -from TTS.trainer import init_training -from TTS.tts.datasets import load_meta_data +from TTS.tts.datasets import load_tts_samples from TTS.utils.audio import AudioProcessor from TTS.utils.generic_utils import count_parameters, remove_experiment_folder, set_init_dict from TTS.utils.io import load_fsspec @@ -156,7 +156,7 @@ def main(args): # pylint: disable=redefined-outer-name optimizer = RAdam(model.parameters(), lr=c.lr) # pylint: disable=redefined-outer-name - meta_data_train, meta_data_eval = load_meta_data(c.datasets, eval_split=False) + meta_data_train, meta_data_eval = load_tts_samples(c.datasets, eval_split=False) data_loader, num_speakers = setup_loader(ap, is_val=False, verbose=True) @@ -208,7 +208,7 @@ def main(args): # pylint: disable=redefined-outer-name if __name__ == "__main__": - args, c, OUT_PATH, AUDIO_PATH, c_logger, dashboard_logger = init_training(sys.argv) + args, c, OUT_PATH, AUDIO_PATH, c_logger, dashboard_logger = init_training() try: main(args) diff --git a/TTS/speaker_encoder/utils/training.py b/TTS/speaker_encoder/utils/training.py new file mode 100644 index 00000000..0edbcaf4 --- /dev/null +++ b/TTS/speaker_encoder/utils/training.py @@ -0,0 +1,95 @@ +import os +from typing import List, Union + +from coqpit 
import Coqpit
+
+from TTS.config import load_config, register_config
+from TTS.trainer import TrainingArgs
+from TTS.tts.utils.text.symbols import parse_symbols
+from TTS.utils.generic_utils import get_experiment_folder_path, get_git_branch
+from TTS.utils.io import copy_model_files
+from TTS.utils.logging import init_dashboard_logger
+from TTS.utils.logging.console_logger import ConsoleLogger
+from TTS.utils.trainer_utils import get_last_checkpoint
+
+
+def getarguments():
+    train_config = TrainingArgs()
+    parser = train_config.init_argparse(arg_prefix="")
+    return parser
+
+
+def process_args(args, config=None):
+    """Process parsed command-line arguments and initialize the config if not provided.
+
+    Args:
+        args (argparse.Namespace or dict like): Parsed input arguments.
+        config (Coqpit): Model config. If none, it is generated from `args`. Defaults to None.
+
+    Returns:
+        c (TTS.utils.io.AttrDict): Config parameters.
+        out_path (str): Path to save models and logging.
+        audio_path (str): Path to save generated test audios.
+        c_logger (TTS.utils.console_logger.ConsoleLogger): Class that does
+            logging to the console.
+        dashboard_logger (WandbLogger or TensorboardLogger): Class that does the dashboard logging.
+
+    TODO:
+        - Interactive config definition.
+    """
+    if isinstance(args, tuple):
+        args, coqpit_overrides = args
+    if args.continue_path:
+        # continue a previous training from its output folder
+        experiment_path = args.continue_path
+        args.config_path = os.path.join(args.continue_path, "config.json")
+        args.restore_path, best_model = get_last_checkpoint(args.continue_path)
+        if not args.best_path:
+            args.best_path = best_model
+    # init config if not already defined
+    if config is None:
+        if args.config_path:
+            # init from a file
+            config = load_config(args.config_path)
+        else:
+            # init from console args
+            from TTS.config.shared_configs import BaseTrainingConfig  # pylint: disable=import-outside-toplevel
+
+            config_base = BaseTrainingConfig()
+            config_base.parse_known_args(coqpit_overrides)
+            config = register_config(config_base.model)()
+    # override values from command-line args
+    config.parse_known_args(coqpit_overrides, relaxed_parser=True)
+    experiment_path = args.continue_path
+    if not experiment_path:
+        experiment_path = get_experiment_folder_path(config.output_path, config.run_name)
+    audio_path = os.path.join(experiment_path, "test_audios")
+    config.output_log_path = experiment_path
+    # setup rank 0 process in distributed training
+    dashboard_logger = None
+    if args.rank == 0:
+        new_fields = {}
+        if args.restore_path:
+            new_fields["restore_path"] = args.restore_path
+        new_fields["github_branch"] = get_git_branch()
+        # if model characters are not set in the config file
+        # save the default set to the config file for future
+        # compatibility.
+        if config.has("characters") and config.characters is None:
+            used_characters = parse_symbols()
+            new_fields["characters"] = used_characters
+        copy_model_files(config, experiment_path, new_fields)
+        dashboard_logger = init_dashboard_logger(config)
+    c_logger = ConsoleLogger()
+    return config, experiment_path, audio_path, c_logger, dashboard_logger
+
+
+def init_arguments():
+    train_config = TrainingArgs()
+    parser = train_config.init_argparse(arg_prefix="")
+    return parser
+
+
+def init_training(config: Coqpit = None):
+    """Initialization of a training run."""
+    parser = init_arguments()
+    args = parser.parse_known_args()
+    config, OUT_PATH, AUDIO_PATH, c_logger, dashboard_logger = process_args(args, config)
+    return args[0], config, OUT_PATH, AUDIO_PATH, c_logger, dashboard_logger

From ba2b8c827f6c6705c59d3e45e2b430aea1f209b1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 14:30:49 +0000
Subject: [PATCH 16/64] Update `train_tts.py` and `train_vocoder.py`

---
 TTS/bin/train_tts.py     | 60 +++++++++++++++++++++++++++---
 TTS/bin/train_vocoder.py | 79 +++++++++++++++++++++++++++---------
 2 files changed, 116 insertions(+), 23 deletions(-)

diff --git a/TTS/bin/train_tts.py b/TTS/bin/train_tts.py
index 863bd3b9..cfd092f1 100644
--- a/TTS/bin/train_tts.py
+++ b/TTS/bin/train_tts.py
@@ -1,12 +1,62 @@
-import sys
+import os
 
-from TTS.trainer import Trainer, init_training
+from TTS.config import load_config, register_config
+from TTS.trainer import Trainer, TrainingArgs
+from TTS.tts.datasets import load_tts_samples
+from TTS.tts.models import setup_model
+from TTS.utils.audio import AudioProcessor
 
 
 def main():
-    """Run 🐸TTS trainer from terminal. This is also necessary to run DDP training by ```distribute.py```"""
-    args, config, output_path, _, c_logger, dashboard_logger = init_training(sys.argv)
-    trainer = Trainer(args, config, output_path, c_logger, dashboard_logger, cudnn_benchmark=False)
+    """Run `tts` model training directly by a `config.json` file."""
+    # init trainer args
+    train_args = TrainingArgs()
+    parser = train_args.init_argparse(arg_prefix="")
+
+    # override trainer args from command-line args
+    args, config_overrides = parser.parse_known_args()
+    train_args.parse_args(args)
+
+    # load config.json and register
+    if args.config_path or args.continue_path:
+        if args.config_path:
+            # init from a file
+            config = load_config(args.config_path)
+            if len(config_overrides) > 0:
+                config.parse_known_args(config_overrides, relaxed_parser=True)
+        elif args.continue_path:
+            # continue from a prev experiment
+            config = load_config(os.path.join(args.continue_path, "config.json"))
+            if len(config_overrides) > 0:
+                config.parse_known_args(config_overrides, relaxed_parser=True)
+    else:
+        # init from console args
+        from TTS.config.shared_configs import BaseTrainingConfig  # pylint: disable=import-outside-toplevel
+
+        config_base = BaseTrainingConfig()
+        config_base.parse_known_args(config_overrides)
+        config = register_config(config_base.model)()
+
+    # load training samples
+    train_samples, eval_samples = load_tts_samples(config.datasets, eval_split=True)
+
+    # setup audio processor
+    ap = AudioProcessor(**config.audio)
+
+    # init the model from config
+    model = setup_model(config)
+
+    # init the trainer and 🚀
+    trainer = Trainer(
+        train_args,
+        config,
+        config.output_path,
+        model=model,
+        train_samples=train_samples,
+        eval_samples=eval_samples,
+        training_assets={"audio_processor": ap},
+        parse_command_line_args=False,
+    )
     trainer.fit()
diff --git a/TTS/bin/train_vocoder.py b/TTS/bin/train_vocoder.py
index 000083e0..cd665f29 100644
--- a/TTS/bin/train_vocoder.py
+++ b/TTS/bin/train_vocoder.py
@@ -1,26 +1,69 @@
 import os
-import sys
-import traceback
 
-from TTS.trainer import Trainer, init_training
-from TTS.utils.generic_utils import remove_experiment_folder
+from TTS.config import load_config, register_config
+from TTS.trainer import Trainer, TrainingArgs
+from TTS.utils.audio import AudioProcessor
+from TTS.vocoder.datasets.preprocess import load_wav_data, load_wav_feat_data
+from TTS.vocoder.models import setup_model
 
 
 def main():
-    try:
-        args, config, output_path, _, c_logger, dashboard_logger = init_training(sys.argv)
-        trainer = Trainer(args, config, output_path, c_logger, dashboard_logger)
-        trainer.fit()
-    except KeyboardInterrupt:
-        remove_experiment_folder(output_path)
-        try:
-            sys.exit(0)
-        except SystemExit:
-            os._exit(0)  # pylint: disable=protected-access
-    except Exception:  # pylint: disable=broad-except
-        remove_experiment_folder(output_path)
-        traceback.print_exc()
-        sys.exit(1)
+    """Run `vocoder` model training directly by a `config.json` file."""
+    # init trainer args
+    train_args = TrainingArgs()
+    parser = train_args.init_argparse(arg_prefix="")
+
+    # override trainer args from command-line args
+    args, config_overrides = parser.parse_known_args()
+    train_args.parse_args(args)
+
+    # load config.json and register
+    if args.config_path or args.continue_path:
+        if args.config_path:
+            # init from a file
+            config = load_config(args.config_path)
+            if len(config_overrides) > 0:
+                config.parse_known_args(config_overrides, relaxed_parser=True)
+        elif args.continue_path:
+            # continue from a prev experiment
+            config = load_config(os.path.join(args.continue_path, "config.json"))
+            if len(config_overrides) > 0:
+                config.parse_known_args(config_overrides, relaxed_parser=True)
+    else:
+        # init from console args
+        from TTS.config.shared_configs import BaseTrainingConfig  # pylint: disable=import-outside-toplevel
+
+        config_base = BaseTrainingConfig()
+        config_base.parse_known_args(config_overrides)
+        config = register_config(config_base.model)()
+
+    # load training samples
+    if "feature_path" in config and config.feature_path:
+        # load pre-computed features
+        print(f" > Loading features from: {config.feature_path}")
+        eval_samples, train_samples = load_wav_feat_data(config.data_path, config.feature_path, config.eval_split_size)
+    else:
+        # load raw wav files
+        eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
+
+    # setup audio processor
+    ap = AudioProcessor(**config.audio)
+
+    # init the model from config
+    model = setup_model(config)
+
+    # init the trainer and 🚀
+    trainer = Trainer(
+        train_args,
+        config,
+        config.output_path,
+        model=model,
+        train_samples=train_samples,
+        eval_samples=eval_samples,
+        training_assets={"audio_processor": ap},
+        parse_command_line_args=False,
+    )
+    trainer.fit()
 
 
 if __name__ == "__main__":

From 9631aab0e7fd3544ede6ff581a460bf2e779fbe2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 14:31:25 +0000
Subject: [PATCH 17/64] Fix imports

---
 recipes/ljspeech/univnet/train.py           | 2 +-
 recipes/ljspeech/vits_tts/train_vits.py     | 3 +--
 recipes/ljspeech/wavegrad/train_wavegrad.py | 3 +--
 recipes/ljspeech/wavernn/train_wavernn.py   | 1 -
 4 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/recipes/ljspeech/univnet/train.py b/recipes/ljspeech/univnet/train.py
index a4ab93bf..589fd027 100644
--- a/recipes/ljspeech/univnet/train.py
+++ 
b/recipes/ljspeech/univnet/train.py @@ -32,7 +32,7 @@ config = UnivnetConfig( ap = AudioProcessor(**config.audio.to_dict()) # load training samples -eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) +eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size) # init model model = GAN(config) diff --git a/recipes/ljspeech/vits_tts/train_vits.py b/recipes/ljspeech/vits_tts/train_vits.py index 3a2b1ef1..8b5811f0 100644 --- a/recipes/ljspeech/vits_tts/train_vits.py +++ b/recipes/ljspeech/vits_tts/train_vits.py @@ -3,10 +3,9 @@ import os from TTS.config.shared_configs import BaseAudioConfig from TTS.trainer import Trainer, TrainingArgs from TTS.tts.configs import BaseDatasetConfig, VitsConfig +from TTS.tts.datasets import load_tts_samples from TTS.tts.models.vits import Vits from TTS.utils.audio import AudioProcessor -from TTS.tts.datasets import load_tts_samples - output_path = os.path.dirname(os.path.abspath(__file__)) dataset_config = BaseDatasetConfig( diff --git a/recipes/ljspeech/wavegrad/train_wavegrad.py b/recipes/ljspeech/wavegrad/train_wavegrad.py index aa873169..6786c052 100644 --- a/recipes/ljspeech/wavegrad/train_wavegrad.py +++ b/recipes/ljspeech/wavegrad/train_wavegrad.py @@ -3,9 +3,8 @@ import os from TTS.trainer import Trainer, TrainingArgs from TTS.utils.audio import AudioProcessor from TTS.vocoder.configs import WavegradConfig -from TTS.vocoder.models.wavegrad import Wavegrad from TTS.vocoder.datasets.preprocess import load_wav_data - +from TTS.vocoder.models.wavegrad import Wavegrad output_path = os.path.dirname(os.path.abspath(__file__)) config = WavegradConfig( diff --git a/recipes/ljspeech/wavernn/train_wavernn.py b/recipes/ljspeech/wavernn/train_wavernn.py index 9777a985..f64f5752 100644 --- a/recipes/ljspeech/wavernn/train_wavernn.py +++ b/recipes/ljspeech/wavernn/train_wavernn.py @@ -1,6 +1,5 @@ import os - from TTS.trainer import Trainer, TrainingArgs from TTS.utils.audio import AudioProcessor from TTS.vocoder.configs import WavernnConfig From 5fa78ee69f7294794952b4641d9ea8bfdecf73e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:31:46 +0000 Subject: [PATCH 18/64] Remove old Tacotron recipes --- recipes/ljspeech/tacotron2-DCA/run.sh | 22 ---- .../ljspeech/tacotron2-DCA/scale_stats.npy | Bin 10700 -> 0 bytes .../ljspeech/tacotron2-DCA/tacotron2-DCA.json | 85 ---------------- recipes/ljspeech/tacotron2-DDC/run.sh | 22 ---- .../ljspeech/tacotron2-DDC/scale_stats.npy | Bin 10700 -> 0 bytes .../ljspeech/tacotron2-DDC/tacotron2-DDC.json | 94 ------------------ 6 files changed, 223 deletions(-) delete mode 100644 recipes/ljspeech/tacotron2-DCA/run.sh delete mode 100644 recipes/ljspeech/tacotron2-DCA/scale_stats.npy delete mode 100644 recipes/ljspeech/tacotron2-DCA/tacotron2-DCA.json delete mode 100644 recipes/ljspeech/tacotron2-DDC/run.sh delete mode 100644 recipes/ljspeech/tacotron2-DDC/scale_stats.npy delete mode 100644 recipes/ljspeech/tacotron2-DDC/tacotron2-DDC.json diff --git a/recipes/ljspeech/tacotron2-DCA/run.sh b/recipes/ljspeech/tacotron2-DCA/run.sh deleted file mode 100644 index 8bcd9e3d..00000000 --- a/recipes/ljspeech/tacotron2-DCA/run.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# take the scripts's parent's directory to prefix all the output paths. 
-RUN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
-echo $RUN_DIR
-# # download LJSpeech dataset
-# wget http://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
-# # extract
-# tar -xjf LJSpeech-1.1.tar.bz2
-# # create train-val splits
-# shuf LJSpeech-1.1/metadata.csv > LJSpeech-1.1/metadata_shuf.csv
-# head -n 12000 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_train.csv
-# tail -n 1100 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_val.csv
-# mv LJSpeech-1.1 $RUN_DIR/
-# rm LJSpeech-1.1.tar.bz2
-# # compute dataset mean and variance for normalization
-# python TTS/bin/compute_statistics.py $RUN_DIR/tacotron2-DDC.json $RUN_DIR/scale_stats.npy --data_path $RUN_DIR/LJSpeech-1.1/wavs/
-# training ....
-# change the GPU id if needed
-CUDA_VISIBLE_DEVICES="0" python TTS/bin/train_tts.py --config_path $RUN_DIR/tacotron2-DCA.json \
-    --coqpit.output_path $RUN_DIR \
-    --coqpit.datasets.0.path /media/erogol/nvme_linux/gdrive/Projects/TTS/recipes/ljspeech/tacotron2-DDC/LJSpeech-1.1/ \
-    --coqpit.audio.stats_path $RUN_DIR/scale_stats.npy \
\ No newline at end of file

diff --git a/recipes/ljspeech/tacotron2-DCA/scale_stats.npy b/recipes/ljspeech/tacotron2-DCA/scale_stats.npy
deleted file mode 100644
index 1dc577a68253f87470e66444db4f19e583dc7adb..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 10700
[... base-85 encoded binary payload omitted ...]

diff --git a/recipes/ljspeech/tacotron2-DDC/run.sh b/recipes/ljspeech/tacotron2-DDC/run.sh
deleted file mode 100644
--- a/recipes/ljspeech/tacotron2-DDC/run.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-# take the scripts's parent's directory to prefix all the output paths.
-RUN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
-echo $RUN_DIR
-# download LJSpeech dataset
-wget http://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
-# extract
-tar -xjf LJSpeech-1.1.tar.bz2
-# create train-val splits
-shuf LJSpeech-1.1/metadata.csv > LJSpeech-1.1/metadata_shuf.csv
-head -n 12000 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_train.csv
-tail -n 1100 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_val.csv
-mv LJSpeech-1.1 $RUN_DIR/
-rm LJSpeech-1.1.tar.bz2
-# compute dataset mean and variance for normalization
-python TTS/bin/compute_statistics.py $RUN_DIR/tacotron2-DDC.json $RUN_DIR/scale_stats.npy --data_path $RUN_DIR/LJSpeech-1.1/wavs/
-# training ....
-# change the GPU id if needed
-CUDA_VISIBLE_DEVICES="0" python TTS/bin/train_tts.py --config_path $RUN_DIR/tacotron2-DDC.json \
-    --coqpit.output_path $RUN_DIR \
-    --coqpit.datasets.0.path $RUN_DIR/LJSpeech-1.1/ \
-    --coqpit.audio.stats_path $RUN_DIR/scale_stats.npy \
\ No newline at end of file

diff --git a/recipes/ljspeech/tacotron2-DDC/scale_stats.npy b/recipes/ljspeech/tacotron2-DDC/scale_stats.npy
deleted file mode 100644
index 1dc577a68253f87470e66444db4f19e583dc7adb..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 10700
[... base-85 encoded binary payload omitted (identical to the blob above) ...]

From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 14:33:18 +0000
Subject: [PATCH 19/64] Fix WaveRNN test

---
 tests/vocoder_tests/test_wavernn_train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/vocoder_tests/test_wavernn_train.py b/tests/vocoder_tests/test_wavernn_train.py
index 43fc5fb1..337e2425 100644
--- a/tests/vocoder_tests/test_wavernn_train.py
+++ b/tests/vocoder_tests/test_wavernn_train.py
@@ -11,7 +11,7 @@ output_path = os.path.join(get_tests_output_path(), "train_outputs")
 
 config = WavernnConfig(
-    model_params=WavernnArgs(),
+    model_args=WavernnArgs(),
     batch_size=8,
     eval_batch_size=8,
     num_loader_workers=0,

From f904dd4828f5dc0438104b9bf9ba0385afd099ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 14:34:22 +0000
Subject: =?UTF-8?q?Share=20some=20ASCII=20=E2=9D=A4=EF=B8=8F?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 setup.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/setup.py b/setup.py
index 53abe468..95f0841b 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,24 @@
 #!/usr/bin/env python
+#                   ,*++++++*,                ,*++++++*,
+#                *++.        .+++          *++.        .++*
+#              *+*     ,++++*   *+*      *+*   ,++++,     *+*
+#             ,+,   .++++++++++*  ,++,,,,*+,  ,++++++++++.   *+,
+#             *+.  .++++++++++++..++    *+.,++++++++++++.  .+*
+#             .+*   ++++++++++++.*+,    .+*.++++++++++++   *+,
+#             .++   *++++++++* ++,      .++.*++++++++*     ++,
+#              ,+++*.    . .*++,          ,++*.      .*+++*
+#             *+,   .,*++**.                  .**++**.   ,+*
+#            .+*                                          *+,
+#            *+.                                          .+*
+#            *+*        +++                       +++      *+*
+#           .+++*.     .                        .     *+++.
+#           ,+*   *+++*...                   ...*+++*   *+,
+#            .++.    .""""+++++++****+++++++"""".     ++.
+#              ,++.                                 .++,
+#                .++*                             *++.
+#                    *+++,                   ,+++*
+#                        .,*++++::::::++++*,.
+# `````` import os import subprocess From 6d3b2d3cdda0565931d34462e085dcce5ca4c9b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:34:53 +0000 Subject: [PATCH 21/64] Update docs --- docs/source/implementing_a_new_model.md | 157 +++++++++++++++- docs/source/training_a_model.md | 68 ++++--- docs/source/tutorial_for_nervous_beginners.md | 169 ++++++++++++------ 3 files changed, 310 insertions(+), 84 deletions(-) diff --git a/docs/source/implementing_a_new_model.md b/docs/source/implementing_a_new_model.md index c0043bf1..176c4865 100644 --- a/docs/source/implementing_a_new_model.md +++ b/docs/source/implementing_a_new_model.md @@ -10,7 +10,7 @@ We keep tests under `tests` folder. You can add `tts` layers tests under `tts_tests` folder. Basic tests are checking input-output tensor shapes and output values for a given input. Consider testing extreme cases that are more likely to cause problems like `zero` tensors. -3. Implement loss function. +3. Implement a loss function. We keep loss functions under `TTS/tts/layers/losses.py`. You can also mix-and-match implemented loss functions as you like. @@ -29,19 +29,20 @@ A model interacts with the `Trainer API` for training, `Synthesizer API` for inference and testing. - A 🐸TTS model must return a dictionary by the `forward()` and `inference()` functions. This dictionary must also include the `model_outputs` key that is considered as the main model output by the `Trainer` and `Synthesizer`. + A 🐸TTS model must return a dictionary by the `forward()` and `inference()` functions. This dictionary must `model_outputs` key that is considered as the main model output by the `Trainer` and `Synthesizer`. You can place your `tts` model implementation under `TTS/tts/models/new_model.py` then inherit and implement the `BaseTTS`. There is also the `callback` interface by which you can manipulate both the model and the `Trainer` states. Callbacks give you - the infinite flexibility to add custom behaviours for your model and training routines. + an infinite flexibility to add custom behaviours for your model and training routines. For more details, see {ref}`BaseTTS ` and :obj:`TTS.utils.callbacks`. 6. Optionally, define `MyModelArgs`. - `MyModelArgs` is a 👨‍✈️Coqpit class that sets all the class arguments of the `MyModel`. It should be enough to pass - an `MyModelArgs` instance to initiate the `MyModel`. + `MyModelArgs` is a 👨‍✈️Coqpit class that sets all the class arguments of the `MyModel`. `MyModelArgs` must have + all the fields neccessary to instantiate the `MyModel`. However, for training, you need to pass `MyModelConfig` to + the model. 7. Test `MyModel`. @@ -59,3 +60,149 @@ 9. Write Docstrings. We love you more when you document your code. ❤️ + + +# Template 🐸TTS Model implementation + +You can start implementing your model by copying the following base class. + +```python +from TTS.tts.models.base_tts import BaseTTS + + +class MyModel(BaseTTS): + """ + Notes on input/output tensor shapes: + Any input or output tensor of the model must be shaped as + + - 3D tensors `batch x time x channels` + - 2D tensors `batch x channels` + - 1D tensors `batch x 1` + """ + + def __init__(self, config: Coqpit): + super().__init__() + self._set_model_args(config) + + def _set_model_args(self, config: Coqpit): + """Set model arguments from the config. Override this.""" + pass + + def forward(self, input: torch.Tensor, *args, aux_input={}, **kwargs) -> Dict: + """Forward pass for the model mainly used in training. 

# Template 🐸TTS Model implementation

You can start implementing your model by copying the following base class.

```python
from typing import Dict, List, Tuple, Union

import torch
from coqpit import Coqpit
from torch import nn

from TTS.tts.models.base_tts import BaseTTS


class MyModel(BaseTTS):
    """
    Notes on input/output tensor shapes:
        Any input or output tensor of the model must be shaped as

        - 3D tensors `batch x time x channels`
        - 2D tensors `batch x channels`
        - 1D tensors `batch x 1`
    """

    def __init__(self, config: Coqpit):
        super().__init__()
        self._set_model_args(config)

    def _set_model_args(self, config: Coqpit):
        """Set model arguments from the config. Override this."""
        pass

    def forward(self, input: torch.Tensor, *args, aux_input={}, **kwargs) -> Dict:
        """Forward pass for the model mainly used in training.

        You can be flexible here and use a different number of arguments and argument names since it is intended to be
        used by `train_step()` without exposing it out of the model.

        Args:
            input (torch.Tensor): Input tensor.
            aux_input (Dict): Auxiliary model inputs like embeddings, durations or any other sort of inputs.

        Returns:
            Dict: Model outputs. Main model output must be named as "model_outputs".
        """
        outputs_dict = {"model_outputs": None}
        ...
        return outputs_dict

    def inference(self, input: torch.Tensor, aux_input={}) -> Dict:
        """Forward pass for inference.

        We don't use `**kwargs` since it is problematic with the TorchScript API.

        Args:
            input (torch.Tensor): Input tensor.
            aux_input (Dict): Auxiliary inputs like speaker embeddings, durations etc.

        Returns:
            Dict: Model outputs. Main model output must be named as "model_outputs".
        """
        outputs_dict = {"model_outputs": None}
        ...
        return outputs_dict

    def train_step(self, batch: Dict, criterion: nn.Module) -> Tuple[Dict, Dict]:
        """Perform a single training step. Run the model forward pass and compute losses.

        Args:
            batch (Dict): Input tensors.
            criterion (nn.Module): Loss layer designed for the model.

        Returns:
            Tuple[Dict, Dict]: Model outputs and computed losses.
        """
        outputs_dict = {}
        loss_dict = {}  # returned by the criterion
        ...
        return outputs_dict, loss_dict

    def train_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int) -> None:
        """Create visualizations and waveform examples for training.

        For example, here you can plot spectrograms and generate sample waveforms from these spectrograms to
        be projected onto Tensorboard.

        Args:
            batch (Dict): Model inputs used at the previous training step.
            outputs (Dict): Model outputs generated at the previous training step.
            logger (Logger): Logger instance used for projecting the figures and audios.
            assets (Dict): Training assets (e.g. the `AudioProcessor`) passed by the `Trainer`.
            steps (int): Number of training steps run so far.
        """
        pass

    def eval_step(self, batch: Dict, criterion: nn.Module) -> Tuple[Dict, Dict]:
        """Perform a single evaluation step. Run the model forward pass and compute losses. In most cases, you can
        call `train_step()` with no changes.

        Args:
            batch (Dict): Input tensors.
            criterion (nn.Module): Loss layer designed for the model.

        Returns:
            Tuple[Dict, Dict]: Model outputs and computed losses.
        """
        outputs_dict = {}
        loss_dict = {}  # returned by the criterion
        ...
        return outputs_dict, loss_dict

    def eval_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int) -> None:
        """The same as `train_log()`"""
        pass

    def load_checkpoint(self, config: Coqpit, checkpoint_path: str, eval: bool = False) -> None:
        """Load a checkpoint and get ready for training or inference.

        Args:
            config (Coqpit): Model configuration.
            checkpoint_path (str): Path to the model checkpoint file.
            eval (bool, optional): If true, init model for inference else for training. Defaults to False.
        """
        ...

    def get_optimizer(self) -> Union["Optimizer", List["Optimizer"]]:
        """Set up and return the optimizer or optimizers."""
        pass

    def get_lr(self) -> Union[float, List[float]]:
        """Return learning rate(s).

        Returns:
            Union[float, List[float]]: Model's initial learning rates.
        """
        pass

    def get_scheduler(self, optimizer: torch.optim.Optimizer):
        pass

    def get_criterion(self):
        pass

    def format_batch(self):
        pass

```
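For context, this is roughly how a finished implementation would be handed to the `Trainer`. It is a sketch, not the definitive wiring: `MyModelConfig`, `output_path`, `train_samples` and `eval_samples` are assumed to exist, mirroring the recipe scripts shown elsewhere in these docs:

```python
from TTS.trainer import Trainer, TrainingArgs

# assumed: `MyModelConfig` is the config coqpit that pairs with `MyModel`,
# and `train_samples` / `eval_samples` were loaded beforehand.
config = MyModelConfig()
model = MyModel(config)

trainer = Trainer(
    TrainingArgs(),
    config,
    output_path,
    model=model,
    train_samples=train_samples,
    eval_samples=eval_samples,
)
trainer.fit()
```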
+ """ + pass + + def get_scheduler(self, optimizer: torch.optim.Optimizer): + pass + + def get_criterion(self): + pass + + def format_batch(self): + pass + +``` + + diff --git a/docs/source/training_a_model.md b/docs/source/training_a_model.md index aadd741e..deb94e85 100644 --- a/docs/source/training_a_model.md +++ b/docs/source/training_a_model.md @@ -5,27 +5,30 @@ Each model has a different set of pros and cons that define the run-time efficiency and the voice quality. It is up to you to decide what model servers your needs. Other than referring to the papers, one easy way is to test the 🐸TTS community models and see how fast and good each of the models. Or you can start a discussion on our communication channels. -2. Understand the configuration class, its fields and values of your model. +2. Understand the configuration, its fields and values of your model. For instance, if you want to train a `Tacotron` model then see the `TacotronConfig` class and make sure you understand it. 3. Go to the recipes and check the recipe of your target model. - Recipes do not promise perfect models but they provide a good start point for `Nervous Beginners`. A recipe script training - a `GlowTTS` model on `LJSpeech` dataset looks like below. Let's be creative and call this script `train_glowtts.py`. + Recipes do not promise perfect models but they provide a good start point for `Nervous Beginners`. A recipe script for + `GlowTTS` using `LJSpeech` dataset looks like below. Let's be creative and call this `train_glowtts.py`. ```python # train_glowtts.py - import os - - from TTS.tts.configs import GlowTTSConfig - from TTS.tts.configs import BaseDatasetConfig - from TTS.trainer import init_training, Trainer, TrainingArgs + import os + from TTS.trainer import Trainer, TrainingArgs + from TTS.tts.configs import BaseDatasetConfig, GlowTTSConfig + from TTS.tts.datasets import load_tts_samples + from TTS.tts.models.glow_tts import GlowTTS + from TTS.utils.audio import AudioProcessor output_path = os.path.dirname(os.path.abspath(__file__)) - dataset_config = BaseDatasetConfig(name="ljspeech", meta_file_train="metadata.csv", path=os.path.join(output_path, "../LJSpeech-1.1/")) + dataset_config = BaseDatasetConfig( + name="ljspeech", meta_file_train="metadata.csv", path=os.path.join(output_path, "../LJSpeech-1.1/") + ) config = GlowTTSConfig( batch_size=32, eval_batch_size=16, @@ -34,33 +37,50 @@ run_eval=True, test_delay_epochs=-1, epochs=1000, - text_cleaner="english_cleaners", - use_phonemes=False, + text_cleaner="phoneme_cleaners", + use_phonemes=True, phoneme_language="en-us", phoneme_cache_path=os.path.join(output_path, "phoneme_cache"), print_step=25, - print_eval=True, - mixed_precision=False, + print_eval=False, + mixed_precision=True, output_path=output_path, - datasets=[dataset_config] + datasets=[dataset_config], + ) + + # init audio processor + ap = AudioProcessor(**config.audio.to_dict()) + + # load training samples + train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True) + + # init model + model = GlowTTS(config) + + # init the trainer and 🚀 + trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, ) - args, config, output_path, _, c_logger, tb_logger = init_training(TrainingArgs(), config) - trainer = Trainer(args, config, output_path, c_logger, tb_logger) trainer.fit() + ``` - You need to change fields of the `BaseDatasetConfig` to match your own 
4. Run the training.
 
-   You need to run the training script.
-
    ```bash
    $ CUDA_VISIBLE_DEVICES="0" python train_glowtts.py
    ```
 
-   Notice that you set the GPU you want to use on your system by setting `CUDA_VISIBLE_DEVICES` environment variable.
+   Notice that we set the GPU used for the training with the `CUDA_VISIBLE_DEVICES` environment variable.
    To see available GPUs on your system, you can use the `nvidia-smi` command on the terminal.
 
    If you like to run multi-gpu training using the DDP back-end,
@@ -71,7 +91,7 @@
 
    The example above runs a multi-gpu training using GPUs `0, 1, 2`.
 
-   The beginning of a training run looks like below.
+   The beginning of a training log looks like this:
 
    ```console
    > Experiment folder: /your/output_path/-Juni-23-2021_02+52-78899209
@@ -140,11 +160,11 @@
    $ tensorboard --logdir=
    ```
 
-6. Check the logs and the Tensorboard and monitor the training.
+6. Monitor the training process.
 
-   On the terminal and Tensorboard, you can monitor the losses and their changes over time. Also Tensorboard provides certain figures and sample outputs.
+   On the terminal and Tensorboard, you can monitor the progress of your model. Also Tensorboard provides certain figures and sample outputs.
 
-   Note that different models have different metrics, visuals and outputs to be displayed.
+   Note that different models have different metrics, visuals and outputs.
 
You should also check the [FAQ page](https://github.com/coqui-ai/TTS/wiki/FAQ) for common problems and solutions that occur during training.

diff --git a/docs/source/tutorial_for_nervous_beginners.md b/docs/source/tutorial_for_nervous_beginners.md
index a81e8fa7..dc5e9a6c 100644
--- a/docs/source/tutorial_for_nervous_beginners.md
+++ b/docs/source/tutorial_for_nervous_beginners.md
@@ -23,63 +23,104 @@ each line.
 
### Pure Python Way
 
-```python
-import os
+1. Define `train.py`.
 
-# GlowTTSConfig: all model related values for training, validating and testing.
-from TTS.tts.configs import GlowTTSConfig
+    ```python
+    import os
 
-# BaseDatasetConfig: defines name, formatter and path of the dataset.
-from TTS.tts.configs import BaseDatasetConfig
+    # GlowTTSConfig: all model related values for training, validating and testing.
+    from TTS.tts.configs import GlowTTSConfig
 
-# init_training: Initialize and setup the training environment.
-# Trainer: Where the ✨️ happens.
-# TrainingArgs: Defines the set of arguments of the Trainer.
-from TTS.trainer import init_training, Trainer, TrainingArgs
+    # BaseDatasetConfig: defines name, formatter and path of the dataset.
+    from TTS.tts.configs import BaseDatasetConfig
 
-# we use the same path as this script as our training folder.
-output_path = os.path.dirname(os.path.abspath(__file__))
+    # Trainer: Where the ✨️ happens.
+    # TrainingArgs: Defines the set of arguments of the Trainer.
+    from TTS.trainer import Trainer, TrainingArgs
+
+    # load_tts_samples: Loads the lists of training and evaluation samples.
+    from TTS.tts.datasets import load_tts_samples
+
+    # GlowTTS: The model we want to train.
+    from TTS.tts.models.glow_tts import GlowTTS
+
+    # AudioProcessor: Handles audio I/O and feature extraction.
+    from TTS.utils.audio import AudioProcessor
 
-# set LJSpeech as our target dataset and define its path so that the Trainer knows what data formatter it needs.
-dataset_config = BaseDatasetConfig(name="ljspeech", meta_file_train="metadata.csv", path=os.path.join(output_path, "../LJSpeech-1.1/"))
+    # we use the same path as this script as our training folder.
+    output_path = os.path.dirname(os.path.abspath(__file__))
 
+    # set LJSpeech as our target dataset and define its path so that the Trainer knows what data formatter it needs.
+    dataset_config = BaseDatasetConfig(name="ljspeech", meta_file_train="metadata.csv", path=os.path.join(output_path, "../LJSpeech-1.1/"))
 
-# Configure the model. Every config class inherits the BaseTTSConfig to have all the fields defined for the Trainer.
-config = GlowTTSConfig(
-    batch_size=32,
-    eval_batch_size=16,
-    num_loader_workers=4,
-    num_eval_loader_workers=4,
-    run_eval=True,
-    test_delay_epochs=-1,
-    epochs=1000,
-    text_cleaner="english_cleaners",
-    use_phonemes=False,
-    phoneme_language="en-us",
-    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
-    print_step=25,
-    print_eval=True,
-    mixed_precision=False,
-    output_path=output_path,
-    datasets=[dataset_config]
-)
+    # Configure the model. Every config class inherits the BaseTTSConfig to have all the fields defined for the Trainer.
+    config = GlowTTSConfig(
+        batch_size=32,
+        eval_batch_size=16,
+        num_loader_workers=4,
+        num_eval_loader_workers=4,
+        run_eval=True,
+        test_delay_epochs=-1,
+        epochs=1000,
+        text_cleaner="english_cleaners",
+        use_phonemes=False,
+        phoneme_language="en-us",
+        phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
+        print_step=25,
+        print_eval=True,
+        mixed_precision=False,
+        output_path=output_path,
+        datasets=[dataset_config]
+    )
+
+    # initialize the audio processor used for feature extraction and audio I/O.
+    # It is mainly used by the dataloader and the training loggers.
+    ap = AudioProcessor(**config.audio.to_dict())
 
-# Take the config and the default Trainer arguments, setup the training environment and override the existing
-# config values from the terminal. So you can do the following.
-# >>> python train.py --coqpit.batch_size 128
-args, config, output_path, _, _, _= init_training(TrainingArgs(), config)
+    # load a list of training samples
+    # Each sample is a list of ```[text, audio_file_path, speaker_name]```
+    train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)
 
-# Initiate the Trainer.
-# Trainer provides a generic API to train all the 🐸TTS models with all its perks like mixed-precision training,
-# distributed training etc.
-trainer = Trainer(args, config, output_path)
+    # initialize the model
+    # Models only take the config object as input.
+    model = GlowTTS(config)
 
-# And kick it 🚀
-trainer.fit()
+    # Initiate the Trainer.
+    # Trainer provides a generic API to train all the 🐸TTS models with all its perks like mixed-precision training,
+    # distributed training etc.
+    trainer = Trainer(
+        TrainingArgs(),
+        config,
+        output_path,
+        model=model,
+        train_samples=train_samples,
+        eval_samples=eval_samples,
+        training_assets={"audio_processor": ap},
+    )
+
+    # And kick it 🚀
+    trainer.fit()
+    ```
+
+2. Run the script.
+
+    ```bash
+    CUDA_VISIBLE_DEVICES=0 python train.py
+    ```
+
+    - Continue a previous run.
+
+      ```bash
+      CUDA_VISIBLE_DEVICES=0 python train.py --continue_path path/to/previous/run/folder/
+      ```
+
+    - Fine-tune a model.
+
+      ```bash
+      CUDA_VISIBLE_DEVICES=0 python train.py --restore_path path/to/model/checkpoint.pth.tar
+      ```
+
+    - Run multi-gpu training.
+
+      ```bash
+      CUDA_VISIBLE_DEVICES=0,1,2 python TTS/bin/distribute.py --script train.py
+      ```
 
### CLI Way
 
-We still support running training from CLI like in the old days. The same training can be started as follows.
+We still support running training from CLI like in the old days. The same training run can also be started as follows.
1. Define your `config.json`.
 
@@ -111,45 +152,63 @@ We still support running training from CLI like in the old days. The same traini
    $ CUDA_VISIBLE_DEVICES="0" python TTS/bin/train_tts.py --config_path config.json
    ```
 
-
-
## Training a `vocoder` Model
 
```python
import os
 
+from TTS.trainer import Trainer, TrainingArgs
+from TTS.utils.audio import AudioProcessor
 from TTS.vocoder.configs import HifiganConfig
-from TTS.trainer import init_training, Trainer, TrainingArgs
-
+from TTS.vocoder.datasets.preprocess import load_wav_data
+from TTS.vocoder.models.gan import GAN
 
 output_path = os.path.dirname(os.path.abspath(__file__))
+
 config = HifiganConfig(
     batch_size=32,
     eval_batch_size=16,
     num_loader_workers=4,
     num_eval_loader_workers=4,
     run_eval=True,
-    test_delay_epochs=-1,
+    test_delay_epochs=5,
     epochs=1000,
     seq_len=8192,
     pad_short=2000,
     use_noise_augment=True,
     eval_split_size=10,
     print_step=25,
-    print_eval=True,
+    print_eval=False,
     mixed_precision=False,
     lr_gen=1e-4,
     lr_disc=1e-4,
-    # `vocoder` only needs a data path and they read recursively all the `.wav` files underneath.
     data_path=os.path.join(output_path, "../LJSpeech-1.1/wavs/"),
     output_path=output_path,
 )
-args, config, output_path, _, c_logger, tb_logger = init_training(TrainingArgs(), config)
-trainer = Trainer(args, config, output_path, c_logger, tb_logger)
+
+# init audio processor
+ap = AudioProcessor(**config.audio.to_dict())
+
+# load training samples
+eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
+
+# init model
+model = GAN(config)
+
+# init the trainer and 🚀
+trainer = Trainer(
+    TrainingArgs(),
+    config,
+    output_path,
+    model=model,
+    train_samples=train_samples,
+    eval_samples=eval_samples,
+    training_assets={"audio_processor": ap},
+)
 trainer.fit()
```
 
-❗️ Note that you can also start the training run from CLI as the `tts` model above.
+❗️ Note that you can also use ```train_vocoder.py``` in the same way as the ```tts``` models above.
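+
+One detail that is easy to miss: `load_wav_data()` returns the evaluation split first, while `load_tts_samples()` returns the training split first. A quick sanity check — a sketch, assuming an LJSpeech-style `wavs/` folder:
+
+```python
+from TTS.vocoder.datasets.preprocess import load_wav_data
+
+# note the return order: (eval_samples, train_samples)
+eval_samples, train_samples = load_wav_data("/data/LJSpeech-1.1/wavs/", 10)
+print(f"{len(train_samples)} training files, {len(eval_samples)} eval files")
+```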
 
## Synthesizing Speech

From 55d9209221862d21c6b55869fe20522d9a103b6b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 14:58:26 +0000
Subject: [PATCH 22/64] Remove STT tokenizer

---
 TTS/trainer.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/TTS/trainer.py b/TTS/trainer.py
index d75b8e14..006a702b 100644
--- a/TTS/trainer.py
+++ b/TTS/trainer.py
@@ -19,7 +19,6 @@ from torch import nn
 from torch.nn.parallel import DistributedDataParallel as DDP_th
 from torch.utils.data import DataLoader
 
-from TTS.stt.datasets.tokenizer import Tokenizer
 from TTS.utils.callbacks import TrainerCallback
 from TTS.utils.distribute import init_distributed
 from TTS.utils.generic_utils import (
@@ -103,7 +102,6 @@ class Trainer:
         get_data_samples: Callable = None,
         train_samples: List = None,
         eval_samples: List = None,
-        tokenizer: Tokenizer = None,
         cudnn_benchmark: bool = False,
@@ -237,9 +235,6 @@ class Trainer:
         self.use_apex = self._is_apex_available()
         self.use_amp_scaler = self.config.mixed_precision and self.use_cuda
 
-        # init tokenizer
-        self.tokenizer = tokenizer
-
         # load data samples
         if train_samples is None and get_data_samples is None:
             raise ValueError("[!] `train_samples` and `get_data_samples` cannot both be None.")

From 7edbe04fe0235bf9822de4d24983d81fff516e83 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 16:20:12 +0000
Subject: [PATCH 23/64] Fix WaveRNN config and test

---
 TTS/vocoder/configs/shared_configs.py       | 8 ++++----
 tests/vocoder_tests/test_vocoder_wavernn.py | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/TTS/vocoder/configs/shared_configs.py b/TTS/vocoder/configs/shared_configs.py
index 6891ce6c..a2b7b866 100644
--- a/TTS/vocoder/configs/shared_configs.py
+++ b/TTS/vocoder/configs/shared_configs.py
@@ -17,11 +17,11 @@ class BaseVocoderConfig(BaseTrainingConfig):
             Number of instances used for evaluation. Defaults to 10.
         data_path (str):
             Root path of the training data. All the audio files found recursively from this root path are used for
-            training. Defaults to MISSING.
+            training. Defaults to `""`.
         feature_path (str):
             Root path to the precomputed feature files. Defaults to None.
         seq_len (int):
-            Length of the waveform segments used for training. Defaults to MISSING.
+            Length of the waveform segments used for training. Defaults to 1000.
         pad_short (int):
             Extra padding for the waveforms shorter than `seq_len`. Defaults to 0.
         conv_pad (int):
@@ -45,9 +45,9 @@ class BaseVocoderConfig(BaseTrainingConfig):
     use_noise_augment: bool = False  # enable/disable random noise augmentation in spectrograms.
     eval_split_size: int = 10  # number of samples used for evaluation.
     # dataset
-    data_path: str = MISSING  # root data path. It finds all wav files recursively from there.
+    data_path: str = ""  # root data path. It finds all wav files recursively from there.
     feature_path: str = None  # if you use precomputed features
-    seq_len: int = MISSING  # signal length used in training.
+    seq_len: int = 1000  # signal length used in training.
     pad_short: int = 0  # additional padding for short wavs
     conv_pad: int = 0  # additional padding against convolutions applied to spectrograms
     use_cache: bool = False  # use in memory cache to keep the computed features. This might cause OOM.

diff --git a/tests/vocoder_tests/test_vocoder_wavernn.py b/tests/vocoder_tests/test_vocoder_wavernn.py
index b5c769ee..d4a7b8dd 100644
--- a/tests/vocoder_tests/test_vocoder_wavernn.py
+++ b/tests/vocoder_tests/test_vocoder_wavernn.py
@@ -12,7 +12,7 @@ def test_wavernn():
     config.model_args = WavernnArgs(
         rnn_dims=512,
         fc_dims=512,
-        mode=10,
+        mode="mold",
         mulaw=False,
         pad=2,
         use_aux_net=True,
@@ -37,13 +37,13 @@ def test_wavernn():
     assert np.all(output.shape == (2, 1280, 30)), output.shape
 
     # mode: gauss
-    config.model_params.mode = "gauss"
+    config.model_args.mode = "gauss"
     model = Wavernn(config)
     output = model(dummy_x, dummy_m)
     assert np.all(output.shape == (2, 1280, 2)), output.shape
 
     # mode: quantized
-    config.model_params.mode = 4
+    config.model_args.mode = 4
     model = Wavernn(config)
     output = model(dummy_x, dummy_m)
     assert np.all(output.shape == (2, 1280, 2 ** 4)), output.shape

From 0b1986384fb953da76dabbf16b82c8575924e927 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 16:21:18 +0000
Subject: [PATCH 24/64] Make style

---
 TTS/model.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/TTS/model.py b/TTS/model.py
index 604a1ffa..532d05a6 100644
--- a/TTS/model.py
+++ b/TTS/model.py
@@ -6,7 +6,6 @@ import torch
 from coqpit import Coqpit
 from torch import nn
 
-
 # pylint: skip-file
 
@@ -80,7 +79,7 @@ class BaseModel(nn.Module, ABC):
         ...
         return outputs_dict, loss_dict
 
-    def train_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets:Dict, steps:int) -> None:
+    def train_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int) -> None:
         """Create visualizations and waveform examples for training.
 
         For example, here you can plot spectrograms and generate sample waveforms from these spectrograms to
@@ -113,7 +112,7 @@ class BaseModel(nn.Module, ABC):
         ...
         return outputs_dict, loss_dict
 
-    def eval_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets:Dict, steps:int) -> None:
+    def eval_log(self, batch: Dict, outputs: Dict, logger: "Logger", assets: Dict, steps: int) -> None:
         """The same as `train_log()`"""
         pass

From 37959ad0c7b678630f3c44a9ba10df40fd11757f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 30 Sep 2021 23:02:16 +0000
Subject: [PATCH 25/64] Make linter

---
 TTS/speaker_encoder/utils/training.py                 |  1 -
 TTS/trainer.py                                        | 11 ++++++-----
 TTS/tts/models/align_tts.py                           |  4 +---
 TTS/tts/models/base_tacotron.py                       |  4 +---
 TTS/tts/models/forward_tts.py                         |  1 -
 TTS/tts/models/glow_tts.py                            |  1 -
 TTS/tts/models/tacotron.py                            |  3 ---
 TTS/tts/models/tacotron2.py                           |  3 ---
 TTS/tts/models/vits.py                                |  3 +--
 TTS/vocoder/configs/shared_configs.py                 |  2 --
 TTS/vocoder/models/wavegrad.py                        |  1 -
 recipes/ljspeech/speedy_speech/train_speedy_speech.py |  1 -
 12 files changed, 9 insertions(+), 26 deletions(-)

diff --git a/TTS/speaker_encoder/utils/training.py b/TTS/speaker_encoder/utils/training.py
index 0edbcaf4..a32f43bd 100644
--- a/TTS/speaker_encoder/utils/training.py
+++ b/TTS/speaker_encoder/utils/training.py
@@ -1,5 +1,4 @@
 import os
-from typing import List, Union
 
 from coqpit import Coqpit

diff --git a/TTS/trainer.py b/TTS/trainer.py
index 006a702b..e515ad04 100644
--- a/TTS/trainer.py
+++ b/TTS/trainer.py
@@ -90,7 +90,7 @@ class Trainer:
-    def __init__(
+    def __init__(  # pylint: disable=dangerous-default-value
         self,
         args: Union[Coqpit, Namespace],
         config: Coqpit,
@@ -335,7 +335,9 @@ class Trainer:
             args.parse_args(training_args)
         return args, coqpit_overrides
 
-    def init_training(self, args: TrainingArgs, coqpit_overrides: Dict, config: Coqpit = None):
+    def init_training(
+        self, args: TrainingArgs, coqpit_overrides: Dict, config: Coqpit = None
+    ):  # pylint: disable=no-self-use
         """Initialize training and update model configs from command line arguments.
 
        Args:
@@ -387,14 +389,13 @@ class Trainer:
 
    @staticmethod
    def run_get_data_samples(config: Coqpit, get_data_samples: Callable) -> nn.Module:
-        if isinstance(get_data_samples, Callable):
+        if callable(get_data_samples):
            if len(signature(get_data_samples).parameters) == 1:
                train_samples, eval_samples = get_data_samples(config)
            else:
                train_samples, eval_samples = get_data_samples()
            return train_samples, eval_samples
-        else:
-            return None, None
+        return None, None
 
    def restore_model(
        self,

diff --git a/TTS/tts/models/align_tts.py b/TTS/tts/models/align_tts.py
index 3b0a848d..a634aa6e 100644
--- a/TTS/tts/models/align_tts.py
+++ b/TTS/tts/models/align_tts.py
@@ -1,5 +1,4 @@
 from dataclasses import dataclass, field
-from typing import Dict, Tuple
 
 import torch
 from coqpit import Coqpit
@@ -13,7 +12,6 @@
 from TTS.tts.layers.generic.pos_encoding import PositionalEncoding
 from TTS.tts.models.base_tts import BaseTTS
 from TTS.tts.utils.helpers import generate_path, maximum_path, sequence_mask
 from TTS.tts.utils.visual import plot_alignment, plot_spectrogram
-from TTS.utils.audio import AudioProcessor
 from TTS.utils.io import load_fsspec
 
@@ -360,7 +358,7 @@ class AlignTTS(BaseTTS):
 
         return outputs, loss_dict
 
-    def _create_logs(self, batch, outputs, ap):
+    def _create_logs(self, batch, outputs, ap):  # pylint: disable=no-self-use
         model_outputs = outputs["model_outputs"]
         alignments = outputs["alignments"]
         mel_input = batch["mel_input"]

diff --git a/TTS/tts/models/base_tacotron.py b/TTS/tts/models/base_tacotron.py
index 22dba586..b47a5751 100644
--- a/TTS/tts/models/base_tacotron.py
+++ b/TTS/tts/models/base_tacotron.py
@@ -1,17 +1,15 @@
 import copy
 from abc import abstractmethod
-from dataclasses import dataclass
 from typing import Dict, List
 
 import torch
-from coqpit import MISSING, Coqpit
+from coqpit import Coqpit
 from torch import nn
 
 from TTS.tts.layers.losses import TacotronLoss
 from TTS.tts.models.base_tts import BaseTTS
 from TTS.tts.utils.helpers import sequence_mask
 from TTS.tts.utils.speakers import SpeakerManager, get_speaker_manager
-from TTS.tts.utils.text import make_symbols
 from TTS.utils.generic_utils import format_aux_input
 from TTS.utils.io import load_fsspec
 from TTS.utils.training import gradual_training_scheduler

diff --git a/TTS/tts/models/forward_tts.py b/TTS/tts/models/forward_tts.py
index 6d0497a9..b83f12d4 100644
--- a/TTS/tts/models/forward_tts.py
+++ b/TTS/tts/models/forward_tts.py
@@ -14,7 +14,6 @@
 from TTS.tts.layers.glow_tts.duration_predictor import DurationPredictor
 from TTS.tts.models.base_tts import BaseTTS
 from TTS.tts.utils.helpers import average_over_durations, generate_path, maximum_path, sequence_mask
 from TTS.tts.utils.visual import plot_alignment, plot_pitch, plot_spectrogram
-from TTS.utils.audio import AudioProcessor
 
 
 @dataclass

diff --git a/TTS/tts/models/glow_tts.py b/TTS/tts/models/glow_tts.py
index bcc46cec..e5c62b0e 100644
--- a/TTS/tts/models/glow_tts.py
+++ b/TTS/tts/models/glow_tts.py
@@ -14,7 +14,6 @@
 from TTS.tts.utils.helpers import generate_path, maximum_path, sequence_mask
 from TTS.tts.utils.speakers import get_speaker_manager
 from TTS.tts.utils.synthesis import synthesis
 from TTS.tts.utils.visual import plot_alignment, plot_spectrogram
-from TTS.utils.audio import AudioProcessor
 from TTS.utils.io import load_fsspec

diff --git a/TTS/tts/models/tacotron.py b/TTS/tts/models/tacotron.py
index 3a7dd339..9d2fceeb 100644
--- a/TTS/tts/models/tacotron.py
+++ b/TTS/tts/models/tacotron.py
@@ -1,7 +1,5 @@
 # coding: utf-8
 
-from typing import Dict, Tuple
-
import torch from coqpit import Coqpit from torch import nn @@ -11,7 +9,6 @@ from TTS.tts.layers.tacotron.tacotron import Decoder, Encoder, PostCBHG from TTS.tts.models.base_tacotron import BaseTacotron from TTS.tts.utils.measures import alignment_diagonal_score from TTS.tts.utils.visual import plot_alignment, plot_spectrogram -from TTS.utils.audio import AudioProcessor class Tacotron(BaseTacotron): diff --git a/TTS/tts/models/tacotron2.py b/TTS/tts/models/tacotron2.py index 300cf903..6b695e2d 100644 --- a/TTS/tts/models/tacotron2.py +++ b/TTS/tts/models/tacotron2.py @@ -1,7 +1,5 @@ # coding: utf-8 -from typing import Dict, Tuple - import torch from coqpit import Coqpit from torch import nn @@ -11,7 +9,6 @@ from TTS.tts.layers.tacotron.tacotron2 import Decoder, Encoder, Postnet from TTS.tts.models.base_tacotron import BaseTacotron from TTS.tts.utils.measures import alignment_diagonal_score from TTS.tts.utils.visual import plot_alignment, plot_spectrogram -from TTS.utils.audio import AudioProcessor class Tacotron2(BaseTacotron): diff --git a/TTS/tts/models/vits.py b/TTS/tts/models/vits.py index 0ede3d13..8bb786a5 100644 --- a/TTS/tts/models/vits.py +++ b/TTS/tts/models/vits.py @@ -17,7 +17,6 @@ from TTS.tts.utils.helpers import generate_path, maximum_path, rand_segments, se from TTS.tts.utils.speakers import get_speaker_manager from TTS.tts.utils.synthesis import synthesis from TTS.tts.utils.visual import plot_alignment -from TTS.utils.audio import AudioProcessor from TTS.utils.trainer_utils import get_optimizer, get_scheduler from TTS.vocoder.models.hifigan_generator import HifiganGenerator from TTS.vocoder.utils.generic_utils import plot_results @@ -576,7 +575,7 @@ class Vits(BaseTTS): ) return outputs, loss_dict - def _log(self, ap, batch, outputs, name_prefix="train"): + def _log(self, ap, batch, outputs, name_prefix="train"): # pylint: disable=unused-argument,no-self-use y_hat = outputs[0]["model_outputs"] y = outputs[0]["waveform_seg"] figures = plot_results(y_hat, y, ap, name_prefix) diff --git a/TTS/vocoder/configs/shared_configs.py b/TTS/vocoder/configs/shared_configs.py index a2b7b866..c5d6a8b4 100644 --- a/TTS/vocoder/configs/shared_configs.py +++ b/TTS/vocoder/configs/shared_configs.py @@ -1,7 +1,5 @@ from dataclasses import dataclass, field -from coqpit import MISSING - from TTS.config import BaseAudioConfig, BaseTrainingConfig diff --git a/TTS/vocoder/models/wavegrad.py b/TTS/vocoder/models/wavegrad.py index 5755a9a7..ed4f4b37 100644 --- a/TTS/vocoder/models/wavegrad.py +++ b/TTS/vocoder/models/wavegrad.py @@ -9,7 +9,6 @@ from torch.nn.utils import weight_norm from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler -from TTS.utils.audio import AudioProcessor from TTS.utils.io import load_fsspec from TTS.utils.trainer_utils import get_optimizer, get_scheduler from TTS.vocoder.datasets import WaveGradDataset diff --git a/recipes/ljspeech/speedy_speech/train_speedy_speech.py b/recipes/ljspeech/speedy_speech/train_speedy_speech.py index 27639e6b..974823ac 100644 --- a/recipes/ljspeech/speedy_speech/train_speedy_speech.py +++ b/recipes/ljspeech/speedy_speech/train_speedy_speech.py @@ -6,7 +6,6 @@ from TTS.tts.configs import SpeedySpeechConfig from TTS.tts.datasets import load_tts_samples from TTS.tts.models.forward_tts import ForwardTTS from TTS.utils.audio import AudioProcessor -from TTS.utils.manage import ModelManager output_path = os.path.dirname(os.path.abspath(__file__)) dataset_config = BaseDatasetConfig( From 
4dbe7ed0de30146e6a10ab28ecda0b0fe48f6a4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Fri, 1 Oct 2021 09:20:07 +0000 Subject: [PATCH 26/64] Fix all-zero duration case for GlowTTS --- TTS/tts/models/glow_tts.py | 2 +- tests/tts_tests/test_glow_tts_train.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/TTS/tts/models/glow_tts.py b/TTS/tts/models/glow_tts.py index e5c62b0e..e3a5ff3c 100644 --- a/TTS/tts/models/glow_tts.py +++ b/TTS/tts/models/glow_tts.py @@ -310,7 +310,7 @@ class GlowTTS(BaseTTS): o_mean, o_log_scale, o_dur_log, x_mask = self.encoder(x, x_lengths, g=g) # compute output durations w = (torch.exp(o_dur_log) - 1) * x_mask * self.length_scale - w_ceil = torch.ceil(w) + w_ceil = torch.clamp_min(torch.ceil(w), 1) y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() y_max_length = None # compute masks diff --git a/tests/tts_tests/test_glow_tts_train.py b/tests/tts_tests/test_glow_tts_train.py index 24c5c4cf..7da4fd33 100644 --- a/tests/tts_tests/test_glow_tts_train.py +++ b/tests/tts_tests/test_glow_tts_train.py @@ -10,7 +10,7 @@ output_path = os.path.join(get_tests_output_path(), "train_outputs") config = GlowTTSConfig( - batch_size=8, + batch_size=2, eval_batch_size=8, num_loader_workers=0, num_eval_loader_workers=0, @@ -27,6 +27,7 @@ config = GlowTTSConfig( test_sentences=[ "Be a voice, not an echo.", ], + data_dep_init_steps=1.0, ) config.audio.do_trim_silence = True config.audio.trim_db = 60 From 21cc0517a3627a1aaaaf931ac3be4fbcbb2f9c26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Fri, 1 Oct 2021 10:21:37 +0000 Subject: [PATCH 27/64] Fix WaveRNN test --- TTS/vocoder/datasets/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/vocoder/datasets/preprocess.py b/TTS/vocoder/datasets/preprocess.py index 62bd4ba5..91dad459 100644 --- a/TTS/vocoder/datasets/preprocess.py +++ b/TTS/vocoder/datasets/preprocess.py @@ -30,7 +30,7 @@ def preprocess_wav_files(out_path: str, config: Coqpit, ap: AudioProcessor): np.save(mel_path, mel) if isinstance(config.mode, int): quant = ( - ap.mulaw_encode(y, qc=config.mode) if config.model_params.mulaw else ap.quantize(y, bits=config.mode) + ap.mulaw_encode(y, qc=config.mode) if config.model_args.mulaw else ap.quantize(y, bits=config.mode) ) np.save(quant_path, quant) From e15bc157d85d4744c1293898128e3dc3b4f0a729 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 14 Oct 2021 14:39:45 +0000 Subject: [PATCH 28/64] Fix #873 --- TTS/trainer.py | 10 +++------- TTS/vocoder/datasets/preprocess.py | 4 +--- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/TTS/trainer.py b/TTS/trainer.py index e515ad04..afe51e04 100644 --- a/TTS/trainer.py +++ b/TTS/trainer.py @@ -626,17 +626,13 @@ class Trainer: # https://nvidia.github.io/apex/advanced.html?highlight=accumulate#backward-passes-with-multiple-optimizers with amp.scale_loss(loss_dict["loss"], optimizer) as scaled_loss: scaled_loss.backward() - grad_norm = torch.nn.utils.clip_grad_norm_( - amp.master_params(optimizer), grad_clip, error_if_nonfinite=False - ) + grad_norm = torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), grad_clip) else: # model optimizer step in mixed precision mode scaler.scale(loss_dict["loss"]).backward() if grad_clip > 0: scaler.unscale_(optimizer) - grad_norm = torch.nn.utils.clip_grad_norm_( - self.master_params(optimizer), grad_clip, error_if_nonfinite=False - ) + grad_norm = torch.nn.utils.clip_grad_norm_(self.master_params(optimizer), 
grad_clip) # pytorch skips the step when the norm is 0. So ignore the norm value when it is NaN if torch.isnan(grad_norm) or torch.isinf(grad_norm): grad_norm = 0 @@ -648,7 +644,7 @@ class Trainer: # main model optimizer step loss_dict["loss"].backward() if grad_clip > 0: - grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip, error_if_nonfinite=False) + grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip) optimizer.step() step_time = time.time() - step_start_time diff --git a/TTS/vocoder/datasets/preprocess.py b/TTS/vocoder/datasets/preprocess.py index 91dad459..d8cc350a 100644 --- a/TTS/vocoder/datasets/preprocess.py +++ b/TTS/vocoder/datasets/preprocess.py @@ -29,9 +29,7 @@ def preprocess_wav_files(out_path: str, config: Coqpit, ap: AudioProcessor): mel = ap.melspectrogram(y) np.save(mel_path, mel) if isinstance(config.mode, int): - quant = ( - ap.mulaw_encode(y, qc=config.mode) if config.model_args.mulaw else ap.quantize(y, bits=config.mode) - ) + quant = ap.mulaw_encode(y, qc=config.mode) if config.model_args.mulaw else ap.quantize(y, bits=config.mode) np.save(quant_path, quant) From 0565457faa85649a6f848ec80523a498450b1958 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 14 Oct 2021 14:46:14 +0000 Subject: [PATCH 29/64] Fix #846 --- TTS/tts/models/vits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/tts/models/vits.py b/TTS/tts/models/vits.py index 8bb786a5..26d4e7fa 100644 --- a/TTS/tts/models/vits.py +++ b/TTS/tts/models/vits.py @@ -456,7 +456,7 @@ class Vits(BaseTTS): x, m_p, logs_p, x_mask = self.text_encoder(x, x_lengths) - if self.num_speakers > 0 and sid: + if self.num_speakers > 0 and sid is not None: g = self.emb_g(sid).unsqueeze(-1) if self.args.use_sdp: From 073a2d2eb018c7df71fbb8557f627c7da3e4c1b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Fri, 15 Oct 2021 10:20:00 +0000 Subject: [PATCH 30/64] Refactor VITS multi-speaker initialization --- TTS/trainer.py | 5 --- TTS/tts/configs/shared_configs.py | 4 -- TTS/tts/configs/vits_config.py | 33 ++++++++++++++ TTS/tts/models/vits.py | 72 +++++++++++++++++++++++-------- 4 files changed, 88 insertions(+), 26 deletions(-) diff --git a/TTS/trainer.py b/TTS/trainer.py index afe51e04..aa925972 100644 --- a/TTS/trainer.py +++ b/TTS/trainer.py @@ -252,11 +252,6 @@ class Trainer: else: self.run_get_model(self.config, get_model) - # TODO: out! 
- # init multispeaker settings of the model - if hasattr(self.model, "init_multispeaker"): - self.model.init_multispeaker(self.config, self.train_samples + self.eval_samples) - # setup criterion self.criterion = self.get_criterion(self.model) diff --git a/TTS/tts/configs/shared_configs.py b/TTS/tts/configs/shared_configs.py index e208c16c..60ef7276 100644 --- a/TTS/tts/configs/shared_configs.py +++ b/TTS/tts/configs/shared_configs.py @@ -218,7 +218,3 @@ class BaseTTSConfig(BaseTrainingConfig): lr_scheduler_params: dict = field(default_factory=lambda: {}) # testing test_sentences: List[str] = field(default_factory=lambda: []) - # multi-speaker - use_speaker_embedding: bool = False - use_d_vector_file: bool = False - d_vector_dim: int = 0 diff --git a/TTS/tts/configs/vits_config.py b/TTS/tts/configs/vits_config.py index 39479231..c9475a6a 100644 --- a/TTS/tts/configs/vits_config.py +++ b/TTS/tts/configs/vits_config.py @@ -139,3 +139,36 @@ class VitsConfig(BaseTTSConfig): "Prior to November 22, 1963.", ] ) + + # multi-speaker settings + # use speaker embedding layer + num_speakers: int = 0 + use_speaker_embedding: bool = False + speakers_file: str = None + speaker_embedding_channels: int = 256 + + # use d-vectors + use_d_vector_file: bool = False + d_vector_file: str = False + d_vector_dim: int = None + + def __post_init__(self): + # Pass multi-speaker parameters to the model args as `model.init_multispeaker()` looks for it there. + if self.num_speakers > 0: + self.model_args.num_speakers = self.num_speakers + + # speaker embedding settings + if self.use_speaker_embedding: + self.model_args.use_speaker_embedding = True + if self.speakers_file: + self.model_args.speakers_file = self.speakers_file + if self.speaker_embedding_channels: + self.model_args.speaker_embedding_channels = self.speaker_embedding_channels + + # d-vector settings + if self.use_d_vector_file: + self.model_args.use_d_vector_file = True + if self.d_vector_dim is not None and self.d_vector_dim > 0: + self.model_args.d_vector_dim = self.d_vector_dim + if self.d_vector_file: + self.model_args.d_vector_file = self.d_vector_file diff --git a/TTS/tts/models/vits.py b/TTS/tts/models/vits.py index 26d4e7fa..724ff342 100644 --- a/TTS/tts/models/vits.py +++ b/TTS/tts/models/vits.py @@ -1,4 +1,6 @@ import math +import os +import random from dataclasses import dataclass, field from itertools import chain from typing import Dict, List, Tuple @@ -14,7 +16,7 @@ from TTS.tts.layers.vits.networks import PosteriorEncoder, ResidualCouplingBlock from TTS.tts.layers.vits.stochastic_duration_predictor import StochasticDurationPredictor from TTS.tts.models.base_tts import BaseTTS from TTS.tts.utils.helpers import generate_path, maximum_path, rand_segments, segment, sequence_mask -from TTS.tts.utils.speakers import get_speaker_manager +from TTS.tts.utils.speakers import SpeakerManager, get_speaker_manager from TTS.tts.utils.synthesis import synthesis from TTS.tts.utils.visual import plot_alignment from TTS.utils.trainer_utils import get_optimizer, get_scheduler @@ -180,6 +182,7 @@ class VitsArgs(Coqpit): speakers_file: str = None speaker_embedding_channels: int = 256 use_d_vector_file: bool = False + d_vector_file: str = None d_vector_dim: int = 0 detach_dp_input: bool = True @@ -315,27 +318,50 @@ class Vits(BaseTTS): """Initialize multi-speaker modules of a model. A model can be trained either with a speaker embedding layer or with external `d_vectors` computed from a speaker encoder model. 
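The `__post_init__` hook added to `VitsConfig` above exists because `Vits.init_multispeaker()` reads these settings from `config.model_args` rather than from the top-level config. A usage sketch, assuming a 🐸TTS checkout with this patch applied:

```python
from TTS.tts.configs.vits_config import VitsConfig

# Top-level multi-speaker fields are mirrored into `model_args` at
# construction time, so the model finds them where it looks for them.
config = VitsConfig(num_speakers=10, use_speaker_embedding=True)
assert config.model_args.num_speakers == 10
assert config.model_args.use_speaker_embedding is True
```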
-        If you need a different behaviour, override this function for your model.
-
         Args:
             config (Coqpit): Model configuration.
             data (List, optional): Dataset items to infer number of speakers. Defaults to None.
         """
+        self.embedded_speaker_dim = 0
         if hasattr(config, "model_args"):
             config = config.model_args
-        self.embedded_speaker_dim = 0
-        # init speaker manager
-        self.speaker_manager = get_speaker_manager(config, data=data)
-        if config.num_speakers > 0 and self.speaker_manager.num_speakers == 0:
-            self.speaker_manager.num_speakers = config.num_speakers
-        self.num_speakers = self.speaker_manager.num_speakers
-        # init speaker embedding layer
-        if config.use_speaker_embedding and not config.use_d_vector_file:
-            self.embedded_speaker_dim = config.speaker_embedding_channels
-            self.emb_g = nn.Embedding(config.num_speakers, config.speaker_embedding_channels)
-        # init d-vector usage
+
+        self.num_speakers = config.num_speakers
+
+        if config.use_speaker_embedding:
+            self._init_speaker_embedding(config)
+
         if config.use_d_vector_file:
-            self.embedded_speaker_dim = config.d_vector_dim
+            self._init_d_vector(config)
+
+    def _init_speaker_embedding(self, config):
+        # pylint: disable=attribute-defined-outside-init
+        if config.speakers_file is not None:
+            self.speaker_manager = SpeakerManager(speaker_id_file_path=config.speakers_file)
+
+        if self.num_speakers > 0:
+            print(" > initialization of speaker-embedding layers.")
+            self.embedded_speaker_dim = config.speaker_embedding_channels
+            self.emb_g = nn.Embedding(self.num_speakers, self.embedded_speaker_dim)
+
+    def _init_d_vector(self, config):
+        # pylint: disable=attribute-defined-outside-init
+        if hasattr(self, "emb_g"):
+            raise ValueError("[!] Speaker embedding layer already initialized before d_vector settings.")
+        self.speaker_manager = SpeakerManager(d_vectors_file_path=config.d_vector_file)
+        self.embedded_speaker_dim = config.d_vector_dim
+
+    def on_init_start(self, trainer):
+        """Save the speakers.json at the beginning of the training. And update the config.json with the
+        speakers.json file path."""
+        if self.speaker_manager is not None:
+            output_path = os.path.join(trainer.output_path, "speakers.json")
+            self.speaker_manager.save_speaker_ids_to_file(output_path)
+            trainer.config.speakers_file = output_path
+            trainer.config.model_args.speakers_file = output_path
+            trainer.config.save_json(os.path.join(trainer.output_path, "config.json"))
+            print(f" > `speakers.json` is saved to {output_path}.")
+            print(f" > `speakers_file` is updated in the config.json.")
 
     @staticmethod
     def _set_cond_input(aux_input: Dict):
@@ -349,6 +375,10 @@ class Vits(BaseTTS):
             g = aux_input["d_vectors"]
         return sid, g
 
+    def get_aux_input(self, aux_input: Dict):
+        sid, g = self._set_cond_input(aux_input)
+        return {"speaker_id": sid, "style_wav": None, "d_vector": g}
+
     def forward(
         self,
         x: torch.tensor,
@@ -633,7 +663,15 @@ class Vits(BaseTTS):
         test_audios = {}
         test_figures = {}
         test_sentences = self.config.test_sentences
-        aux_inputs = self.get_aux_input()
+        aux_inputs = {
+            "speaker_id": None
+            if not self.config.use_speaker_embedding
+            else random.sample(sorted(self.speaker_manager.speaker_ids.values()), 1),
+            "d_vector": None
+            if not self.config.use_d_vector_file
+            else random.sample(sorted(self.speaker_manager.d_vectors.values()), 1),
+            "style_wav": None,
+        }
         for idx, sen in enumerate(test_sentences):
             wav, alignment, _, _ = synthesis(
                 self,
@@ -670,7 +708,7 @@ class Vits(BaseTTS):
         )
         # add the speaker embedding layer
         if hasattr(self, "emb_g"):
-            gen_parameters = chain(gen_parameters, self.emb_g)
+            gen_parameters = chain(gen_parameters, self.emb_g.parameters())
         optimizer0 = get_optimizer(
             self.config.optimizer, self.config.optimizer_params, self.config.lr_gen, parameters=gen_parameters
         )

From 700b056117e36d814e77aa0fcdeff9aab71b527e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Fri, 15 Oct 2021 10:21:12 +0000
Subject: [PATCH 31/64] Update Synthesizer multi-speaker handling

---
 TTS/utils/synthesizer.py | 78 +++++++++++++++++-----------------------
 1 file changed, 32 insertions(+), 46 deletions(-)

diff --git a/TTS/utils/synthesizer.py b/TTS/utils/synthesizer.py
index 236e78a9..9ecb5be9 100644
--- a/TTS/utils/synthesizer.py
+++ b/TTS/utils/synthesizer.py
@@ -87,52 +87,15 @@ class Synthesizer(object):
         """
         return pysbd.Segmenter(language=lang, clean=True)
 
-    def _load_speakers(self, speaker_file: str) -> None:
-        """Load the SpeakerManager to organize multi-speaker TTS. It loads the speakers meta-data and the speaker
-        encoder if it is defined.
-
-        Args:
-            speaker_file (str): path to the speakers meta-data file.
- """ - print("Loading speakers ...") - self.speaker_manager = SpeakerManager( - encoder_model_path=self.encoder_checkpoint, encoder_config_path=self.encoder_config - ) - self.speaker_manager.load_d_vectors_file(self.tts_config.get("d_vector_file", speaker_file)) - self.num_speakers = self.speaker_manager.num_speakers - self.d_vector_dim = self.speaker_manager.d_vector_dim - - def _set_tts_speaker_file(self): - """Set the TTS speaker file used by a multi-speaker model.""" - # setup if multi-speaker settings are in the global model config - if hasattr(self.tts_config, "use_speaker_embedding") and self.tts_config.use_speaker_embedding is True: - if self.tts_config.use_d_vector_file: - self.tts_speakers_file = ( - self.tts_speakers_file if self.tts_speakers_file else self.tts_config["d_vector_file"] - ) - self.tts_config["d_vector_file"] = self.tts_speakers_file - else: - self.tts_speakers_file = ( - self.tts_speakers_file if self.tts_speakers_file else self.tts_config["speakers_file"] - ) - - # setup if multi-speaker settings are in the model args config - if ( - self.tts_speakers_file is None - and hasattr(self.tts_config, "model_args") - and hasattr(self.tts_config.model_args, "use_speaker_embedding") - and self.tts_config.model_args.use_speaker_embedding - ): - _args = self.tts_config.model_args - if _args.use_d_vector_file: - self.tts_speakers_file = self.tts_speakers_file if self.tts_speakers_file else _args["d_vector_file"] - _args["d_vector_file"] = self.tts_speakers_file - else: - self.tts_speakers_file = self.tts_speakers_file if self.tts_speakers_file else _args["speakers_file"] - def _load_tts(self, tts_checkpoint: str, tts_config_path: str, use_cuda: bool) -> None: """Load the TTS model. + 1. Load the model config. + 2. Init the AudioProcessor. + 3. Init the model from the config. + 4. Move the model to the GPU if CUDA is enabled. + 5. Init the speaker manager for the model. + Args: tts_checkpoint (str): path to the model checkpoint. tts_config_path (str): path to the model config file. @@ -148,11 +111,34 @@ class Synthesizer(object): self.tts_model.load_checkpoint(self.tts_config, tts_checkpoint, eval=True) if use_cuda: self.tts_model.cuda() - self._set_tts_speaker_file() + speaker_manager = self._init_speaker_manager() + self.tts_model.speaker_manager = speaker_manager + + def _init_speaker_manager(self): + """Initialize the SpeakerManager""" + # setup if multi-speaker settings are in the global model config + speaker_manager = None + if hasattr(self.tts_config, "use_speaker_embedding") and self.tts_config.use_speaker_embedding is True: + if self.tts_speakers_file: + speaker_manager = SpeakerManager(speaker_id_file_path=self.tts_speakers_file) + if self.tts_config.get("speakers_file", None): + speaker_manager = SpeakerManager(speaker_id_file_path=self.tts_config.speakers_file) + + if hasattr(self.tts_config, "use_d_vector_file") and self.tts_config.use_speaker_embedding is True: + if self.tts_speakers_file: + speaker_manager = SpeakerManager(d_vectors_file_path=self.tts_speakers_file) + if self.tts_config.get("d_vector_file", None): + speaker_manager = SpeakerManager(d_vectors_file_path=self.tts_config.d_vector_file) + return speaker_manager def _load_vocoder(self, model_file: str, model_config: str, use_cuda: bool) -> None: """Load the vocoder model. + 1. Load the vocoder config. + 2. Init the AudioProcessor for the vocoder. + 3. Init the vocoder model from the config. + 4. Move the model to the GPU if CUDA is enabled. + Args: model_file (str): path to the model checkpoint. 
            model_config (str): path to the model config file.
@@ -207,7 +193,7 @@ class Synthesizer(object):
         # handle multi-speaker
         speaker_embedding = None
         speaker_id = None
-        if self.tts_speakers_file:
+        if self.tts_speakers_file or hasattr(self.tts_model.speaker_manager, "speaker_ids"):
             if speaker_idx and isinstance(speaker_idx, str):
                 if self.tts_config.use_d_vector_file:
                     # get the speaker embedding from the saved d_vectors.
@@ -226,7 +212,7 @@ class Synthesizer(object):
         else:
             if speaker_idx:
                 raise ValueError(
-                    f" [!] Missing speaker.json file path for selecting speaker {speaker_idx}."
+                    f" [!] Missing speakers.json file path for selecting speaker {speaker_idx}."
                     "Define path for speaker.json if it is a multi-speaker model or remove defined speaker idx. "
                 )

From 33b633515f33e5f1423c8eb7f9d7264e7832c6d1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Fri, 15 Oct 2021 10:21:29 +0000
Subject: [PATCH 32/64] Update recipes README.md

---
 recipes/README.md | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/recipes/README.md b/recipes/README.md
index 041693a2..cf3f3de9 100644
--- a/recipes/README.md
+++ b/recipes/README.md
@@ -1,13 +1,16 @@
 # 🐸💬 TTS Training Recipes
 
-TTS recipes intended to host bash scripts running all the necessary steps to train a TTS model with a particular dataset.
+TTS recipes are intended to host scripts running all the necessary steps to train a TTS model on a particular dataset.
+
+For each dataset, you need to download the dataset once. Then you run the training for the model you want.
 
-Run each script from the root TTS folder as follows
+Run each script from the root TTS folder as follows.
 
 ```console
-$ bash ./recipes/<dataset>/<model>/run.sh
+$ sh ./recipes/<dataset>/download_<dataset>.sh
+$ python recipes/<dataset>/<model>/train.py
 ```
 
-All the outputs are held under the recipe directory unless you change the paths in the bash script.
+If you train a new model using TTS, feel free to share your training to expand the list of recipes.
 
-If you train a new model using TTS, feel free to share your training to expand the list of recipes.
+You can also open a new discussion and share your progress with the 🐸 community.
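For concreteness, with the LJSpeech recipes shipped in the repository the two commands above would look roughly like this (recipe and script names may differ per checkout):

```console
$ sh ./recipes/ljspeech/download_ljspeech.sh
$ python recipes/ljspeech/glow_tts/train_glowtts.py
```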
\ No newline at end of file From fcbfc53cb707ff6f7cbebb0bb579e26032558080 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Fri, 15 Oct 2021 10:24:19 +0000 Subject: [PATCH 33/64] Fix linter --- TTS/tts/models/vits.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TTS/tts/models/vits.py b/TTS/tts/models/vits.py index 724ff342..c738f50f 100644 --- a/TTS/tts/models/vits.py +++ b/TTS/tts/models/vits.py @@ -16,7 +16,7 @@ from TTS.tts.layers.vits.networks import PosteriorEncoder, ResidualCouplingBlock from TTS.tts.layers.vits.stochastic_duration_predictor import StochasticDurationPredictor from TTS.tts.models.base_tts import BaseTTS from TTS.tts.utils.helpers import generate_path, maximum_path, rand_segments, segment, sequence_mask -from TTS.tts.utils.speakers import SpeakerManager, get_speaker_manager +from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.synthesis import synthesis from TTS.tts.utils.visual import plot_alignment from TTS.utils.trainer_utils import get_optimizer, get_scheduler @@ -361,7 +361,7 @@ class Vits(BaseTTS): trainer.config.model_args.speakers_file = output_path trainer.config.save_json(os.path.join(trainer.output_path, "config.json")) print(f" > `speakers.json` is saved to {output_path}.") - print(f" > `speakers_file` is updated in the config.json.") + print(" > `speakers_file` is updated in the config.json.") @staticmethod def _set_cond_input(aux_input: Dict): From b4b890df03b24f3bff2ce24f98b3a7fe22210cef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Mon, 18 Oct 2021 08:53:19 +0000 Subject: [PATCH 34/64] Update trainer's initialization --- TTS/trainer.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/TTS/trainer.py b/TTS/trainer.py index aa925972..7a38616d 100644 --- a/TTS/trainer.py +++ b/TTS/trainer.py @@ -182,16 +182,24 @@ class Trainer: - TPU training - NOTE: Consider moving `training_assets` to the model implementation. 
""" + if parse_command_line_args: - # parse command-line arguments for TrainingArgs() + # parse command-line arguments for TrainerArgs() args, coqpit_overrides = self.parse_argv(args) # get ready for training and parse command-line arguments for the model config config = self.init_training(args, coqpit_overrides, config) - # define the experiment path and create the folder - output_path = get_experiment_folder_path(config.output_path, config.run_name) - os.makedirs(output_path, exist_ok=True) + # set the output path + if args.continue_path: + # use the same path as the continuing run + output_path = args.continue_path + else: + # override the output path if it is provided + output_path = config.output_path if output_path is None else output_path + # create a new output folder name + output_path = get_experiment_folder_path(config.output_path, config.run_name) + os.makedirs(output_path, exist_ok=True) # copy training assets to the output folder copy_model_files(config, output_path, new_fields=None) From a0a5d580e97e852939dbe9e3113e4c5cd983d9cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Mon, 18 Oct 2021 08:54:02 +0000 Subject: [PATCH 35/64] Approximate audio length from file size --- TTS/tts/configs/tacotron_config.py | 2 +- TTS/tts/datasets/dataset.py | 2 +- TTS/tts/models/base_tacotron.py | 2 +- TTS/tts/models/base_tts.py | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/TTS/tts/configs/tacotron_config.py b/TTS/tts/configs/tacotron_config.py index 89fb8d81..2577fc51 100644 --- a/TTS/tts/configs/tacotron_config.py +++ b/TTS/tts/configs/tacotron_config.py @@ -106,7 +106,7 @@ class TacotronConfig(BaseTTSConfig): Weight decay coefficient. Defaults to `1e-6`. grad_clip (float): Gradient clipping threshold. Defaults to `5`. - seq_len_notm (bool): + seq_len_norm (bool): enable / disable the sequnce length normalization in the loss functions. If set True, loss of a sample is divided by the sequence length. Defaults to False. loss_masking (bool): diff --git a/TTS/tts/datasets/dataset.py b/TTS/tts/datasets/dataset.py index c81e0e6c..bfe0d778 100644 --- a/TTS/tts/datasets/dataset.py +++ b/TTS/tts/datasets/dataset.py @@ -330,7 +330,7 @@ class TTSDataset(Dataset): if by_audio_len: lengths = [] for item in self.items: - lengths.append(os.path.getsize(item[1])) + lengths.append(os.path.getsize(item[1]) / 16 * 8) # assuming 16bit audio lengths = np.array(lengths) else: lengths = np.array([len(ins[0]) for ins in self.items]) diff --git a/TTS/tts/models/base_tacotron.py b/TTS/tts/models/base_tacotron.py index b47a5751..c661c4cc 100644 --- a/TTS/tts/models/base_tacotron.py +++ b/TTS/tts/models/base_tacotron.py @@ -242,4 +242,4 @@ class BaseTacotron(BaseTTS): self.decoder.set_r(r) if trainer.config.bidirectional_decoder: trainer.model.decoder_backward.set_r(r) - print(f"\n > Number of output frames: {self.decoder.r}") + print(f"\n > Number of output frames: {self.decoder.r}") \ No newline at end of file diff --git a/TTS/tts/models/base_tts.py b/TTS/tts/models/base_tts.py index 0c9f60e8..9f4d70c8 100644 --- a/TTS/tts/models/base_tts.py +++ b/TTS/tts/models/base_tts.py @@ -20,9 +20,9 @@ from TTS.utils.audio import AudioProcessor class BaseTTS(BaseModel): - """Abstract `tts` class. Every new `tts` model must inherit this. + """Base `tts` class. Every new `tts` model must inherit this. - It defines `tts` specific functions on top of `Model`. + It defines common `tts` specific functions on top of `Model` implementation. 
Notes on input/output tensor shapes: Any input or output tensor of the model must be shaped as From 127571423c0efb8d273402810ddb7ee5154d6643 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Mon, 18 Oct 2021 08:54:41 +0000 Subject: [PATCH 36/64] Update multi-speaker init in BaseTTS --- TTS/tts/models/base_tts.py | 45 +++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/TTS/tts/models/base_tts.py b/TTS/tts/models/base_tts.py index 9f4d70c8..d4044c7e 100644 --- a/TTS/tts/models/base_tts.py +++ b/TTS/tts/models/base_tts.py @@ -72,35 +72,21 @@ class BaseTTS(BaseModel): def get_speaker_manager(config: Coqpit, restore_path: str, data: List, out_path: str = None) -> SpeakerManager: return get_speaker_manager(config, restore_path, data, out_path) - def init_multispeaker(self, config: Coqpit, data: List = None): - """Initialize a speaker embedding layer if needen and define expected embedding channel size for defining - `in_channels` size of the connected layers. - - This implementation yields 3 possible outcomes: - - 1. If `config.use_speaker_embedding` and `config.use_d_vector_file are False, do nothing. - 2. If `config.use_d_vector_file` is True, set expected embedding channel size to `config.d_vector_dim` or 512. - 3. If `config.use_speaker_embedding`, initialize a speaker embedding layer with channel size of - `config.d_vector_dim` or 512. - - You can override this function for new models.0 + def init_multispeaker(self, config: Coqpit): + """Init speaker embedding layer if `use_speaker_embedding` is True and set the expected speaker embedding + vector dimension in the network. If model uses d-vectors, then it only sets the expected dimension. Args: config (Coqpit): Model configuration. - data (List, optional): Dataset items to infer number of speakers. Defaults to None. """ # init speaker manager - self.speaker_manager = get_speaker_manager(config, data=data) + if self.speaker_manager is None: + raise ValueError(" > SpeakerManager is not provided. You must provide the SpeakerManager before initializing a multi-speaker model.") + + print(f" > Number of speakers : {len(self.speaker_manager.speaker_ids)}") # set number of speakers - if num_speakers is set in config, use it, otherwise use speaker_manager - if data is not None or self.speaker_manager.speaker_ids: - self.num_speakers = self.speaker_manager.num_speakers - else: - self.num_speakers = ( - config.num_speakers - if "num_speakers" in config and config.num_speakers != 0 - else self.speaker_manager.num_speakers - ) + self.num_speakers = self.speaker_manager.num_speakers # set ultimate speaker embedding size if config.use_speaker_embedding or config.use_d_vector_file: @@ -109,6 +95,7 @@ class BaseTTS(BaseModel): ) # init speaker embedding layer if config.use_speaker_embedding and not config.use_d_vector_file: + print(" > Init speaker_embedding layer.") self.speaker_embedding = nn.Embedding(self.num_speakers, self.embedded_speaker_dim) self.speaker_embedding.weight.data.normal_(0, 0.3) @@ -345,3 +332,17 @@ class BaseTTS(BaseModel): outputs_dict["outputs"]["alignments"], output_fig=False ) return test_figures, test_audios + + def on_init_start(self, trainer): + """Save the speaker.json at the beginning of the training. 
And update the config.json with the + speakers.json file path.""" + if self.speaker_manager is not None: + output_path = os.path.join(trainer.output_path, "speakers.json") + self.speaker_manager.save_speaker_ids_to_file(output_path) + trainer.config.speakers_file = output_path + # some models don't have `model_args` set + if hasattr(trainer.config, "model_args"): + trainer.config.model_args.speakers_file = output_path + trainer.config.save_json(os.path.join(trainer.output_path, "config.json")) + print(f" > `speakers.json` is saved to {output_path}.") + print(" > `speakers_file` is updated in the config.json.") From c514351c0ec1de75acb9d644a915f063ff7f62ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Mon, 18 Oct 2021 08:55:45 +0000 Subject: [PATCH 37/64] Refactor multi-speaker init in BaseTTS-Tacotron1-2 --- TTS/tts/models/tacotron.py | 57 +++++++++++++++++---------- TTS/tts/models/tacotron2.py | 77 +++++++++++++++++++++++++------------ TTS/tts/models/vits.py | 17 ++------ TTS/utils/audio.py | 2 + 4 files changed, 94 insertions(+), 59 deletions(-) diff --git a/TTS/tts/models/tacotron.py b/TTS/tts/models/tacotron.py index 9d2fceeb..a17e1b2b 100644 --- a/TTS/tts/models/tacotron.py +++ b/TTS/tts/models/tacotron.py @@ -3,11 +3,13 @@ import torch from coqpit import Coqpit from torch import nn +from torch.cuda.amp.autocast_mode import autocast from TTS.tts.layers.tacotron.gst_layers import GST from TTS.tts.layers.tacotron.tacotron import Decoder, Encoder, PostCBHG from TTS.tts.models.base_tacotron import BaseTacotron from TTS.tts.utils.measures import alignment_diagonal_score +from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.visual import plot_alignment, plot_spectrogram @@ -15,11 +17,17 @@ class Tacotron(BaseTacotron): """Tacotron as in https://arxiv.org/abs/1703.10135 It's an autoregressive encoder-attention-decoder-postnet architecture. Check `TacotronConfig` for the arguments. + + Args: + config (TacotronConfig): Configuration for the Tacotron model. + speaker_manager (SpeakerManager): Speaker manager to handle multi-speaker settings. Only use if the model is + a multi-speaker model. Defaults to None. 
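The `on_init_start` hook above makes every run folder self-describing: `speakers.json` is written next to the checkpoints and the saved `config.json` is pointed at it. A hedged sketch of the restore side (the file path is hypothetical):

```python
from TTS.tts.utils.speakers import SpeakerManager

# Restoring a run later: the saved mapping reproduces the exact
# speaker-name -> id assignment the model was trained with.
speaker_manager = SpeakerManager(speaker_id_file_path="run_folder/speakers.json")
print(speaker_manager.num_speakers)
```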
""" - def __init__(self, config: Coqpit): + def __init__(self, config: Coqpit, speaker_manager: SpeakerManager=None): super().__init__(config) + self.speaker_manager = speaker_manager chars, self.config, _ = self.get_characters(config) config.num_chars = self.num_chars = len(chars) @@ -240,21 +248,22 @@ class Tacotron(BaseTacotron): outputs = self.forward(text_input, text_lengths, mel_input, mel_lengths, aux_input) # compute loss - loss_dict = criterion( - outputs["model_outputs"], - outputs["decoder_outputs"], - mel_input, - linear_input, - outputs["stop_tokens"], - stop_targets, - stop_target_lengths, - mel_lengths, - outputs["decoder_outputs_backward"], - outputs["alignments"], - alignment_lengths, - outputs["alignments_backward"], - text_lengths, - ) + with autocast(enabled=False): # use float32 for the criterion + loss_dict = criterion( + outputs["model_outputs"].float(), + outputs["decoder_outputs"].float(), + mel_input.float(), + linear_input.float(), + outputs["stop_tokens"].float(), + stop_targets.float(), + stop_target_lengths, + mel_lengths, + outputs["decoder_outputs_backward"].float(), + outputs["alignments"].float(), + alignment_lengths, + outputs["alignments_backward"].float(), + text_lengths, + ) # compute alignment error (the lower the better ) align_error = 1 - alignment_diagonal_score(outputs["alignments"]) @@ -263,17 +272,23 @@ class Tacotron(BaseTacotron): def _create_logs(self, batch, outputs, ap): postnet_outputs = outputs["model_outputs"] + decoder_outputs = outputs["decoder_outputs"] alignments = outputs["alignments"] alignments_backward = outputs["alignments_backward"] mel_input = batch["mel_input"] + linear_input = batch["linear_input"] - pred_spec = postnet_outputs[0].data.cpu().numpy() - gt_spec = mel_input[0].data.cpu().numpy() + pred_linear_spec = postnet_outputs[0].data.cpu().numpy() + pred_mel_spec = decoder_outputs[0].data.cpu().numpy() + gt_linear_spec = linear_input[0].data.cpu().numpy() + gt_mel_spec = mel_input[0].data.cpu().numpy() align_img = alignments[0].data.cpu().numpy() figures = { - "prediction": plot_spectrogram(pred_spec, ap, output_fig=False), - "ground_truth": plot_spectrogram(gt_spec, ap, output_fig=False), + "pred_linear_spec": plot_spectrogram(pred_linear_spec, ap, output_fig=False), + "real_linear_spec": plot_spectrogram(gt_linear_spec, ap, output_fig=False), + "pred_mel_spec": plot_spectrogram(pred_mel_spec, ap, output_fig=False), + "real_mel_spec": plot_spectrogram(gt_mel_spec, ap, output_fig=False), "alignment": plot_alignment(align_img, output_fig=False), } @@ -281,7 +296,7 @@ class Tacotron(BaseTacotron): figures["alignment_backward"] = plot_alignment(alignments_backward[0].data.cpu().numpy(), output_fig=False) # Sample audio - audio = ap.inv_spectrogram(pred_spec.T) + audio = ap.inv_spectrogram(pred_linear_spec.T) return figures, {"audio": audio} def train_log( diff --git a/TTS/tts/models/tacotron2.py b/TTS/tts/models/tacotron2.py index 6b695e2d..e2ae8532 100644 --- a/TTS/tts/models/tacotron2.py +++ b/TTS/tts/models/tacotron2.py @@ -3,22 +3,45 @@ import torch from coqpit import Coqpit from torch import nn +from torch.cuda.amp.autocast_mode import autocast from TTS.tts.layers.tacotron.gst_layers import GST from TTS.tts.layers.tacotron.tacotron2 import Decoder, Encoder, Postnet from TTS.tts.models.base_tacotron import BaseTacotron from TTS.tts.utils.measures import alignment_diagonal_score +from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.visual import plot_alignment, plot_spectrogram class 
Tacotron2(BaseTacotron): - """Tacotron2 as in https://arxiv.org/abs/1712.05884 - Check `TacotronConfig` for the arguments. + """Tacotron2 model implementation inherited from :class:`TTS.tts.models.base_tacotron.BaseTacotron`. + + Paper:: + https://arxiv.org/abs/1712.05884 + + Paper abstract:: + This paper describes Tacotron 2, a neural network architecture for speech synthesis directly from text. + The system is composed of a recurrent sequence-to-sequence feature prediction network that maps character + embeddings to mel-scale spectrograms, followed by a modified WaveNet model acting as a vocoder to synthesize + timedomain waveforms from those spectrograms. Our model achieves a mean opinion score (MOS) of 4.53 comparable + to a MOS of 4.58 for professionally recorded speech. To validate our design choices, we present ablation + studies of key components of our system and evaluate the impact of using mel spectrograms as the input to + WaveNet instead of linguistic, duration, and F0 features. We further demonstrate that using a compact acoustic + intermediate representation enables significant simplification of the WaveNet architecture. + + Check :class:`TTS.tts.configs.tacotron2_config.Tacotron2Config` for model arguments. + + Args: + config (TacotronConfig): + Configuration for the Tacotron2 model. + speaker_manager (SpeakerManager): + Speaker manager for multi-speaker training. Uuse only for multi-speaker training. Defaults to None. """ - def __init__(self, config: Coqpit): + def __init__(self, config: Coqpit, speaker_manager: SpeakerManager=None): super().__init__(config) + self.speaker_manager = speaker_manager chars, self.config, _ = self.get_characters(config) config.num_chars = len(chars) self.decoder_output_dim = config.out_channels @@ -28,9 +51,7 @@ class Tacotron2(BaseTacotron): for key in config: setattr(self, key, config[key]) - # set speaker embedding channel size for determining `in_channels` for the connected layers. - # `init_multispeaker` needs to be called once more in training to initialize the speaker embedding layer based - # on the number of speakers infered from the dataset. + # init multi-speaker layers if self.use_speaker_embedding or self.use_d_vector_file: self.init_multispeaker(config) self.decoder_in_features += self.embedded_speaker_dim # add speaker embedding dim @@ -100,6 +121,7 @@ class Tacotron2(BaseTacotron): @staticmethod def shape_outputs(mel_outputs, mel_outputs_postnet, alignments): + """Final reshape of the model output tensors.""" mel_outputs = mel_outputs.transpose(1, 2) mel_outputs_postnet = mel_outputs_postnet.transpose(1, 2) return mel_outputs, mel_outputs_postnet, alignments @@ -107,7 +129,8 @@ class Tacotron2(BaseTacotron): def forward( # pylint: disable=dangerous-default-value self, text, text_lengths, mel_specs=None, mel_lengths=None, aux_input={"speaker_ids": None, "d_vectors": None} ): - """ + """Forward pass for training with Teacher Forcing. + Shapes: text: [B, T_in] text_lengths: [B] @@ -174,6 +197,12 @@ class Tacotron2(BaseTacotron): @torch.no_grad() def inference(self, text, aux_input=None): + """Forward pass for inference with no Teacher-Forcing. 
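The `with autocast(enabled=False):` block wrapping the criterion above follows the usual AMP recipe: run the network in mixed precision, but evaluate numerically sensitive losses in float32. A generic, runnable illustration of the pattern (not the Tacotron criterion itself):

```python
import torch
from torch.cuda.amp.autocast_mode import autocast

model = torch.nn.Linear(10, 10)
x, target = torch.randn(4, 10), torch.randn(4, 10)

with autocast(enabled=torch.cuda.is_available()):
    y = model(x)  # float16 on CUDA, float32 on CPU

with autocast(enabled=False):
    # leave the autocast region and upcast before computing the loss
    loss = torch.nn.functional.mse_loss(y.float(), target.float())
loss.backward()
```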
+ + Shapes: + text: :math:`[B, T_in]` + text_lengths: :math:`[B]` + """ aux_input = self._format_aux_input(aux_input) embedded_inputs = self.embedding(text).transpose(1, 2) encoder_outputs = self.encoder.inference(embedded_inputs) @@ -208,7 +237,7 @@ class Tacotron2(BaseTacotron): return outputs def train_step(self, batch, criterion): - """Perform a single training step by fetching the right set if samples from the batch. + """A single training step. Forward pass and loss computation. Args: batch ([type]): [description] @@ -218,7 +247,6 @@ class Tacotron2(BaseTacotron): text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] - linear_input = batch["linear_input"] stop_targets = batch["stop_targets"] stop_target_lengths = batch["stop_target_lengths"] speaker_ids = batch["speaker_ids"] @@ -245,21 +273,22 @@ class Tacotron2(BaseTacotron): outputs = self.forward(text_input, text_lengths, mel_input, mel_lengths, aux_input) # compute loss - loss_dict = criterion( - outputs["model_outputs"], - outputs["decoder_outputs"], - mel_input, - linear_input, - outputs["stop_tokens"], - stop_targets, - stop_target_lengths, - mel_lengths, - outputs["decoder_outputs_backward"], - outputs["alignments"], - alignment_lengths, - outputs["alignments_backward"], - text_lengths, - ) + with autocast(enabled=False): # use float32 for the criterion + loss_dict = criterion( + outputs["model_outputs"].float(), + outputs["decoder_outputs"].float(), + mel_input.float(), + None, + outputs["stop_tokens"].float(), + stop_targets.float(), + stop_target_lengths, + mel_lengths, + None if outputs["decoder_outputs_backward"] is None else outputs["decoder_outputs_backward"].float(), + outputs["alignments"].float(), + alignment_lengths, + None if outputs["alignments_backward"] is None else outputs["alignments_backward"].float(), + text_lengths, + ) # compute alignment error (the lower the better ) align_error = 1 - alignment_diagonal_score(outputs["alignments"]) diff --git a/TTS/tts/models/vits.py b/TTS/tts/models/vits.py index c738f50f..7561780f 100644 --- a/TTS/tts/models/vits.py +++ b/TTS/tts/models/vits.py @@ -217,12 +217,13 @@ class Vits(BaseTTS): # pylint: disable=dangerous-default-value - def __init__(self, config: Coqpit): + def __init__(self, config: Coqpit, speaker_manager: SpeakerManager=None): super().__init__(config) self.END2END = True + self.speaker_manager = speaker_manager if config.__class__.__name__ == "VitsConfig": # loading from VitsConfig if "num_chars" not in config: @@ -314,7 +315,7 @@ class Vits(BaseTTS): if args.init_discriminator: self.disc = VitsDiscriminator(use_spectral_norm=args.use_spectral_norm_disriminator) - def init_multispeaker(self, config: Coqpit, data: List = None): + def init_multispeaker(self, config: Coqpit): """Initialize multi-speaker modules of a model. A model can be trained either with a speaker embedding layer or with external `d_vectors` computed from a speaker encoder model. @@ -351,18 +352,6 @@ class Vits(BaseTTS): self.speaker_manager = SpeakerManager(d_vectors_file_path=config.d_vector_file) self.embedded_speaker_dim = config.d_vector_dim - def on_init_start(self, trainer): - """Save the speaker.json at the beginning of the training. 
And update the config.json with the - speakers.json file path.""" - if self.speaker_manager is not None: - output_path = os.path.join(trainer.output_path, "speakers.json") - self.speaker_manager.save_speaker_ids_to_file(output_path) - trainer.config.speakers_file = output_path - trainer.config.model_args.speakers_file = output_path - trainer.config.save_json(os.path.join(trainer.output_path, "config.json")) - print(f" > `speakers.json` is saved to {output_path}.") - print(" > `speakers_file` is updated in the config.json.") - @staticmethod def _set_cond_input(aux_input: Dict): """Set the speaker conditioning input based on the multi-speaker mode.""" diff --git a/TTS/utils/audio.py b/TTS/utils/audio.py index f5fb1d7f..19a16e5e 100644 --- a/TTS/utils/audio.py +++ b/TTS/utils/audio.py @@ -108,6 +108,8 @@ class TorchSTFT(nn.Module): # pylint: disable=abstract-method class AudioProcessor(object): """Audio Processor for TTS used by all the data pipelines. + TODO: Make this a dataclass to replace `BaseAudioConfig`. + Note: All the class arguments are set to default values to enable a flexible initialization of the class with the model config. They are not meaningful for all the arguments. From 3c7848e9b1525fc87c1201c2a6297fcfa9d9db2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Tue, 19 Oct 2021 16:32:16 +0000 Subject: [PATCH 38/64] Don't OOR values in train console log --- TTS/utils/logging/console_logger.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/TTS/utils/logging/console_logger.py b/TTS/utils/logging/console_logger.py index 0c1aa862..74371342 100644 --- a/TTS/utils/logging/console_logger.py +++ b/TTS/utils/logging/console_logger.py @@ -47,11 +47,19 @@ class ConsoleLogger: tcolors.BOLD, step, batch_steps, global_step, tcolors.ENDC ) for key, value in loss_dict.items(): - # print the avg value if given if f"avg_{key}" in avg_loss_dict.keys(): - log_text += "{}{}: {:.5f} ({:.5f})\n".format(indent, key, value, avg_loss_dict[f"avg_{key}"]) + # print the avg value if given + if isinstance(value, float) and round(value, 5) == 0: + # do not round the number if it is zero when rounded + log_text += "{}{}: {} ({})\n".format(indent, key, value, avg_loss_dict[f"avg_{key}"]) + else: + # print the rounded value + log_text += "{}{}: {:.5f} ({:.5f})\n".format(indent, key, value, avg_loss_dict[f"avg_{key}"]) else: - log_text += "{}{}: {:.5f} \n".format(indent, key, value) + if isinstance(value, float) and round(value, 5) == 0: + log_text += "{}{}: {} \n".format(indent, key, value) + else: + log_text += "{}{}: {:.5f} \n".format(indent, key, value) print(log_text, flush=True) # pylint: disable=unused-argument From 588da1a24e3f828496bcc78510df5b3128f5fcc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Tue, 19 Oct 2021 16:33:04 +0000 Subject: [PATCH 39/64] Simplify grad_norm handling in trainer --- TTS/trainer.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/TTS/trainer.py b/TTS/trainer.py index 7a38616d..40d1ab6f 100644 --- a/TTS/trainer.py +++ b/TTS/trainer.py @@ -362,7 +362,7 @@ class Trainer: # override config values from command-line args # TODO: Maybe it is better to do it outside if len(coqpit_overrides) > 0: - config.parse_known_args(coqpit_overrides, relaxed_parser=True) + config.parse_known_args(coqpit_overrides, arg_prefix="coqpit", relaxed_parser=True) experiment_path = args.continue_path # update the config.json fields and copy it to the output folder @@ -618,10 +618,8 @@ class 
Trainer: else: grad_clip = 0.0 # meaning no gradient clipping - if grad_clip <= 0: - grad_norm = 0 - # optimizer step + grad_norm = 0 update_lr_scheduler = True if self.use_amp_scaler: if self.use_apex: @@ -636,13 +634,11 @@ class Trainer: if grad_clip > 0: scaler.unscale_(optimizer) grad_norm = torch.nn.utils.clip_grad_norm_(self.master_params(optimizer), grad_clip) - # pytorch skips the step when the norm is 0. So ignore the norm value when it is NaN - if torch.isnan(grad_norm) or torch.isinf(grad_norm): - grad_norm = 0 scale_prev = scaler.get_scale() scaler.step(optimizer) scaler.update() update_lr_scheduler = scale_prev <= scaler.get_scale() + loss_dict["amp_scaler"] = scaler.get_scale() # for logging else: # main model optimizer step loss_dict["loss"].backward() @@ -650,6 +646,10 @@ class Trainer: grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip) optimizer.step() + # pytorch skips the step when the norm is 0. So ignore the norm value when it is NaN + if isinstance(grad_norm ,torch.Tensor) and (torch.isnan(grad_norm) or torch.isinf(grad_norm)): + grad_norm = 0 + step_time = time.time() - step_start_time # setup lr @@ -1147,7 +1147,7 @@ class Trainer: if isinstance(value, (int, float)): loss_dict_detached[key] = value else: - loss_dict_detached[key] = value.detach() + loss_dict_detached[key] = value.detach().item() return loss_dict_detached def _pick_target_avg_loss(self, keep_avg_target: KeepAverage) -> Dict: From 0a3d1cc7ee03a85dd64c9ccc4774a170c4ea75bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 20 Oct 2021 18:11:36 +0000 Subject: [PATCH 40/64] Pass speaker manager to the model in synthesizer --- TTS/utils/synthesizer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/TTS/utils/synthesizer.py b/TTS/utils/synthesizer.py index 9ecb5be9..6d394378 100644 --- a/TTS/utils/synthesizer.py +++ b/TTS/utils/synthesizer.py @@ -107,12 +107,12 @@ class Synthesizer(object): self.use_phonemes = self.tts_config.use_phonemes self.ap = AudioProcessor(verbose=False, **self.tts_config.audio) - self.tts_model = setup_tts_model(config=self.tts_config) + speaker_manager = self._init_speaker_manager() + + self.tts_model = setup_tts_model(config=self.tts_config, speaker_manager=speaker_manager) self.tts_model.load_checkpoint(self.tts_config, tts_checkpoint, eval=True) if use_cuda: self.tts_model.cuda() - speaker_manager = self._init_speaker_manager() - self.tts_model.speaker_manager = speaker_manager def _init_speaker_manager(self): """Initialize the SpeakerManager""" From 92b6d98443212b31ce0798af517e0168cddaab57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 20 Oct 2021 18:12:38 +0000 Subject: [PATCH 41/64] Set pitch frame alignment wrt spec computation --- TTS/utils/audio.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/TTS/utils/audio.py b/TTS/utils/audio.py index 19a16e5e..dd9c5701 100644 --- a/TTS/utils/audio.py +++ b/TTS/utils/audio.py @@ -645,6 +645,10 @@ class AudioProcessor(object): >>> wav = ap.load_wav(WAV_FILE, sr=22050)[:5 * 22050] >>> pitch = ap.compute_f0(wav) """ + # align F0 length to the spectrogram length + if len(x) % self.hop_length == 0: + x = np.pad(x, (0, self.hop_length // 2), mode="reflect") + f0, t = pw.dio( x.astype(np.double), fs=self.sample_rate, @@ -747,6 +751,14 @@ class AudioProcessor(object): wav_norm = wav * (32767 / max(0.01, np.max(np.abs(wav)))) scipy.io.wavfile.write(path, sr if sr else self.sample_rate, wav_norm.astype(np.int16)) + def 
get_duration(self, filename: str) -> float:
+        """Get the duration of a wav file using Librosa.
+
+        Args:
+            filename (str): Path to the wav file.
+        """
+        return librosa.get_duration(filename=filename)
+
     @staticmethod
     def mulaw_encode(wav: np.ndarray, qc: int) -> np.ndarray:
         mu = 2 ** qc - 1

From 3da79a4de410789922911f6d949ff617941c9767 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Wed, 20 Oct 2021 18:14:04 +0000
Subject: [PATCH 42/64] Comment Tacotron2 model

---
 TTS/tts/models/tacotron.py  |  2 +-
 TTS/tts/models/tacotron2.py | 27 +++++++++++++++------------
 TTS/tts/models/vits.py      |  1 -
 TTS/tts/utils/ssim.py       |  6 ++++--
 4 files changed, 20 insertions(+), 16 deletions(-)

diff --git a/TTS/tts/models/tacotron.py b/TTS/tts/models/tacotron.py
index a17e1b2b..9ed5dc91 100644
--- a/TTS/tts/models/tacotron.py
+++ b/TTS/tts/models/tacotron.py
@@ -24,7 +24,7 @@ class Tacotron(BaseTacotron):
         a multi-speaker model. Defaults to None.
     """
 
-    def __init__(self, config: Coqpit, speaker_manager: SpeakerManager=None):
+    def __init__(self, config: Coqpit, speaker_manager: SpeakerManager = None):
         super().__init__(config)
 
         self.speaker_manager = speaker_manager

diff --git a/TTS/tts/models/tacotron2.py b/TTS/tts/models/tacotron2.py
index e2ae8532..4307c90e 100644
--- a/TTS/tts/models/tacotron2.py
+++ b/TTS/tts/models/tacotron2.py
@@ -1,5 +1,6 @@
 # coding: utf-8
 
+from typing import Dict
 import torch
 from coqpit import Coqpit
 from torch import nn
@@ -38,7 +39,7 @@ class Tacotron2(BaseTacotron):
             Speaker manager for multi-speaker training. Uuse only for multi-speaker training. Defaults to None.
     """
 
-    def __init__(self, config: Coqpit, speaker_manager: SpeakerManager=None):
+    def __init__(self, config: Coqpit, speaker_manager: SpeakerManager = None):
         super().__init__(config)
 
         self.speaker_manager = speaker_manager
@@ -132,11 +133,11 @@ class Tacotron2(BaseTacotron):
         """Forward pass for training with Teacher Forcing.
 
         Shapes:
-            text: [B, T_in]
-            text_lengths: [B]
-            mel_specs: [B, T_out, C]
-            mel_lengths: [B]
-            aux_input: 'speaker_ids': [B, 1] and 'd_vectors':[B, C]
+            text: :math:`[B, T_in]`
+            text_lengths: :math:`[B]`
+            mel_specs: :math:`[B, T_out, C]`
+            mel_lengths: :math:`[B]`
+            aux_input: 'speaker_ids': :math:`[B, 1]` and 'd_vectors': :math:`[B, C]`
         """
         aux_input = self._format_aux_input(aux_input)
         outputs = {"alignments_backward": None, "decoder_outputs_backward": None}
@@ -199,9 +200,9 @@ class Tacotron2(BaseTacotron):
     def inference(self, text, aux_input=None):
         """Forward pass for inference with no Teacher-Forcing.
 
-       Shapes:
-           text: :math:`[B, T_in]`
-           text_lengths: :math:`[B]`
+        Shapes:
+            text: :math:`[B, T_in]`
+            text_lengths: :math:`[B]`
         """
         aux_input = self._format_aux_input(aux_input)
         embedded_inputs = self.embedding(text).transpose(1, 2)
         encoder_outputs = self.encoder.inference(embedded_inputs)
@@ -236,12 +237,12 @@ class Tacotron2(BaseTacotron):
         }
         return outputs
 
-    def train_step(self, batch, criterion):
+    def train_step(self, batch: Dict, criterion: torch.nn.Module):
         """A single training step. Forward pass and loss computation.
 
         Args:
-            batch ([type]): [description]
-            criterion ([type]): [description]
+            batch ([Dict]): A dictionary of input tensors.
+            criterion ([type]): Callable criterion to compute model loss.
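[PATCH 41/64] above pads the waveform by half a hop when its length is an exact multiple of `hop_length`, so that the pyworld F0 track comes out with the same number of frames as the centered spectrogram. The spectrogram side of that convention is easy to verify (assumes librosa is installed; the exact off-by-one on the pyworld side depends on its `frame_period`):

```python
import numpy as np
import librosa

hop_length, n_fft = 256, 1024
x = np.random.randn(100 * hop_length).astype(np.float32)  # length % hop == 0

# With center=True (the librosa default) a spectrogram has
# 1 + len(x) // hop_length frames.
spec = librosa.stft(x, n_fft=n_fft, hop_length=hop_length)
print(spec.shape[1], 1 + len(x) // hop_length)  # 101 101
```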
""" text_input = batch["text_input"] text_lengths = batch["text_lengths"] @@ -296,6 +297,7 @@ class Tacotron2(BaseTacotron): return outputs, loss_dict def _create_logs(self, batch, outputs, ap): + """Create dashboard log information.""" postnet_outputs = outputs["model_outputs"] alignments = outputs["alignments"] alignments_backward = outputs["alignments_backward"] @@ -321,6 +323,7 @@ class Tacotron2(BaseTacotron): def train_log( self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int ) -> None: # pylint: disable=no-self-use + """Log training progress.""" ap = assets["audio_processor"] figures, audios = self._create_logs(batch, outputs, ap) logger.train_figures(steps, figures) diff --git a/TTS/tts/models/vits.py b/TTS/tts/models/vits.py index 7561780f..3b7df353 100644 --- a/TTS/tts/models/vits.py +++ b/TTS/tts/models/vits.py @@ -1,5 +1,4 @@ import math -import os import random from dataclasses import dataclass, field from itertools import chain diff --git a/TTS/tts/utils/ssim.py b/TTS/tts/utils/ssim.py index caed575f..883efdb8 100644 --- a/TTS/tts/utils/ssim.py +++ b/TTS/tts/utils/ssim.py @@ -23,8 +23,10 @@ def _ssim(img1, img2, window, window_size, channel, size_average=True): mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel) mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel) - mu1_sq = mu1.pow(2) - mu2_sq = mu2.pow(2) + # TODO: check if you need AMP disabled + # with torch.cuda.amp.autocast(enabled=False): + mu1_sq = mu1.float().pow(2) + mu2_sq = mu2.float().pow(2) mu1_mu2 = mu1 * mu2 sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq From 0ebc2a400eb4f44a04a6bbcadab649afa08eaae3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 20 Oct 2021 18:15:20 +0000 Subject: [PATCH 43/64] Implement `_set_speaker_embedding` in GlowTTS --- TTS/tts/models/glow_tts.py | 133 +++++++++++++++++++------------------ 1 file changed, 68 insertions(+), 65 deletions(-) diff --git a/TTS/tts/models/glow_tts.py b/TTS/tts/models/glow_tts.py index e3a5ff3c..c1e4c2ac 100644 --- a/TTS/tts/models/glow_tts.py +++ b/TTS/tts/models/glow_tts.py @@ -1,17 +1,18 @@ import math -from typing import Dict, Tuple +from typing import Dict, Tuple, Union import torch +from coqpit import Coqpit from torch import nn from torch.cuda.amp.autocast_mode import autocast from torch.nn import functional as F -from TTS.tts.configs import GlowTTSConfig +from TTS.tts.configs.glow_tts_config import GlowTTSConfig from TTS.tts.layers.glow_tts.decoder import Decoder from TTS.tts.layers.glow_tts.encoder import Encoder from TTS.tts.models.base_tts import BaseTTS from TTS.tts.utils.helpers import generate_path, maximum_path, sequence_mask -from TTS.tts.utils.speakers import get_speaker_manager +from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.synthesis import synthesis from TTS.tts.utils.visual import plot_alignment, plot_spectrogram from TTS.utils.io import load_fsspec @@ -38,17 +39,19 @@ class GlowTTS(BaseTTS): Check :class:`TTS.tts.configs.glow_tts_config.GlowTTSConfig` for class arguments. 
Examples: - >>> from TTS.tts.configs import GlowTTSConfig + >>> from TTS.tts.configs.glow_tts_config import GlowTTSConfig >>> from TTS.tts.models.glow_tts import GlowTTS >>> config = GlowTTSConfig() >>> model = GlowTTS(config) """ - def __init__(self, config: GlowTTSConfig): + def __init__(self, config: GlowTTSConfig, speaker_manager: SpeakerManager = None): super().__init__(config) + self.speaker_manager = speaker_manager + # pass all config fields to `self` # for fewer code change self.config = config @@ -58,19 +61,10 @@ class GlowTTS(BaseTTS): _, self.config, self.num_chars = self.get_characters(config) self.decoder_output_dim = config.out_channels + # init multi-speaker layers if necessary self.init_multispeaker(config) - # if is a multispeaker and c_in_channels is 0, set to 256 - self.c_in_channels = 0 - if self.num_speakers > 1: - if self.d_vector_dim: - self.c_in_channels = self.d_vector_dim - elif self.c_in_channels == 0 and not self.d_vector_dim: - # TODO: make this adjustable - self.c_in_channels = 256 - self.run_data_dep_init = config.data_dep_init_steps > 0 - self.encoder = Encoder( self.num_chars, out_channels=self.out_channels, @@ -98,28 +92,35 @@ class GlowTTS(BaseTTS): c_in_channels=self.c_in_channels, ) - def init_multispeaker(self, config: "Coqpit", data: list = None) -> None: - """Initialize multi-speaker modules of a model. A model can be trained either with a speaker embedding layer - or with external `d_vectors` computed from a speaker encoder model. - - If you need a different behaviour, override this function for your model. + def init_multispeaker(self, config: Coqpit): + """Init speaker embedding layer if `use_speaker_embedding` is True and set the expected speaker embedding + vector dimension in the network. If model uses d-vectors, then it only sets the expected dimension. Args: config (Coqpit): Model configuration. - data (List, optional): Dataset items to infer number of speakers. Defaults to None. """ + self.embedded_speaker_dim = 0 # init speaker manager - self.speaker_manager = get_speaker_manager(config, data=data) - self.num_speakers = self.speaker_manager.num_speakers - if config.use_d_vector_file: - self.external_d_vector_dim = config.d_vector_dim - else: - self.external_d_vector_dim = 0 + if self.speaker_manager is None and (self.use_speaker_embedding or self.use_d_vector_file): + raise ValueError( + " > SpeakerManager is not provided. You must provide the SpeakerManager before initializing a multi-speaker model." 
+ ) + # set number of speakers - if num_speakers is set in config, use it, otherwise use speaker_manager + if self.speaker_manager is not None: + self.num_speakers = self.speaker_manager.num_speakers + # set ultimate speaker embedding size + if config.use_speaker_embedding or config.use_d_vector_file: + self.embedded_speaker_dim = ( + config.d_vector_dim if "d_vector_dim" in config and config.d_vector_dim is not None else 512 + ) # init speaker embedding layer if config.use_speaker_embedding and not config.use_d_vector_file: - self.embedded_speaker_dim = self.c_in_channels - self.emb_g = nn.Embedding(self.num_speakers, self.embedded_speaker_dim) + print(" > Init speaker_embedding layer.") + self.embedded_speaker_dim = self.hidden_channels_enc + self.emb_g = nn.Embedding(self.num_speakers, self.hidden_channels_enc) nn.init.uniform_(self.emb_g.weight, -0.1, 0.1) + # set conditioning dimensions + self.c_in_channels = self.embedded_speaker_dim @staticmethod def compute_outputs(attn, o_mean, o_log_scale, x_mask): @@ -146,6 +147,35 @@ class GlowTTS(BaseTTS): if getattr(f, "set_ddi", False): f.set_ddi(False) + def _set_speaker_input(self, aux_input: Dict): + if aux_input is None: + d_vectors = None + speaker_ids = None + else: + d_vectors = aux_input.get("d_vectors", None) + speaker_ids = aux_input.get("speaker_ids", None) + + if d_vectors is not None and speaker_ids is not None: + raise ValueError("[!] Cannot use d-vectors and speaker-ids together.") + + if speaker_ids is not None and not hasattr(self, "emb_g"): + raise ValueError("[!] Cannot use speaker-ids without enabling speaker embedding.") + + g = speaker_ids if speaker_ids is not None else d_vectors + return g + + def _speaker_embedding(self, aux_input: Dict) -> Union[torch.tensor, None]: + g = self._set_speaker_input(aux_input) + # speaker embedding + if g is not None: + if hasattr(self, "emb_g"): + # use speaker embedding layer + g = F.normalize(self.emb_g(g)).unsqueeze(-1) # [b, h, 1] + else: + # use d-vector + g = F.normalize(g).unsqueeze(-1) # [b, h, 1] + return g + def forward( self, x, x_lengths, y, y_lengths=None, aux_input={"d_vectors": None, "speaker_ids": None} ): # pylint: disable=dangerous-default-value @@ -161,12 +191,7 @@ class GlowTTS(BaseTTS): y = y.transpose(1, 2) y_max_length = y.size(2) # norm speaker embeddings - g = aux_input["d_vectors"] if aux_input is not None and "d_vectors" in aux_input else None - if self.use_speaker_embedding or self.use_d_vector_file: - if not self.use_d_vector_file: - g = F.normalize(g).unsqueeze(-1) - else: - g = F.normalize(self.emb_g(g)).unsqueeze(-1) # [b, h, 1] + g = self._speaker_embedding(aux_input) # embedding pass o_mean, o_log_scale, o_dur_log, x_mask = self.encoder(x, x_lengths, g=g) # drop redisual frames wrt num_squeeze and set y_lengths. @@ -217,12 +242,7 @@ class GlowTTS(BaseTTS): y = y.transpose(1, 2) y_max_length = y.size(2) # norm speaker embeddings - g = aux_input["d_vectors"] if aux_input is not None and "d_vectors" in aux_input else None - if self.use_speaker_embedding or self.use_d_vector_file: - if not self.use_d_vector_file: - g = F.normalize(g).unsqueeze(-1) - else: - g = F.normalize(self.emb_g(g)).unsqueeze(-1) # [b, h, 1] + g = self._speaker_embedding(aux_input) # embedding pass o_mean, o_log_scale, o_dur_log, x_mask = self.encoder(x, x_lengths, g=g) # drop redisual frames wrt num_squeeze and set y_lengths. 
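`_speaker_embedding` above funnels both conditioning modes into one contract: speaker ids pass through the embedding table, external d-vectors are used directly, and either result is L2-normalized and reshaped to `[B, C, 1]` for the conditioned layers. A standalone sketch of that shape contract (channel size arbitrary):

```python
import torch
import torch.nn.functional as F

num_speakers, channels = 4, 192
emb_g = torch.nn.Embedding(num_speakers, channels)

# Mode 1: integer speaker ids -> embedding table -> [B, C, 1]
sid = torch.tensor([0, 3])
g_ids = F.normalize(emb_g(sid)).unsqueeze(-1)

# Mode 2: precomputed d-vectors -> normalized as-is -> [B, C, 1]
d_vec = torch.randn(2, channels)
g_dvec = F.normalize(d_vec).unsqueeze(-1)

print(g_ids.shape, g_dvec.shape)  # torch.Size([2, 192, 1]) for both
```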
@@ -272,22 +292,12 @@ class GlowTTS(BaseTTS): """ y = y.transpose(1, 2) y_max_length = y.size(2) - g = aux_input["d_vectors"] if aux_input is not None and "d_vectors" in aux_input else None - # norm speaker embeddings - if g is not None: - if self.external_d_vector_dim: - g = F.normalize(g).unsqueeze(-1) - else: - g = F.normalize(self.emb_g(g)).unsqueeze(-1) # [b, h, 1] - + g = self._speaker_embedding(aux_input) y_mask = torch.unsqueeze(sequence_mask(y_lengths, y_max_length), 1).to(y.dtype) - # decoder pass z, logdet = self.decoder(y, y_mask, g=g, reverse=False) - # reverse decoder and predict y, logdet = self.decoder(z, y_mask, g=g, reverse=True) - outputs = {} outputs["model_outputs"] = y.transpose(1, 2) outputs["logdet"] = logdet @@ -298,14 +308,7 @@ class GlowTTS(BaseTTS): self, x, aux_input={"x_lengths": None, "d_vectors": None, "speaker_ids": None} ): # pylint: disable=dangerous-default-value x_lengths = aux_input["x_lengths"] - g = aux_input["d_vectors"] if aux_input is not None and "d_vectors" in aux_input else None - - if g is not None: - if self.d_vector_dim: - g = F.normalize(g).unsqueeze(-1) - else: - g = F.normalize(self.emb_g(g)).unsqueeze(-1) # [b, h] - + g = self._speaker_embedding(aux_input) # embedding pass o_mean, o_log_scale, o_dur_log, x_mask = self.encoder(x, x_lengths, g=g) # compute output durations @@ -389,15 +392,15 @@ class GlowTTS(BaseTTS): def _create_logs(self, batch, outputs, ap): alignments = outputs["alignments"] - text_input = batch["text_input"] + text_input = batch["text_input"][:1] if batch["text_input"] is not None else None text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] - d_vectors = batch["d_vectors"] - speaker_ids = batch["speaker_ids"] + d_vectors = batch["d_vectors"][:1] if batch["d_vectors"] is not None else None + speaker_ids = batch["speaker_ids"][:1] if batch["speaker_ids"] is not None else None # model runs reverse flow to predict spectrograms pred_outputs = self.inference( - text_input[:1], + text_input, aux_input={"x_lengths": text_lengths[:1], "d_vectors": d_vectors, "speaker_ids": speaker_ids}, ) model_outputs = pred_outputs["model_outputs"] @@ -448,7 +451,7 @@ class GlowTTS(BaseTTS): test_audios = {} test_figures = {} test_sentences = self.config.test_sentences - aux_inputs = self.get_aux_input() + aux_inputs = self._get_test_aux_input() if len(test_sentences) == 0: print(" | [!] No test sentences provided.") else: From aa25f70b95758ff26c4a0a3bced9bb966f8006fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 20 Oct 2021 18:16:41 +0000 Subject: [PATCH 44/64] Update ForwardTTS for multi-speaker --- TTS/tts/models/forward_tts.py | 146 +++++++++++++++++++++------------- 1 file changed, 89 insertions(+), 57 deletions(-) diff --git a/TTS/tts/models/forward_tts.py b/TTS/tts/models/forward_tts.py index b83f12d4..b2c41df5 100644 --- a/TTS/tts/models/forward_tts.py +++ b/TTS/tts/models/forward_tts.py @@ -13,6 +13,7 @@ from TTS.tts.layers.generic.pos_encoding import PositionalEncoding from TTS.tts.layers.glow_tts.duration_predictor import DurationPredictor from TTS.tts.models.base_tts import BaseTTS from TTS.tts.utils.helpers import average_over_durations, generate_path, maximum_path, sequence_mask +from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.visual import plot_alignment, plot_pitch, plot_spectrogram @@ -31,9 +32,6 @@ class ForwardTTSArgs(Coqpit): hidden_channels (int): Number of base hidden channels of the model. Defaults to 512. 
- num_speakers (int): - Number of speakers for the speaker embedding layer. Defaults to 0. - use_aligner (bool): Whether to use aligner network to learn the text to speech alignment or use pre-computed durations. If set False, durations should be computed by `TTS/bin/compute_attention_masks.py` and path to the @@ -86,12 +84,6 @@ class ForwardTTSArgs(Coqpit): decoder_params (str): Parameters of the decoder module. Defaults to ```{"hidden_channels_ffn": 1024, "num_heads": 1, "num_layers": 6, "dropout_p": 0.1}``` - use_d_vetor (bool): - Whether to use precomputed d-vectors for multi-speaker training. Defaults to False. - - d_vector_dim (int): - Number of channels of the d-vectors. Defaults to 0. - detach_duration_predictor (bool): Detach the input to the duration predictor from the earlier computation graph so that the duraiton loss does not pass to the earlier layers. Defaults to True. @@ -99,12 +91,26 @@ class ForwardTTSArgs(Coqpit): max_duration (int): Maximum duration accepted by the model. Defaults to 75. + num_speakers (int): + Number of speakers for the speaker embedding layer. Defaults to 0. + + speakers_file (str): + Path to the speaker mapping file for the Speaker Manager. Defaults to None. + + speaker_embedding_channels (int): + Number of speaker embedding channels. Defaults to 256. + + use_d_vector_file (bool): + Enable/Disable the use of d-vectors for multi-speaker training. Defaults to False. + + d_vector_dim (int): + Number of d-vector channels. Defaults to 0. + """ num_chars: int = None out_channels: int = 80 hidden_channels: int = 384 - num_speakers: int = 0 use_aligner: bool = True use_pitch: bool = True pitch_predictor_hidden_channels: int = 256 @@ -125,10 +131,14 @@ class ForwardTTSArgs(Coqpit): decoder_params: dict = field( default_factory=lambda: {"hidden_channels_ffn": 1024, "num_heads": 1, "num_layers": 6, "dropout_p": 0.1} ) - use_d_vector: bool = False - d_vector_dim: int = 0 detach_duration_predictor: bool = False max_duration: int = 75 + num_speakers: int = 1 + use_speaker_embedding: bool = False + speakers_file: str = None + use_d_vector_file: bool = False + d_vector_dim: int = None + d_vector_file: str = None class ForwardTTS(BaseTTS): @@ -150,6 +160,8 @@ class ForwardTTS(BaseTTS): Args: config (Coqpit): Model coqpit class. + speaker_manager (SpeakerManager): Speaker manager for multi-speaker training. Only used for multi-speaker models. + Defaults to None. 
Examples: >>> from TTS.tts.models.fast_pitch import ForwardTTS, ForwardTTSArgs @@ -158,10 +170,13 @@ class ForwardTTS(BaseTTS): """ # pylint: disable=dangerous-default-value - def __init__(self, config: Coqpit): + def __init__(self, config: Coqpit, speaker_manager: SpeakerManager = None): super().__init__(config) + self.speaker_manager = speaker_manager + self.init_multispeaker(config) + self.max_duration = self.args.max_duration self.use_aligner = self.args.use_aligner self.use_pitch = self.args.use_pitch @@ -178,7 +193,7 @@ class ForwardTTS(BaseTTS): self.args.hidden_channels, self.args.encoder_type, self.args.encoder_params, - self.args.d_vector_dim, + self.embedded_speaker_dim, ) if self.args.positional_encoding: @@ -192,7 +207,7 @@ class ForwardTTS(BaseTTS): ) self.duration_predictor = DurationPredictor( - self.args.hidden_channels + self.args.d_vector_dim, + self.args.hidden_channels + self.embedded_speaker_dim, self.args.duration_predictor_hidden_channels, self.args.duration_predictor_kernel_size, self.args.duration_predictor_dropout_p, @@ -200,7 +215,7 @@ class ForwardTTS(BaseTTS): if self.args.use_pitch: self.pitch_predictor = DurationPredictor( - self.args.hidden_channels + self.args.d_vector_dim, + self.args.hidden_channels + self.embedded_speaker_dim, self.args.pitch_predictor_hidden_channels, self.args.pitch_predictor_kernel_size, self.args.pitch_predictor_dropout_p, @@ -212,19 +227,37 @@ class ForwardTTS(BaseTTS): padding=int((self.args.pitch_embedding_kernel_size - 1) / 2), ) - if self.args.num_speakers > 1 and not self.args.use_d_vector: - # speaker embedding layer - self.emb_g = nn.Embedding(self.args.num_speakers, self.args.d_vector_dim) - nn.init.uniform_(self.emb_g.weight, -0.1, 0.1) - - if self.args.d_vector_dim > 0 and self.args.d_vector_dim != self.args.hidden_channels: - self.proj_g = nn.Conv1d(self.args.d_vector_dim, self.args.hidden_channels, 1) - if self.args.use_aligner: self.aligner = AlignmentNetwork( in_query_channels=self.args.out_channels, in_key_channels=self.args.hidden_channels ) + def init_multispeaker(self, config: Coqpit): + """Init for multi-speaker training. + + Args: + config (Coqpit): Model configuration. + """ + self.embedded_speaker_dim = 0 + # init speaker manager + if self.speaker_manager is None and (config.use_d_vector_file or config.use_speaker_embedding): + raise ValueError( + " > SpeakerManager is not provided. You must provide the SpeakerManager before initializing a multi-speaker model." + ) + # set number of speakers + if self.speaker_manager is not None: + self.num_speakers = self.speaker_manager.num_speakers + # init d-vector embedding + if config.use_d_vector_file: + self.embedded_speaker_dim = config.d_vector_dim + if self.args.d_vector_dim != self.args.hidden_channels: + self.proj_g = nn.Conv1d(self.args.d_vector_dim, self.args.hidden_channels, 1) + # init speaker embedding layer + if config.use_speaker_embedding and not config.use_d_vector_file: + print(" > Init speaker_embedding layer.") + self.emb_g = nn.Embedding(self.args.num_speakers, self.args.hidden_channels) + nn.init.uniform_(self.emb_g.weight, -0.1, 0.1) + @staticmethod def generate_attn(dr, x_mask, y_mask=None): """Generate an attention mask from the durations. 
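Note: `init_multispeaker` decides between a learned speaker table and external d-vectors. Below is a stand-alone re-statement of that branching, not the real class; argument names mirror `ForwardTTSArgs`, and 384 is the dataclass's default `hidden_channels`:

    from torch import nn

    class MultiSpeakerSketch:
        def init_multispeaker(self, use_d_vector_file, use_speaker_embedding,
                              num_speakers, d_vector_dim, hidden_channels):
            self.embedded_speaker_dim = 0
            if use_d_vector_file:
                # external d-vectors widen the duration/pitch predictor inputs and
                # are projected only when their size disagrees with the encoder width
                self.embedded_speaker_dim = d_vector_dim
                if d_vector_dim != hidden_channels:
                    self.proj_g = nn.Conv1d(d_vector_dim, hidden_channels, 1)
            if use_speaker_embedding and not use_d_vector_file:
                # the learned table is sized to hidden_channels so it can be summed
                # with the encoder output in _forward_encoder; embedded_speaker_dim
                # stays 0 because nothing is concatenated in this case
                self.emb_g = nn.Embedding(num_speakers, hidden_channels)
                nn.init.uniform_(self.emb_g.weight, -0.1, 0.1)

    m = MultiSpeakerSketch()
    m.init_multispeaker(False, True, num_speakers=4, d_vector_dim=None, hidden_channels=384)
    print(m.emb_g.weight.shape)  # torch.Size([4, 384])

With the learned table the embedding is summed onto the encoder output, so only the d-vector path widens the duration and pitch predictor inputs via `embedded_speaker_dim`.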
@@ -289,18 +322,6 @@ class ForwardTTS(BaseTTS): o_dr = torch.round(o_dr) return o_dr - @staticmethod - def _concat_speaker_embedding(o_en, g): - g_exp = g.expand(-1, -1, o_en.size(-1)) # [B, C, T_en] - o_en = torch.cat([o_en, g_exp], 1) - return o_en - - def _sum_speaker_embedding(self, x, g): - # project g to decoder dim. - if hasattr(self, "proj_g"): - g = self.proj_g(g) - return x + g - def _forward_encoder( self, x: torch.LongTensor, x_mask: torch.FloatTensor, g: torch.FloatTensor = None ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: @@ -309,7 +330,7 @@ class ForwardTTS(BaseTTS): 1. Embed speaker IDs if multi-speaker mode. 2. Embed character sequences. 3. Run the encoder network. - 4. Concat speaker embedding to the encoder output for the duration predictor. + 4. Sum encoder outputs and speaker embeddings Args: x (torch.LongTensor): Input sequence IDs. @@ -327,19 +348,18 @@ class ForwardTTS(BaseTTS): - g: :math:`(B, C)` """ if hasattr(self, "emb_g"): - g = nn.functional.normalize(self.emb_g(g)) # [B, C, 1] + g = self.emb_g(g) # [B, C, 1] if g is not None: g = g.unsqueeze(-1) # [B, T, C] x_emb = self.emb(x) # encoder pass o_en = self.encoder(torch.transpose(x_emb, 1, -1), x_mask) - # speaker conditioning for duration predictor + # speaker conditioning + # TODO: try different ways of conditioning if g is not None: - o_en_dp = self._concat_speaker_embedding(o_en, g) - else: - o_en_dp = o_en - return o_en, o_en_dp, x_mask, g, x_emb + o_en = o_en + g + return o_en, x_mask, g, x_emb def _forward_decoder( self, @@ -373,9 +393,6 @@ class ForwardTTS(BaseTTS): # positional encoding if hasattr(self, "pos_encoder"): o_en_ex = self.pos_encoder(o_en_ex, y_mask) - # speaker embedding - if g is not None: - o_en_ex = self._sum_speaker_embedding(o_en_ex, g) # decoder pass o_de = self.decoder(o_en_ex, y_mask, g=g) return o_de.transpose(1, 2), attn.transpose(1, 2) @@ -457,6 +474,19 @@ class ForwardTTS(BaseTTS): alignment_soft = alignment_soft.squeeze(1).transpose(1, 2) return o_alignment_dur, alignment_soft, alignment_logprob, alignment_mas + def _set_speaker_input(self, aux_input: Dict): + d_vectors = aux_input.get("d_vectors", None) + speaker_ids = aux_input.get("speaker_ids", None) + + if d_vectors is not None and speaker_ids is not None: + raise ValueError("[!] Cannot use d-vectors and speaker-ids together.") + + if speaker_ids is not None and not hasattr(self, "emb_g"): + raise ValueError("[!] 
Cannot use speaker-ids without enabling speaker embedding.") + + g = speaker_ids if speaker_ids is not None else d_vectors + return g + def forward( self, x: torch.LongTensor, @@ -487,17 +517,17 @@ class ForwardTTS(BaseTTS): - g: :math:`[B, C]` - pitch: :math:`[B, 1, T]` """ - g = aux_input["d_vectors"] if "d_vectors" in aux_input else None + g = self._set_speaker_input(aux_input) # compute sequence masks y_mask = torch.unsqueeze(sequence_mask(y_lengths, None), 1).float() x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.shape[1]), 1).float() # encoder pass - o_en, o_en_dp, x_mask, g, x_emb = self._forward_encoder(x, x_mask, g) + o_en, x_mask, g, x_emb = self._forward_encoder(x, x_mask, g) # duration predictor pass if self.args.detach_duration_predictor: - o_dr_log = self.duration_predictor(o_en_dp.detach(), x_mask) + o_dr_log = self.duration_predictor(o_en.detach(), x_mask) else: - o_dr_log = self.duration_predictor(o_en_dp, x_mask) + o_dr_log = self.duration_predictor(o_en, x_mask) o_dr = torch.clamp(torch.exp(o_dr_log) - 1, 0, self.max_duration) # generate attn mask from predicted durations o_attn = self.generate_attn(o_dr.squeeze(1), x_mask) @@ -517,10 +547,12 @@ class ForwardTTS(BaseTTS): o_pitch = None avg_pitch = None if self.args.use_pitch: - o_pitch_emb, o_pitch, avg_pitch = self._forward_pitch_predictor(o_en_dp, x_mask, pitch, dr) + o_pitch_emb, o_pitch, avg_pitch = self._forward_pitch_predictor(o_en, x_mask, pitch, dr) o_en = o_en + o_pitch_emb # decoder pass - o_de, attn = self._forward_decoder(o_en, dr, x_mask, y_lengths, g=g) + o_de, attn = self._forward_decoder( + o_en, dr, x_mask, y_lengths, g=None + ) # TODO: maybe pass speaker embedding (g) too outputs = { "model_outputs": o_de, # [B, T, C] "durations_log": o_dr_log.squeeze(1), # [B, T] @@ -551,22 +583,22 @@ class ForwardTTS(BaseTTS): - x_lengths: [B] - g: [B, C] """ - g = aux_input["d_vectors"] if "d_vectors" in aux_input else None + g = self._set_speaker_input(aux_input) x_lengths = torch.tensor(x.shape[1:2]).to(x.device) x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.shape[1]), 1).to(x.dtype).float() # encoder pass - o_en, o_en_dp, x_mask, g, _ = self._forward_encoder(x, x_mask, g) + o_en, x_mask, g, _ = self._forward_encoder(x, x_mask, g) # duration predictor pass - o_dr_log = self.duration_predictor(o_en_dp, x_mask) + o_dr_log = self.duration_predictor(o_en, x_mask) o_dr = self.format_durations(o_dr_log, x_mask).squeeze(1) y_lengths = o_dr.sum(1) # pitch predictor pass o_pitch = None if self.args.use_pitch: - o_pitch_emb, o_pitch = self._forward_pitch_predictor(o_en_dp, x_mask) + o_pitch_emb, o_pitch = self._forward_pitch_predictor(o_en, x_mask) o_en = o_en + o_pitch_emb # decoder pass - o_de, attn = self._forward_decoder(o_en, o_dr, x_mask, y_lengths, g=g) + o_de, attn = self._forward_decoder(o_en, o_dr, x_mask, y_lengths, g=None) outputs = { "model_outputs": o_de, "alignments": attn, From 330ee7d208ca33ccd32b2f922b96f1fbbac22037 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 20 Oct 2021 18:17:25 +0000 Subject: [PATCH 45/64] Comment BaseTacotron and remove unused funcs --- TTS/tts/models/base_tacotron.py | 67 +++++++-------------------------- 1 file changed, 14 insertions(+), 53 deletions(-) diff --git a/TTS/tts/models/base_tacotron.py b/TTS/tts/models/base_tacotron.py index c661c4cc..d0cc81cc 100644 --- a/TTS/tts/models/base_tacotron.py +++ b/TTS/tts/models/base_tacotron.py @@ -9,15 +9,15 @@ from torch import nn from TTS.tts.layers.losses import TacotronLoss from 
TTS.tts.models.base_tts import BaseTTS
 from TTS.tts.utils.helpers import sequence_mask
-from TTS.tts.utils.speakers import SpeakerManager, get_speaker_manager
 from TTS.utils.generic_utils import format_aux_input
 from TTS.utils.io import load_fsspec
 from TTS.utils.training import gradual_training_scheduler
 
 
 class BaseTacotron(BaseTTS):
+    """Base class shared by Tacotron and Tacotron2"""
+
     def __init__(self, config: Coqpit):
-        """Abstract Tacotron class"""
         super().__init__(config)
 
         # pass all config fields as class attributes
@@ -45,6 +45,7 @@ class BaseTacotron(BaseTTS):
 
     @staticmethod
     def _format_aux_input(aux_input: Dict) -> Dict:
+        """Set missing fields to their default values"""
         if aux_input:
             return format_aux_input({"d_vectors": None, "speaker_ids": None}, aux_input)
         return None
@@ -53,14 +54,12 @@ class BaseTacotron(BaseTTS):
     # INIT FUNCTIONS
     #############################
 
-    def _init_states(self):
-        self.embedded_speakers = None
-        self.embedded_speakers_projected = None
-
     def _init_backward_decoder(self):
+        """Init the backward decoder for Forward-Backward decoding."""
         self.decoder_backward = copy.deepcopy(self.decoder)
 
     def _init_coarse_decoder(self):
+        """Init the coarse decoder for Double-Decoder Consistency."""
         self.coarse_decoder = copy.deepcopy(self.decoder)
         self.coarse_decoder.r_init = self.ddc_r
         self.coarse_decoder.set_r(self.ddc_r)
@@ -80,6 +79,13 @@ class BaseTacotron(BaseTTS):
     def load_checkpoint(
         self, config, checkpoint_path, eval=False
     ):  # pylint: disable=unused-argument, redefined-builtin
+        """Load model checkpoint and set up internals.
+
+        Args:
+            config (Coqpit): model configuration.
+            checkpoint_path (str): path to checkpoint file.
+            eval (bool): whether to load model for evaluation.
+        """
         state = load_fsspec(checkpoint_path, map_location=torch.device("cpu"))
         self.load_state_dict(state["model"])
         # TODO: set r in run-time by taking it from the new config
@@ -98,45 +104,9 @@ class BaseTacotron(BaseTTS):
         assert not self.training
 
     def get_criterion(self) -> nn.Module:
+        """Get the model criterion used in training."""
         return TacotronLoss(self.config)
 
-    @staticmethod
-    def get_speaker_manager(config: Coqpit, restore_path: str, data: List, out_path: str = None) -> SpeakerManager:
-        return get_speaker_manager(config, restore_path, data, out_path)
-
-    def get_aux_input(self, **kwargs) -> Dict:
-        """Compute Tacotron's auxiliary inputs based on model config.
-        - speaker d_vector
-        - style wav for GST
-        - speaker ID for speaker embedding
-        """
-        # setup speaker_id
-        if self.config.use_speaker_embedding:
-            speaker_id = kwargs.get("speaker_id", 0)
-        else:
-            speaker_id = None
-        # setup d_vector
-        d_vector = (
-            self.speaker_manager.get_d_vectors_by_speaker(self.speaker_manager.speaker_names[0])
-            if self.config.use_d_vector_file and self.config.use_speaker_embedding
-            else None
-        )
-        # setup style_mel
-        if "style_wav" in kwargs:
-            style_wav = kwargs["style_wav"]
-        elif self.config.has("gst_style_input"):
-            style_wav = self.config.gst_style_input
-        else:
-            style_wav = None
-        if style_wav is None and "use_gst" in self.config and self.config.use_gst:
-            # inicialize GST with zero dict.
- style_wav = {} - print("WARNING: You don't provided a gst style wav, for this reason we use a zero tensor!") - for i in range(self.config.gst["gst_num_style_tokens"]): - style_wav[str(i)] = 0 - aux_inputs = {"speaker_id": speaker_id, "style_wav": style_wav, "d_vector": d_vector} - return aux_inputs - ############################# # COMMON COMPUTE FUNCTIONS ############################# @@ -182,15 +152,6 @@ class BaseTacotron(BaseTTS): # EMBEDDING FUNCTIONS ############################# - def compute_speaker_embedding(self, speaker_ids): - """Compute speaker embedding vectors""" - if hasattr(self, "speaker_embedding") and speaker_ids is None: - raise RuntimeError(" [!] Model has speaker embedding layer but speaker_id is not provided") - if hasattr(self, "speaker_embedding") and speaker_ids is not None: - self.embedded_speakers = self.speaker_embedding(speaker_ids).unsqueeze(1) - if hasattr(self, "speaker_project_mel") and speaker_ids is not None: - self.embedded_speakers_projected = self.speaker_project_mel(self.embedded_speakers).squeeze(1) - def compute_gst(self, inputs, style_input, speaker_embedding=None): """Compute global style token""" if isinstance(style_input, dict): @@ -242,4 +203,4 @@ class BaseTacotron(BaseTTS): self.decoder.set_r(r) if trainer.config.bidirectional_decoder: trainer.model.decoder_backward.set_r(r) - print(f"\n > Number of output frames: {self.decoder.r}") \ No newline at end of file + print(f"\n > Number of output frames: {self.decoder.r}") From 7c2cb7cc30d881af1157875acf41d2b7e2c0d8ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 20 Oct 2021 18:18:22 +0000 Subject: [PATCH 46/64] Update BaseTTS --- TTS/tts/models/base_tts.py | 40 ++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/TTS/tts/models/base_tts.py b/TTS/tts/models/base_tts.py index d4044c7e..cd84bf08 100644 --- a/TTS/tts/models/base_tts.py +++ b/TTS/tts/models/base_tts.py @@ -1,4 +1,5 @@ import os +import random from typing import Dict, List, Tuple import torch @@ -9,12 +10,12 @@ from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from TTS.model import BaseModel +from TTS.tts.configs.shared_configs import CharactersConfig from TTS.tts.datasets.dataset import TTSDataset from TTS.tts.utils.speakers import SpeakerManager, get_speaker_manager from TTS.tts.utils.synthesis import synthesis from TTS.tts.utils.text import make_symbols from TTS.tts.utils.visual import plot_alignment, plot_spectrogram -from TTS.utils.audio import AudioProcessor # pylint: skip-file @@ -64,7 +65,7 @@ class BaseTTS(BaseModel): else: from TTS.tts.utils.text.symbols import parse_symbols, phonemes, symbols - config.characters = parse_symbols() + config.characters = CharactersConfig(**parse_symbols()) model_characters = phonemes if config.use_phonemes else symbols num_chars = len(model_characters) + getattr(config, "add_blank", False) return model_characters, config, num_chars @@ -80,14 +81,13 @@ class BaseTTS(BaseModel): config (Coqpit): Model configuration. """ # init speaker manager - if self.speaker_manager is None: - raise ValueError(" > SpeakerManager is not provided. 
You must provide the SpeakerManager before initializing a multi-speaker model.") - - print(f" > Number of speakers : {len(self.speaker_manager.speaker_ids)}") - - # set number of speakers - if num_speakers is set in config, use it, otherwise use speaker_manager - self.num_speakers = self.speaker_manager.num_speakers - + if self.speaker_manager is None and (config.use_speaker_embedding or config.use_d_vector_file): + raise ValueError( + " > SpeakerManager is not provided. You must provide the SpeakerManager before initializing a multi-speaker model." + ) + # set number of speakers + if self.speaker_manager is not None: + self.num_speakers = self.speaker_manager.num_speakers # set ultimate speaker embedding size if config.use_speaker_embedding or config.use_d_vector_file: self.embedded_speaker_dim = ( @@ -99,10 +99,6 @@ class BaseTTS(BaseModel): self.speaker_embedding = nn.Embedding(self.num_speakers, self.embedded_speaker_dim) self.speaker_embedding.weight.data.normal_(0, 0.3) - def get_aux_input(self, **kwargs) -> Dict: - """Prepare and return `aux_input` used by `forward()`""" - return {"speaker_id": None, "style_wav": None, "d_vector": None} - def format_batch(self, batch: Dict) -> Dict: """Generic batch formatting for `TTSDataset`. @@ -293,6 +289,20 @@ class BaseTTS(BaseModel): ) return loader + def _get_test_aux_input( + self, + ) -> Dict: + aux_inputs = { + "speaker_id": None + if not self.config.use_speaker_embedding + else random.sample(sorted(self.speaker_manager.speaker_ids.values()), 1), + "d_vector": None + if not self.config.use_d_vector_file + else random.samples(sorted(self.speaker_manager.d_vectors.values()), 1), + "style_wav": None, # TODO: handle GST style input + } + return aux_inputs + def test_run(self, assets: Dict) -> Tuple[Dict, Dict]: """Generic test run for `tts` models used by `Trainer`. @@ -309,7 +319,7 @@ class BaseTTS(BaseModel): test_audios = {} test_figures = {} test_sentences = self.config.test_sentences - aux_inputs = self.get_aux_input() + aux_inputs = self._get_test_aux_input() for idx, sen in enumerate(test_sentences): outputs_dict = synthesis( self, From 0e768dd4c5bf950a5913fae2123a6ec2f34dbbbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 20 Oct 2021 18:21:26 +0000 Subject: [PATCH 47/64] Update comments --- TTS/tts/layers/losses.py | 5 ----- TTS/tts/layers/tacotron/attentions.py | 21 +++++++++------------ TTS/tts/models/__init__.py | 4 ++-- 3 files changed, 11 insertions(+), 19 deletions(-) diff --git a/TTS/tts/layers/losses.py b/TTS/tts/layers/losses.py index f465c638..0ea342e8 100644 --- a/TTS/tts/layers/losses.py +++ b/TTS/tts/layers/losses.py @@ -410,11 +410,6 @@ class TacotronLoss(torch.nn.Module): return_dict["postnet_ssim_loss"] = postnet_ssim_loss return_dict["loss"] = loss - - # check if any loss is NaN - for key, loss in return_dict.items(): - if torch.isnan(loss): - raise RuntimeError(f" [!] NaN loss with {key}.") return return_dict diff --git a/TTS/tts/layers/tacotron/attentions.py b/TTS/tts/layers/tacotron/attentions.py index a01ccc49..8c30a00a 100644 --- a/TTS/tts/layers/tacotron/attentions.py +++ b/TTS/tts/layers/tacotron/attentions.py @@ -126,27 +126,24 @@ class GravesAttention(nn.Module): class OriginalAttention(nn.Module): - """Bahdanau Attention with various optional modifications. Proposed below. + """Bahdanau Attention with various optional modifications. 
    - Location sensitive attention: https://arxiv.org/abs/1712.05884
    - Forward Attention: https://arxiv.org/abs/1807.06736 + state masking at inference
    - Using sigmoid instead of softmax normalization
    - Attention windowing at inference time
 
     Note:
-        Location Sensitive Attention is an attention mechanism that extends the additive attention mechanism
-        to use cumulative attention weights from previous decoder time steps as an additional feature.
+        Location Sensitive Attention extends the additive attention mechanism
+        to use cumulative attention weights from previous decoder time steps with the current time step features.
 
-        Forward attention considers only the alignment paths that satisfy the monotonic condition at each
-        decoder timestep. The modified attention probabilities at each timestep are computed recursively
-        using a forward algorithm.
+        Forward attention computes the most probable monotonic alignment. The modified attention probabilities at each
+        timestep are computed recursively by the forward algorithm.
 
-        Transition agent for forward attention is further proposed, which helps the attention mechanism
-        to make decisions whether to move forward or stay at each decoder timestep.
-
-        Attention windowing applies a sliding windows to time steps of the input tensor centering at the last
-        time step with the largest attention weight. It is especially useful at inference to keep the attention
-        alignment diagonal.
+        Transition agent in the forward attention explicitly gates whether the attention mechanism moves forward or
+        stays at each decoder timestep.
+        Attention windowing is an inductive prior that prevents the model from attending to previous and future timesteps
+        beyond a certain window.
 
     Args:
         query_dim (int): number of channels in the query tensor.
diff --git a/TTS/tts/models/__init__.py b/TTS/tts/models/__init__.py
index 1236fa76..780f22cd 100644
--- a/TTS/tts/models/__init__.py
+++ b/TTS/tts/models/__init__.py
@@ -2,7 +2,7 @@ from TTS.tts.utils.text.symbols import make_symbols, parse_symbols
 from TTS.utils.generic_utils import find_module
 
 
-def setup_model(config):
+def setup_model(config, speaker_manager: "SpeakerManager" = None):
     print(" > Using model: {}".format(config.model))
     # fetch the right model implementation.
if "base_model" in config and config["base_model"] is not None: @@ -31,7 +31,7 @@ def setup_model(config): config.model_params.num_chars = num_chars if "model_args" in config: config.model_args.num_chars = num_chars - model = MyModel(config) + model = MyModel(config, speaker_manager=speaker_manager) return model From cea8e1739bb2750d842b53edc2c624d1178fcec4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Wed, 20 Oct 2021 18:22:41 +0000 Subject: [PATCH 48/64] Update AlignTTS to use SpeakerManager --- TTS/tts/models/align_tts.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/TTS/tts/models/align_tts.py b/TTS/tts/models/align_tts.py index a634aa6e..8e001630 100644 --- a/TTS/tts/models/align_tts.py +++ b/TTS/tts/models/align_tts.py @@ -11,6 +11,7 @@ from TTS.tts.layers.feed_forward.encoder import Encoder from TTS.tts.layers.generic.pos_encoding import PositionalEncoding from TTS.tts.models.base_tts import BaseTTS from TTS.tts.utils.helpers import generate_path, maximum_path, sequence_mask +from TTS.tts.utils.speakers import SpeakerManager from TTS.tts.utils.visual import plot_alignment, plot_spectrogram from TTS.utils.io import load_fsspec @@ -99,9 +100,10 @@ class AlignTTS(BaseTTS): # pylint: disable=dangerous-default-value - def __init__(self, config: Coqpit): + def __init__(self, config: Coqpit, speaker_manager: SpeakerManager=None): super().__init__(config) + self.speaker_manager = speaker_manager self.config = config self.phase = -1 self.length_scale = ( From 3ab009ca8d22ef94b4bcf8c4c10c9a8347bab5c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 13:51:37 +0000 Subject: [PATCH 49/64] Edit model configs for multi-speaker --- TTS/tts/configs/fast_pitch_config.py | 27 +++++++++++++++++++++++- TTS/tts/configs/fast_speech_config.py | 25 ++++++++++++++++++++++ TTS/tts/configs/speedy_speech_config.py | 28 +++++++++++++++++++++++-- 3 files changed, 77 insertions(+), 3 deletions(-) diff --git a/TTS/tts/configs/fast_pitch_config.py b/TTS/tts/configs/fast_pitch_config.py index 668ea227..8f063102 100644 --- a/TTS/tts/configs/fast_pitch_config.py +++ b/TTS/tts/configs/fast_pitch_config.py @@ -11,7 +11,7 @@ class FastPitchConfig(BaseTTSConfig): Example: - >>> from TTS.tts.configs import FastPitchConfig + >>> from TTS.tts.configs.fast_pitch_config import FastPitchConfig >>> config = FastPitchConfig() Args: @@ -30,6 +30,10 @@ class FastPitchConfig(BaseTTSConfig): Activation Normalization that pre-computes normalization stats at the beginning and use the same values for the rest. Defaults to 10. + speakers_file (str): + Path to the file containing the list of speakers. Needed at inference for loading matching speaker ids to + speaker names. Defaults to `None`. + use_speaker_embedding (bool): enable / disable using speaker embeddings for multi-speaker models. If set True, the model is in the multi-speaker mode. Defaults to False. @@ -105,6 +109,8 @@ class FastPitchConfig(BaseTTSConfig): model_args: ForwardTTSArgs = ForwardTTSArgs() # multi-speaker settings + num_speakers: int = 0 + speakers_file: str = None use_speaker_embedding: bool = False use_d_vector_file: bool = False d_vector_file: str = False @@ -149,3 +155,22 @@ class FastPitchConfig(BaseTTSConfig): "Prior to November 22, 1963.", ] ) + + def __post_init__(self): + # Pass multi-speaker parameters to the model args as `model.init_multispeaker()` looks for it there. 
+ if self.num_speakers > 0: + self.model_args.num_speakers = self.num_speakers + + # speaker embedding settings + if self.use_speaker_embedding: + self.model_args.use_speaker_embedding = True + if self.speakers_file: + self.model_args.speakers_file = self.speakers_file + + # d-vector settings + if self.use_d_vector_file: + self.model_args.use_d_vector_file = True + if self.d_vector_dim is not None and self.d_vector_dim > 0: + self.model_args.d_vector_dim = self.d_vector_dim + if self.d_vector_file: + self.model_args.d_vector_file = self.d_vector_file diff --git a/TTS/tts/configs/fast_speech_config.py b/TTS/tts/configs/fast_speech_config.py index bba47bb3..682a69bb 100644 --- a/TTS/tts/configs/fast_speech_config.py +++ b/TTS/tts/configs/fast_speech_config.py @@ -30,6 +30,11 @@ class FastSpeechConfig(BaseTTSConfig): Activation Normalization that pre-computes normalization stats at the beginning and use the same values for the rest. Defaults to 10. + speakers_file (str): + Path to the file containing the list of speakers. Needed at inference for loading matching speaker ids to + speaker names. Defaults to `None`. + + use_speaker_embedding (bool): enable / disable using speaker embeddings for multi-speaker models. If set True, the model is in the multi-speaker mode. Defaults to False. @@ -105,6 +110,7 @@ class FastSpeechConfig(BaseTTSConfig): model_args: ForwardTTSArgs = ForwardTTSArgs(use_pitch=False) # multi-speaker settings + speakers_file: str = None use_speaker_embedding: bool = False use_d_vector_file: bool = False d_vector_file: str = False @@ -149,3 +155,22 @@ class FastSpeechConfig(BaseTTSConfig): "Prior to November 22, 1963.", ] ) + + def __post_init__(self): + # Pass multi-speaker parameters to the model args as `model.init_multispeaker()` looks for it there. + if self.num_speakers > 0: + self.model_args.num_speakers = self.num_speakers + + # speaker embedding settings + if self.use_speaker_embedding: + self.model_args.use_speaker_embedding = True + if self.speakers_file: + self.model_args.speakers_file = self.speakers_file + + # d-vector settings + if self.use_d_vector_file: + self.model_args.use_d_vector_file = True + if self.d_vector_dim is not None and self.d_vector_dim > 0: + self.model_args.d_vector_dim = self.d_vector_dim + if self.d_vector_file: + self.model_args.d_vector_file = self.d_vector_file diff --git a/TTS/tts/configs/speedy_speech_config.py b/TTS/tts/configs/speedy_speech_config.py index ba561c89..6007b741 100644 --- a/TTS/tts/configs/speedy_speech_config.py +++ b/TTS/tts/configs/speedy_speech_config.py @@ -1,8 +1,8 @@ from dataclasses import dataclass, field from typing import List -from TTS.tts.configs.shared_configs import BaseTTSConfig from TTS.tts.models.forward_tts import ForwardTTSArgs +from TTS.tts.configs.shared_configs import BaseTTSConfig @dataclass @@ -30,6 +30,10 @@ class SpeedySpeechConfig(BaseTTSConfig): Activation Normalization that pre-computes normalization stats at the beginning and use the same values for the rest. Defaults to 10. + speakers_file (str): + Path to the file containing the list of speakers. Needed at inference for loading matching speaker ids to + speaker names. Defaults to `None`. + use_speaker_embedding (bool): enable / disable using speaker embeddings for multi-speaker models. If set True, the model is in the multi-speaker mode. Defaults to False. 
@@ -117,12 +121,13 @@ class SpeedySpeechConfig(BaseTTSConfig): }, out_channels=80, hidden_channels=128, - num_speakers=0, positional_encoding=True, detach_duration_predictor=True, ) # multi-speaker settings + num_speakers: int = 0 + speakers_file: str = None use_speaker_embedding: bool = False use_d_vector_file: bool = False d_vector_file: str = False @@ -166,3 +171,22 @@ class SpeedySpeechConfig(BaseTTSConfig): "Prior to November 22, 1963.", ] ) + + def __post_init__(self): + # Pass multi-speaker parameters to the model args as `model.init_multispeaker()` looks for it there. + if self.num_speakers > 0: + self.model_args.num_speakers = self.num_speakers + + # speaker embedding settings + if self.use_speaker_embedding: + self.model_args.use_speaker_embedding = True + if self.speakers_file: + self.model_args.speakers_file = self.speakers_file + + # d-vector settings + if self.use_d_vector_file: + self.model_args.use_d_vector_file = True + if self.d_vector_dim is not None and self.d_vector_dim > 0: + self.model_args.d_vector_dim = self.d_vector_dim + if self.d_vector_file: + self.model_args.d_vector_file = self.d_vector_file From 1987aaaaed09870c845b43760884d93fc6a2877c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 13:53:25 +0000 Subject: [PATCH 50/64] Update d-vector reshape in synthesizer --- TTS/utils/synthesizer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/TTS/utils/synthesizer.py b/TTS/utils/synthesizer.py index 6d394378..af07419f 100644 --- a/TTS/utils/synthesizer.py +++ b/TTS/utils/synthesizer.py @@ -198,6 +198,7 @@ class Synthesizer(object): if self.tts_config.use_d_vector_file: # get the speaker embedding from the saved d_vectors. speaker_embedding = self.tts_model.speaker_manager.get_d_vectors_by_speaker(speaker_idx)[0] + speaker_embedding = np.array(speaker_embedding)[None, :] # [1 x embedding_dim] else: # get speaker idx from the speaker name speaker_id = self.tts_model.speaker_manager.speaker_ids[speaker_idx] From aea90e250124899fa4f1c04298b595550078eaa9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 13:53:45 +0000 Subject: [PATCH 51/64] Comment synthesis.py --- TTS/tts/utils/synthesis.py | 48 +++++++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 13 deletions(-) diff --git a/TTS/tts/utils/synthesis.py b/TTS/tts/utils/synthesis.py index ca15f4cc..5185139e 100644 --- a/TTS/tts/utils/synthesis.py +++ b/TTS/tts/utils/synthesis.py @@ -172,7 +172,7 @@ def speaker_id_to_torch(speaker_id, cuda=False): def embedding_to_torch(d_vector, cuda=False): if d_vector is not None: d_vector = np.asarray(d_vector) - d_vector = torch.from_numpy(d_vector).unsqueeze(0).type(torch.FloatTensor) + d_vector = torch.from_numpy(d_vector).type(torch.FloatTensor) if cuda: return d_vector.cuda() return d_vector @@ -210,20 +210,42 @@ def synthesis( d_vector=None, backend="torch", ): - """Synthesize voice for the given text. + """Synthesize voice for the given text using Griffin-Lim vocoder or just compute output features to be passed to + the vocoder model. Args: - model (TTS.tts.models): model to synthesize. - text (str): target text - CONFIG (dict): config dictionary to be loaded from config.json. - use_cuda (bool): enable cuda. - ap (TTS.tts.utils.audio.AudioProcessor): audio processor to process - model outputs. - speaker_id (int): id of speaker - style_wav (str | Dict[str, float]): Uses for style embedding of GST. - enable_eos_bos_chars (bool): enable special chars for end of sentence and start of sentence. 
-        do_trim_silence (bool): trim silence after synthesis.
-        backend (str): tf or torch
+        model (TTS.tts.models):
+            The TTS model to synthesize audio with.
+
+        text (str):
+            The input text to convert to speech.
+
+        CONFIG (Coqpit):
+            Model configuration.
+
+        use_cuda (bool):
+            Enable/disable CUDA.
+
+        ap (TTS.tts.utils.audio.AudioProcessor):
+            The audio processor for extracting features and pre/post-processing audio.
+
+        speaker_id (int):
+            Speaker ID passed to the speaker embedding layer in multi-speaker models. Defaults to None.
+
+        style_wav (str | Dict[str, float]):
+            Path or tensor to/of a waveform used for computing the style embedding. Defaults to None.
+
+        enable_eos_bos_chars (bool):
+            Enable special characters for end of sentence and start of sentence. Defaults to False.
+
+        do_trim_silence (bool):
+            Trim silence after synthesis. Defaults to False.
+
+        d_vector (torch.Tensor):
+            d-vector for multi-speaker models in shape :math:`[1, D]`. Defaults to None.
+
+        backend (str):
+            tf or torch. Defaults to "torch".
     """
     # GST processing
     style_mel = None

From 3cb07fb6b59c67604e33ce1bd49e6316f22391f5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 21 Oct 2021 13:54:39 +0000
Subject: [PATCH 52/64] Fix SpeakerManager init with data items

---
 TTS/tts/utils/speakers.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/TTS/tts/utils/speakers.py b/TTS/tts/utils/speakers.py
index e58f0cfb..13696a20 100644
--- a/TTS/tts/utils/speakers.py
+++ b/TTS/tts/utils/speakers.py
@@ -63,7 +63,6 @@ class SpeakerManager:
         use_cuda: bool = False,
     ):
 
-        self.data_items = []
         self.d_vectors = {}
         self.speaker_ids = {}
         self.clip_ids = []
@@ -72,7 +71,7 @@ class SpeakerManager:
         self.use_cuda = use_cuda
 
         if data_items:
-            self.speaker_ids, self.speaker_names, _ = self.parse_speakers_from_data(self.data_items)
+            self.speaker_ids, _ = self.parse_speakers_from_data(data_items)
 
         if d_vectors_file_path:
             self.set_d_vectors_from_file(d_vectors_file_path)

From 82fed4add285f166d57e96ea85bfd692cb40f34f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 21 Oct 2021 16:05:51 +0000
Subject: [PATCH 53/64] Make style

---
 TTS/tts/configs/speedy_speech_config.py | 2 +-
 TTS/tts/datasets/dataset.py             | 1 +
 TTS/tts/models/align_tts.py             | 2 +-
 TTS/tts/models/base_tacotron.py         | 2 +-
 TTS/tts/models/tacotron.py              | 4 ++--
 TTS/tts/models/tacotron2.py             | 3 ++-
 TTS/tts/models/vits.py                  | 2 +-
 7 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/TTS/tts/configs/speedy_speech_config.py b/TTS/tts/configs/speedy_speech_config.py
index 6007b741..c4bfa991 100644
--- a/TTS/tts/configs/speedy_speech_config.py
+++ b/TTS/tts/configs/speedy_speech_config.py
@@ -1,8 +1,8 @@
 from dataclasses import dataclass, field
 from typing import List
 
-from TTS.tts.models.forward_tts import ForwardTTSArgs
 from TTS.tts.configs.shared_configs import BaseTTSConfig
+from TTS.tts.models.forward_tts import ForwardTTSArgs
 
 
 @dataclass
diff --git a/TTS/tts/datasets/dataset.py b/TTS/tts/datasets/dataset.py
index bfe0d778..04314bab 100644
--- a/TTS/tts/datasets/dataset.py
+++ b/TTS/tts/datasets/dataset.py
@@ -419,6 +419,7 @@ class TTSDataset(Dataset):
                 d_vectors = [self.d_vector_mapping[w]["embedding"] for w in wav_files_names]
             else:
                 d_vectors = None
+
             # get numerical speaker ids from speaker names
             if self.speaker_id_mapping:
                 speaker_ids = [self.speaker_id_mapping[sn] for sn in batch["speaker_name"]]
diff --git a/TTS/tts/models/align_tts.py b/TTS/tts/models/align_tts.py
index a634aa6e..eef06f12 100644
---
a/TTS/tts/models/align_tts.py +++ b/TTS/tts/models/align_tts.py @@ -100,7 +100,7 @@ class AlignTTS(BaseTTS): # pylint: disable=dangerous-default-value - def __init__(self, config: Coqpit, speaker_manager: SpeakerManager=None): + def __init__(self, config: Coqpit, speaker_manager: SpeakerManager = None): super().__init__(config) self.speaker_manager = speaker_manager diff --git a/TTS/tts/models/base_tacotron.py b/TTS/tts/models/base_tacotron.py index d0cc81cc..ca8f3bb9 100644 --- a/TTS/tts/models/base_tacotron.py +++ b/TTS/tts/models/base_tacotron.py @@ -1,6 +1,6 @@ import copy from abc import abstractmethod -from typing import Dict, List +from typing import Dict import torch from coqpit import Coqpit diff --git a/TTS/tts/models/tacotron.py b/TTS/tts/models/tacotron.py index 9ed5dc91..4e46d252 100644 --- a/TTS/tts/models/tacotron.py +++ b/TTS/tts/models/tacotron.py @@ -258,10 +258,10 @@ class Tacotron(BaseTacotron): stop_targets.float(), stop_target_lengths, mel_lengths, - outputs["decoder_outputs_backward"].float(), + None if outputs["decoder_outputs_backward"] is None else outputs["decoder_outputs_backward"].float(), outputs["alignments"].float(), alignment_lengths, - outputs["alignments_backward"].float(), + None if outputs["alignments_backward"] is None else outputs["alignments_backward"].float(), text_lengths, ) diff --git a/TTS/tts/models/tacotron2.py b/TTS/tts/models/tacotron2.py index 4307c90e..ead3bf2b 100644 --- a/TTS/tts/models/tacotron2.py +++ b/TTS/tts/models/tacotron2.py @@ -1,6 +1,7 @@ # coding: utf-8 from typing import Dict + import torch from coqpit import Coqpit from torch import nn @@ -237,7 +238,7 @@ class Tacotron2(BaseTacotron): } return outputs - def train_step(self, batch:Dict, criterion:torch.nn.Module): + def train_step(self, batch: Dict, criterion: torch.nn.Module): """A single training step. Forward pass and loss computation. 
Args: diff --git a/TTS/tts/models/vits.py b/TTS/tts/models/vits.py index 3b7df353..a449a575 100644 --- a/TTS/tts/models/vits.py +++ b/TTS/tts/models/vits.py @@ -216,7 +216,7 @@ class Vits(BaseTTS): # pylint: disable=dangerous-default-value - def __init__(self, config: Coqpit, speaker_manager: SpeakerManager=None): + def __init__(self, config: Coqpit, speaker_manager: SpeakerManager = None): super().__init__(config) From e62d3c5cf7031e8f8113aca68f35508db8453f55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 16:08:13 +0000 Subject: [PATCH 54/64] Use absolute imports for tts configs and models --- TTS/config/__init__.py | 3 ++- TTS/tts/configs/__init__.py | 22 +++++++++---------- tests/data_tests/test_loader.py | 2 +- tests/tts_tests/test_align_tts_train.py | 2 +- tests/tts_tests/test_fast_pitch_train.py | 2 +- tests/tts_tests/test_glow_tts.py | 2 +- tests/tts_tests/test_glow_tts_train.py | 2 +- tests/tts_tests/test_speedy_speech_train.py | 2 +- .../test_tacotron2_d-vectors_train.py | 4 ++-- tests/tts_tests/test_tacotron2_model.py | 4 ++-- .../test_tacotron2_speaker_emb_train.py | 2 +- tests/tts_tests/test_tacotron2_tf_model.py | 2 +- tests/tts_tests/test_tacotron2_train.py | 2 +- .../test_tacotron2_train_fsspec_path.py | 2 +- tests/tts_tests/test_tacotron_model.py | 3 ++- tests/tts_tests/test_tacotron_train.py | 2 +- tests/tts_tests/test_vits_train.py | 2 +- 17 files changed, 31 insertions(+), 29 deletions(-) diff --git a/TTS/config/__init__.py b/TTS/config/__init__.py index ea98f431..f626163f 100644 --- a/TTS/config/__init__.py +++ b/TTS/config/__init__.py @@ -36,10 +36,11 @@ def register_config(model_name: str) -> Coqpit: Coqpit: config class. """ config_class = None + config_name = model_name + "_config" paths = ["TTS.tts.configs", "TTS.vocoder.configs", "TTS.speaker_encoder"] for path in paths: try: - config_class = find_module(path, model_name + "_config") + config_class = find_module(path, config_name) except ModuleNotFoundError: pass if config_class is None: diff --git a/TTS/tts/configs/__init__.py b/TTS/tts/configs/__init__.py index 5ad4fe8c..3146ac1c 100644 --- a/TTS/tts/configs/__init__.py +++ b/TTS/tts/configs/__init__.py @@ -3,15 +3,15 @@ import os from inspect import isclass # import all files under configs/ -configs_dir = os.path.dirname(__file__) -for file in os.listdir(configs_dir): - path = os.path.join(configs_dir, file) - if not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)): - config_name = file[: file.find(".py")] if file.endswith(".py") else file - module = importlib.import_module("TTS.tts.configs." + config_name) - for attribute_name in dir(module): - attribute = getattr(module, attribute_name) +# configs_dir = os.path.dirname(__file__) +# for file in os.listdir(configs_dir): +# path = os.path.join(configs_dir, file) +# if not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)): +# config_name = file[: file.find(".py")] if file.endswith(".py") else file +# module = importlib.import_module("TTS.tts.configs." 
+ config_name) +# for attribute_name in dir(module): +# attribute = getattr(module, attribute_name) - if isclass(attribute): - # Add the class to this package's variables - globals()[attribute_name] = attribute +# if isclass(attribute): +# # Add the class to this package's variables +# globals()[attribute_name] = attribute diff --git a/tests/data_tests/test_loader.py b/tests/data_tests/test_loader.py index 18066ef3..8a20c261 100644 --- a/tests/data_tests/test_loader.py +++ b/tests/data_tests/test_loader.py @@ -7,7 +7,7 @@ import torch from torch.utils.data import DataLoader from tests import get_tests_output_path -from TTS.tts.configs import BaseTTSConfig +from TTS.tts.configs.shared_configs import BaseTTSConfig from TTS.tts.datasets import TTSDataset from TTS.tts.datasets.formatters import ljspeech from TTS.utils.audio import AudioProcessor diff --git a/tests/tts_tests/test_align_tts_train.py b/tests/tts_tests/test_align_tts_train.py index f04a2358..f5d60d7c 100644 --- a/tests/tts_tests/test_align_tts_train.py +++ b/tests/tts_tests/test_align_tts_train.py @@ -3,7 +3,7 @@ import os import shutil from tests import get_device_id, get_tests_output_path, run_cli -from TTS.tts.configs import AlignTTSConfig +from TTS.tts.configs.align_tts_config import AlignTTSConfig config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") diff --git a/tests/tts_tests/test_fast_pitch_train.py b/tests/tts_tests/test_fast_pitch_train.py index 89176ac9..71ba8b25 100644 --- a/tests/tts_tests/test_fast_pitch_train.py +++ b/tests/tts_tests/test_fast_pitch_train.py @@ -4,7 +4,7 @@ import shutil from tests import get_device_id, get_tests_output_path, run_cli from TTS.config.shared_configs import BaseAudioConfig -from TTS.tts.configs import FastPitchConfig +from TTS.tts.configs.fast_pitch_config import FastPitchConfig config_path = os.path.join(get_tests_output_path(), "test_fast_pitch_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") diff --git a/tests/tts_tests/test_glow_tts.py b/tests/tts_tests/test_glow_tts.py index e139562c..82d0ec3b 100644 --- a/tests/tts_tests/test_glow_tts.py +++ b/tests/tts_tests/test_glow_tts.py @@ -6,7 +6,7 @@ import torch from torch import optim from tests import get_tests_input_path -from TTS.tts.configs import GlowTTSConfig +from TTS.tts.configs.glow_tts_config import GlowTTSConfig from TTS.tts.layers.losses import GlowTTSLoss from TTS.tts.models.glow_tts import GlowTTS from TTS.utils.audio import AudioProcessor diff --git a/tests/tts_tests/test_glow_tts_train.py b/tests/tts_tests/test_glow_tts_train.py index 7da4fd33..e5901076 100644 --- a/tests/tts_tests/test_glow_tts_train.py +++ b/tests/tts_tests/test_glow_tts_train.py @@ -3,7 +3,7 @@ import os import shutil from tests import get_device_id, get_tests_output_path, run_cli -from TTS.tts.configs import GlowTTSConfig +from TTS.tts.configs.glow_tts_config import GlowTTSConfig config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") diff --git a/tests/tts_tests/test_speedy_speech_train.py b/tests/tts_tests/test_speedy_speech_train.py index a181ac24..7d7ecc7c 100644 --- a/tests/tts_tests/test_speedy_speech_train.py +++ b/tests/tts_tests/test_speedy_speech_train.py @@ -3,7 +3,7 @@ import os import shutil from tests import get_device_id, get_tests_output_path, run_cli -from TTS.tts.configs import SpeedySpeechConfig +from 
TTS.tts.configs.speedy_speech_config import SpeedySpeechConfig config_path = os.path.join(get_tests_output_path(), "test_speedy_speech_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") diff --git a/tests/tts_tests/test_tacotron2_d-vectors_train.py b/tests/tts_tests/test_tacotron2_d-vectors_train.py index 1a8d78bf..c817badc 100644 --- a/tests/tts_tests/test_tacotron2_d-vectors_train.py +++ b/tests/tts_tests/test_tacotron2_d-vectors_train.py @@ -3,7 +3,7 @@ import os import shutil from tests import get_device_id, get_tests_output_path, run_cli -from TTS.tts.configs import Tacotron2Config +from TTS.tts.configs.tacotron2_config import Tacotron2Config config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") @@ -23,7 +23,7 @@ config = Tacotron2Config( epochs=1, print_step=1, print_eval=True, - use_speaker_embedding=True, + use_speaker_embedding=False, use_d_vector_file=True, test_sentences=[ "Be a voice, not an echo.", diff --git a/tests/tts_tests/test_tacotron2_model.py b/tests/tts_tests/test_tacotron2_model.py index 65d2bd9d..df184a6a 100644 --- a/tests/tts_tests/test_tacotron2_model.py +++ b/tests/tts_tests/test_tacotron2_model.py @@ -6,8 +6,8 @@ import torch from torch import nn, optim from tests import get_tests_input_path -from TTS.tts.configs import Tacotron2Config from TTS.tts.configs.shared_configs import GSTConfig +from TTS.tts.configs.tacotron2_config import Tacotron2Config from TTS.tts.layers.losses import MSELossMasked from TTS.tts.models.tacotron2 import Tacotron2 from TTS.utils.audio import AudioProcessor @@ -114,7 +114,7 @@ class MultiSpeakerTacotronTrainTest(unittest.TestCase): assert (param - param_ref).sum() == 0, param count += 1 optimizer = optim.Adam(model.parameters(), lr=config.lr) - for i in range(5): + for _ in range(5): outputs = model.forward( input_dummy, input_lengths, mel_spec, mel_lengths, aux_input={"speaker_ids": speaker_ids} ) diff --git a/tests/tts_tests/test_tacotron2_speaker_emb_train.py b/tests/tts_tests/test_tacotron2_speaker_emb_train.py index 41d694f6..095016d8 100644 --- a/tests/tts_tests/test_tacotron2_speaker_emb_train.py +++ b/tests/tts_tests/test_tacotron2_speaker_emb_train.py @@ -3,7 +3,7 @@ import os import shutil from tests import get_device_id, get_tests_output_path, run_cli -from TTS.tts.configs import Tacotron2Config +from TTS.tts.configs.tacotron2_config import Tacotron2Config config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") diff --git a/tests/tts_tests/test_tacotron2_tf_model.py b/tests/tts_tests/test_tacotron2_tf_model.py index 515a6834..fb1efcde 100644 --- a/tests/tts_tests/test_tacotron2_tf_model.py +++ b/tests/tts_tests/test_tacotron2_tf_model.py @@ -5,7 +5,7 @@ import numpy as np import tensorflow as tf import torch -from TTS.tts.configs import Tacotron2Config +from TTS.tts.configs.tacotron2_config import Tacotron2Config from TTS.tts.tf.models.tacotron2 import Tacotron2 from TTS.tts.tf.utils.tflite import convert_tacotron2_to_tflite, load_tflite_model diff --git a/tests/tts_tests/test_tacotron2_train.py b/tests/tts_tests/test_tacotron2_train.py index e947a54a..4f37ef89 100644 --- a/tests/tts_tests/test_tacotron2_train.py +++ b/tests/tts_tests/test_tacotron2_train.py @@ -3,7 +3,7 @@ import os import shutil from tests import get_device_id, get_tests_output_path, run_cli -from TTS.tts.configs import Tacotron2Config +from 
TTS.tts.configs.tacotron2_config import Tacotron2Config config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") diff --git a/tests/tts_tests/test_tacotron2_train_fsspec_path.py b/tests/tts_tests/test_tacotron2_train_fsspec_path.py index 9e4ee102..5d14a983 100644 --- a/tests/tts_tests/test_tacotron2_train_fsspec_path.py +++ b/tests/tts_tests/test_tacotron2_train_fsspec_path.py @@ -3,7 +3,7 @@ import os import shutil from tests import get_device_id, get_tests_output_path, run_cli -from TTS.tts.configs import Tacotron2Config +from TTS.tts.configs.tacotron2_config import Tacotron2Config config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") diff --git a/tests/tts_tests/test_tacotron_model.py b/tests/tts_tests/test_tacotron_model.py index 3f570276..6e0e712b 100644 --- a/tests/tts_tests/test_tacotron_model.py +++ b/tests/tts_tests/test_tacotron_model.py @@ -6,7 +6,8 @@ import torch from torch import nn, optim from tests import get_tests_input_path -from TTS.tts.configs import GSTConfig, TacotronConfig +from TTS.tts.configs.shared_configs import GSTConfig +from TTS.tts.configs.tacotron_config import TacotronConfig from TTS.tts.layers.losses import L1LossMasked from TTS.tts.models.tacotron import Tacotron from TTS.utils.audio import AudioProcessor diff --git a/tests/tts_tests/test_tacotron_train.py b/tests/tts_tests/test_tacotron_train.py index 0c35ee28..68071c66 100644 --- a/tests/tts_tests/test_tacotron_train.py +++ b/tests/tts_tests/test_tacotron_train.py @@ -3,7 +3,7 @@ import os import shutil from tests import get_device_id, get_tests_output_path, run_cli -from TTS.tts.configs import TacotronConfig +from TTS.tts.configs.tacotron_config import TacotronConfig config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") diff --git a/tests/tts_tests/test_vits_train.py b/tests/tts_tests/test_vits_train.py index db9d2fc1..6398955e 100644 --- a/tests/tts_tests/test_vits_train.py +++ b/tests/tts_tests/test_vits_train.py @@ -3,7 +3,7 @@ import os import shutil from tests import get_device_id, get_tests_output_path, run_cli -from TTS.tts.configs import VitsConfig +from TTS.tts.configs.vits_config import VitsConfig config_path = os.path.join(get_tests_output_path(), "test_model_config.json") output_path = os.path.join(get_tests_output_path(), "train_outputs") From 2b7d15938344c1878ca30956055486901f603506 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 16:12:22 +0000 Subject: [PATCH 55/64] Update BaseTTS for multi-speaker training --- TTS/tts/models/base_tts.py | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/TTS/tts/models/base_tts.py b/TTS/tts/models/base_tts.py index cd84bf08..b77c1e23 100644 --- a/TTS/tts/models/base_tts.py +++ b/TTS/tts/models/base_tts.py @@ -80,14 +80,12 @@ class BaseTTS(BaseModel): Args: config (Coqpit): Model configuration. """ - # init speaker manager - if self.speaker_manager is None and (config.use_speaker_embedding or config.use_d_vector_file): - raise ValueError( - " > SpeakerManager is not provided. You must provide the SpeakerManager before initializing a multi-speaker model." 
- ) # set number of speakers if self.speaker_manager is not None: self.num_speakers = self.speaker_manager.num_speakers + elif hasattr(config, "num_speakers"): + self.num_speakers = config.num_speakers + # set ultimate speaker embedding size if config.use_speaker_embedding or config.use_d_vector_file: self.embedded_speaker_dim = ( @@ -189,13 +187,9 @@ class BaseTTS(BaseModel): ap = assets["audio_processor"] # setup multi-speaker attributes - if hasattr(self, "speaker_manager"): + if hasattr(self, "speaker_manager") and self.speaker_manager is not None: speaker_id_mapping = self.speaker_manager.speaker_ids if config.use_speaker_embedding else None - d_vector_mapping = ( - self.speaker_manager.d_vectors - if config.use_speaker_embedding and config.use_d_vector_file - else None - ) + d_vector_mapping = self.speaker_manager.d_vectors if config.use_d_vector_file else None else: speaker_id_mapping = None d_vector_mapping = None @@ -228,9 +222,7 @@ class BaseTTS(BaseModel): use_noise_augment=not is_eval, verbose=verbose, speaker_id_mapping=speaker_id_mapping, - d_vector_mapping=d_vector_mapping - if config.use_speaker_embedding and config.use_d_vector_file - else None, + d_vector_mapping=d_vector_mapping if config.use_d_vector_file else None, ) # pre-compute phonemes @@ -292,13 +284,17 @@ class BaseTTS(BaseModel): def _get_test_aux_input( self, ) -> Dict: + + d_vector = None + if self.config.use_d_vector_file: + d_vector = [self.speaker_manager.d_vectors[name]["embedding"] for name in self.speaker_manager.d_vectors] + d_vector = (random.sample(sorted(d_vector), 1),) + aux_inputs = { "speaker_id": None if not self.config.use_speaker_embedding else random.sample(sorted(self.speaker_manager.speaker_ids.values()), 1), - "d_vector": None - if not self.config.use_d_vector_file - else random.samples(sorted(self.speaker_manager.d_vectors.values()), 1), + "d_vector": d_vector, "style_wav": None, # TODO: handle GST style input } return aux_inputs From a409e0f8f8b39daa65d470a6ff1132c301c2b23c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 16:15:41 +0000 Subject: [PATCH 56/64] Update train_tts for multi-speaker --- TTS/bin/train_tts.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/TTS/bin/train_tts.py b/TTS/bin/train_tts.py index cfd092f1..e28e9dec 100644 --- a/TTS/bin/train_tts.py +++ b/TTS/bin/train_tts.py @@ -4,6 +4,7 @@ from TTS.config import load_config, register_config from TTS.trainer import Trainer, TrainingArgs from TTS.tts.datasets import load_tts_samples from TTS.tts.models import setup_model +from TTS.tts.utils.speakers import SpeakerManager from TTS.utils.audio import AudioProcessor @@ -43,8 +44,16 @@ def main(): # setup audio processor ap = AudioProcessor(**config.audio) + # init speaker manager + if config.use_speaker_embedding: + speaker_manager = SpeakerManager(data_items=train_samples + eval_samples) + elif config.use_d_vector_file: + speaker_manager = SpeakerManager(d_vectors_file_path=config.d_vector_file) + else: + speaker_manager = None + # init the model from config - model = setup_model(config) + model = setup_model(config, speaker_manager) # init the trainer and 🚀 trainer = Trainer( From 70e4d0e52445b6e27554777435a84d423bcf3501 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 16:16:50 +0000 Subject: [PATCH 57/64] Fix grad_norm handling --- TTS/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TTS/trainer.py b/TTS/trainer.py index 40d1ab6f..9fcd77a7 100644 
--- a/TTS/trainer.py +++ b/TTS/trainer.py @@ -647,7 +647,7 @@ class Trainer: optimizer.step() # pytorch skips the step when the norm is 0. So ignore the norm value when it is NaN - if isinstance(grad_norm ,torch.Tensor) and (torch.isnan(grad_norm) or torch.isinf(grad_norm)): + if isinstance(grad_norm, torch.Tensor) and (torch.isnan(grad_norm) or torch.isinf(grad_norm)): grad_norm = 0 step_time = time.time() - step_start_time From 71180c796227b9bcad75a43c1bd9cad53f99d1e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 16:19:19 +0000 Subject: [PATCH 58/64] =?UTF-8?q?VCTK=20recipes=20(finally=20=F0=9F=9A=80)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- recipes/ljspeech/align_tts/train_aligntts.py | 2 +- .../ljspeech/fast_pitch/train_fast_pitch.py | 4 +- .../ljspeech/fast_speech/train_fast_speech.py | 2 +- recipes/ljspeech/glow_tts/train_glowtts.py | 3 +- .../speedy_speech/train_speedy_speech.py | 2 +- recipes/vctk/download_vctk.sh | 12 +++ recipes/vctk/fast_pitch/train_fast_pitch.py | 80 +++++++++++++++++ recipes/vctk/fast_speech/train_fast_speech.py | 80 +++++++++++++++++ recipes/vctk/glow_tts/train_glow_tts.py | 62 +++++++++++++ .../vctk/speedy_speech/train_speedy_speech.py | 80 +++++++++++++++++ .../vctk/tacotron-DDC/train_tacotron-DDC.py | 80 +++++++++++++++++ .../vctk/tacotron2-DDC/train_tacotron2-ddc.py | 87 +++++++++++++++++++ recipes/vctk/vits/train_vits.py | 86 ++++++++++++++++++ 13 files changed, 574 insertions(+), 6 deletions(-) create mode 100644 recipes/vctk/download_vctk.sh create mode 100644 recipes/vctk/fast_pitch/train_fast_pitch.py create mode 100644 recipes/vctk/fast_speech/train_fast_speech.py create mode 100644 recipes/vctk/glow_tts/train_glow_tts.py create mode 100644 recipes/vctk/speedy_speech/train_speedy_speech.py create mode 100644 recipes/vctk/tacotron-DDC/train_tacotron-DDC.py create mode 100644 recipes/vctk/tacotron2-DDC/train_tacotron2-ddc.py create mode 100644 recipes/vctk/vits/train_vits.py diff --git a/recipes/ljspeech/align_tts/train_aligntts.py b/recipes/ljspeech/align_tts/train_aligntts.py index 76409374..68b67d66 100644 --- a/recipes/ljspeech/align_tts/train_aligntts.py +++ b/recipes/ljspeech/align_tts/train_aligntts.py @@ -1,7 +1,7 @@ import os from TTS.trainer import Trainer, TrainingArgs -from TTS.tts.configs import AlignTTSConfig, BaseDatasetConfig +from TTS.tts.configs.align_tts_config import AlignTTSConfig, BaseDatasetConfig from TTS.tts.datasets import load_tts_samples from TTS.tts.models.align_tts import AlignTTS from TTS.utils.audio import AudioProcessor diff --git a/recipes/ljspeech/fast_pitch/train_fast_pitch.py b/recipes/ljspeech/fast_pitch/train_fast_pitch.py index fead67a0..0a4a965b 100644 --- a/recipes/ljspeech/fast_pitch/train_fast_pitch.py +++ b/recipes/ljspeech/fast_pitch/train_fast_pitch.py @@ -1,8 +1,8 @@ import os -from TTS.config import BaseAudioConfig, BaseDatasetConfig +from TTS.config.shared_configs import BaseAudioConfig, BaseDatasetConfig from TTS.trainer import Trainer, TrainingArgs -from TTS.tts.configs import FastPitchConfig +from TTS.tts.configs.fast_pitch_config import FastPitchConfig from TTS.tts.datasets import load_tts_samples from TTS.tts.models.forward_tts import ForwardTTS from TTS.utils.audio import AudioProcessor diff --git a/recipes/ljspeech/fast_speech/train_fast_speech.py b/recipes/ljspeech/fast_speech/train_fast_speech.py index 56557c26..a71da94b 100644 --- a/recipes/ljspeech/fast_speech/train_fast_speech.py +++ 
@@ -2,7 +2,7 @@ import os
 
 from TTS.config import BaseAudioConfig, BaseDatasetConfig
 from TTS.trainer import Trainer, TrainingArgs
-from TTS.tts.configs import FastSpeechConfig
+from TTS.tts.configs.fast_speech_config import FastSpeechConfig
 from TTS.tts.datasets import load_tts_samples
 from TTS.tts.models.forward_tts import ForwardTTS
 from TTS.utils.audio import AudioProcessor
diff --git a/recipes/ljspeech/glow_tts/train_glowtts.py b/recipes/ljspeech/glow_tts/train_glowtts.py
index 29077eeb..6cfa3878 100644
--- a/recipes/ljspeech/glow_tts/train_glowtts.py
+++ b/recipes/ljspeech/glow_tts/train_glowtts.py
@@ -1,7 +1,8 @@
 import os
 
 from TTS.trainer import Trainer, TrainingArgs
-from TTS.tts.configs import BaseDatasetConfig, GlowTTSConfig
+from TTS.tts.configs.glow_tts_config import GlowTTSConfig
+from TTS.tts.configs.shared_configs import BaseDatasetConfig
 from TTS.tts.datasets import load_tts_samples
 from TTS.tts.models.glow_tts import GlowTTS
 from TTS.utils.audio import AudioProcessor
diff --git a/recipes/ljspeech/speedy_speech/train_speedy_speech.py b/recipes/ljspeech/speedy_speech/train_speedy_speech.py
index 974823ac..6b9683af 100644
--- a/recipes/ljspeech/speedy_speech/train_speedy_speech.py
+++ b/recipes/ljspeech/speedy_speech/train_speedy_speech.py
@@ -2,7 +2,7 @@ import os
 
 from TTS.config import BaseAudioConfig, BaseDatasetConfig
 from TTS.trainer import Trainer, TrainingArgs
-from TTS.tts.configs import SpeedySpeechConfig
+from TTS.tts.configs.speedy_speech_config import SpeedySpeechConfig
 from TTS.tts.datasets import load_tts_samples
 from TTS.tts.models.forward_tts import ForwardTTS
 from TTS.utils.audio import AudioProcessor
diff --git a/recipes/vctk/download_vctk.sh b/recipes/vctk/download_vctk.sh
new file mode 100644
index 00000000..c0cea743
--- /dev/null
+++ b/recipes/vctk/download_vctk.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+# take the script's parent directory to prefix all the output paths.
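+# (the cd/dirname/pwd idiom below resolves the absolute directory containing this
+# script, independent of the caller's working directory)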
+RUN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+echo $RUN_DIR
+# download the VCTK dataset
+wget https://datashare.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip -O VCTK-Corpus-0.92.zip
+# extract
+mkdir VCTK
+unzip VCTK-Corpus-0.92 -d VCTK
+# move the corpus under the recipes folder
+mv VCTK $RUN_DIR/recipes/vctk/
+rm VCTK-Corpus-0.92.zip
diff --git a/recipes/vctk/fast_pitch/train_fast_pitch.py b/recipes/vctk/fast_pitch/train_fast_pitch.py
new file mode 100644
index 00000000..f40587e0
--- /dev/null
+++ b/recipes/vctk/fast_pitch/train_fast_pitch.py
@@ -0,0 +1,80 @@
+import os
+
+from TTS.config import BaseAudioConfig, BaseDatasetConfig
+from TTS.trainer import Trainer, TrainingArgs
+from TTS.tts.configs.fast_pitch_config import FastPitchConfig
+from TTS.tts.datasets import load_tts_samples
+from TTS.tts.models.forward_tts import ForwardTTS
+from TTS.tts.utils.speakers import SpeakerManager
+from TTS.utils.audio import AudioProcessor
+
+output_path = os.path.dirname(os.path.abspath(__file__))
+dataset_config = BaseDatasetConfig(name="vctk", meta_file_train="", path=os.path.join(output_path, "../VCTK/"))
+
+audio_config = BaseAudioConfig(
+    sample_rate=22050,
+    do_trim_silence=True,
+    trim_db=23.0,
+    signal_norm=False,
+    mel_fmin=0.0,
+    mel_fmax=8000,
+    spec_gain=1.0,
+    log_func="np.log",
+    ref_level_db=20,
+    preemphasis=0.0,
+)
+
+config = FastPitchConfig(
+    run_name="fast_pitch_vctk",
+    audio=audio_config,
+    batch_size=32,
+    eval_batch_size=16,
+    num_loader_workers=8,
+    num_eval_loader_workers=4,
+    compute_input_seq_cache=True,
+    compute_f0=True,
+    f0_cache_path=os.path.join(output_path, "f0_cache"),
+    run_eval=True,
+    test_delay_epochs=-1,
+    epochs=1000,
+    text_cleaner="english_cleaners",
+    use_phonemes=True,
+    use_espeak_phonemes=False,
+    phoneme_language="en-us",
+    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
+    print_step=50,
+    print_eval=False,
+    mixed_precision=False,
+    sort_by_audio_len=True,
+    max_seq_len=500000,
+    output_path=output_path,
+    datasets=[dataset_config],
+    use_speaker_embedding=True,
+)
+
+# init audio processor
+ap = AudioProcessor(**config.audio)
+
+# load training samples
+train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)
+
+# init speaker manager for multi-speaker training
+# it maps speaker-id to speaker-name in the model and data-loader
+speaker_manager = SpeakerManager()
+speaker_manager.set_speaker_ids_from_data(train_samples + eval_samples)
+config.model_args.num_speakers = speaker_manager.num_speakers
+
+# init model
+model = ForwardTTS(config, speaker_manager)
+
+# init the trainer and 🚀
+trainer = Trainer(
+    TrainingArgs(),
+    config,
+    output_path,
+    model=model,
+    train_samples=train_samples,
+    eval_samples=eval_samples,
+    training_assets={"audio_processor": ap},
+)
+trainer.fit()
diff --git a/recipes/vctk/fast_speech/train_fast_speech.py b/recipes/vctk/fast_speech/train_fast_speech.py
new file mode 100644
index 00000000..b2988809
--- /dev/null
+++ b/recipes/vctk/fast_speech/train_fast_speech.py
@@ -0,0 +1,80 @@
+import os
+
+from TTS.config import BaseAudioConfig, BaseDatasetConfig
+from TTS.trainer import Trainer, TrainingArgs
+from TTS.tts.configs.fast_speech_config import FastSpeechConfig
+from TTS.tts.datasets import load_tts_samples
+from TTS.tts.models.forward_tts import ForwardTTS
+from TTS.tts.utils.speakers import SpeakerManager
+from TTS.utils.audio import AudioProcessor
+
+output_path = os.path.dirname(os.path.abspath(__file__))
+dataset_config = BaseDatasetConfig(name="vctk", meta_file_train="", path=os.path.join(output_path, "../VCTK/"))
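+# (the recipe assumes the VCTK folder created by recipes/vctk/download_vctk.sh one
+# directory above this script)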
BaseDatasetConfig(name="vctk", meta_file_train="", path=os.path.join(output_path, "../VCTK/")) + +audio_config = BaseAudioConfig( + sample_rate=22050, + do_trim_silence=True, + trim_db=23.0, + signal_norm=False, + mel_fmin=0.0, + mel_fmax=8000, + spec_gain=1.0, + log_func="np.log", + ref_level_db=20, + preemphasis=0.0, +) + +config = FastSpeechConfig( + run_name="fast_pitch_ljspeech", + audio=audio_config, + batch_size=32, + eval_batch_size=16, + num_loader_workers=8, + num_eval_loader_workers=4, + compute_input_seq_cache=True, + compute_f0=True, + f0_cache_path=os.path.join(output_path, "f0_cache"), + run_eval=True, + test_delay_epochs=-1, + epochs=1000, + text_cleaner="english_cleaners", + use_phonemes=True, + use_espeak_phonemes=False, + phoneme_language="en-us", + phoneme_cache_path=os.path.join(output_path, "phoneme_cache"), + print_step=50, + print_eval=False, + mixed_precision=False, + sort_by_audio_len=True, + max_seq_len=500000, + output_path=output_path, + datasets=[dataset_config], + use_speaker_embedding=True, +) + +# init audio processor +ap = AudioProcessor(**config.audio) + +# load training samples +train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True) + +# init speaker manager for multi-speaker training +# it maps speaker-id to speaker-name in the model and data-loader +speaker_manager = SpeakerManager() +speaker_manager.set_speaker_ids_from_data(train_samples + eval_samples) +config.model_args.num_speakers = speaker_manager.num_speakers + +# init model +model = ForwardTTS(config, speaker_manager) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) +trainer.fit() diff --git a/recipes/vctk/glow_tts/train_glow_tts.py b/recipes/vctk/glow_tts/train_glow_tts.py new file mode 100644 index 00000000..da8872db --- /dev/null +++ b/recipes/vctk/glow_tts/train_glow_tts.py @@ -0,0 +1,62 @@ +import os + +from TTS.config.shared_configs import BaseAudioConfig +from TTS.trainer import Trainer, TrainingArgs +from TTS.tts.configs.glow_tts_config import GlowTTSConfig +from TTS.tts.configs.shared_configs import BaseDatasetConfig +from TTS.tts.datasets import load_tts_samples +from TTS.tts.models.glow_tts import GlowTTS +from TTS.tts.utils.speakers import SpeakerManager +from TTS.utils.audio import AudioProcessor + +output_path = os.path.dirname(os.path.abspath(__file__)) +dataset_config = BaseDatasetConfig(name="vctk", meta_file_train="", path=os.path.join(output_path, "../VCTK/")) + +audio_config = BaseAudioConfig(sample_rate=22050, do_trim_silence=True, trim_db=23.0) + +config = GlowTTSConfig( + batch_size=64, + eval_batch_size=16, + num_loader_workers=4, + num_eval_loader_workers=4, + run_eval=True, + test_delay_epochs=-1, + epochs=1000, + text_cleaner="phoneme_cleaners", + use_phonemes=True, + phoneme_language="en-us", + phoneme_cache_path=os.path.join(output_path, "phoneme_cache"), + print_step=25, + print_eval=False, + mixed_precision=True, + output_path=output_path, + datasets=[dataset_config], + use_speaker_embedding=True, +) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True) + +# init speaker manager for multi-speaker training +# it maps speaker-id to speaker-name in the model and data-loader +speaker_manager = SpeakerManager() 
+speaker_manager.set_speaker_ids_from_data(train_samples + eval_samples)
+config.num_speakers = speaker_manager.num_speakers
+
+# init model
+model = GlowTTS(config, speaker_manager)
+
+# init the trainer and 🚀
+trainer = Trainer(
+    TrainingArgs(),
+    config,
+    output_path,
+    model=model,
+    train_samples=train_samples,
+    eval_samples=eval_samples,
+    training_assets={"audio_processor": ap},
+)
+trainer.fit()
diff --git a/recipes/vctk/speedy_speech/train_speedy_speech.py b/recipes/vctk/speedy_speech/train_speedy_speech.py
new file mode 100644
index 00000000..81f78d26
--- /dev/null
+++ b/recipes/vctk/speedy_speech/train_speedy_speech.py
@@ -0,0 +1,80 @@
+import os
+
+from TTS.config import BaseAudioConfig, BaseDatasetConfig
+from TTS.trainer import Trainer, TrainingArgs
+from TTS.tts.configs.speedy_speech_config import SpeedySpeechConfig
+from TTS.tts.datasets import load_tts_samples
+from TTS.tts.models.forward_tts import ForwardTTS
+from TTS.tts.utils.speakers import SpeakerManager
+from TTS.utils.audio import AudioProcessor
+
+output_path = os.path.dirname(os.path.abspath(__file__))
+dataset_config = BaseDatasetConfig(name="vctk", meta_file_train="", path=os.path.join(output_path, "../VCTK/"))
+
+audio_config = BaseAudioConfig(
+    sample_rate=22050,
+    do_trim_silence=True,
+    trim_db=23.0,
+    signal_norm=False,
+    mel_fmin=0.0,
+    mel_fmax=8000,
+    spec_gain=1.0,
+    log_func="np.log",
+    ref_level_db=20,
+    preemphasis=0.0,
+)
+
+config = SpeedySpeechConfig(
+    run_name="speedy_speech_vctk",
+    audio=audio_config,
+    batch_size=32,
+    eval_batch_size=16,
+    num_loader_workers=8,
+    num_eval_loader_workers=4,
+    compute_input_seq_cache=True,
+    compute_f0=True,
+    f0_cache_path=os.path.join(output_path, "f0_cache"),
+    run_eval=True,
+    test_delay_epochs=-1,
+    epochs=1000,
+    text_cleaner="english_cleaners",
+    use_phonemes=True,
+    use_espeak_phonemes=False,
+    phoneme_language="en-us",
+    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
+    print_step=50,
+    print_eval=False,
+    mixed_precision=False,
+    sort_by_audio_len=True,
+    max_seq_len=500000,
+    output_path=output_path,
+    datasets=[dataset_config],
+    use_speaker_embedding=True,
+)
+
+# init audio processor
+ap = AudioProcessor(**config.audio)
+
+# load training samples
+train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)
+
+# init speaker manager for multi-speaker training
+# it maps speaker-id to speaker-name in the model and data-loader
+speaker_manager = SpeakerManager()
+speaker_manager.set_speaker_ids_from_data(train_samples + eval_samples)
+config.model_args.num_speakers = speaker_manager.num_speakers
+
+# init model
+model = ForwardTTS(config, speaker_manager)
+
+# init the trainer and 🚀
+trainer = Trainer(
+    TrainingArgs(),
+    config,
+    output_path,
+    model=model,
+    train_samples=train_samples,
+    eval_samples=eval_samples,
+    training_assets={"audio_processor": ap},
+)
+trainer.fit()
diff --git a/recipes/vctk/tacotron-DDC/train_tacotron-DDC.py b/recipes/vctk/tacotron-DDC/train_tacotron-DDC.py
new file mode 100644
index 00000000..b0030f17
--- /dev/null
+++ b/recipes/vctk/tacotron-DDC/train_tacotron-DDC.py
@@ -0,0 +1,80 @@
+import os
+
+from TTS.config.shared_configs import BaseAudioConfig
+from TTS.trainer import Trainer, TrainingArgs
+from TTS.tts.configs.shared_configs import BaseDatasetConfig
+from TTS.tts.configs.tacotron_config import TacotronConfig
+from TTS.tts.datasets import load_tts_samples
+from TTS.tts.models.tacotron import Tacotron
+from TTS.tts.utils.speakers import SpeakerManager
+from TTS.utils.audio import AudioProcessor
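+
+# NOTE: `gradual_training` below is a schedule of [first_step, r, batch_size] triples;
+# the decoder reduction factor `r` is annealed from 6 down to 2 over training.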
+
+output_path = os.path.dirname(os.path.abspath(__file__))
+dataset_config = BaseDatasetConfig(name="vctk", meta_file_train="", path=os.path.join(output_path, "../VCTK/"))
+
+audio_config = BaseAudioConfig(
+    sample_rate=22050,
+    resample=True,  # Resample to 22050 Hz. It slows down training. Use `TTS/bin/resample.py` to pre-resample and set this False for faster training.
+    do_trim_silence=True,
+    trim_db=23.0,
+    signal_norm=False,
+    mel_fmin=0.0,
+    mel_fmax=8000,
+    spec_gain=1.0,
+    log_func="np.log",
+    ref_level_db=20,
+    preemphasis=0.0,
+)
+
+config = TacotronConfig(  # This is the config that is saved for future use
+    audio=audio_config,
+    batch_size=48,
+    eval_batch_size=16,
+    num_loader_workers=4,
+    num_eval_loader_workers=4,
+    run_eval=True,
+    test_delay_epochs=-1,
+    r=6,
+    gradual_training=[[0, 6, 48], [10000, 4, 32], [50000, 3, 32], [100000, 2, 32]],
+    double_decoder_consistency=True,
+    epochs=1000,
+    text_cleaner="phoneme_cleaners",
+    use_phonemes=True,
+    phoneme_language="en-us",
+    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
+    print_step=25,
+    print_eval=False,
+    mixed_precision=True,
+    sort_by_audio_len=True,
+    min_seq_len=0,
+    max_seq_len=44000 * 10,  # 44k is the original sampling rate before resampling, corresponds to 10 seconds of audio
+    output_path=output_path,
+    datasets=[dataset_config],
+    use_speaker_embedding=True,  # set this to enable multi-speaker training
+)
+
+# init audio processor
+ap = AudioProcessor(**config.audio.to_dict())
+
+# load training samples
+train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)
+
+# init speaker manager for multi-speaker training
+# it mainly handles speaker-id to speaker-name for the model and the data-loader
+speaker_manager = SpeakerManager()
+speaker_manager.set_speaker_ids_from_data(train_samples + eval_samples)
+
+# init model
+model = Tacotron(config, speaker_manager)
+
+# init the trainer and 🚀
+trainer = Trainer(
+    TrainingArgs(),
+    config,
+    output_path,
+    model=model,
+    train_samples=train_samples,
+    eval_samples=eval_samples,
+    training_assets={"audio_processor": ap},
+)
+trainer.fit()
diff --git a/recipes/vctk/tacotron2-DDC/train_tacotron2-ddc.py b/recipes/vctk/tacotron2-DDC/train_tacotron2-ddc.py
new file mode 100644
index 00000000..346d650b
--- /dev/null
+++ b/recipes/vctk/tacotron2-DDC/train_tacotron2-ddc.py
@@ -0,0 +1,87 @@
+import os
+
+from TTS.config.shared_configs import BaseAudioConfig
+from TTS.trainer import Trainer, TrainingArgs
+from TTS.tts.configs.shared_configs import BaseDatasetConfig
+from TTS.tts.configs.tacotron2_config import Tacotron2Config
+from TTS.tts.datasets import load_tts_samples
+from TTS.tts.models.tacotron2 import Tacotron2
+from TTS.tts.utils.speakers import SpeakerManager
+from TTS.utils.audio import AudioProcessor
+
+output_path = os.path.dirname(os.path.abspath(__file__))
+dataset_config = BaseDatasetConfig(name="vctk", meta_file_train="", path=os.path.join(output_path, "../VCTK/"))
+
+audio_config = BaseAudioConfig(
+    sample_rate=22050,
+    resample=False,  # Set True to resample to 22050 Hz on the fly; it slows down training. Pre-resample with `TTS/bin/resample.py` and keep this False for faster training.
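+    # NOTE: with resample=False the VCTK wavs are assumed to be already at
+    # `sample_rate` (22050 Hz), e.g. produced by the `TTS/bin/resample.py` script.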
+    do_trim_silence=True,
+    trim_db=23.0,
+    signal_norm=False,
+    mel_fmin=0.0,
+    mel_fmax=8000,
+    spec_gain=1.0,
+    log_func="np.log",
+    preemphasis=0.0,
+)
+
+config = Tacotron2Config(  # This is the config that is saved for future use
+    audio=audio_config,
+    batch_size=32,
+    eval_batch_size=16,
+    num_loader_workers=4,
+    num_eval_loader_workers=4,
+    run_eval=True,
+    test_delay_epochs=-1,
+    r=2,
+    # gradual_training=[[0, 6, 48], [10000, 4, 32], [50000, 3, 32], [100000, 2, 32]],
+    double_decoder_consistency=False,
+    epochs=1000,
+    text_cleaner="phoneme_cleaners",
+    use_phonemes=True,
+    phoneme_language="en-us",
+    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
+    print_step=150,
+    print_eval=False,
+    mixed_precision=True,
+    sort_by_audio_len=True,
+    min_seq_len=14800,
+    max_seq_len=22050 * 10,  # 10 seconds of audio at the 22050 Hz target sampling rate
+    output_path=output_path,
+    datasets=[dataset_config],
+    use_speaker_embedding=True,  # set this to enable multi-speaker training
+    decoder_ssim_alpha=0.0,  # disable ssim losses that cause NaN for some runs.
+    postnet_ssim_alpha=0.0,
+    postnet_diff_spec_alpha=0.0,
+    decoder_diff_spec_alpha=0.0,
+    attention_norm="softmax",
+    optimizer="Adam",
+    lr_scheduler=None,
+    lr=3e-5,
+)
+
+# init audio processor
+ap = AudioProcessor(**config.audio.to_dict())
+
+# load training samples
+train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)
+
+# init speaker manager for multi-speaker training
+# it mainly handles speaker-id to speaker-name for the model and the data-loader
+speaker_manager = SpeakerManager()
+speaker_manager.set_speaker_ids_from_data(train_samples + eval_samples)
+
+# init model
+model = Tacotron2(config, speaker_manager)
+
+# init the trainer and 🚀
+trainer = Trainer(
+    TrainingArgs(),
+    config,
+    output_path,
+    model=model,
+    train_samples=train_samples,
+    eval_samples=eval_samples,
+    training_assets={"audio_processor": ap},
+)
+trainer.fit()
diff --git a/recipes/vctk/vits/train_vits.py b/recipes/vctk/vits/train_vits.py
new file mode 100644
index 00000000..19074ce3
--- /dev/null
+++ b/recipes/vctk/vits/train_vits.py
@@ -0,0 +1,86 @@
+import os
+
+from TTS.config.shared_configs import BaseAudioConfig
+from TTS.trainer import Trainer, TrainingArgs
+from TTS.tts.configs.shared_configs import BaseDatasetConfig
+from TTS.tts.configs.vits_config import VitsConfig
+from TTS.tts.datasets import load_tts_samples
+from TTS.tts.models.vits import Vits
+from TTS.tts.utils.speakers import SpeakerManager
+from TTS.utils.audio import AudioProcessor
+
+output_path = os.path.dirname(os.path.abspath(__file__))
+dataset_config = BaseDatasetConfig(name="vctk", meta_file_train="", path=os.path.join(output_path, "../VCTK/"))
+
+
+audio_config = BaseAudioConfig(
+    sample_rate=22050,
+    win_length=1024,
+    hop_length=256,
+    num_mels=80,
+    preemphasis=0.0,
+    ref_level_db=20,
+    log_func="np.log",
+    do_trim_silence=True,
+    trim_db=23.0,
+    mel_fmin=0,
+    mel_fmax=None,
+    spec_gain=1.0,
+    signal_norm=False,
+    do_amp_to_db_linear=False,
+    resample=True,
+)
+
+config = VitsConfig(
+    audio=audio_config,
+    run_name="vits_vctk",
+    use_speaker_embedding=True,
+    batch_size=32,
+    eval_batch_size=16,
+    batch_group_size=5,
+    num_loader_workers=4,
+    num_eval_loader_workers=4,
+    run_eval=True,
+    test_delay_epochs=-1,
+    epochs=1000,
+    text_cleaner="english_cleaners",
+    use_phonemes=True,
+    phoneme_language="en-us",
+    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
+    compute_input_seq_cache=True,
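+    # NOTE: pre-computes and caches the phonemized input sequences before training
+    # starts, so later epochs do not re-run the phonemizer.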
+ print_step=25, + print_eval=False, + mixed_precision=True, + sort_by_audio_len=True, + min_seq_len=32 * 256 * 4, + max_seq_len=1500000, + output_path=output_path, + datasets=[dataset_config], +) + +# init audio processor +ap = AudioProcessor(**config.audio.to_dict()) + +# load training samples +train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True) + +# init speaker manager for multi-speaker training +# it maps speaker-id to speaker-name in the model and data-loader +speaker_manager = SpeakerManager() +speaker_manager.set_speaker_ids_from_data(train_samples + eval_samples) +config.model_args.num_speakers = speaker_manager.num_speakers + +# init model +model = Vits(config, speaker_manager) + +# init the trainer and 🚀 +trainer = Trainer( + TrainingArgs(), + config, + output_path, + model=model, + train_samples=train_samples, + eval_samples=eval_samples, + training_assets={"audio_processor": ap}, +) +trainer.fit() From 5e0d0539c5d9623f59e719f0281017f3f07eec66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 16:19:48 +0000 Subject: [PATCH 59/64] Remove unmaintained notebooks --- ...MultiBand_MelGAN_Example_Synthetizer.ipynb | 606 ------------------ ...DDC_TTS_and_MultiBand_MelGAN_Example.ipynb | 342 ---------- ..._TTS_and_MultiBand_MelGAN_TF_Example.ipynb | 346 ---------- .../DDC_TTS_and_ParallelWaveGAN_Example.ipynb | 342 ---------- 4 files changed, 1636 deletions(-) delete mode 100644 notebooks/Chinese_Mandarin_DDC_GST_Tacotron2_TTS_and_MultiBand_MelGAN_Example_Synthetizer.ipynb delete mode 100644 notebooks/DDC_TTS_and_MultiBand_MelGAN_Example.ipynb delete mode 100644 notebooks/DDC_TTS_and_MultiBand_MelGAN_TF_Example.ipynb delete mode 100644 notebooks/DDC_TTS_and_ParallelWaveGAN_Example.ipynb diff --git a/notebooks/Chinese_Mandarin_DDC_GST_Tacotron2_TTS_and_MultiBand_MelGAN_Example_Synthetizer.ipynb b/notebooks/Chinese_Mandarin_DDC_GST_Tacotron2_TTS_and_MultiBand_MelGAN_Example_Synthetizer.ipynb deleted file mode 100644 index 1be93a82..00000000 --- a/notebooks/Chinese_Mandarin_DDC_GST_Tacotron2_TTS_and_MultiBand_MelGAN_Example_Synthetizer.ipynb +++ /dev/null @@ -1,606 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "6LWsNd3_M3MP" - }, - "source": [ - "# Mozilla TTS on CPU Real-Time Speech Synthesis " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FAqrSIWgLyP0" - }, - "source": [ - "We use Tacotron2 and MultiBand-Melgan models and Baker dataset (chinese mandarin).\n", - "\n", - "Tacotron2 is trained using [Double Decoder Consistency](https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency/) (DDC) only for 126K steps (3 days) with a single GPU.\n", - "\n", - "MultiBand-Melgan is trained 1.45M steps with real spectrograms.\n", - "\n", - "Note that both model performances can be improved with more training." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ku-dA4DKoeXk" - }, - "source": [ - "### Download Models" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Zlgi8fPdpRF0" - }, - "source": [ - "### Define TTS function" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "ZksegYQepkFg" - }, - "source": [ - "### Load Models" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "oVa0kOamprgj" - }, - "outputs": [], - "source": [ - "import os\n", - "import torch\n", - "import IPython\n", - "\n", - "from TTS.utils.synthesizer import Synthesizer\n", - "from TTS.utils.manage import ModelManager\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "EY-sHVO8IFSH" - }, - "outputs": [], - "source": [ - "# runtime settings\n", - "use_cuda = False" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "# tts and vocoder name\n", - "TTS_NAME = \"tts_models/zh-CN/baker/tacotron2-DDC-GST\"\n", - "VOCODER_NAME = \"vocoder_models/en/ljspeech/multiband-melgan\"\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "manager = ModelManager(\"../TTS/.models.json\")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > tts_models/zh-CN/baker/tacotron2-DDC-GST is already downloaded.\n", - " > vocoder_models/en/ljspeech/multiband-melgan is already downloaded.\n" - ] - } - ], - "source": [ - "tts_checkpoint_file, tts_config_file, tts_json_dict = manager.download_model(TTS_NAME)\n", - "vocoder_checkpoint_file, vocoder_config_file, vocoder_json_dict = manager.download_model(VOCODER_NAME)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Using model: tacotron2\n", - " > Generator Model: multiband_melgan_generator\n" - ] - } - ], - "source": [ - "synthesizer = Synthesizer(tts_checkpoint_file, tts_config_file, vocoder_checkpoint_file, vocoder_config_file, use_cuda)\n", - "sample_rate = synthesizer.tts_config.audio[\"sample_rate\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ws_YkPKsLgo-" - }, - "source": [ - "## Run Inference" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "# Here some test sentences for you to play with :\n", - "sentences= [\"我从来不会说很标准的中文。\",\n", - "\"我喜欢听人工智能的博客。\",\n", - "\"我来自一个法国郊区的地方。\",\n", - "\"不比不知道,一比吓一跳!\",\n", - "\"台湾是一个真的很好玩的地方!\",\n", - "\"干一行,行一行,行行都行。\",\n", - "\"我要盖被子,好尴尬!\",]" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['我从来不会说很标准的中文。']\n", - " > Processing time: 1.6665124893188477\n", - " > Real-time factor: 0.5583910829911347\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['我喜欢听人工智能的博客。']\n", - " > Processing time: 
1.4052538871765137\n", - " > Real-time factor: 0.5193391025114328\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['我来自一个法国郊区的地方。']\n", - " > Processing time: 1.605910062789917\n", - " > Real-time factor: 0.5785999490934259\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['不比不知道,一比吓一跳!']\n", - " > Processing time: 1.9105627536773682\n", - " > Real-time factor: 0.6607262973429417\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['台湾是一个真的很好玩的地方!']\n", - " > Processing time: 1.3081049919128418\n", - " > Real-time factor: 0.4218891158389621\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['干一行,行一行,行行都行。']\n", - " > Processing time: 2.0958540439605713\n", - " > Real-time factor: 0.6709288860239634\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['我要盖被子,好尴尬!']\n", - " > Processing time: 1.5188167095184326\n", - " > Real-time factor: 0.6257456734843319\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "for sentence in sentences:\n", - " wav = synthesizer.tts(sentence)\n", - " IPython.display.display(IPython.display.Audio(wav, rate=sample_rate)) \n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['我喜欢听人工智能的博客。']\n", - " > Processing time: 2.114016056060791\n", - " > Real-time factor: 0.643271887228699\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# you can also play with Global Style Token (GST) by feeding a \n", - "# ... 
wav_style parameter to the tts method\n", - "\n", - "style_wav = {\"2\": 0.2}\n", - "\n", - "wav = synthesizer.tts(sentences[1], style_wav=style_wav)\n", - "IPython.display.display(IPython.display.Audio(wav, rate=sample_rate)) " - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['我喜欢听人工智能的博客。']\n", - " > Processing time: 1.5687272548675537\n", - " > Real-time factor: 0.6401842606201799\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['我喜欢听人工智能的博客。']\n", - " > Processing time: 2.070594072341919\n", - " > Real-time factor: 0.8067677285683367\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['我喜欢听人工智能的博客。']\n", - " > Processing time: 1.3769311904907227\n", - " > Real-time factor: 0.5088718951180015\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['我喜欢听人工智能的博客。']\n", - " > Processing time: 2.024374485015869\n", - " > Real-time factor: 0.6782983435843654\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " > Text splitted to sentences.\n", - "['我喜欢听人工智能的博客。']\n", - " > Processing time: 2.4434399604797363\n", - " > Real-time factor: 0.7435119663360867\n" - ] - }, - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# On this model specifically, we can observe that the GSToken \"2\" is responsible for speech speed\n", - "# You can listen to these 5 different samples, the flow is slower and slower as the value is higher\n", - "for value in [-0.2, -0.1, 0, 0.1, 0.2]:\n", - " style_wav = {\"2\": value}\n", - " wav = synthesizer.tts(sentences[1], style_wav=style_wav)\n", - " IPython.display.display(IPython.display.Audio(wav, rate=sample_rate)) " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "DDC-TTS_and_MultiBand-MelGAN_Example.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.9" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/notebooks/DDC_TTS_and_MultiBand_MelGAN_Example.ipynb b/notebooks/DDC_TTS_and_MultiBand_MelGAN_Example.ipynb deleted file mode 100644 index 67171b0e..00000000 --- 
a/notebooks/DDC_TTS_and_MultiBand_MelGAN_Example.ipynb +++ /dev/null @@ -1,342 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "6LWsNd3_M3MP" - }, - "source": [ - "# Mozilla TTS on CPU Real-Time Speech Synthesis " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FAqrSIWgLyP0" - }, - "source": [ - "We use Tacotron2 and MultiBand-Melgan models and LJSpeech dataset.\n", - "\n", - "Tacotron2 is trained using [Double Decoder Consistency](https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency/) (DDC) only for 130K steps (3 days) with a single GPU.\n", - "\n", - "MultiBand-Melgan is trained 1.45M steps with real spectrograms.\n", - "\n", - "Note that both model performances can be improved with more training." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ku-dA4DKoeXk" - }, - "source": [ - "### Download Models" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 162 - }, - "colab_type": "code", - "id": "jGIgnWhGsxU1", - "outputId": "88725e41-a8dc-4885-b3bf-cac939f38abe", - "tags": [] - }, - "outputs": [], - "source": [ - "!gdown --id 1dntzjWFg7ufWaTaFy80nRz-Tu02xWZos -O data/tts_model.pth.tar\n", - "!gdown --id 18CQ6G6tBEOfvCHlPqP8EBI4xWbrr9dBc -O data/config.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 235 - }, - "colab_type": "code", - "id": "4dnpE0-kvTsu", - "outputId": "76377c6d-789c-4995-ba00-a21a6e1c401e", - "tags": [] - }, - "outputs": [], - "source": [ - "!gdown --id 1Ty5DZdOc0F7OTGj9oJThYbL5iVu_2G0K -O data/vocoder_model.pth.tar\n", - "!gdown --id 1Rd0R_nRCrbjEdpOwq6XwZAktvugiBvmu -O data/config_vocoder.json\n", - "!gdown --id 11oY3Tv0kQtxK_JPgxrfesa99maVXHNxU -O data/scale_stats.npy" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Zlgi8fPdpRF0" - }, - "source": [ - "### Define TTS function" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "f-Yc42nQZG5A" - }, - "outputs": [], - "source": [ - "def tts(model, text, CONFIG, use_cuda, ap, use_gl, figures=True):\n", - " t_1 = time.time()\n", - " waveform, alignment, mel_spec, mel_postnet_spec, stop_tokens, inputs = synthesis(model, text, CONFIG, use_cuda, ap, speaker_id, style_wav=None,\n", - " truncated=False, enable_eos_bos_chars=CONFIG.enable_eos_bos_chars)\n", - " # mel_postnet_spec = ap.denormalize(mel_postnet_spec.T)\n", - " if not use_gl:\n", - " waveform = vocoder_model.inference(torch.FloatTensor(mel_postnet_spec.T).unsqueeze(0))\n", - " waveform = waveform.flatten()\n", - " if use_cuda:\n", - " waveform = waveform.cpu()\n", - " waveform = waveform.numpy()\n", - " rtf = (time.time() - t_1) / (len(waveform) / ap.sample_rate)\n", - " tps = (time.time() - t_1) / len(waveform)\n", - " print(waveform.shape)\n", - " print(\" > Run-time: {}\".format(time.time() - t_1))\n", - " print(\" > Real-time factor: {}\".format(rtf))\n", - " print(\" > Time per step: {}\".format(tps))\n", - " IPython.display.display(IPython.display.Audio(waveform, rate=CONFIG.audio['sample_rate'])) \n", - " return alignment, mel_postnet_spec, stop_tokens, waveform" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "ZksegYQepkFg" - }, - "source": [ 
- "### Load Models" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "oVa0kOamprgj" - }, - "outputs": [], - "source": [ - "import os\n", - "import torch\n", - "import time\n", - "import IPython\n", - "\n", - "from TTS.tts.utils.generic_utils import setup_model\n", - "from TTS.utils.io import load_config\n", - "from TTS.tts.utils.text.symbols import symbols, phonemes\n", - "from TTS.utils.audio import AudioProcessor\n", - "from TTS.tts.utils.synthesis import synthesis" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "EY-sHVO8IFSH" - }, - "outputs": [], - "source": [ - "# runtime settings\n", - "use_cuda = False" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "_1aIUp2FpxOQ" - }, - "outputs": [], - "source": [ - "# model paths\n", - "TTS_MODEL = \"data/tts_model.pth.tar\"\n", - "TTS_CONFIG = \"data/config.json\"\n", - "VOCODER_MODEL = \"data/vocoder_model.pth.tar\"\n", - "VOCODER_CONFIG = \"data/config_vocoder.json\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "CpgmdBVQplbv" - }, - "outputs": [], - "source": [ - "# load configs\n", - "TTS_CONFIG = load_config(TTS_CONFIG)\n", - "VOCODER_CONFIG = load_config(VOCODER_CONFIG)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 471 - }, - "colab_type": "code", - "id": "zmrQxiozIUVE", - "outputId": "60c4daa0-4c5b-4a2e-fe0d-be437d003a49", - "tags": [] - }, - "outputs": [], - "source": [ - "# load the audio processor\n", - "TTS_CONFIG.audio['stats_path'] = 'data/scale_stats.npy'\n", - "ap = AudioProcessor(**TTS_CONFIG.audio) " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - }, - "colab_type": "code", - "id": "8fLoI4ipqMeS", - "outputId": "b789066e-e305-42ad-b3ca-eba8d9267382", - "tags": [] - }, - "outputs": [], - "source": [ - "# LOAD TTS MODEL\n", - "# multi speaker \n", - "speaker_id = None\n", - "speakers = []\n", - "\n", - "# load the model\n", - "num_chars = len(phonemes) if TTS_CONFIG.use_phonemes else len(symbols)\n", - "model = setup_model(num_chars, len(speakers), TTS_CONFIG)\n", - "\n", - "# load model state\n", - "cp = torch.load(TTS_MODEL, map_location=torch.device('cpu'))\n", - "\n", - "# load the model\n", - "model.load_state_dict(cp['model'])\n", - "if use_cuda:\n", - " model.cuda()\n", - "model.eval()\n", - "\n", - "# set model stepsize\n", - "if 'r' in cp:\n", - " model.decoder.set_r(cp['r'])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "colab_type": "code", - "id": "zKoq0GgzqzhQ", - "outputId": "234efc61-f37a-40bc-95a3-b51896018ccb", - "tags": [] - }, - "outputs": [], - "source": [ - "from TTS.vocoder.utils.generic_utils import setup_generator\n", - "\n", - "# LOAD VOCODER MODEL\n", - "vocoder_model = setup_generator(VOCODER_CONFIG)\n", - "vocoder_model.load_state_dict(torch.load(VOCODER_MODEL, map_location=\"cpu\")[\"model\"])\n", - "vocoder_model.remove_weight_norm()\n", - "vocoder_model.inference_padding = 0\n", - "\n", - "ap_vocoder = AudioProcessor(**VOCODER_CONFIG['audio']) \n", - "if use_cuda:\n", - " 
vocoder_model.cuda()\n", - "vocoder_model.eval()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ws_YkPKsLgo-" - }, - "source": [ - "## Run Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 134 - }, - "colab_type": "code", - "id": "FuWxZ9Ey5Puj", - "outputId": "9c06adad-5451-4393-89a1-a2e7dc39ab91", - "tags": [] - }, - "outputs": [], - "source": [ - "sentence = \"Bill got in the habit of asking himself “Is that thought true?” and if he wasn’t absolutely certain it was, he just let it go.\"\n", - "align, spec, stop_tokens, wav = tts(model, sentence, TTS_CONFIG, use_cuda, ap, use_gl=False, figures=True)" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "DDC-TTS_and_MultiBand-MelGAN_Example.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/notebooks/DDC_TTS_and_MultiBand_MelGAN_TF_Example.ipynb b/notebooks/DDC_TTS_and_MultiBand_MelGAN_TF_Example.ipynb deleted file mode 100644 index 4b009ce9..00000000 --- a/notebooks/DDC_TTS_and_MultiBand_MelGAN_TF_Example.ipynb +++ /dev/null @@ -1,346 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false", - "colab_type": "text", - "id": "6LWsNd3_M3MP" - }, - "source": [ - "# Mozilla TTS on CPU Real-Time Speech Synthesis with Tensorflow" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false", - "colab_type": "text", - "id": "FAqrSIWgLyP0" - }, - "source": [ - "**These models are converted from released [PyTorch models](https://colab.research.google.com/drive/1u_16ZzHjKYFn1HNVuA4Qf_i2MMFB9olY?usp=sharing) using our TF utilities provided in Mozilla TTS.**\n", - "\n", - "These TF models support TF 2.2 and for different versions you might need to\n", - "regenerate them. 
\n", - "\n", - "We use Tacotron2 and MultiBand-Melgan models and LJSpeech dataset.\n", - "\n", - "Tacotron2 is trained using [Double Decoder Consistency](https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency/) (DDC) only for 130K steps (3 days) with a single GPU.\n", - "\n", - "MultiBand-Melgan is trained 1.45M steps with real spectrograms.\n", - "\n", - "Note that both model performances can be improved with more training.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false", - "colab_type": "text", - "id": "Ku-dA4DKoeXk" - }, - "source": [ - "### Download Models" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 162 - }, - "colab_type": "code", - "id": "jGIgnWhGsxU1", - "outputId": "08b0dddd-4edf-48c9-e8e5-a419b36a5c3d", - "tags": [] - }, - "outputs": [], - "source": [ - "!gdown --id 1p7OSEEW_Z7ORxNgfZwhMy7IiLE1s0aH7 -O data/tts_model.pkl\n", - "!gdown --id 18CQ6G6tBEOfvCHlPqP8EBI4xWbrr9dBc -O data/config.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 235 - }, - "colab_type": "code", - "id": "4dnpE0-kvTsu", - "outputId": "2fe836eb-c7e7-4f1e-9352-0142126bb19f", - "tags": [] - }, - "outputs": [], - "source": [ - "!gdown --id 1rHmj7CqD3Sfa716Y3ub_vpIBrQg_b1yF -O data/vocoder_model.pkl\n", - "!gdown --id 1Rd0R_nRCrbjEdpOwq6XwZAktvugiBvmu -O data/config_vocoder.json\n", - "!gdown --id 11oY3Tv0kQtxK_JPgxrfesa99maVXHNxU -O data/scale_stats.npy" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false", - "colab_type": "text", - "id": "Zlgi8fPdpRF0" - }, - "source": [ - "### Define TTS function" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": {}, - "colab_type": "code", - "id": "f-Yc42nQZG5A" - }, - "outputs": [], - "source": [ - "def tts(model, text, CONFIG, p):\n", - " t_1 = time.time()\n", - " waveform, alignment, mel_spec, mel_postnet_spec, stop_tokens, inputs = synthesis(model, text, CONFIG, use_cuda, ap, speaker_id, style_wav=None,\n", - " truncated=False, enable_eos_bos_chars=CONFIG.enable_eos_bos_chars,\n", - " backend='tf')\n", - " waveform = vocoder_model.inference(torch.FloatTensor(mel_postnet_spec.T).unsqueeze(0))\n", - " waveform = waveform.numpy()[0, 0]\n", - " rtf = (time.time() - t_1) / (len(waveform) / ap.sample_rate)\n", - " tps = (time.time() - t_1) / len(waveform)\n", - " print(waveform.shape)\n", - " print(\" > Run-time: {}\".format(time.time() - t_1))\n", - " print(\" > Real-time factor: {}\".format(rtf))\n", - " print(\" > Time per step: {}\".format(tps))\n", - " IPython.display.display(IPython.display.Audio(waveform, rate=CONFIG.audio['sample_rate'])) \n", - " return alignment, mel_postnet_spec, stop_tokens, waveform" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false", - "colab_type": "text", - "id": "ZksegYQepkFg" - }, - "source": [ - "### Load Models" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": {}, - "colab_type": "code", - "id": "oVa0kOamprgj" - }, - "outputs": [], - "source": [ - "import os\n", - "import torch\n", - "import time\n", - "import IPython\n", - "\n", - "from TTS.tts.tf.utils.generic_utils import setup_model\n", - "from TTS.tts.tf.utils.io import load_checkpoint\n", - 
"from TTS.utils.io import load_config\n", - "from TTS.tts.utils.text.symbols import symbols, phonemes\n", - "from TTS.utils.audio import AudioProcessor\n", - "from TTS.tts.utils.synthesis import synthesis" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": {}, - "colab_type": "code", - "id": "EY-sHVO8IFSH" - }, - "outputs": [], - "source": [ - "# runtime settings\n", - "use_cuda = False" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": {}, - "colab_type": "code", - "id": "_1aIUp2FpxOQ" - }, - "outputs": [], - "source": [ - "# model paths\n", - "TTS_MODEL = \"data/tts_model.pkl\"\n", - "TTS_CONFIG = \"data/config.json\"\n", - "VOCODER_MODEL = \"data/vocoder_model.pkl\"\n", - "VOCODER_CONFIG = \"data/config_vocoder.json\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": {}, - "colab_type": "code", - "id": "CpgmdBVQplbv" - }, - "outputs": [], - "source": [ - "# load configs\n", - "TTS_CONFIG = load_config(TTS_CONFIG)\n", - "VOCODER_CONFIG = load_config(VOCODER_CONFIG)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 471 - }, - "colab_type": "code", - "id": "zmrQxiozIUVE", - "outputId": "fa71bd05-401f-4e5b-a6f7-60ae765966db", - "tags": [] - }, - "outputs": [], - "source": [ - "# load the audio processor\n", - "TTS_CONFIG.audio['stats_path'] = 'data/scale_stats.npy'\n", - "ap = AudioProcessor(**TTS_CONFIG.audio) " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 72 - }, - "colab_type": "code", - "id": "8fLoI4ipqMeS", - "outputId": "595d990f-930d-4698-ee14-77796b5eed7d", - "tags": [] - }, - "outputs": [], - "source": [ - "# LOAD TTS MODEL\n", - "# multi speaker \n", - "speaker_id = None\n", - "speakers = []\n", - "\n", - "# load the model\n", - "num_chars = len(phonemes) if TTS_CONFIG.use_phonemes else len(symbols)\n", - "model = setup_model(num_chars, len(speakers), TTS_CONFIG)\n", - "model.build_inference()\n", - "model = load_checkpoint(model, TTS_MODEL)\n", - "model.decoder.set_max_decoder_steps(1000)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 489 - }, - "colab_type": "code", - "id": "zKoq0GgzqzhQ", - "outputId": "2cc3deae-144f-4465-da3b-98628d948506" - }, - "outputs": [], - "source": [ - "from TTS.vocoder.tf.utils.generic_utils import setup_generator\n", - "from TTS.vocoder.tf.utils.io import load_checkpoint\n", - "\n", - "# LOAD VOCODER MODEL\n", - "vocoder_model = setup_generator(VOCODER_CONFIG)\n", - "vocoder_model.build_inference()\n", - "vocoder_model = load_checkpoint(vocoder_model, VOCODER_MODEL)\n", - "vocoder_model.inference_padding = 0\n", - "\n", - "ap_vocoder = AudioProcessor(**VOCODER_CONFIG['audio']) " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "Collapsed": "false", - "colab_type": "text", - "id": "Ws_YkPKsLgo-" - }, - "source": [ - "## Run Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "Collapsed": "false", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 134 - }, - "colab_type": "code", - "id": "FuWxZ9Ey5Puj", - "outputId": 
"07ede6e5-06e6-4612-f687-7984d20e5254" - }, - "outputs": [], - "source": [ - "sentence = \"Bill got in the habit of asking himself “Is that thought true?” and if he wasn’t absolutely certain it was, he just let it go.\"\n", - "align, spec, stop_tokens, wav = tts(model, sentence, TTS_CONFIG, ap)" - ] - } - ], - "metadata": { - "colab": { - "collapsed_sections": [], - "name": "DDC-TTS_and_MultiBand-MelGAN_TF_Example.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/notebooks/DDC_TTS_and_ParallelWaveGAN_Example.ipynb b/notebooks/DDC_TTS_and_ParallelWaveGAN_Example.ipynb deleted file mode 100644 index 4c1008e0..00000000 --- a/notebooks/DDC_TTS_and_ParallelWaveGAN_Example.ipynb +++ /dev/null @@ -1,342 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "6LWsNd3_M3MP" - }, - "source": [ - "# Mozilla TTS on CPU Real-Time Speech Synthesis " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FAqrSIWgLyP0" - }, - "source": [ - "We use Tacotron2 and MultiBand-Melgan models and LJSpeech dataset.\n", - "\n", - "Tacotron2 is trained using [Double Decoder Consistency](https://erogol.com/solving-attention-problems-of-tts-models-with-double-decoder-consistency/) (DDC) only for 130K steps (3 days) with a single GPU.\n", - "\n", - "MultiBand-Melgan is trained 1.45M steps with real spectrograms.\n", - "\n", - "Note that both model performances can be improved with more training." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ku-dA4DKoeXk" - }, - "source": [ - "### Download Models" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 162 - }, - "colab_type": "code", - "id": "jGIgnWhGsxU1", - "outputId": "88725e41-a8dc-4885-b3bf-cac939f38abe", - "tags": [] - }, - "outputs": [], - "source": [ - "!gdown --id 1dntzjWFg7ufWaTaFy80nRz-Tu02xWZos -O data/tts_model.pth.tar\n", - "!gdown --id 18CQ6G6tBEOfvCHlPqP8EBI4xWbrr9dBc -O data/config.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 235 - }, - "colab_type": "code", - "id": "4dnpE0-kvTsu", - "outputId": "76377c6d-789c-4995-ba00-a21a6e1c401e", - "tags": [] - }, - "outputs": [], - "source": [ - "!gdown --id 1X09hHAyAJOnrplCUMAdW_t341Kor4YR4 -O data/vocoder_model.pth.tar\n", - "!gdown --id \"1qN7vQRIYkzvOX_DtiZtTajzoZ1eW1-Eg\" -O data/config_vocoder.json\n", - "!gdown --id 11oY3Tv0kQtxK_JPgxrfesa99maVXHNxU -O data/scale_stats.npy" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Zlgi8fPdpRF0" - }, - "source": [ - "### Define TTS function" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "f-Yc42nQZG5A" - }, - "outputs": [], - "source": [ - "def tts(model, text, CONFIG, use_cuda, ap, use_gl, figures=True):\n", - " t_1 = time.time()\n", - " waveform, alignment, mel_spec, mel_postnet_spec, stop_tokens, inputs = synthesis(model, text, CONFIG, use_cuda, ap, speaker_id, style_wav=None,\n", - " truncated=False, enable_eos_bos_chars=CONFIG.enable_eos_bos_chars)\n", - " # mel_postnet_spec = ap.denormalize(mel_postnet_spec.T)\n", - " if not use_gl:\n", - " waveform = vocoder_model.inference(torch.FloatTensor(mel_postnet_spec.T).unsqueeze(0))\n", - " waveform = waveform.flatten()\n", - " if use_cuda:\n", - " waveform = waveform.cpu()\n", - " waveform = waveform.numpy()\n", - " rtf = (time.time() - t_1) / (len(waveform) / ap.sample_rate)\n", - " tps = (time.time() - t_1) / len(waveform)\n", - " print(waveform.shape)\n", - " print(\" > Run-time: {}\".format(time.time() - t_1))\n", - " print(\" > Real-time factor: {}\".format(rtf))\n", - " print(\" > Time per step: {}\".format(tps))\n", - " IPython.display.display(IPython.display.Audio(waveform, rate=CONFIG.audio['sample_rate'])) \n", - " return alignment, mel_postnet_spec, stop_tokens, waveform" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "ZksegYQepkFg" - }, - "source": [ - "### Load Models" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "oVa0kOamprgj" - }, - "outputs": [], - "source": [ - "import os\n", - "import torch\n", - "import time\n", - "import IPython\n", - "\n", - "from TTS.tts.utils.generic_utils import setup_model\n", - "from TTS.utils.io import load_config\n", - "from TTS.tts.utils.text.symbols import symbols, phonemes\n", - "from TTS.utils.audio import AudioProcessor\n", - "from TTS.tts.utils.synthesis import synthesis" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "EY-sHVO8IFSH" - }, - "outputs": [], - "source": [ - "# runtime settings\n", - "use_cuda = False" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": { - "colab": {}, - "colab_type": "code", - "id": "_1aIUp2FpxOQ" - }, - "outputs": [], - "source": [ - "# model paths\n", - "TTS_MODEL = \"data/tts_model.pth.tar\"\n", - "TTS_CONFIG = \"data/config.json\"\n", - "VOCODER_MODEL = \"data/vocoder_model.pth.tar\"\n", - "VOCODER_CONFIG = \"data/config_vocoder.json\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "CpgmdBVQplbv" - }, - "outputs": [], - "source": [ - "# load configs\n", - "TTS_CONFIG = load_config(TTS_CONFIG)\n", - "VOCODER_CONFIG = load_config(VOCODER_CONFIG)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 471 - }, - "colab_type": "code", - "id": "zmrQxiozIUVE", - "outputId": "60c4daa0-4c5b-4a2e-fe0d-be437d003a49", - "tags": [] - }, - "outputs": [], - "source": [ - "# load the audio processor\n", - "TTS_CONFIG.audio['stats_path'] = 'data/scale_stats.npy'\n", - "ap = AudioProcessor(**TTS_CONFIG.audio) " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - }, - "colab_type": "code", - "id": "8fLoI4ipqMeS", - "outputId": "b789066e-e305-42ad-b3ca-eba8d9267382", - "tags": [] - }, - "outputs": [], - "source": [ - "# LOAD TTS MODEL\n", - "# multi speaker \n", - "speaker_id = None\n", - "speakers = []\n", - "\n", - "# load the model\n", - "num_chars = len(phonemes) if TTS_CONFIG.use_phonemes else len(symbols)\n", - "model = setup_model(num_chars, len(speakers), TTS_CONFIG)\n", - "\n", - "# load model state\n", - "cp = torch.load(TTS_MODEL, map_location=torch.device('cpu'))\n", - "\n", - "# load the model\n", - "model.load_state_dict(cp['model'])\n", - "if use_cuda:\n", - " model.cuda()\n", - "model.eval()\n", - "\n", - "# set model stepsize\n", - "if 'r' in cp:\n", - " model.decoder.set_r(cp['r'])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "colab_type": "code", - "id": "zKoq0GgzqzhQ", - "outputId": "234efc61-f37a-40bc-95a3-b51896018ccb", - "tags": [] - }, - "outputs": [], - "source": [ - "from TTS.vocoder.utils.generic_utils import setup_generator\n", - "\n", - "# LOAD VOCODER MODEL\n", - "vocoder_model = setup_generator(VOCODER_CONFIG)\n", - "vocoder_model.load_state_dict(torch.load(VOCODER_MODEL, map_location=\"cpu\")[\"model\"])\n", - "vocoder_model.remove_weight_norm()\n", - "vocoder_model.inference_padding = 0\n", - "\n", - "ap_vocoder = AudioProcessor(**VOCODER_CONFIG['audio']) \n", - "if use_cuda:\n", - " vocoder_model.cuda()\n", - "vocoder_model.eval()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ws_YkPKsLgo-" - }, - "source": [ - "## Run Inference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 134 - }, - "colab_type": "code", - "id": "FuWxZ9Ey5Puj", - "outputId": "9c06adad-5451-4393-89a1-a2e7dc39ab91", - "tags": [] - }, - "outputs": [], - "source": [ - "sentence = \"Bill got in the habit of asking himself “Is that thought true?” and if he wasn’t absolutely certain it was, he just let it go.\"\n", - "align, spec, stop_tokens, wav = tts(model, sentence, TTS_CONFIG, use_cuda, ap, use_gl=False, figures=True)" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": 
[], - "name": "DDC-TTS_and_MultiBand-MelGAN_Example.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} From 016803beeebd7d8398ee00afa42d38217067d162 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 16:20:14 +0000 Subject: [PATCH 60/64] Update notebooks --- notebooks/ExtractTTSpectrogram.ipynb | 120 ++++++------ notebooks/PlotUmapLibriTTS.ipynb | 7 +- .../dataset_analysis/AnalyzeDataset.ipynb | 1 - .../dataset_analysis/CheckDatasetSNR.ipynb | 5 +- notebooks/dataset_analysis/CheckPitch.ipynb | 179 ++++++++++++++++++ 5 files changed, 243 insertions(+), 69 deletions(-) create mode 100644 notebooks/dataset_analysis/CheckPitch.ipynb diff --git a/notebooks/ExtractTTSpectrogram.ipynb b/notebooks/ExtractTTSpectrogram.ipynb index 3597ebe3..50b60ff0 100644 --- a/notebooks/ExtractTTSpectrogram.ipynb +++ b/notebooks/ExtractTTSpectrogram.ipynb @@ -2,14 +2,16 @@ "cells": [ { "cell_type": "markdown", + "metadata": {}, "source": [ "This is a notebook to generate mel-spectrograms from a TTS model to be used in a Vocoder training." - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", @@ -20,7 +22,7 @@ "import numpy as np\n", "from tqdm import tqdm as tqdm\n", "from torch.utils.data import DataLoader\n", - "from TTS.tts.datasets.TTSDataset import TTSDataset\n", + "from TTS.tts.datasets.dataset import TTSDataset\n", "from TTS.tts.layers.losses import L1LossMasked\n", "from TTS.utils.audio import AudioProcessor\n", "from TTS.config import load_config\n", @@ -33,13 +35,13 @@ "\n", "import os\n", "os.environ['CUDA_VISIBLE_DEVICES']='2'" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "def set_filename(wav_path, out_path):\n", " wav_file = os.path.basename(wav_path)\n", @@ -51,13 +53,13 @@ " mel_path = os.path.join(out_path, \"mel\", file_name)\n", " wav_path = os.path.join(out_path, \"wav_gl\", file_name)\n", " return file_name, wavq_path, mel_path, wav_path" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "OUT_PATH = \"/home/ubuntu/TTS/recipes/ljspeech/LJSpeech-1.1/specs2/\"\n", "DATA_PATH = \"/home/ubuntu/TTS/recipes/ljspeech/LJSpeech-1.1/\"\n", @@ -77,13 +79,13 @@ "C = load_config(CONFIG_PATH)\n", "C.audio['do_trim_silence'] = False # IMPORTANT!!!!!!!!!!!!!!! 
disable to align mel specs with the wav files\n", "ap = AudioProcessor(bits=QUANTIZE_BIT, **C.audio)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "print(C['r'])\n", "# if the vocabulary was passed, replace the default\n", @@ -95,13 +97,13 @@ "# TODO: multiple speaker\n", "model = setup_model(C)\n", "model.load_checkpoint(C, MODEL_FILE, eval=True)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "preprocessor = importlib.import_module(\"TTS.tts.datasets.formatters\")\n", "preprocessor = getattr(preprocessor, DATASET.lower())\n", @@ -120,20 +122,20 @@ "loader = DataLoader(\n", " dataset, batch_size=BATCH_SIZE, num_workers=4, collate_fn=dataset.collate_fn, shuffle=False, drop_last=False\n", ")\n" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Generate model outputs " - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import pickle\n", "\n", @@ -212,42 +214,42 @@ "\n", " print(np.mean(losses))\n", " print(np.mean(postnet_losses))" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# for pwgan\n", "with open(os.path.join(OUT_PATH, \"metadata.txt\"), \"w\") as f:\n", " for data in metadata:\n", " f.write(f\"{data[0]}|{data[1]+'.npy'}\\n\")" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "### Sanity Check" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "idx = 1\n", "ap.melspectrogram(ap.load_wav(item_idx[idx])).shape" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import soundfile as sf\n", "wav, sr = sf.read(item_idx[idx])\n", @@ -255,46 +257,46 @@ "mel_decoder = mel_outputs[idx][:mel_lengths[idx], :].detach().cpu().numpy()\n", "mel_truth = ap.melspectrogram(wav)\n", "print(mel_truth.shape)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# plot posnet output\n", "print(mel_postnet[:mel_lengths[idx], :].shape)\n", "plot_spectrogram(mel_postnet, ap)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# plot decoder output\n", "print(mel_decoder.shape)\n", "plot_spectrogram(mel_decoder, ap)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# plot GT specgrogram\n", "print(mel_truth.shape)\n", "plot_spectrogram(mel_truth.T, ap)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# postnet, decoder diff\n", "from matplotlib import pylab as plt\n", @@ -303,13 +305,13 @@ "plt.imshow(abs(mel_diff[:mel_lengths[idx],:]).T,aspect=\"auto\", origin=\"lower\");\n", "plt.colorbar()\n", "plt.tight_layout()" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# PLOT GT SPECTROGRAM diff\n", "from matplotlib import pylab as plt\n", 
@@ -318,13 +320,13 @@ "plt.imshow(abs(mel_diff2).T,aspect=\"auto\", origin=\"lower\");\n", "plt.colorbar()\n", "plt.tight_layout()" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# PLOT GT SPECTROGRAM diff\n", "from matplotlib import pylab as plt\n", @@ -334,22 +336,23 @@ "plt.imshow(abs(mel_diff2).T,aspect=\"auto\", origin=\"lower\");\n", "plt.colorbar()\n", "plt.tight_layout()" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, - "source": [], + "metadata": {}, "outputs": [], - "metadata": {} + "source": [] } ], "metadata": { + "interpreter": { + "hash": "822ce188d9bce5372c4adbb11364eeb49293228c2224eb55307f4664778e7f56" + }, "kernelspec": { - "name": "python3", - "display_name": "Python 3.9.7 64-bit ('base': conda)" + "display_name": "Python 3.9.7 64-bit ('base': conda)", + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -362,11 +365,8 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.7" - }, - "interpreter": { - "hash": "822ce188d9bce5372c4adbb11364eeb49293228c2224eb55307f4664778e7f56" } }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/notebooks/PlotUmapLibriTTS.ipynb b/notebooks/PlotUmapLibriTTS.ipynb index f61ce40c..c809a5c4 100644 --- a/notebooks/PlotUmapLibriTTS.ipynb +++ b/notebooks/PlotUmapLibriTTS.ipynb @@ -19,19 +19,16 @@ "source": [ "import os\n", "import glob\n", - "import random\n", "import numpy as np\n", - "import torch\n", "import umap\n", "\n", - "from TTS.speaker_encoder.model import SpeakerEncoder\n", "from TTS.utils.audio import AudioProcessor\n", - "from TTS.tts.utils.generic_utils import load_config\n", + "from TTS.config import load_config\n", "\n", "from bokeh.io import output_notebook, show\n", "from bokeh.plotting import figure\n", "from bokeh.models import HoverTool, ColumnDataSource, BoxZoomTool, ResetTool, OpenURL, TapTool\n", - "from bokeh.transform import factor_cmap, factor_mark\n", + "from bokeh.transform import factor_cmap\n", "from bokeh.palettes import Category10" ] }, diff --git a/notebooks/dataset_analysis/AnalyzeDataset.ipynb b/notebooks/dataset_analysis/AnalyzeDataset.ipynb index 6ff2d2ca..c2aabbf9 100644 --- a/notebooks/dataset_analysis/AnalyzeDataset.ipynb +++ b/notebooks/dataset_analysis/AnalyzeDataset.ipynb @@ -22,7 +22,6 @@ "import os\n", "import sys\n", "sys.path.append(TTS_PATH) # set this if TTS is not installed globally\n", - "import glob\n", "import librosa\n", "import numpy as np\n", "import pandas as pd\n", diff --git a/notebooks/dataset_analysis/CheckDatasetSNR.ipynb b/notebooks/dataset_analysis/CheckDatasetSNR.ipynb index 91ecc954..18c48d0b 100644 --- a/notebooks/dataset_analysis/CheckDatasetSNR.ipynb +++ b/notebooks/dataset_analysis/CheckDatasetSNR.ipynb @@ -21,10 +21,9 @@ "metadata": {}, "outputs": [], "source": [ - "import os, sys\n", + "import os\n", "import glob\n", "import subprocess\n", - "import tempfile\n", "import IPython\n", "import soundfile as sf\n", "import numpy as np\n", @@ -208,4 +207,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/notebooks/dataset_analysis/CheckPitch.ipynb b/notebooks/dataset_analysis/CheckPitch.ipynb new file mode 100644 index 00000000..72afbc64 --- /dev/null +++ b/notebooks/dataset_analysis/CheckPitch.ipynb @@ -0,0 +1,179 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "source": [ + "%load_ext autoreload\n", + 
"%autoreload 2" + ], + "outputs": [], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 9, + "source": [ + "import numpy as np\n", + "import glob\n", + "from TTS.utils.audio import AudioProcessor\n", + "from TTS.config.shared_configs import BaseAudioConfig\n", + "from TTS.tts.utils.visual import plot_pitch" + ], + "outputs": [], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 13, + "source": [ + "pitch_path = \"/home/ubuntu/TTS/recipes/ljspeech/fast_pitch/f0_cache\"\n", + "wav_path = \"/home/ubuntu/TTS/recipes/ljspeech/LJSpeech-1.1/wavs\"\n", + "wav_files = glob.glob(\"/home/ubuntu/TTS/recipes/ljspeech/LJSpeech-1.1/wavs/*.wav\")\n", + "print(len(wav_files))" + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "13100\n" + ] + } + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 20, + "source": [ + "ap = AudioProcessor(**BaseAudioConfig( sample_rate=22050,\n", + " do_trim_silence=True,\n", + " trim_db=60.0,\n", + " signal_norm=False,\n", + " mel_fmin=0.0,\n", + " mel_fmax=8000,\n", + " spec_gain=1.0,\n", + " log_func=\"np.log\",\n", + " ref_level_db=20,\n", + " preemphasis=0.0,))" + ], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + " > Setting up Audio Processor...\n", + " | > sample_rate:22050\n", + " | > resample:False\n", + " | > num_mels:80\n", + " | > log_func:np.log\n", + " | > min_level_db:-100\n", + " | > frame_shift_ms:None\n", + " | > frame_length_ms:None\n", + " | > ref_level_db:20\n", + " | > fft_size:1024\n", + " | > power:1.5\n", + " | > preemphasis:0.0\n", + " | > griffin_lim_iters:60\n", + " | > signal_norm:False\n", + " | > symmetric_norm:True\n", + " | > mel_fmin:0\n", + " | > mel_fmax:8000\n", + " | > spec_gain:1.0\n", + " | > stft_pad_mode:reflect\n", + " | > max_norm:4.0\n", + " | > clip_norm:True\n", + " | > do_trim_silence:True\n", + " | > trim_db:60.0\n", + " | > do_sound_norm:False\n", + " | > do_amp_to_db_linear:True\n", + " | > do_amp_to_db_mel:True\n", + " | > stats_path:None\n", + " | > base:2.718281828459045\n", + " | > hop_length:256\n", + " | > win_length:1024\n" + ] + } + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 15, + "source": [ + "pitch_files = [wf.replace(\".wav\", \"_pitch.npy\").replace(wav_path, pitch_path) for wf in wav_files]" + ], + "outputs": [], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 30, + "source": [ + "idx = 100\n", + "# wav_file = wav_files[idx]\n", + "# pitch_file = pitch_files[idx]\n", + "wav_file = \"/home/ubuntu/TTS/recipes/ljspeech/fast_pitch/../LJSpeech-1.1/wavs/LJ011-0097.wav\"\n", + "pitch_file = \"/home/ubuntu/TTS/recipes/ljspeech/fast_pitch/f0_cache/LJ011-0097_pitch.npy\"\n", + "pitch = np.load(pitch_file)\n", + "wav = ap.load_wav(wav_file)\n", + "spec = ap.melspectrogram(wav)" + ], + "outputs": [], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 31, + "source": [ + "plot_pitch(pitch, spec.T)" + ], + "outputs": [ + { + "output_type": "execute_result", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABuIAAAJNCAYAAADEcGOGAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAEAAElEQVR4nOzdd5gkR2E3/m91mLB570465ZwTp3zKIAkQIEQQRoAJNmAZjAMGB2z8w/b7mtcBMMY2GJNFkEAEESURhFBEOed0p3j5Nk/s7vr9cSvp9ro29HdnZ3dmvp/n0SOpZ2qrprq6urqqq8pYayEiIiIiIiIiIiIiIiIijeUtdgJERERERERERERERERE2pEG4kREREREREREREREREQWgAbiRERERERERERERERERBaABuJEREREREREREREREREFoAG4kREREREREREREREREQWgAbiRERERERERERERERERBZAsNgJmCtjjPX8sClxWdOUaObN2ObFxeYJk0bbKsPDTcx/tEKZbGZ+tAj6Gk2YQO17AmzIXQAeewKI6BK2kmyF08amsQV+WzPvowB5fyOLludRFQk84sQ1+xZlyRhboEgiZgpJs39YK2Rks7VpnjS7jqS1SjrbFVElt8zzHoNtfrZxe4tPYyv8uDZ+UDdN/G3NbpP7RKAmn2rDNOWb3iZc+mWrre83pFZo3zWzX77p99927kumfhuXIYmNrLWtd4W3zECc54c47i3/nDlcTHTgJrnMQQAA9W6uhJuYiy83RnSWcf1yiMkxUC/KHmZ8Ty4fq8u4H2diLr6u9dnDsee6NsCFi/NchcakMyhx+RiUqGDw6tnDJGSNx96YuzZwZbL/weHMYUyVuNgAwCPvzDFZmRAe/KtBKtzRBz1Jhcv52S+A+zbsSsVVGstT4YxPNlbGs1fm4VbmSRXID5N1wkT2MH6Vyw+/RgVDUOHi23hs9jzx9x2n4jplrzVUuFW9T2cOs1MwSsUVkwszPFZZSYXbUu/OHKbHrzYtLgC47qn9M4cpjxSouFDj8t/UyZcjiHBetbltayaN28JlD8PWP6yglL3eKgyRdStdJ3NtC7+cPZxfIdtNZOehVyMKZUK2tcg0WrJNWF2RvQ7acBz3cBn1kI1yMlg4lj1PctwtEYWt3Pnu2pi9LAfjRKUFwCtz142pc5WyYTr5a9xvg8fdE22R6Dxir222Soi5/E8KxG8ju0TZMsJaf/qyzGEm9iD7VsgiudNd2U94bozLx3CUa5QEG8lngP7s7eTaTkUqrqGDuA7eeg8RiLzX0M+kE1yEIdEmZJ9/a91cpRARp5vpfwaAkMzHro3cxZ3bRHR4sPcodtCbrcur2esgU+Get3/+6CfKVMBF1nIjhyIiIiIiIiIiIiIiIiKtQANxIiIiIiIiIiIiIiIiIgtAA3EiIiIiIiIiIiIiIiIiC0ADcSIiIiIiIiIiIiIiIiILQANxIiIiIiIiIiIiIiIiIgsgWOwEZBHnTOYwSZg9nqiQPR4AiLqoYDAxGS7Jnk4TWyquuMjliVfPHibJcWk0XDAqjQBgiWHs3DD524hzDQARed5Mkj2MX6Gigl/l8sSvcvExvIhLY2Eo4iI0xHlLiJMGwOZzVDjkmcqVSyN8Lv9zPle59gbZC9ey7hIVV63mU+Fswr1Hk+Sz50lcJN/ZGeLqHwZTHwNAwmU/6mTdaomynMtx9Ug3UY4BIE/cFAd8rvxXLFGPAEjQvLJVSrg60icbJSv7xzKHeXI8T8VFNpsA8rqxxC2AaY8AgCXTaGpkOCKdHhsXeeKYdppXZ9vkXDjrcdc2c5+KyfaPV+cKpR9k/21+iav/kxx3AZg6127Kb81+v8mNcvV/1EMFQ9xFVibEe8t+hSvH9S4uXHl59i6dIhUTEDDPKABMzJVJr5S9ovTq5HXTQ3bmMM9gbD4mZMdRM5GXGlv/2BzXpRkViDBF7t7mkb2u9e7s9Q/bRrAe15b0KuR1QwhHuYZTfpg8AcTzNvnYAPbRhu1L9omsZONi+uQBICb65U2Z7W+lgsEj+7cMEy7iOq7pNjnbT8hoZlxLgGbEiYiIiIiIiIiIiIiIiCwADcSJiIiIiIiIiIiIiIiILAANxImIiIiIiIiIiIiIiIgsAA3EiYiIiIiIiIiIiIiIiCwADcSJiIiIiIiIiIiIiIiILAANxImIiIiIiIiIiIiIiIgsgGCxE7DQ6j0mc5iowMVlfTJc9iQCAOJ89jBenYus3kMFA2z2IFGRiyoJicgAmIjLE9vEYeygxP22cJyLLyTiKy/jMsRwPw1hKckcJs5z59qvU8Hgl2MqnKlF2QOFza3OzUQleyCPLCNj3G97bryfCrdL92jmMF1hjYqLFdfJCqia/UbF3jeY+h8ATJw9YBKSaSQT6ZF1AohkToxxjZLhOncz3VzvzRxmPObSWCcbTjF5A+4Lstdbo2SjcGutiwpXj7PniU248h+McvnvEbcoAED22zaCEvfb2GvUEGncFjB7kCTHRRWOk5VrCzAJ99v8SvYT59W4k+1VyQvAy15IqPYggKDEtUmsIZ+JCtnbaey15pe5NJqIu2/41ezxBWUqKoQTXPnPjRGZSV5rLKZtty1g9vyPl2dvxwCAibhCaSrEDcdy+WGL5I2D5RPXG3muTYl4tgSAmMsTSzxe2pArIwlZt0aF7OHCCSoqJAGXxrgrpML5o9XMYUzC5X//YyUqXNSd/bfRdR0pKnJteaYPLihzbZLCZioYou7sF6lXZ9t2XL9duJUrW4iyx2fqZPuTvN+YmGyoEdepbXJf5mLTjDgRERERERERERERERGRBaCBOBEREREREREREREREZEFoIE4ERERERERERERERERkQWggTgRERERERERERERERGRBaCBOBEREREREREREREREZEFECx2ArLwIps5TDCRPR6/kj0MAHh1Lpxf5cIVhuLMYWq93NirXzVUuCSXPVxtIPt5BkAPKychF5/1uDxhBGUujWGJCxcT5y0JqagAcPlo/ezhrGHPGZmPea5QBrnsVbOpcBWQibLXIwAAS+RJ4FNR+RUuHydqXKHcaHozhxkqFam46mN5Kpw/yuWlV89+DQRj3HUTlKlg8CIiDFlH5sYTKpzhgsGrZS/L9Tp3rp8eH6TC9QS1zGF6A67hVCAbTqNRgQoXmuz1XTXhmsqViKt/Jqq5zGHMBFdG2PYPyHspc7ot99MQk21Cpv4BuPrOr3L5zz6nhKXsFVcSkuc6Jn9bmatcvTh7OKYduS0cV7gMkca4K3t9AAB+matb4y6yMU/UCV6NKyNeja1/uHDhWPYwha1cOe55husU8OrZ721sOfaqZCVJ1glMOAMu/61HXtvE842ZIBvJ5HkD+dtskD2czXM3btvFte1Q58ok0wfnkc+kbL3FNK8NeakFJbZPgAtGIdufJuLqBL+UvWx5FbI8jhAd1wDMbgNUOBtmL8umTj4Ak8LR7M+kJiHbFmS7yYyVqHAIsz9f2gLXJrQhWSf7ZH/TOHF/Y/oWW5hmxImIiIiIiIiIiIiIiIgsgJaaESciIi
Lz5yUJ9t20CQevfw4HrV8Pawx+dPSxWLvTToudNBERERERERERkbaigTgREZE21lsu49Bnn8Mh69bhsGe2/fug9etQiKYunfH7112L3/nAn+DRXXZdpJSKiIiIiIiIiIi0Hw3EiYiItAGTJNhn8xYc8ty2wbZDn3sOhz73HHYfGp5T+J5aFX//o8vw9gvfT6/BLyIiIiIiIiIiIlNpIE5ERKTFnf7gQ/i7H/0YB2zcOK+/s/qJx3HGww/hmkMObVDKREREREREREREOpu32AkQERERkrV4/6+uwle+9OV5D8I976+u+Cm8JGnI3xIREREREREREel0LTUjLqjYzGH8WvZ4TJI9HgAIylzHZW4rkUgA4TNbMoep77mciisucEVlZL9c5jBeRC6JVuGCeXUuvoCIz4upqJAf4cpW9yPZywgATBycvZwEFS4fPa74w6tnv05rvWTZIpfp66py583miOvNcvWWqdS5cPVo9i/tiKxboz7uwlnZM06FK/jZ82Ssmr2uAwCQ9Y9fNuiqVvEvP/g2XnXfPdTfGC524anB5TjquaenHD94/Xq86be34werTgAAGHJMLurmwsWF7HlS2MKVrTjH5b/1qWDwmMvNcL9tvJqnwj0+uiJzmO6wSsV16rLHqXCDwQQV7rHSysxhtlS5grx+opcKV61nr/+tz5URsMHIcN4E8f4fedtOQi4cXd91ZQ/D1j858sQx7Wuy+oEh27txnntHNDbZw/lkG40tkzYkbhxku8kGXD6aiMsTppwwz+gAf/9ly2RhKPuPK2wl2sgA/AoXztSz/zjrcwXZxGSlwGJuG2w5Trj8T7qzt7cMe43WuDQi4i4Ar5Q9L63PXaSmyj2TWp/LS6ZN7tXI/g7up8GLsl9vbF9mOMJ1pnklrjJn+juifu7Zxi+RdfJE9ucbto60m7dS4XIxd21HKwcyhzExWbcS9ygAsHmiMc/W/2Q420d2eBB9aaZCNpwMd93Q91LifmMDsnHXolpqIE5ERESAPbduwee++VUcsmHdrN+NjcHaFTvhoV12xSMrdsdDK3fDQ7vshg29/QCAi7/6XzjmmbVTwvzp1Vfg8sNXoRKSA4wiIiIiIiIiIiICQANxIiIiLeXUBx/BZ7/4LQyUy87PH9xlN9y6z754aJfd8NCuu+HRnXdBJbdtQM2vpt/k/MTZr8UlX/uvKcd2GRvBO26+Dl889azG/wAREREREREREZEOooE4ERGRVmAtLvzVtfjIZZfDn2ZtuC+e9lJ86uWvRpxhWZg799oXvzjkSLzioXunHL/whqvw3WNOxEiuZ17JFhERERERERER6WTcgsoiIiLSNIVaDf/5lUvw0R/8zDkIVw5DfPCCt+PfznltpkG45336zNcg2mGPnd5qBe+/7pd0mkVEREREREREREQDcSIiIkvaHlu24gef+Bxed9tdzs+fHlyGCy78E/zsqKPpONas2BnfPWZ16vhbb70Rewxtof+uiIiIiIiIiIhIp9NAnIiIyBJ18kOP4Sf//J84/JnnnJ/fuN8BOP/9f4YHd9t93nF99oxXYCLMTTmWS2J88JrL5/23RUREREREREREOlVL7REXlJPMYerd2ZfoSnyTOQwAeHX3nj2zCYZLVLhkeCR7XMU8FReWk3sEmdzs39mBJYeHLVuaIy5YzGQlV0QQF7gymfQWqHAmyZ5Qk/3ynFe4Wk/2glJZxuWjX6GCobiFK5Qmyp4pHsKmxQUANuzKHoasW0GWkcSS1w1RCXnkT4M/zbVmLd591fX46PcuR5C4M+DLp56BT7zyNdmWopzh0t7c3YevnvRS/PG1v5hy/DUP3Imvrj4D9+2219zjAX3aENSyh4mz32oAACbmTlxQ5Spzr5o9PjNdGZnFsiLXthgsZA/HXmu/2XIQFS7ncTfuLZXuzGGGS0UqrrEJ7v4bT2Svy4PR7G1dAPCJ8gjw90S/mj1MOEE2nEgBd9lQ/Br53FDlatfcSPbrJhivU3Fhmn1MZxWTdWst+29LcuSDA9uWIdiAfCgyXBqtz8VnmPxns5+oRwCAvG1Q6YyI/gcACCrscwMRH3mNerWYCseUEQAw1ezhbJFsFCbkdUNcp0meO9f+CHfeTEQ0rgHAy162bJ4r/wm4dhPLr2TPy6BC9hOS2c/0yXjkMwpb/8PjwkW92TvTKsu4azsocGWy+MRE5jAm4urIaHSUCgcyXEBsZWELXP6zbRlTJdugBDqN5L3N5oi+u4Arx7SY7M0h2heGjatFaUaciIjIEpKv1fHvX70Uf3/pT52DcJUwwId/5234l1efR+0HN5Ovrn4pNnX3po7/5VU/4TtWRUREREREREREOpgG4kRERJaI4x9dg8v+9XM4/6Y7nJ8/O9iP8//qj/Djo49dkPgn8gX89xmvTB0/8cnHcfrjDy5InCIiIiIiIiIiIu1MA3EiIiKL7OBn1uEr//VVfO8Tn8fhT7v3g/vtQfvhtR/9U9y39/z3g5vJ91ediDXLdkod/4urfgpvmmUyRURERERERERExE0DcSIiIotkj81b8e9f+Q6u/L+fwVn3PjTt975y5il4+wffiy195H6dGUS+j0+d9ZrU8YM2rcfr77ltweMXERERERERERFpJ+Q2ySIiIsIaGCnh3d+5AW/62R3Iz7CpcyUI8NG3vwHfO/m4JqYO+NXBR+KOPfbBMc+snXL8T6+5ApcfvgqVkNusWUREREREREREpNNoRpyIiEiTFMs1vPeS6/Gj93wOv/ujW2cchHtot13wO3/1vqYPwgEAjMEnzn5t6vAuYyN45y3XNj89IiIiIiIiIiIiLaqlZsTF+ezjhgnxC6O8yR4IgBf5VLh8yIXz+vsyh6kv55Y1i7q5ouJXs4cJJrj8r+YtFa7eN31H+ExMnP28JZuoqGA9Lk9qy4tcuJ7svy0OuTSamDtvlniNgKkPAMCQk38SMk+8KPs+XN5omYoLAVf/2CD7CTDlGhWXV+PeGdk00U2F687VM4epzVL/B/UYb/rFHfjDS6/DiuGJGb/7zLIBfOq8V+KHJx6NxPMA1yVCbtXmZzgF96zcF7886Ei8/JF7pxz/gxt+je8fsRrDXTPfTzzudMOvZK8TmPoAAMBdooDl6q2EuE+F+YiKqzskbsAAVuRmLp8uCZmRz5X6qXDPjnPhIuK+HVu2kJAMUf4D8j6avaqbjI/Lk5gIZsj8N9xlA5NweekReRlUuYo8N8z9uIQ4b7V+rgHkRWw+cnli89mvbWu4suWPcXUrfCI+9l6T4xq8TNsOAFDJHsQjr1HLNVtR6+LCMQ0FL+Ly0a9y5y0cyV4B+VXu+Rfkc5shnm0AAMQzsKmRhSvm0ugRdUmS5861qbD1D/m8x/RTkafaVMkHB7KeNEw62a2yuSQiHM8eYWEzUSGDv26SYsjFR+RJOMalkW2T1Fdm728NyDaCv3JnKhwGsqcRAOKefOYwdD1Otrcs0W4y7L2NbFtQdSRA1SWWbNuZOpknJNtVaGp8railBuJERERaiUkszrn+fvzJt36DPdcPzfjdrd1d+K9Xn4VvnnESauHSu
D3/x+mvwcsevR+BfbG12Fur4H03/hL/cvYbFjFlIiIiIiIiIiIirWFp9PSJiIi0maMefgYf/fwVOOyJ9TN+r5QL8aWzT8cXXnE6xorcLNaFsnb5zvjeS1bjLXfdOOX4W+68Ed887nQ8M7B8kVImIiIiIiIiIiLSGrRHnIiISIPt+8xmfPVvvz7jIFzd93DRS0/G6f/0EXzqda9ccoNwz/ufU16BUjh1ebIwifFn116+SCkSERERERERERFpHRqIExERabA3X3E7ctH063FfftrheN1n34+PvfX12NTf28SUZbe5pw9fOeFlqeOvfvBOHLHuqUVIkYiIiIiIiIiISOvQQJyIiEiDnXb7Y87jN6zaD2/+9/fir//ijXh612VNThXvohNeis3d6QHDD//mJ/Qm6SIiIiIiIiIiIp1AA3EiIiINtNdzW7D3uq1TjsWewR/84+/iff/4u3hw/10XKWW8Ui6Pz57yytTxE556HKc98eAipEhERERERERERKQ1BIudgCzqRZM5TK03e5i4kD0MAERdPhWu2j9IhbMme7jaAPfbLBcMXj17GL/KxeVXyN8WcePRJmLKFhUVkhI348R6XJ6YJHt8fo1LoxdRwagySZctMlwccvmfhNnLpBdw9Y8NuPJvvezhjM/FlXRPv8TjTPbuH6LCDebKmcM84S1/4b9feW96YOqBQ3fFI6fsgmUoTTk+0ctVClGFPG/EXd8k2/797dNOwNvvuAb7b9405fMPXftTXL3qYCQ7lAm2Tk6Y64aclMfkBwDUernrjUlnVOfimqjnqXAbvZ7MYRKykfDsWD8VLoq58h8G2esSsvkDm5AhmeJPlmNDXjdxgbzf17P/ODNORUWfuDhPBiRmBic+2W5l23bEaWPagwAQDleocHSbhAjn1bm2hUkSLlypljlM0sO1ETyijQYACVm2bJ64T5H1D/vcwMbnZ28SIixxZcSvkOEmiAduklfjTkDSzbVJEBF5wtYj7A2fwN5/k/4uKpxH1D8sQ9atNJ9rJydE2ynJkSfOcIWrtHP231bv7qbi6l0zQYVj79v+WPZ2QrCZq3/ifm4f9rgrzB6mh6vrwiLZUVjj6n+vRJRJsk2YdJH1P8FUyI67kHyYIttbqGcvyxbZyyMAYIbtUmZEtnfB9PmR5bhVaUaciIhIA62++YnUsZtO2HcRUtJYke/jU698Ter4QRvX46wH71+EFImIiIiIiIiIiGxjjCkYY24xxtxtjLnfGPOPk8f3NcbcbIx5zBjzHWNMbvJ4fvL/H5v8fJ+FSpsG4kRERBqkUK5j1T3PpI7f3AYDcQDwy8OOwO177ZM6/rs339D8xIiIiIiIiIiIiLyoCuBMa+1LAKwCcI4xZjWAfwXwaWvtAQCGALxn8vvvATA0efzTk99bEBqIExERaZBj7noKuR2WZNmyrBuPHrDzIqWowYzBf5/1itThUx57FPtu2rgICRIREREREREREQHsNs9vchBO/mMBnAnge5PHLwLw+sn/ft3k/2Py87OMIdcVnoUG4kRERBpk9c1rUsduPn4fem+ApeiG/Q/EmuUrUsffestvFyE1IiIiIiIiIiIi2xhjfGPMXQA2AvglgMcBDFtrn9+k7xkAu0/+9+4AngaAyc9HACxfiHRpIE5ERKQRrMUJt6YH4m46cb9FSMzCsZ6Hi088OXX8/NtvQbFGbpAsIiIiIiIiIiIyu8AYc9t2/1y4/YfW2thauwrAHgBOAHDIYiRyR8FiJyAL62WfUWCD7GGSXOYgAIBaHxcO4GZK+DUikKWigiHDUXFFs3+nodg8iWf/zo6SkIur3sWVkSRo3iVeGE6ocOEYkZEsQ54AUlDmCpeJiLyMyHwkZ2oZmz2+pMBVrv4wV443l3uocIzAS7D32i3YdcPolOOxZ3DXcXsi8KY5p2Tlan2ybCWNmZn3g2OPx4d+eQWK9foLx/oqFZx795347vGr5/W3LfOKUItMOEzC7OctGePqrfU9vVS4apz9eivVuTSWa1y4aoULlxDlP6lz76z5ea5ONmH2+p9pjwDktQYgGOcuuKCcPYxXJ+tI8t4WTnDx5Uezn7egxJ24JMedOJNk/22WrFvjbu5+zzy3AYBXzZ6XzHMlACRFrv7xLJH/Oa79k4RkGalzZdKrZQ8XVNg2MvncTD5f+kQdFJS5ZyKvxoUzRNmKerhrNBrIU+HY522vTtStWye4yOpcIbE9xexhfPL5K+Yykn0G86r12b/UKIHfvLjAtZ08sv5h22kxcbnRaSSuNYDvJ7Qhcb497t5GxQXueqPbTTv1cwGJ+n9bhNnDsW0EGnG+k94uLqqJChXOsHVkQjzvkfcoliXvG6ac/cVsm6P7aSNr7XGz/n1rh40xVwM4CcCAMSaYnPW2B4BnJ7/2LIA9ATxjjAkA9APYwiZsJpoRJyIi0gAnOJalvPfI3THRQ3ZaLGGjxS789KijU8ffftMN/AOBiIiIiIiIiIgIyRizkzFmYPK/iwBeDuBBAFcDeNPk194F4EeT//3jyf/H5Oe/tnZhOrY0ECciItIAJ968NnXslhP3aXo6muWbq09JHTts3XNY9fRTi5AaERERERERERHpcLsCuNoYcw+AWwH80lr7UwB/DeBDxpjHsG0PuC9Pfv/LAJZPHv8QgI8sVMJaamlKERGRpahroooj7302dfyWE/ZdhNQ0xwO774G79twrNfD2uzfdgLv22nuRUiUiIiIiIiIiIp3IWnsPgNQSTtbaJ7Btv7gdj1cA/E4TkqYZcSIiIvN1zO1PIYinrvW9cacerNl3+SKlqDm+dWJ6Vtyr770Ly8bHFyE1IiIiIiIiIiIiS48G4kRERObphFvWpo7dcuK+gCF3bG4Rlx/5Emztmropci6O8abbb1mkFImIiIiIiIiIiCwtGogTERGZD2tx4s1rUodvPrF9l6V8Xi0M8b3jTkwdf8stv4WXJI4QIiIiIiIiIiIinWVB94gzxhwM4DvbHdoPwMcAfH3y+D4A1gJ4s7V2aPY/mD0N1YHsYeK8zR4IQJKngiEJufj8cvYMyY1yszO4FAJR1+zfScXlc3HF3WSnLxnMEOFisoxQhR9AHHGxBRPZz7hX50pJrY874XEue56wafTrVDD4Na5wGSaZXnNnXtkccfsgZ4f5VSoYNo70UOGGSsVM3z94zXqs2DIx5VjN9/G9nY5B6amZL3ozxt2GwxHuPZqglP0chBMzf37Zfifjvdf+Bt52d4o9h7birDsfxLX7Hp45PpNkvwDYMlIY4q5Rv8qFqywPM4epB2S95XFpHK1kv1GNl7ibW5I0930wGxHxxWTdasn2VjX7PdFEXFweGc6STw8JES4kV7kNKlz5L2yNqXBJmD0v6z1c+6fexV03TN3KtHUBIKhwhSQocfmfKxMNXrLdlPjkBUC0gWxA1pHkg5sNyTY5k06yamXLJJsnCZEllUEuH8vLybJlC5mDGMtlSFjiwpmYjG88e53gM88oAJDP3kYDAMs837DlmHzJjb1uEiZPfLJNUqpR4RBzPy4hflqc48qxR1blHnFrY+71AJB0
ceXfH6tQ4SxRtpi2FgBUB3NUuHp3855TvIg7b7kxrt2U3zDLA76DqXAP3IYrIrAF4vmSbTbluPJv6mSHK3EPtgHZUU5XQFw4WyTOG1mPt6oFrVmstQ9ba1dZa1cBOBZACcBlAD4C4Cpr7YEArpr8fxERkZZz2u2PpY7dfMi+KDGNxxb0bP9yXL/PIanjb77vhkVIjYiIiIiIiIiIyNLSzFeRzwLwuLX2SQCvA3DR5PGLALy+iekQERFpmNNvfzR17OqjDl6ElCye7xx1SurYKU8+jD1GNi9CakRERERERERERJaOZg7EvQXAJZP/vdJau27yv9cDWNnEdIiIiDRE33gZqx5+JnX8N0cetAipWTw37H0InulbNuWYB4s33ffbRUqRiIiIiIiIiIjI0tCUgThjTA7AeQC+u+Nn1lqLaVZtN8ZcaIy5zRhzm7WdtWaoiIgsfSff9QT8Hdbif2qnQTy+606LlKLFkXgevnvkyanjb3jgZuQjcs8HERERERERERGRNtCsGXGvAnCHtXbD5P9vMMbsCgCT/97oCmSt/YK19jhr7XHGNHPynoiIyOxc+8NdfdTBALNxe4u77PATUPWDKccGqiW84tG7FylFIiIiIiIiIiIii69Zo1tvxYvLUgLAjwG8a/K/3wXgR01Kh4iISEOYxOLUO6YZiOtAw8Ue/OLAVanjF9x3Q/MTIyIiIiIiIiIiskQEs39lfowx3QBeDuAPtzv8LwAuNca8B8CTAN4829+xBqj1ZJ9hEOedq17OKAkzBwGwLY0ME5PxEcOo9e7s+QEAJuF+nCGii4pcGm3Y3OVLo57sJ8BEZFxdZP6TZSsXZo8v6uaqE/a6ibqyh/ErXFxBmSuTbBUbTGQvKM2eM2wq2dNoQ5+KK8lRweCRhcvOMdxhj6/DipGJKccqYYBbXrI3/PzcLr64xp25ej8VDFF39jyp9809zNfOPBmvfei2KceO3PAUDhl+Gvfvsuec/kY4nil58xIVuDISFbiyHHdlr0u8AnfjyAXcDcAQN+5x5Lm4qFBAErPXNhHIJ9sk7G2jnv23+ZXmthGSkMwTYqZwYSsVFTy6vcXVyXGOqFvJth3dJiTa8n6VfG4gyz9blr1a9sLMlEcAQEi2uIj4rMelMclx9yhWEmRP59heXD7W+ppcJktE/wPZtjDkoyxTl4cTs3/HpdrLdnhwwerdxPN2UqTiCjeVqHAeccNnn4nYFTcs2W5NiHR6EVeQkyLXCZfkuOftuJg9L5M899vYupzp72PvbUmOq5M9siwzj+lxkey+Zqst4r6RNPf2i8Qn25IDhcxhAvLhxtTJBw4CXbd6XPlPyCah9bOn0yRc/eMNk50rVW57EVvI3i9gi2SHX4ta8L5ba+2EtXa5tXZku2NbrLVnWWsPtNaeba0lH7NFREQWx+m3P5o6dtPh+6GS76yGxPbu3nMv3Lf7HqnjF9ylWXEiIiIiIiIiItKZtPGaiIgI4fQ70gNxv3nJgYuQkiXEGHxr9cmpw69+6A70lcnXr0VERERERERaTCGq4k2P3IC3PHQtemvczFsRaR8aiBMREclocGQCRz76bOr4NasOWoTULC0/ecnRGClMXRaoEEV4/f23LlKKRERERERERJonH9Vx0ZX/gb+87TL8+R0/xiU/+yT6K3o5VaSTaSBOREQko1PuehzeDsukP7HrCjy5y/LFSdASUsnl8P3jjk8dv+CuG2Fsc/fyFBEREREREWm2k597EPuMbnrh/3cqj+KctXcsYopEZLFpIE5ERCSj0+94LHXsN5oN94KLT0wvT7n38Gac9GR6OU8RERERERGRdrLX2KbUsf1G1i9CSkRkqdBAnIiISAZenOBUDcTNaO1OO+HGvdP58ZY7r1+E1IiIiIiIiIg0TxhHqWP92idOpKMFi52ATJo0bGjIcHb2rzQ0QjadDC99/5iTqJg9V6LldSouLx9z4XzuzNXj7GcgGPOpuJIebjk3r8aVEpNkD2fJ69OQK9XVe7OfN6/GxRVONPNqAwpbspcTr5Cj4vKq3PWGKPv1ZmLuGo27uELSW6xS4YyZuWwd9dgzGBgvTzlWyoX47T4HIC5nu636vVz+d6/kflvgZz8HtYhrKnz1lSfh5C88MuXYGU88gP6+zXh22eC04Qrrs5f/kHyeqfeQdSRXlMG0FJIqd9+YqJJ1wizl3yUMuQxJEu7+GxvuhpPUs+elGSfv23ny5kb8tCTH5aNfJe9tIReMYckk+lWybdfNRVgZzH7i6j1UVEjI/GfqraDM5Uec58Il3OWGsJTPHihmrxvyBmCzx2fIOtJ6XP6zbXkbZA/oV7i40McFi/NcXiZEEyguzv4dl4B83siNZQ/D/K75oJ8Tiedtls2TFRBRlzDXzHzCmYhrkwRbs+9lZUOucNkCWSjJ+o65BhKyHjFk0YoK2c+3V2fbP1z+mzrZT0WUyaiLy8jKABcuZh6ldiiOgZ/uSO2rT6Tam7VerhwX2b5ky1wA3M3NxFwZsX728m8DLkP8Mtfh7ZFtSRNl7wNKitwDQLyMazh5NXYQgGgnE23kVqYZcSIiIhmcetvjqWPXH3YAauSDZ7v69eGH4tnBgSnHfGvx1utvWpwEiYiIiIiIiDRB3jEjbqCSfYBbRNqHBuJEREQyOMUxEPebIw9ehJQsbbHv4+JTVqeOX/DbW5Crk29YiYiIiIiIiCxxucixNGVVS1OKdDINxImIiMzR8qFxHPZ4eoNlDcS5fefkE1Dzpy4HstPYOM65695FSpGIiIiIiIjIwsrF6SUIByoTHbcUn4i8SANxIiIic3Ty7U+kjj221wo8u2L6Pc862ea+Xlyx6sjU8Xdcd+MipEZERERERERk4bmWpgxsgp4au1GqiLQ6DcSJiIjM0am3PZY6dv1xByxCSlrHN04/OXXs+CfW4tBnnluE1IiIiIiIiIgsLNeMOAAY1D5xIh1LA3EiIiJzEEQxTrpzTer49cftvwipaR237bcPHtxtl9Txt1/320VIjYiIiIiIiMjCcs2IA4D+qgbiRDpVsNgJWJISLhg7qmkTQ4Uz7pcrZuTVubjYPAERnZePqajCvPsmN5skJs8cESzuITOym/ttEbn0dNTrz/6lHYTD2cMAQDDBlUnLnDay+HvEtQYAfpULV1mWvWr2ohwVl7eZK1vxQFfmMCbmyr9X5q7RUoXLE2vdBeX4B9aid2LqSR0v5nHDPgcAI1wak5ALx64qH/rMOeDKyHj3i3X5N85ejf/39R9O+fzcO+7CR999Hqw3NQ+sn70uiQpUErl6BIBf48JRJ26a8jibhGxbxESmRBFZ/mPuvpHUyRNHtIFsjrzayPuNIdJouEuUrkgM225l25JUZFywaj9XtiorsoeJurkMScgyyZy3eo3LSLbdFBXIe6KX/SYQlrj892tc/ueGst84/DKXkV6dq1ujbq5rIM5nLydRkYqKRj8DEyyX/TBs444oymwa2WebsMT9uHAse79AbsM4FRcirg/CFrM/b5g6Fxe7r1RSCLnoguzPe16Fa5R4bH1Hhgsmsv82f4K7R7HXdkK
ctjjPxRUXuDqyNsg9b5ske6aUl3P3KPZ+wz4nbi9MphmIq09MOb/W4/K/2sslMg6zxxd0cXH5VbK9S6TRL5P9TTWy/emTz0TELcBUufuGSZr5AAaAiM/bMroACVm6NCNORERkDs6485HUsRuO3B9RQPZmdJDLTjoalWDqw1N/uYL+UnmRUiQiIiIiIiKyMHLTzIgb0NKUIh1LA3EiIiJzcMadj6aO/eaYAxchJa2nVMhjtDv9OmKOfSNYREREREREZInKT7NHnAbiRDqXBuJERERmscvmERzy1IbU8WtXaSBurqphejmRfJ1cv0xERERERERkicpF082IKzU5JSKyVGggTkREZBan35WeDffAPrtg47K+RUhNa3INxBXq7OZWIiIiIiIiIktTXktTisgONBAnIiIyC+eylEcftAgpaV2VML3juGbEiYiIiIiISLvJTbc0ZVkDcSKdKv16+hJW78keJi7azGGSdF/hnBh2qxtyONT62cPEBS4uj5y0wKTRkPlhDBeuu6tKhSv7SeYw1dE8FZcx2csxACBiMzN7kLjApZG9bpJc9vhMzBUSNo30aStkP2+1PrLiQhcVytSzl/8kz91y/BoVDEnMlf/+3vKU/w/rEU657/HU9247eW/0dVcAALU8N6DUX6xQ4UKfK5T1OHulHHjZzzWA1AVQzTlmxEX11PeiLuLaJpNI87i6xK9mD1evc3FFEXEDBuB5ZMVFiGvkPYq8txkiPva+Yck2iUdUJbkxLrKYbe+S4+f5oexhujdydV1+M9e2GzqomwoXdWevhJg2MgB4ZJ1gicuGaWuxcQFAXCTbaTZ7OuNc8/IRAIqV7A9TJuJubibhzptHtO0AwBDPiX6VaxPWyYUI2LIcTGQvJybiyhZT/wOAFxHlv0DepIhrDQC6NnB1eVjKXriSPHdzMx53cduAqVypqGB9sv0TcxFaor2b5MibGxku7uLOd9/TRJ1M3rgt2VFVXUbE5XNxdW2kgiG/hWtvbT4qex9EbYBsI5B9mUwf6I71+HQz4vqrE1OvL/Lxy5I9+pVlRF1C3jbCcbbDO3sQv8jW49yPC0IuvnCoPPuXdpAU2f4+jj/K9VNRHfNk26JVaUaciIjIDFbd/zS6y1NHA0d6Crjv4N0XKUWtyb1HnJamFBERERERkfaSi9xvWAxqaUqRjqWBOBERkRmcelt6NtxNx+yLmHwrtVM5B+JqGogTERERERGRNmItcrF7NrCWphTpXOpFFBERmcEpt6YH4m447oBFSElrqzr3iNNAnIiIiIiIiLSPIInhTbO+Yn+l1HHL8YnINhqIExERmcYuG0ew7zNbUsdvPHa/RUhNa3MvTUluiCIiIiIiIiKyBE23PxwABDZBb43cg0tEWpoG4kRERKax97NbU8ce3H8XDA10L0JqWpv2iBMREREREZF2l59mf7jnDZTHm5QSEVlKNBAnIiIyjUIl3YDesKJ3EVLS+jQQJyIiIiIiIu0uN8OMOAAYqGifOJFOlO4VW6oMkKS3l5lTuMxByKV6rc+Fg89FaOLsP86vUlEBCRksl/23xRU2Izm1CncZJFUinRE39m3JcEwZAQBTzR7Oq3NxsQxZJhlJjgtXJydNFbdkv25MzNUj1nDnzUuyx+fVuGUIvWqRCpd4XJ70FV5cJmIA5dTntmimfGc+6jFX3/Xn0ulaKFsrjZn9N9eBOI8oJl5E1j9sMHL8MC5kL5N+mav/a2NcxWVy2StXL+AqZJ+ICwBiy5042+3eMH3GMOy9jQxmicZkxFWRVHkEADL7qes0zpPn2ufC1fq5PIl7s5ctUyPbdhUyHLEXiSVf2WTa/wAQdXPhKsuyJzQ3wsWVH81+rgEg6s1eJ7NttHCsRoUD0bYDgPKuhcxhagNcXEnIhfOIZxsA8GrE8zaZ/WzbgrnfBGUyH8kVxdn+lXAT0VFNXjdJkelsApIw+33bxOSDrEeW43GuUJqYuLdVuUJiC1y71R/hnolM1JM5TL4ne10HAAnZ6xr1EOeb7stk27vk9ZbLHo5tk/hk3WqY2/12Pysfz3wtDFYmXvw+ed488n5jiX4Suk1Iln+mv6/WR5ZHRz/FXHRF3IkLguyZaepc+5Pty6f52X9bMtjHxfUMF2yxaUaciIjINHK1dMu9RjbUOl0lTHdwaEaciIiIiIiItJPZZsT1V0pNSomILCUaiBMREZlGrpZ+86iWa+6s3XbhnhFHvlotIiIiIiIisgTNNhA3WNbSlCKdSANxIiIi03DNiKvmNSOOoT3iREREREREpN3lotn2iBtvUkpEZCnRQJyIiMg0nEtT5jQQx9BAnIiIiIiIiLS72faI09KUIp1JA3EiIiLTcA/EaWlKRlV7xImIiIiIiEiby2tpShFx0ECciIjINNx7xGlGHEN7xImIiIiIiEi7y0UzP+cOVDQQJ9KJWqo3MSrazGHiYpI9IpM9CACYhAtoAyKNABJiHNXE5I8jJUwJq3Ljw4mfvXwAgOWCAXUinQEXmcmlBwPmxJJl0hInrs5eOFwwS+SlJbORzUevxkUXFbLH50XcdROOcTOSrJ89PiYMALqMJDEX30Qt98J/e+V0/Tzm56d8BwAKAZeP+/RtocKFhrtvPDUxmDmMMVy9FeSnXnBRMX0+Ckk99b24i7i2yWvNkBPywipXKJMwe7g4T943Qq6MBLnsmRJH3CzRpEQ2Q8n2FpiyTLYtDHm/T4jfFnVz+REXuDLCtnfjYvZwlX6uHrdegQxHBaPbCQzDtmWIm6kh8yPOceU/LpDh8tl/W72HO2dl6uEGyBP1f34zeXNLuGvbq3Dhat3ZC0q9h3xuC8k6mXwGptI5vvSfG8JxMh+5IoIkIJ9Jw+ztC1PmMtJjrxuPqCjJuJj8AAAEZP8KEZ9nuHNtylUqnC2VuXC79mUOE+fI30b2N/mV7GHi3OzfcYZLL1wyJ9Yj24T57GHqveR9w+fSyOT/9k0EP5z5maq/OoFa77b/ZstIQrR/ACDimsmUgLtEqTa5T95HbZUsW2T5T4gXu/0Jro5kJXmuUjDM/Y28b7QqzYgTERGZhmbENY72iBMREREREZF2l59tRpyWphTpSBqIExERmUbetUecY0BJZld1DGC68ldERERERESkVeWimZ9zB8olGEtOMxaRlqWBOBERkWnkHDO2XANKMrtKLr28QUF7xImIiIiIiEgbycUzD8QFNkFvhVj/UkRamgbiREREppGvOgbi8hqIYziXptSMOBEREREREWkj+VlmxAHAoJanFOk4GogTERGZRk5LUzaM9ogTERERERGRdjfbHnGA9okT6UQaiBMREZmGa6ColvMXISWtzzWAqYE4ERERERERaSezLU0JAIMlDcSJdJqWeq0/7rKZw5ju5nXy2RrZOetl/10AYJF9Y8+oh4vLL5FjtqZ556zQXaPCRXXuvNUr2cOZfEzFFeS4PInJ32ZjkzlMUM4eBgCSgCuTSIj4uCQiIWvKqJuL0B/Knie1bu4a9avcjwsmspdlY7lzHRe5cAlRjgFgeLz4wn/75XTcW6LuKd8BgGKe2+vs/u
ouVLiVPeNUuO4gez05Vs9TccX1qWWyZHKp7+RqUep7QTX7eQvHuHNtyP2xiVsbAMAnblO1JrfUmPtGQt5r2DqZ5ZWaN4ieFLjCZYh7G1uOLXv/5ZoyiHPZ44uLXCGJq+R9g0gjAJhi9naazXH37SQi6zvivCXprT3nxIZcobQ+99uslz0v/Rp5rsm2TFQgzveK9H1zLoJxrq7zK+TFzTTJyXoLZPn3yC1pDdGWZH9bVJz9Oy4J0UwzlsvH3Abux3l1LlzcQ/y4bu66gWHr1uy/LS5wjTu/wvUJmDp3bduQaBOS9bhh9+Du66KC1Xuzx1dnn+3r3H0jN5o9nEfG1fMc15fGlH8AiLqzh2H7BOj7DVFPbt+OzGH2G09fNL7td5F1csI+2hBNEr/KRcX2pTFtC7b8s2KmbQeg3pe9gc22P/0x7sRZ8hkgLmS/B3vkPapVaUaciIjINPK1dAO6kiNbJR1OS1OKiIiIiIhIu5vL0pSD5VITUiIiS4kG4kRERKbhGiiqsm9sdrhqmB7A1ECciIiIiIiItJPcHJ5zBye0NKVIp9FAnIiIyDScA3GOmV0yO82IExERERERkXaXj7RHnIikaSBORERkGvmaBuIaJfI9xDvsvxEkCfy4s9YEFxERERERkfaVizUQJyJpGogTERGZhnMgTktTcozRrDgRERERERFpa3PZI26gpD3iRDqNBuJEWlS+Vseha9dhcFRv0YgsFO0R11gaiBMREREREZF2pqUpRcSldXoTDZAUkszBwlz2Ja+CkFsmK8pz45q+b6lwcWxm/9KOYeo+FVfSkz0uALBEGnMFrlPWGC4fwYbLZS+PNiLHvnMArMVBT2/EaXc/hlPveQzHP/gkCvUIsTG45ugDccnZx+Oaow9E4r0YhyHLFhMq6uLiMs1clY481UmeC2dGyfhCIq7sxXEyMi6YVyfKP1eNIDfMBZxYzt3iavG2a8iPYwTJ1N8ZeR7K1WIqTL3GxTXQzzW+R6oFKlw9zn4PiGKu3rLVdFzVMARQnnIsN57A5l78rl/Nfr79WuYg88KWZUPc3pgwAJ9GmxABq+S9jUxjM3l1LpEmIduE5ezxBSUyjWSb0DBlBEA4lj1McRN3k8oPcxeOXyVuwADqNSIvA+63JT1kw4mJjn1lk21bk2XLEumsd3FxRQUunFfPnidBmbtGi+R9w4vIZ9Jc9jBJgXxGCchwjjbJXCREfJaLCl5Eln8iWEL2AsV58rrp5jLFekSbsEo+3LBtEuaEN7n9Y32yLW+yJ9SG3Ln2HauQLGR81b7seVLvo6JCTLYlw/Hs9Q/7TFRaSVTk4J83EqKfKslz13ZENmbqPcT9Zrv2T2jnMCOuPI7aQELXCX6Z+21JSLbTCLkR9pkoe5iJ3cg2GtluSkLut+WHmVDcNcqFApA0r4zEhdYZmmqEzvq1Ii1mxcgYTn3gMZzx0MM49Z7HsPPweOo7vrU4845HcOYdj+C55f249Mxj8N0zj8WGZWRLUUQAAAXtD9dwzhlxc3hbUERERERERKQVzGXVl8GJEkyS0IPlItJ61KMosoTk6hGOf3QtTrvvUZx+/yM4/Kl1mcLvtmUEH/zu1fjj71+Dq485CBefdQKuO+qAKbPkRGRu8vX0W2yVHDdbQrapBlqaUkRERERERNrXXF429a1Fb6WC0e6uJqRIRJYCDcSJLLJCtYYLrrsNL7vnYax++AkUa7NPYZ9NkCR4+W0P4eW3PYRnVgzg22ceh+++9FhsGuxtQIpFOoNzfzjNiJuXbUtTTqWBOBEREREREWkXrpd6656HcIetL5aNT2ggTqSDqEdRZBHtv24jLvrUV7DnlmEqfM33kYtn3itkj83D+ItLf4U/+/6vcdWxh+Dis47HDUfsD6tZciIz0kBc4zmXpnQ8pIiIiIiIiIi0IteMuI39fdh9aHjKsWUTE1iLnZqUKhFZbOpRFFkkBz+9Ht/65Jew02h637fp1AIftx6yN64/an9c/5ID8MieO+O0ux/DW391G156xyPw7fQbaoZxgnNueQDn3PIAntx5EI/suRKx76Hu+4g9D1HgIfI8RL6Pej33wn9v/+91gwO4+rBDMdTT3YgsEFnSCo7ZqRqImx/3QJxmxImIiIiIiEh7yDkG4tYP9KcG4gYmSk1KkYgsBa3To2gBU80+gydJsoeJoukHM2bieVy4gR6u4vVM9vhGywUqrnI5R4VjSli9whVLr4vL/+5ijQpXJs53d7EKADjksfX4n09cgoGx8qxhHttrBe44fm/ceuzeuPuoPVAtvLi02y4Yx6Mv3wX/5+Xn4n82juI1V9yH11xxL3bePPPg3t4bh7D3xqHM6QeAdSv68M5/+T2s36l/yvHRjT3U3/NKPhUORP4boj4AAEOOEyRkDRsVTOYwuTGu/Edd5MxIL/teaX4lmf1LLtxPo9loW/7nKunZppUwfOHz7ZmteSquoST7uQaAFcvHqHCFIHthHq5z9w1TTl/bFT99LymOx1O+G+ezn/ComDkIACCc4PK/sIkrlNVBIj4uibARd217uZlnWTsViTAAMMZVkjbP1SVJIXs431GO58RyJ86rZw8XzN6UcEpyZOEiq3KPmPxqEu5aMzHZlueahABx3uCT+e+TeZIjTlyT6x9TI9skZDoZlkyi9bIn0szwgt1CiLrI+o7IfxuQv40pxwCiInniiOstZi/tMfK6IbIy6uYSWTZcuFoPV7byI9njK2ylokJulFuhgbm2o26u/VMd5J43PPKeyNxL/TL34Gzzzd2Hm3nerneTbQuyUmDSWN6ZiyvPdQ3Br3J5khBF2ZDPGwnZbgLxnG6KL5Z/18um63bqBdZMPbYsGQX6ufonyjexL63KxRXnmpf/Sci2m9gGLxcsN5q9LOe3VKi4TMS1myz5nOLVsv+2uJscb2hRrTMQJ9Imjnj4WXzuY99G70TV+flQXxE3r9oXNx6zH25atS82rehFX8H93e1t2rkPX3vXyfjG21fjxJufwHk/vQcn3rqGucfOaNfNo/ibL16JP/vbCxr7h0WWGNeSidVAt835cOXfXDayFhEREREREVnqvCRxbiGzaaA3dWxwTDPiRDqJehRFmujo+57Cf/3jpegup1+5fm7nfvzNX74O9x68O/Vm3fNi38ONJx+AG08+ACvWjeG1V9yLc6+8Byu2Tswn6VOcefMjWPXg07jr0D0b9jdFlhr3HnHNfVuz3bjyT0tTioiIiIiISDvIOZ5vK2GALb3pLV6WjTWun05Elj5yjQYRyeqke5/AZ//+O85BuKd2HcR7/uXtuOfQPeY1CLejDSv78aXfOxXnf+t9+Ju/fz1uOm5fZoa304e/+iugyUvmiDRTwTUjTnvEzYt7Rhy3FIeIiIiIiIjIUuJ+oTfAkGMgbmBcM+JEOol6FEWa4PQ7H8VnP/Ft5w35iT2W430ffxs2LU9PU2+U2Pdw3SkH4rpTDsTyLePYf80m5GoRgiiBHycI4sl/T/5/veLBjxOEUYIgjrHn+iG87tf3TPmbqx5+Bmfe/DB+vfqQBUu3yGJyXa+VnGbEzYdrI
FMz4kRERERERKQduJ5va2GArT1dqePLNBAn0nDGmD0BfB3ASmzbze8L1trPGGP+AcAfANg0+dW/tdZePhnmbwC8B0AM4E+ttT9fiLRpIE5kgZ19y4P4j3//rnON6Ef22Qnv+6e3YWgg/WbMQtmyvAdblvfM+J2RcmHK/3txgsMeX4cDn9w05fiffePXuOb4gxD7mlwr7We6N9mE51yaUjPiREREREREpA1M148w7BiIG9TSlCILIQLwYWvtHcaYXgC3G2N+OfnZp621n9z+y8aYwwC8BcDhAHYD8CtjzEHW2nRH/jyp91xkAb36hvvwn5+61DkI98D+u+DC//e7TR2EYyW+h0+/86zU8f2e2YI3/Oqu5idIpAk0ENd4zqUpNSNORERERERE2kB+mi0utjr3iNOMOJFGs9aus9beMfnfYwAeBLD7DEFeB+Db1tqqtXYNgMcAnLAQaVvwHkVjzACALwE4AtumA74bwMMAvgNgHwBrAbzZWjs029/yiJfmkzox1mi4fa+MSahwIxNFKlxU9zOHSRJu7DUeI5djI7LSkJuYVSrZ8wMAKmGeCme8mX/cG6+/A5/40vfgO/ZRu3P/PfGuv3oXxpIiMDx7XKN++s2ZuWB3cLOOc3DFwUfg5kP2wYkPrZ1y/P0XX4PvHHscTI4r/wl5voNR4nxzSaQlXNFCOJ79zHkxd7b9KhcutzW91+FCiYvkNZojX16ZLJLFJP0ba3nfWda9XSpUVIP93Btw1nLXzVAp+/2Gjcvm0/lULTgG4mx9yncNcW37NS6NPnfa6L08k1z2MCbm4jIT3D0xqRPxkfW4x/62EvfbbJi9vosLXB1pc+wdOHs7zSPLP3lp078tyWePMAnYa5ur/4MSF1+NKMu2xrXJDXONArDE6gWGLCTsdsTkoxQ8okkSVLhyzDyPsuEsV9Wh3k2+a8vWCUQ4qh0PIOqjggGzPLdNiyiTNiDrSLJuZdoJ1WXcxRZ1kc9tE2y7KXtZtj4XV0ReN8yzFHtvi4n7KABYskrwa8QzaQ93beeHucrVL5Ev9DF5QuZj7HPXdkRcp36FS2R+iLzfk9kfjmWPL85zfZKGrJOZzjRb21b+c+V04EoQYms+vR3NwHiJfpYybFsyyh6fXyHr/zJZ/xOjFWyfQDBOBUNhmLuXhuPZL5wkRz7/Ovpf5sKvcBe3DZfefC9jzD4AjgZwM4BTAPyxMeadAG7DtllzQ9g2SHfTdsGewcwDd7Rm5NBnAFxprT0EwEuwbRTyIwCustYeCOCqyf8XaRtvvfoWfHKaQbhbDt4H7/jI72OsmxuAXTTG4F/eek7q8M7D43j3lTcsQoJEFpb2iGs89x5xWppSREREREREWt90K+sMuZamHC/BJE1+Y1ykPQTGmNu2++fCHb9gjOkB8H0AH7TWjgL4HwD7A1gFYB2ATzUzwcACD8QZY/oBnA7gywBgra1Za4exbcrfRZNfuwjA6xcyHSLN9K5f3oh//tpl8ByDcNcfsT9+7y/fhQlyhs9iu+uAPXH5CUekjv/hT67DslHyNRKRJWq6JSWE5x6I09KUIiIiIiIi0vqcA3FBiHoQYKwwtS/QtxZ9JXKZFpHOFllrj9vuny9s/6ExJsS2QbhvWWt/AADW2g3W2thamwD4Il5cfvJZAHtuF3yPyWMNt9Az4vYFsAnAV40xdxpjvmSM6Qaw0lq7bvI76wGsXOB0iDTFhZdfi3/85k+cn/36JQfjPR96B8oFYm2yJeQTF7wckTe16uitVPEnP756kVIksjDcb7JpRtx8OAfiIg3EiYiIiIiISOub6YXeoZ70PnGD49onTqSRjDEG2yaFPWit/fftju+63dfeAOC+yf/+MYC3GGPyxph9ARwI4JaFSNtCD8QFAI4B8D/W2qMBTGCHZSittRbTrL5rjLnw+SmGVlN1ZYl77xXX4W+/c4Xzs58fcxje9ydvR60NlrVbu8sKXHLmcanjv/vrm7Hnxq2LkCKRhVGoaUZco1WDdB2opSlFRERERESkHUy3NCUAbHUsT7lsnNs3XkSmdQqAdwA40xhz1+Q/rwbwb8aYe40x9wB4GYA/BwBr7f0ALgXwAIArAXzAWsttOj6LhR6IewbAM9bamyf//3vYNjC34flRyMl/b3QFttZ+4fkphsZbehv+iTxvv3Wb8JFLr3R+9pMTj8IHPvA21NqoA/8/33gWJvJTZ/bl4hh/+b2fL1KKRBrPvUdc+1zHi0FLU4qIiIiIiEi7cj3f1oJtz8HDrn3ixjQjTqSRrLXXW2uNtfYoa+2qyX8ut9a+w1p75OTx87ZbrRHW2o9ba/e31h5srXXPsmmABR3dstauB/C0MebgyUNnYdvo4o8BvGvy2LsA/Ggh0yGy0N73s2sQOGZtfv+Uo/Fn77sAUeAvQqoWzub+HnzxNaemjp938z04Ys2CLKMr0nRamrLxKhqIExERERERkTY109KUWx1LUy4b04w4kU7RjFf7/wTAt4wxOQBPAPh9bBsAvNQY8x4ATwJ481z+kF81mSNPxrP/xDjkxifj7MkDAJgct+ym8bOHswmbSOfqobMHi4m8jLg0GnJc2Sbcb7PetnC7bRnCG264M/X5d045Dn/9jvNh6y+my3Rx5zpf4JZu8z0uvnJl9sGGr553Et5+1c1YMTK10fCR716Bt//1uwEzx/NIrjpriWJiC9y5jslxAkNWCgmxjWBMXjdRkQsX5ps3uBznuPMGsrp7frHkfM21yXLgXEw5GuMG6DZV+6hwxiPrZCJcUuGaCv54uk6OonThLlSiKd8NStlPXDieOcg29C2Ry3/mt8UFKip4NfJeWsp+vq3PthG4NOaGybqVuEzJ5g9db3n17L/Nr1JR0QzZlgyIPgYvIk+Az5YtLr5aX/Y2aJInywjxPLQtIBGGbaORr3oGFfK8jWbPy+IW7scFFTZTsgepd5PPpHkuH/1q8+pyQ7atXW2LufDIdrJfJu7bRTIf2feSiJ9WG+BWW0rSEznmJOoly/JI9nDss01hC5fGPFH/JE1eYKMwRJ7vMHteWo+8R8XcdZOQfXce0b3ik/co+nwz/R3ssw2Z/x65cFtATAALR9m+FS4c83yTJNvKY76cbivUvBCm7GGo0JP6bGBrBZjIXlC8Mlf+/Vr2MOyzJf2czhRJsvznRsj6h3zesAETju3c4pg6d3GbSvbK1WPvGy1qwdd7tNbeNbm85FHW2tdba4estVustWdZaw+01p5trdXGUtKyLvzltQh3mA031N2Ff7jgPNg2XlJ1opjHf53/stTxU+9/HKff++gipEiksWZ6k004rj3icpFmxImIiIiIiEjrc66sM/kc7JwRN6GlKUU6RfuOEog0wfLRcbzlultTx7965skoFfKLkKLm+s5Zx2LtLstSxz/ynZ/DOJbqFGkl7j3itDTlfDj3iIu4Gb8iIiIiIiIiS8lML/QOdzv2iBvX0pQinUIDcSLz8O6rrkdxh5vsRD6Hr515yiKlqLmiwMen3nJ26vhhT63D63579yKkSKRx3HvEaUbcfFQD7REnIiIiIiIi7SnvWPHl
+efgrd2OGXEaiBPpGBqIEyH1lsp459U3po5/84zVGHG85dKurlh9OO46YPfU8b/43i+Rr2mmi7QuDcQ1XiVMzygsON4YFBEREREREWk17n6Ebc/BQ46lKQe0NKVIx9BAnAjpHdfchL5KdcqxauDjS2eftkgpWiTG4N9+9xWpw3tsHsbbr7p5ERIk0hgFx0CyBuLmp+oYiHO9MSgiIiIiIiLSalxbL2hGnIgAGogToRRqNbznV9eljn/35OOwcaBvEVK0uG45bF/8+iUHp47/8Y+vRt9EeRFSJDJ/2iOu8dxLU2pGnIiIiIiIiLQ+54y4yefgIdcecRMaiBPpFC31ar9XN5nDmCh7GMR+9jAAbD6hwsFYLr4k+2+zETn2SmQjABjinNFqXFx+NXuevPXG27FibOrNMjYGXzr5LAQj019WcZ3L/4kxrvPfkPF5xASVT7z8XLz0nkfg2RfL8+B4GX906fX4t3NfM224/GYujUkue5g4T0WFYIIrW4Wt3LUdTmQPF5S5uPLD3GwkrxpnDmN9Lh/zw1y4JOROuA235WW+ms6bWqkLZshR+JbVqLj6+7llKDyPu99U69nrkqTIDZRVyum3/crWMRAXRUhyL5bfoJI9rohcDTgucOFMzJVJQ5y2JOSu7bgv+zUKAKaWvU72J8i2BffT6DZJQLwXEqWL8ZyE5H2juCF7pnjcqYZJ2BPA8avNu7clAVcmc2NcfAFxDbBzgZl6BAAME6FlHwC4fPTJd7fyI9njyw+R7Z86eQIYhntUtz75LOtx55t5bmDb1jFZ34XjZHxEU9Krkm0EskqOuppYl7OP9mS4uEDcN8hzneS4cHWiDZof5eoRr04+W46y9V32C84QYQDARGTdGpPPwKPZHwKqo1zdyrYlmb40pj6eT7igwt7vs/82v4usSDyyLUm0gZ6/jeZr6QyteyG8usFozr00palZWC9be9Ij32el+snJW03SxPeYfaIfAQD924JKE9uEZFRMPQ4AMGRbxjEIPasye+Jak2bEiWQUxDH+4JrfpI7/ZNXReHr58uYnaIl4eLdd8YPjjk0d/71rr8Muw8PNT5DIPGmPuMarBekH2HwUAba5AwIiIiIiIiIijebaeuH5GXH1IMBYYepAsG8t+jpsMEKkU2kgTiSj8+66A7sPD6WO/++ZZy5CapaWT59zDio7LD1XiCJ88MpfLFKKRHgFx5KJrqUVZe5i30d9hzf9PGsRsq+4i4iIiIiIiCwROecLvS9ODRvqSk+lXablKUU6ggbiRDLwkgTvu/qq1PFfHnY4Htll10VI0dKybnAAF512aur4+bfeigPXr1+EFInwXG+yVULtETdfVUceumYfioiIiIiIiLSSfDTzC71D3enlKQfHNRAn0gk0ECeSwcvvvw/7b9qUOv75M89ahNQsTf9z1pkYLhanHPOtxV//9Gdafk5ahh/HCJKpC3FHnoeY3HdFXuRa3tM16CkiIiIiIiLSSmbb4sI5EKcZcSIdQQNxInNlLf7IMRvuxgMOwF17770ICVqaRru68Lmz0wOTZz7woAbjpGXMtK67zI8rH/OOZUBFREREREREWom7L+HFVWG29qQH4rQ0pUhn0ECcyByd+ugjOOLZZ1LH/0ez4VK+fuopeHZwIHX8D6/+Df7yZ1doME6WPOf+cI6ZXJKdc2lKzYgTERERERGRFudcmnK7voRhxx5xmhEn0hlaqlcx6sreeZ/0Zu/cM2Ey+5ccPI8bXDCGCoa4SiyRVufGXk3EJdISwQw7RkOGs+HcAr7/N79KHbtrnz1x7TH7ASaeW2Rk2aJ/W46LL46znziv/GJ5rPoh/vW1r8F/fv1bqe+9/9e/RuIbfPI15wDGwJIr/fnV7GHYchyybSLyvEV5Iv/JcYwk4PLE5LOfOBNx5TEc4zKy1ksFQ603QViqpY5XciGSgvs3sPX/yHC6ET4XtkzevvNzrKu2Y3y2AnKXLeeMuFr0wveZ+0Z+qLmD+4asymt92X9cOE7etxPy2s5eROBXyIZMk+/3fiV7GPZcs+GYe6Jl4yLr/4S9b6er1VkFZe7mZhKukFQHuest6iaeUXJL/6Ukpj4AAI+c5GzJV0QTYvvW0kpuz1e/zp23cCx7ZvoV7uImm/+IC+z9JnueFLZwcVWWk/UW2WxK8tl/G/u8EZDPG0ydbCrN7RPw6mQ7gSjLljzXfpm7tnNj2RPpkfVIUOYubr/MVcpJLnvhsjnuBASlEhUO5P0+N5y9fVEscuW47HHXWziWPVxQpqKCXyH7MmMuHHOd0m2EJtb/SX7bNZqP0+Wr3OMh7tr2+ZYBx0BcZRy2kK2twPYJWOL5nu23szXymZSo7iKua4XuEwO4TImI9lZhC1eP+45+rTkh++6S3uLsX9pRT4GKC89xwRabZsSJzMGxa9Zg9eNPpI5/9lUv40dS29xPjlmFz5/1UudnH/jlVfjQ5T/XzDhZslxLJWpGXGNojzgRERERERFpR+6+hJmXphycIAejRaSlqFdRZA7ef9WvU8ce2XUlfnnUYYuQmhZhDP71ta+BZy0u/PU1qY//5Be/QuwZfP74Vy1C4kRmNtu67sJz5aP2iBMREREREZFWl5tlv/kh10DcmJamFOkEmhEnMotDnn0OZz7wYOr45855GSy5zEDHMAb/fN65+NJLT3d+/MErf4k/uvbnTU6UyOwKNc2IWyjOGXF1zYgTERERERGR1uZ6tt3+GXioO72G4rJxzYgT6QQaRRCZxft/nZ4N9/SyQfz4+JcsQmpakDH4+Otfiy+fcZrz4z+95uf4w+t+2eREiczM1XiuhJoR1wjOPeK0NKWIiIiIiIi0OPdA3GxLU2pGnEgn0ECcyAz23rQZr77r7tTxL7zsZYh9crfSTmQM/ukN5+Grp5/q/PjPf3MFLrz+V01OlMj0nEtTakZcQ1QdA5pamlJERERERERamrXOvoRa8GL/oXNpSs2IE+kIrdOraICoJ84czC9kD5NEJnMYAEhq3MBM90CZChcS+VGPuTQmCZcnUT17fPWxHBWXP8r9NjvDVXDhb66Gb+2UY5t6e3HpScfDEOXE+lw+skyZG2sPxrOHmykfJ1ODfzr39fAji3feeEPq0w9dfTniwOCLp501p/hM9uKPhCtaNCaNAOATYxJBJaHi8iI7+5cc/Er2GUxemR1sKVKh6n3cb0M+Rh7V1OFqPgDy7pNa7KpRUe0xMEyF27tnKxWOsXZsORXukYndnMer+XRlkUMdNretDI/vS543AlOPA4BXJcPVs/+2uMjlh4m5NFL1VnNvbYgLXLioJ3te+hXux+WHqGCI89njq/dycUVkPhryEjXEbcoabiayX+MSmZDvWCV54tru4u7bhmyTg8kSy+UjWf0gyXHt1tpw9ggLW7nfZsnfVlmW/bE7qHJpLGzh2iThGHlxE883cS5PRcXUkQBQ76GCwatnj8/jsp8WlIj8L3DXmkc+27B5wtR3foWLi73emHtiUObqf79KngDDXTfWz15OTMT9Nku+5Gx7uHZCOJp+1ptNeuhkbuo9XCdEvYvob2KbCOQ
UDba9FRO3gKiLjKtA3u9zRFnOJwijCN4O7ae67yHuMgC2/c3hZekMGJgowQsiJBm2v7Fsu5UoJx65gE2deP5iMc8aAGDItoVP9gn4RJ9AvZcbvomLXM0VTHD3m/y60cxhosH0Uq3tTDPiRKaxcngE599yW+r4l152Ompaoo5jDP7x9W/AN0862fnxX/7yZ3j39Vc3OVEiaYWaZsQtFOfSlJoRJyIiIiIiIi1stv3hAKAeBBgtTh2M861FX4mbpCEirUMDcSLTeO/V1yAXT30LYKRYxLdOOWmRUtQmjME/vP4NuHi1Ox8/8ouf4Pdv+E1z0ySyA9fAUCWngbhGqOTSLzK4Bj5FREREREREWoVrIK7meBFVy1OKdCYNxIk4DI5P4G03/jZ1/KLTT8FEgVzLSV5gPQ8fe8Mb8e0TVzs//5uf/xjvuvGaJqdK5EWzbbAsPOeMuEgz4kRERERERKR1zWVGHABs7U0PxC0b00CcSLvTQJyIw7uuvR5dtakdw6VciK+dftoipaj9WM/D373xfHz3mBOdn3/0yh/hHTdd2+RUiWwz1wa0ZOfKR1d+i4iIiIiIiLSK3Bxf6B3uTu+LNTg+sSBpEpGlQwNxIjvorlTwe9denzp+8cknOaePC896Hv7uvN/B944+wfn5/3f5D/HHV/8cXkLuuipCKtTSM7Q0ENcYrgcRDcSJiIiIiIhIK5v7jLj0QJxmxIm0Pw3Eiezgd2/4LfrLUzdJrfk+vvSyMxYpRe3Neh7+7nVvxg9WHe/8/E+v/jm+/PUvYPn4WJNTJp3M1YCuaGnKhnDPiNPSlCIiIiIiItK6XM+1rudf7REn0pla6vV+0539jfn+vuxTe601mcPMJ1whx3VAjlfymcPU6z4VVxjGVLiEyBOT5+KKe7j839Gbbr01dez7Jx2Ldbv0AnhxZlZ+WTn1vdl0FWpUmuoxd97Y810dzV62TImLC4mHGAZ/fcGbYbwEb7jj9tRXTnniEfzofz6JD7717bh5/wMAAH6FKFvkxLo6OREyKnJlMiTGHOOQy/98yKUx72cP5xW4W47hqgSEo9xvq+V95CvpSKteDqi687m/K3t9AAA9YZUK92xpgAo3Uc9lDhN43IWTW1ZxHo/608e6/OoL36+vT78dOBuPnFBnIvIaHSfbCcQlkJC/za9yaUxCmzmModPIhUvI1muSI/Ike3YA4O8bPtFMqA5wcdUGucrVq3Nly8TZ3/+LSmxc3ImrrODCxcuyt+UDsr3LYp5TErKOZEWOPUTnot5LhCOvbbb8e8Tpjgpk+U+4F4f8Cpcplni1t7SSex84JrfqtuRjivWy50lCtq3jApf/zOlOuri2XUzkBwD449wJCIg+6aDJK7vFRNsiIZ6jACCokQ+z7OoyljjfHtlGJp8TyS44GGQPWB3k0lgd5BJZ782e/wHRRwLwbet6N1eX1way/7ZoJ67f1CP7Mn0/exo9P0FPkO4XqOV95LqnNvJHlqX73HaqjaLYO/cHpHJCXgD17OeNfW72ylwZSYrZ6y0bcPcor8KlkXm2AYComD1cOM79ttwY2eAl+YPZ+3LYNnmr0ow4ke1Zi/3Xb0od/vwrNBtuoSWeh7/6nbfgh0cf4/x857ExfOOLn8cHrvqllqqUBedcUoLsIJSpatojTkRERERERNqMsx8hl37+HXYsTTk4qhlxIu1OA3Ei28lHEbwd3vyqBj7WrtxpkVLUWRLPw1+++a347JlnIzHpN258a/GhX1yJr35ZS1XKwirMcUkJyc71IJKvaSBOREREREREWpfrudb1IuqQayBOe8SJtD0NxIlsp1BLd75XctmXchNe4nn491e+Cu/+/fdiS7d7ba9TH3sUl335kzj+yceanDrpFM494nLaI64RXAOaOc2IExERERERkRbmeq6tOVbWGepLD8QNaCBOpO1pIE5kO8VaenOWsjrfF8V1Bx+C1/7Zh3Hzvvs5P99pYgxfvfh/8L7rtVSlNJ5zk2UtTdkQroE4V36LiIiIiIiItIq5Lk051KOlKUU6kQbiRLbjnBEXaiBusWzo78c7/uB9My5V+WfXXoEvfOcLWDahpSqlcfKRowGtuqAhnEtTakaciIiIiIiItLBcPU4dc72I6poRp6UpRdqfBuJEtlN0DMRpRtziin1/1qUqT1nzCC778qe0VKU0jGtQXnvENYZzRpz2iBMREREREZEW5pwR53j+HXbMiOubqMCLtdqTSDtrqV5FG2UfN6xF2X9i6KffYJhbXD4VLvC5iranUM0cxubTSy/ORbXOFZUqsg9ieYGl4oo9LpypvViuusquGXE5mCg9G8va9LHZMGEAoF7nylZ1pECF88ayx2fJ8zbX1wGuO/QQvPbPP4xPX/xNnPjEE6nPdx4fxVcv/h/810vPwf+eehYSL/2HbZNfPTBklgTl7AEtWZsnAVcm43z2zEx8Lq4kx4UzZBvWH/NRqKTvA7V6Hv4018ZzTy2n4lqXH6DCdfVmr/8BoL+rnDlMLebqnzh2l5Gyn953M1eLX/h+OJr9fIcTzS0jIMPV+rNf22w94nG3e/iV7HmZIyckFzdzGRnlybqEea+Giwqlncm6tZg9TG2Ay0fbzbV3Y0ebaC7KRJ7kt3JxWfLeRt838tnzMpdv7pK8TBu0brjGhSErLrJoodZL3KfINnlAvqyexNnzxGvyOypJyOVJVCSe0fupqOjnDb9M3jeIZ4facq5uBRmMKss9XP1jyOsmicl74jRtyZlY8nmDxVyn7LNNbYDbu94j+3JyI9kbk8bRnzIXNs89b7ANtfG904Mgsxnbk3wmylPBEPdmb5REfVwd6dW4MhKuIfvuCsQ9MeQqSeNzacwXspfl0I/RYyup40nRoKuQvp5Gu/Pom3jxud5PLHa3IxjpmtsDQY08b4lPlGWyvzWhHsAA2529cvVzXEM+Jtu7tX6u/skR99KAbMfE3G2DatsBAJZlr/AKm9PXTDvTjDiR7bhmwWhG3NKxob8f77jwffjsWWcjcTS6fWvxwauvwI8+/0n87ZWX4ZUP3I0V46OLkFJpdfnINSNOdUEjuPeI04w4ERERERERaV2ulV5cWzMAwEhvesCtX/vEibS1lpoR1wh7rduKv/3iFTj08fWwnkE98BH5HuqBv+2f0EccbPf/gYfohf/2sWlZD3590sG476DdAMeeVdLainXXQBz5CoEsiNj38e/nvAp37LIfPnHZt7CsNJH6zkGb1uOgTevxrpuvAwCsXbYCt++1L27bZz/cvte+WLt8J12/MiPXHnG1oONumQvCPRDX3JkhIiIiIiIiIo2Uc7xgWptmi4vhvi7suX54yrGBkTKe2mMhUiYiS0Fn9Spai//9h29hrw1D8/oz7/7+b3H/AbviO685Fj8/7TBU85ol0S6KtfR08bJmwSxJ1x9wCF73hx/Gp37wTZzwZHqpyu3ts3Uz9tm6GeffdSsAYHN3D27fe1/cvtd+uH3vffHALrsjZqbnS9sqOAaGNCOuMTQjTkRERERERNpNvpZeQnO6GXFDfeklUvvHsm8lISKto6MG4vZ9ds
u8B+Ged/hj6/B/PvNT/PlXrsJlr1iF773qGKxZtqIhf1sWT9GxNGVFS1MuWRv7BvB773w//uQ3P8cfXncVPMxt3eoVE+N45QP34pUP3AsAmMjlcPte++Hzp5+N2/bZbyGTLC0i5xiIq0zzJptk43oQ0UCciIiIiIiItDLX0pT10P3S97BjacqBES1NKdLOOmqPuP7xxr9ZMDhWxru//1v85MLP4b//+ds4+a7HActtYimLr+CaEaelKZe02PPxH2e+Gu95+4V4YJfdqb/RXavh9Mcewle//nnsv2lDg1Morci1NGU10KB8I7hebig4HlhEREREREREWkXo6keY5oXeEceMuIFRzYgTaWcd9Xp/obpwe9D4icVZtz6Ms259GE/svhyXnHM8fvjSl2C8u7BgcUrjuWbElTUjriXcuP/BeMP+B2Pl6DCOeWoNjn1qDY576gkcvGHdnGfK5aMIZz14Hx7faeUCp1aWOvdAXEfdMhdMLUi/EZivR9teYtHejSIiIiIiItKCXDPipl2a0jUjTgNxIm2tpXoVvVx6rd3ZFHMvDqz0x5XU57e8ZG/88x+fgzCKEUYxgijBMn8CYT1BMPn/QX3bv/tGK3j5zx/AwQ9vnDHO/Z7dgo9++Up8+OJf4dpzDsQvzj8Mz+y3LHPaZ7Ou1Jc5TLnODSp15dMzxeaiWs9exMqb02+FLCRvWfWF/+7y0ze9ap835TvP6+tOl6fZ7No7mjkMAAzkuOnp0e7cvmdPjQ1mDvPsc1wZj3LcDNKo1338yV178OTBR+IyHAkA6C2XccwTT+H4x9bghEfXYtXap5yDLM/rt2OorZha13glbvJwOM4NKlQHs4fz00V0TuLlXBrrXdnD5ce4c21iLpwl53wb694jrhaEMNMkJezl6shVez5DhduzyC2znPeyzzzbUu+m4to6Nn24WuAjF029zrpRRS0MUdk5exq53AdMlSskXo27bixRJSeFhIor6iPT6GW/3qKt3L0mKnD5H2S//QIA4nz2MB5ZuKarK2aTEPlvsjePAXDnGgDgkQPmRKbUszd1AQCWTKINuTzxiN/m+9y17ZOFK2EzhRDH3LVt61w4v5r9tyXkU3BCLpTh1bOn0VTJdlOTJ5lHeSL/yfY/27bzyeIfdWdPpw24a9uwixUR59smXIaw70vZkMuTpJA9wnovl8j8MFtHZi8jXr25Kyj5dS7/kWQPF/dzL6Jbsm0RjHCDFpX+7NdbdYCKCvU+Lv+9AaIRSrYRqmR/R3mMC5cQ9WQQcg1ett5i2jLGWISOPeLKQYgoSf+9rb3p5+WekQpq0Ryfrci2Xa4re9mqlbm+ZBuQ9V0te/7HZe6Z1ETkPZH8aT5xafs1st3EZQktJu7bSb7JiVxkLTUQN1/FSrq0D/d14endpw4gLCtOP+jx4ze8BAc/uB7nXXY3zvjNo8jVp78ZFMoRXnHZg3jFZQ9i7YHLsHWnbowMFjE6WMTIsuLkfxcwMvn/Y/0FJEFHrRa65Lg637VHXGsbKxZxzeEH45rDD4ZJDHL1CEc8/QyOf2wNXnXnvVj15NNTvl90LE8qncWPYwQ7PHhGnofI76wGwkKq5oLUQFy+HqGm+lZERERERERakGtGXG2aGXHDva6lKbVHnEg766iBuEI1XSFWCtk7/R4+dBd84tBd8MX3n4ZzLr8f5/74Huy8cXzGMPs8uhX7PLp11r892p/HyLJtg3XDy7pw/7G74bpzDkA931Gnaop8vY4D123AAes2YryQx40HH4BSgXitfQ5cy5eW8+oYbie1MMAd++2DO/bbB1t6e7DqGzsOxC3cErbSGrQs5cKrhgF6MXUaZ74WYYZJdCIiIiIiIiJLVq4+9z3ihvrSS1MOamlKkbbWUT2LrkGWyjwGuIYHu/Dt3z0el77lWKz+7Rqc/6M7cORtz80niegbqaJvpAqsGQYAnPKrx3H+V+7AD37/aPzmNQchDtt3RoZJEuy+cRj73LcGhz67Dgc/ux6HPLse+27YBN++OA334d1W4oIPvw9DPY3vsdUecZ2llE+vL1TQQFzHy0fpMqCBuMZyPYzkHQ8tIiIiIiIiIq0g0x5xfekZcf0aiBNpax3Vs+geiJv/IEvie7jx1P3x0Fm7Yvc1Q3jFDx7A6Vc8imKpMR36yzdN4A/+7Xqc98278f13H4PrXnkArN/aS1j2j5Vw0FMbcfCTG3DQkxtw0FMbceBTG9HtWD50Rwc/twGvu+UufO3MUxqeruIClRFZmspheiCuS0tTdjzXErXVUPVAI7mW59BAnIiIiIiIiLSqvGP7oto0M+KGHQNxWppSpL111EBcvgmDLM/uO4ivfvgUfPt9x+O0Kx/FK77/APZYO9yQv73yuTH80T9dg9d94258973H4IcnrqI3tV0M3eUq3nD1Xbjg57fhwGc2zetvHfzsugalairXbKiyY9aUtAfX/n/aI05cA0IaiGss14y4nOPtQREREREREZFW4HqmrU2zstlIT3ppyv7xMrw4QdLiky9ExK2jBuKKlebNdip35/CL8w/HL954GFasH8eyzSX0bS2jf2jbP31DZfRvLaN/qIK+yf/vG6nO/ocB7P7kMD74//0a5+53L778eyfjxpP2B8zSHZDbY/1WvP2KW3D+r+9Eb2luv3E2CzVrqVhN/10tTdm+yjktTSlpOe0Rt+C0NKXIwsrV6zj7/gfQXa3iyqOOxFgx/aAvIiIiIiKN4+xLmGZpyijwMdqVR992/aSeBfrGKxjuT8+WE5HW11I9i8lY9gGRLabnxf8ZTX++OenBlqGeKceGx7jOiif9ZdN/uHzyH4eV/WMAAD+KMTBSxsBwCSu2jOM1V96Ll1736LR/8oAnNuGfP/YjPHzoSnz9Patx53F7zjogl/e5js6RaiFbAGtx7N1P4Y3fuxNn3v4IvO32eGuEQrUO7Pgn8wn1t7bPMdcecZV8CFeu1qLs+/WtH+/NHAYA1tk+KpzvcXkyXslnDuPl0lPw5yKJuEFkU8n+hpBXnxpXBemBuGK1Bq8y9XuGy8Z0GZ2joJQ9oA24fLTki1aW2a6SrAfiPJdIG3LxufeIC2FnyGLf5wrJs+P9VLiHN+9MhespZH8ZYmXXGBVXMT/9CxP1fLoA9aGCYr6GaKTHEWJmpsljeIa8tpnrhr22nTeuBQoXdXEZEo5yiUzI92N84l0gE3G/zSPvbbZKhOudmsYgjvHNz38Bxz+xBgDwoSuuxLl/8efY0rtDG6TG3gCa9/IXfW2TTzg73v/nqjaWffWEepksyHQFRPw2tvlOhjMT3InziHeoAnYLFvK3ebXsAYMKW/9w4UzChUuIPczjIhlXjmtvWY/bZ91j6mTDxWU99sGBCFcn29ZMXAB/32CiI5OYkPeNiHgh2mfKFfhn0uoAd78JwuzlxK+yD86cqJ/ru6v3Zj8H9V7utyVdXD9JMZ/95lav0QWZCsY2SZiyHFW4cmxj8reF2ROZ5A1y1fT5HkcBtWnOzVBv95SBOADoGq5hY/fsfYBsN2u9mr2cWPK+Ybq5xnxYyB6OaY8DgCHyAwCCc
a5sBcTqo0xbFwDyo1y95dW5wsX0dyRBZ83+bKmBuPly7f9VXkL7f8WBjy3Le7BleQ8e339n3HzCfjjokfV4z0U34KRb1kwb7uAHN+Djf/Ej3PuS3fD196zG/S/ZvYmpnipfreMVVz+AC354Ow5cm335yfFiHg/tugse2n0XPLz7rnhw912xcmQEn/3SxVO+t1Az4pz7CGpGXNtyzYjTHnGSd+4R11G3ywXn2iNOS1OKNMaJjz3+wiAcAOwyMoo/u/IX+NjvnL+IqRIRERERaW/ubS6m70sY6uvC3hu2Tjk2OFrC2sXr1hWRBdRRPYuu2U5LfdnBRw7aBX/98fNxxH3P4r1fux7H3P30tN898u7n8Ik//QFuP34v3Hbi3tiyohtbdurBlhXd2Lq8GxHxtuJc7bR5DOf/9E684fK7MDA6t1dMH9t9Jzy898pt/+y1Eo/stTOe3WkA1a1Tp2Af/2h6ENJ1LhvBWUaW0GCtNFbZse/XQpUtaR15x3ISFe0R11DVXPp+lNdAnEhDHLBhY+rY62+7A/983rko57PPeBcRERERkdk5B+KmWZoSAIYd+8QNjhJTpkSkJXTUQJxztlOLDLLcd8Tu+OAnL8Axdz6F9371Ohzx4Lppv3vsrU/h2FufSh0fGixi84oebFnRg80rurF5p15sXtE9+f89GO/Jb1s1whhYM7mqw+R/o5bAGgMYwGLyc2Ow71ObccEPb8eZ1z+MIJ59yms5F+KHZ7wEX3/ViXh8z7ktreYaLC0u1Iw419KUS3ywVngV1x5xjtlQ0lncS1N21O1ywdUcbwXmtEecSEO42ki91SrOvfNufHf1CYuQIhERERGR9uda5cX17Pu8rb3dqWMDYxqIE2lXHdWz2FVNd0y4lqZbyu44ei/80aq3YfUtT+APv3Yd9n9s85zDDg6VMThUxoGPZl8ycr6eW96Pb77qBHz3zGMw0ptt01HXQNiCDcRpacqO4poR11WrbVtsm1jvX9qD8y22QPVAI7mWptSMOJHGmK6N9Nbf3qSBOBERERGRBeAlCXJxeo+4WjD96mRDfen+0YExdiNbEVnqOmogzrXkXKlFZsRNYQxuOnF/3H/K7jj52sfxjq/chL2eHFrsVDnddsheuOjVq/GrEw5B7HNLY5by6cFSLU0pjRD7Pqq+j/x2jSXPWuSiCDUtRdixXLMitUdcY7nyUzPiRBpjur1Oj37yKRz67HN4cPfdmpwiEREREZH25nqerYTBjC95a2lKkc7SUj2LxmafoRLmXqwIXW8IRz3elO8AQF93JXviAPQXuHDetkUgM3tqdBBPrToOl376GLz62vvwR9++FnuuX/wBuVrg44rTDsfF556A23bbZ9tBC2COfaxe1Zvy/1VbSH2nq1pLfQ8VL/W9uYiez35r0eWYETceF5GU0397giiPIMtWMccNPJZr3GBSLsjeIe71zL40qcuE4/zOLWD2fW6SMH2tVXI55MtT3zjK29q2BtMkP+Jmx1lyW8Yklz0+pjgCgOFOGxUuqHB1Xb2HCob6QPpttLkIfcfs6WKAuHv6vxdvSTeg52JdiZuVvesuXF2/smssc5hKzNUjlRnqn5Kf/t1e2aJSC6myzM5fZa9RcEULYK437tYGWyATSWSmjbkz4LjUFlREXKa5US4uj/1tRDXp1afmf7EyfZvhLTfcjH98wxsBAInP1clUOQZgmHJCXtx+hQuY5Mg88YhwhovLC7gTYNmGAiMh200h99vifPaK0iPf6WPrLSZcWCbPNXvf8LjzFhWzh0uK5Ms3bL3leJabCzYvKWSbxDJ1ApmNbP3P1nfUPYCs6tg2oUc837DPX00tjwCSsHkRehFXRozlwsVEF0TSxZ04U+Ta5EGQPVwcc+eMvG0jIXuUDdG/kpDPG1QbDYBP1K2uNngtDODNkIbh/vSMuMGxEswc6k2fKCMAkMtlDxcRbS0AqNe4QhJHRKVMlhG6biXDRdkWiQPAtz+9OnvD5yR+9nMQFdlOmdbUUgNx8+Wc7dQGyw4mvoefvuwoXHna4Tj5zsdxyJoN2HnLGFZuGZ389xiWj0wseDq29HfjO686Fpeecyy2DE72nDfgRY6SY/nQhZgR51qOrhIESLwmt3ilqcq5EP07DMQVazWMdBN3R2kLzqUpNSOuoVwbVmtpSpHGmG5GHAC8/o7b8a+vOde5R6qIiIiIiHByUXqAy/Xcu70hx9Y92iNOpH11VM+ic/+vNlp2MAp8XHv8Qbj2+INSnwX1GDsNjWH5hhJWbh3FzltGsXLrGFZuffHfhVoEWAuDbW8WGQsYbP9vO/ky2+Qxa5F4Bo/uvRLff/kqXHna4agvQGd1NQyQGANvu7ed8lEEL0kaOkjmmjHZTuVD3FwDvTN1Ykr7yzuXplRd0EiuDatdA6Aikp1red3n9VUqePU9d+MHxx3fxBSJiIiIiLQ314uls73Q69ojTktTirSvjhqIcy072Cn7f0Whj3U7D+DJwRVU+Fy4iB2kxqAchujeYXCkWKthokAua+jQrjMmZWYVxzleqD0IpTU494gLOup2ueAqmhEnsmBme5nkLTfdpIE4EREREZEGcu0R53oBdXvDrhlxo2XHN0WkHXTOmnvWOmfElfNamqcVuAbEGj1YUnSVDy3d1PZc57hQ14y4TuZcplaD8g2lpSlFFs5s97Bjn1yLg9ava1JqRERERETan3NGHLE05aCWphRpWx0zEBdGMYJk6mabdd9DFHTWpoCtyrWXiWspyflw/T3NiGt/zdqDUFqHc484zYhrKNcSHVqaUqQx5rK88gU339yElIiIiIiIdAZmr/mRnmLqWN94GV6cOL4tIq2uYwbiCo6Odc1waB3uwZJGD8Q5lqNTGWl7rnqgq6oZcZ0sH7n2iNNAXCNpjziRheNq8+7oDbffhrxeOhERERERaQhmIC4KfIx0T91yx7NA30SloWkTkaVhwXsWjTFrAYwBiAFE1trjjDHLAHwHwD4A1gJ4s7V2aMY/ZAFTNZnjr47nAQD9Q9XUZ+Vc7oXPt7elxmXLkNdNhRvsn6DC9RTSv2k2tSCm4mIxv8w6TrNzacp6bcp3PbIP14xvO9+F0fQbJ2U/98LnO/J7sg/W5Mj8DzzubZhl3dyU9jjJPkZfrnODluNJ9usaAGx39rz0R9MzYCt+epC3q1yHV3sxXUloM8cFAIZ8iSkuZM+TpMmTe02cPY31Lu7dD0NWW6bKxVesOtZ293Iw9en/3s4HbqbiOmXlE1S4/YqbqHAFk73jvZSk75Nz8Uvv0Gk/G1yRXvd+mZnAATttxn0T2ZcEZt8XDHJs4eLqhKiSvZ7MdXEvBiRk3ZrPZ7+ZTgTcfq3je3Jp9OpkOCIro24yruxNNABAvTd72UqCqWG66rNH3l8u49W33ofLTjomc3wz1YUzB8weJBzj8t8n+y7qvVx8zKuNYYFruFpXQ3kOEqK6Y+OCx9WR7CuiAdHcDUpk2478aSDCWcPlP9OOBABL5n9CPAKYAvlMRN636dcOiPout5VrlNcGmzj7wScLMlkl0PERmHs9APjkfZsRFbmL
jb1GTczlfxJkP+EBef+Ni+T9vkpeN0SWmIhLoyHviUxsvs/lR0JeouE4F9AjqvKYfLYJu7g7QL6QPVyPTV8AUd5DITfz3xrrK6B/h4G3vapDsLPUFWOGe073iMZMmXhGB4CkSnZUEafb1NiGJPlsX+DCBePZf1ycI+ufiEuj9ck6uZY9Pr/S3HGKxdasGXEvs9austYeN/n/HwFwlbX2QABXTf7/gnLNcNGyg63DtY9Xw/eIc/y9cqgy0u6ce8RplkBHc+4Rp7qgoWqOtfJztc5qgIksFNeKAb8+9JDUsbddq+UpRUREREQawbVHnGslmB0N96eXp+wfTb+4KiKtb7GWpnwdgIsm//siAK9f6Ai1/1drcw6W1Bd+jzjX3nTSXpzLnja4bElr0R5xC6+WS78Zl3M8uIhIdq4Xi75yxumpYyc8thYHPrehGUkSEREREVl0PaUKPv2fl+K29/w/XPwPX8Je67c27G/n6ukXS10voO5opE8DcSKdohkDcRbAL4wxtxtjLpw8ttJau27yv9cDWLnQiXB1SpTyGmRpFa7Bkkbv4+WaBaXB2vbnmunU1eD9B6W1uPeIU13QSHXHQFyoGXEi82aSBMV6ug777YEH4KFdd00df8t1tzQjWSIiIiIii+5jX/sZXnvjvRiYKOOEh57Ef//HJQ372znnHnGzL82ogTiRztGMgbhTrbXHAHgVgA8YY6a8kmuttZhmhWZjzIXGmNuMMbfZZH5rprsG4ioaZGkZrnPl6miaD9csqHKowdp25xps1dKUnS3vqFs0I66xtDSlyMIoOOqvShgg8TxcctKJqc/edOPtzjpPRERERKSd7Lx1FOddf8+UY4etXY/lw+MN+fvOpSnJGXEDIxqIE2lHCz4QZ619dvLfGwFcBuAEABuMMbsCwOS/N04T9gvW2uOstccZb35JLbiWptSMuJbhGhArNnhGnHOPOA3Wtj1n2dLSlB3NuUec6oKG0tKUIgvDNaP7+VUFLjvuWFR22KdioFTGq26/rylpExERERFZLOdfcwcCxySPvlJjBr1cz7PVOQzEDfd3pdM0poE4kXa0oANxxphuY0zv8/8N4BUA7gPwYwDvmvzauwD8aCHTAQBdVQ2ytDLXMqKNHizRHnGdyTUg7xqUlc6Rj7RH3EKrOx5ItDSlyPy5ZsQ9/8LJWLGIn65alfr8rdfdvNDJEhERERFZNCZJ8OZf3+78zNVfzHC90FsLNSNORF600D2LKwFcZox5Pq6LrbVXGmNuBXCpMeY9AJ4E8OZZ/5IBkv7sb8sPLN82xXh5OJb6LO41L3y+vWXdpczxAEBguOUzN5fSbz/Mxdbh/sxhjHGuAjorz+d+W6GQ/YZW3yUdV20w/b2ecALhLi+eq/p6Lh+fzxLnjLgwxHRZVt+UvlnOZvNoPnMYAJg2EbMFq8y+HrWLLWTvEA+6uNkstsal0dSzv0fg1U3qWMVz7D9YqU35rl9Jh5tbfFQwBMTKCH6dKyNBiQtXGMpeRkzMxZUb4wbEk5B718S1R1ylx4MtTv+b9+gdpuIqJ9xv+9mGI6lwHlGX7NPNbWAdJdPnfzlIvwgT1mJEiQczlD1P/DJ3jZK3bdR7yYD92SuFJOF+G6tez14n2zJXjwe15v62hHj/yq9ycYX0CjfZ8yTJv3hddztWCyjnci80I769ejXedOttUz5f/cga7L9pPR7fbec5xWd9ri7HWPY6mb1G/RqXxrohy6SjfTFrEEu+EBiTaWTCsZdowJ04Q9YJHvEeh0dOwmbqETZcnG9uHcm20xKmR4H8aYllr1Hy/WMiS9gkBmPcvTQuzm8LjyxsgYyLbMsw7buA68qBF5HPUpXs4QpbuReKk4Arx3GBDJfPHq7WSz7bk/WP6/l+LmwzNul5Pi6y/Nej7HlZmeCeLXNVsuIi+6moetIj45qM7JR7n8Cem4ad3ylU6i9873m5gOh/RnrwzO9JsKJnYsZw1cF0I2H52AS6w5nriq3jXB9oeSh7X6Ypcdc2edpgA+YGzMXFhrMhFzAuZr8AipvINlqOu7bDce4laeZ+Exe4sjUTY8yeAL6ObeNSFsAXrLWfMcYsA/AdAPsAWAvgzdbaIbNt4OozAF4NoATg96y1dzQ8YVjgGXHW2iestS+Z/Odwa+3HJ49vsdaeZa090Fp7trWW6+XLoOCaEVfQjLhW0YxZS64ZdpoR1/4qzqUpNSOuk7lmlFTn8CabzJ2WphRZGK7Z/duvAHHHPnvj4V1Wpr7z1mtuXdB0iYiIiIgslrdcddu0n3VVGrPalut5tj6XGXH9hdSxPs2IE5mPCMCHrbWHAVgN4APGmMMAfATAVdbaAwFcNfn/APAqAAdO/nMhgP9ZqIQ18T2QxeUaiKvkNRDXKsqOc+U6p/Ph2li1EqqMtDvXErUF7RHX0bRH3MJzbVqd09KUIvPmHojb7oUTY/Cd1atT3zn/hjuQ17LMIiIiItJmlg+P4+zbH5z282KD+hZdz7OuF1B3NNKfnqHWN1ppSJpEOpG1dt3zM9qstWMAHgSwO4DXAbho8msXAXj95H+/DsDX7TY3ARgwxuy6EGnTQJy0BNeMuEat4/w814y4sgbi2l7ZNSNOnZEdLe+cEae6oJFcDyShZsSJzJt7me2p97nLjj02te/lsvESXnnH/QuaNhERERGRZjv/2jsQxtMvrdtdIdei30FYdw3EzT4jbtQxENc/qhlxIo1gjNkHwNEAbgaw0lq7bvKj9di2dCWwbZDu6e2CPTN5rOE6ZiDO9YaDBuJaRzNmxDk7r7Q0ZdtznWPXoKx0jnyUHhDasdNa5qfuWpqynsAk7MLuIgLMYUYcgJHuLlz+kqNS33vbb7Q8pYiIiIi0D5MkuODq22f8TuNmxKX7EeYyI260L700Zc9YBd4Mg4cigsAYc9t2/1y44xeMMT0Avg/gg9ba0e0/s9Za8DsL0jpmIE4z4lqbayCuq9rYwRJX55WWo2t/rlmPrrIgncFP4tTbcrExiPyOuV02hzGohY5ZcY63CEVk7rpm2SPued8+Kb085UkPPYF9129akHSJiIiIiDTb6gfWYJ/1W2b8TqP6Ft1LU87+Qm/iexjryU855lmgd0zLU4rMILLWHrfdP1/Y/kNjTIhtg3Dfstb+YPLwhueXnJz898bJ488C2HO74HtMHmu4julZdL3h4BrckaWpGTPiCo7l6FzLFkp7qTjOsassSGdwzYar5ELAmEVITXur5l37xGl5SpH5cC6z7Zj5feu+++KxXXdKHX+rZsWJiIiISJu44Ne3zfqdYsMG4hwz4hwvn7q4lqfUPnEiHGOMAfBlAA9aa/99u49+DOBdk//9LgA/2u74O802qwGMbLeEZUO11lpbUfaO0PGJbVN8g4n0lN5hdL3w+fbGxtMV4Fx0dXOV5MrecSrc7n2js39pBxN1bmBptJLOp7ko17IPdsZR+kY14afjL1bqU75rc9y0bVPdNh7tXs5p+vR7y7KvI93Xw63zPNjV3PWh6/HcGgvb2zrRRcUVjZCDnUR9EHWlZx2PRY5B3qg25btxgZutHI5
ztWIxl2oLhoOrM7gnZ4evSNnTOMy1zf2Mc61y6ft+BrgZfD6N7OjHGZQy3zMgAwX+Ht6Osk5guPUcJr8tquV6hb+m4P7Xpty3La4t3/VsP8u1lt5r4KZjGVKOMyPnPzJrOJBPTm10jmiHNcQCvoHeZ9QFDhCUUWt72LeJMyyy5uWumF55mXORXiSrFS+uhCVzcXv62G2f2ghLiGqsFp0nVmKShGxM2vdIDU8D0w/TyA0OPVrTTjvdzaMW+3tM1r63LH/H4AQB7wzqc98/ctY7orKGbQtN2jLa8QtjP3NHbgvvvmzM+lePdxV8gbk4SOeV1+8DmHWeeKU15/v9DaXJDfijNml1nlOKSaVydbXfN+I2G2I1wsXpOAZd98vmHZvJc0S3mDEsc170vTFq+vd+/hlVNZ2Qeg0AhC7Gw11x2tooN2WG4Mq3Lew4528Z6b9nnt3VxgPng9u7bAOteMwxNX9nm925+HpMW2dKlSxaHunNG5HhbeY/T5Ezx95gZWuXbNfL3jmvbZrHPdMbeDVc5mtCVRp/w72txbbHPm4jbCSvn5is2cb7RONy/TVcUIvlIwUy9z2xLFmd4wxsgAEEXl2sVaq7im+e3W6Wjl5d+DIGDOSV3mwHU3LwrLdhhzUsY8FuDV/6RCvHdJBzU3BrSGS0TEedUMIXj3f9/MitHn/Z3F8+/JGjhv9+Zr5idzuFk3Oh8AHF/Yeu1rM/Ru3vp6zNho6N3CG6PZMXNuyZzLplXzctxr9JaZ83SP993Ya3fbiOmJiIuKjWsWMmdzwkQSE7tW/PiBhdsgJqzoJCJOKEHkFSdLkidusqEiGqlILGH4DMOacrZdFOKWNskRt9nP5ojfIQiDhorub0u+W0EQhsxyUBQ/ZzvSL44rfpOYB4s15USwUi2+a3XJETd1OGmGcMMYL1cKLcIiURg8lNuEv2ZNaWc57Hy9uJHZCjlzgz+HDrHpJWzL+pQgTAJTI8TZnaIQl4ZT8/WEEsRBcTJxso+zHxGWHEQZQdhIh8glGHZloDPJUNaUkiNuPBi4EKc1mSOOygN3gqWQyhEn1pTC8KkQ1m8dYrOIIAjCIGkQQlxd8sSNLV6zGD0RV2XcOwmQOeKa8q5NGzUiv1cz9AElwQWjICKEuKCzOh91u8W152HaUgJAu1Ic+1c6/U3lIAjCYJgapcohhDiunZAwmdC7Vh5YvPWJiLjYFyFO6A21yLlxx5owWfgpFREn7cE4QOaI66MQV4tjuPn6MUPHdbeMiFwkIuIoe0tBGDQBsQlE8t0KgjBslkMqIk7EgXGFiojr1KTvmARWqkUhTiLipo86kfevUTG3ZhX6Ay3Era7/uISzTuINdx2hXSXWpyQiThAmgqlRqsiIOMLXV5heKFEtPCkKLogISw6G77Cw/aCEuIpExE00ZI44EeLGAjoirn/CNxUNt5Ut5erPqYg4EeKE4eKmaUFETiwLiSNtlyAIw4WKiJuRiLixxSOEuFisKScCKiKu3ooAzUyqJowlpBAXMvPtCafMVpv8qfxwyZAj4mhrStkoLgiTwHQIcVrDpnLESUTctoIS1fw4If99AomIE8pAWVNSUQnC5BAQwo7kiBsPBm1NSeV2o6wn1/2cjIgTa0phuFCWyFQOU0EQhEGzHBb7xbpExI0tXovYkCrWlBNB13MRbRgbO7lGhUi7IUwu9Q4VESdC3Kgg1xbXIuI8SohzR29NKRFxgjAZTIVSpRINa0NbmDuAdsVPeTvRK0dcQFhTSkScUAY6R5xMfiYZOkecCPPjAJWrr59C3DyZH848Ik6sKYVhExLtVtsVIU4QhOHTCIi8VSLEjS1+g5gHizXlxCD2lNNPrV3MESdC3OigIs6CMYqIi4jrq3QSqFwiZQVh3JmYVUcFjTmb3n3uJMWGMAsszNktPNRdNj7XE/wF4zIAkGC4jd69qfnju2nuNNa5VvKi/UgZZizzCdnxrMY6l10t3v/T9TLOqi4CAAIiIm7PjiYWk60XYDfjxmXze3msuXXUxWbc4exglVtq8J6b5xXfqV7sqvGiQ/ZUGqxyuTYX2m86upd1rqhafNcqTgdqdutdRzriDchUbF5Oe7z2RyW8DQvaMT9fUmedCuym1S1GSp+AzBEX2oCbw9qi3GY8Zv+dxmUA4LzKMVa5c/0jrHKeMn+3b4/3sM71hWMPZpVLCFvpIEtge5s/l2RHeaGuroptzuJMBSnRh5zg2I5iWzoXtaBrZgKhWzEX8OOU1440l3ntv7PCaH+Yc8+MmfpCW7xGwaqYC7ops/1xl3kLnOlZm9cRLyv2s53QRW2Ot/i92OLVkXsPz7HK+Yz6X6/wFhpPq/HGFtwxyaxrfp37w0XWuXzVv40JvWgyX9LD8Qyr3M1LvP7mkfsPGpfxNu7iLEkO3rhpKTZ/3+625ljn6jIjZR2D/SUrQXH+NNuI4DTL3Z9kltm47i4uVpfBcczHdgCQ5OYd3OEOr/4vO7w2edbp3Qc4zWJ992dT7HDM5m+fXbrQ6PMn2OPz2tZ93pJxmVbKa7eSLm9pLLXM65btmLU/jWqAPUvNdcfcu4FWXnJtgVf9Wdv2vYi5GZ55jTlTT3ZXzK8zrfIGvNWdvd+zXVmzcEzPA2fvMVub9G3eGOHW7i5WuTTiPYCZuvmGxrmAN95dic0FzY5f7EfzBnBvcxYzS8Vrbzke7m3OouryotKqrmHfZgNR4BTS7+gm0Kn0fiZZznhPmd22znjxP5yWJOUt7cLKeO2W0+LdlDQ0P1/GDTRihl9x21bNOB+nzCQzFV/Xbhd77ZxRsYXJJg6Lg2cvWh3kqlzDi0e/c0WYTNo+EREXS+j/JCM54sYX0pqSSIrNhYpkWyQi3k5mqUpExDUlIk4YLlQkNtU/CYIgDJoVIkfcbCT94rjiE9aUMbHRUBhPlqtETkYip5gwudSJiLhmlbljTThlKOesMNrcmrLrDb89pQS3ithTCsLYMx1CXKeoQmeVqfhqggFdwprS72wePh77NrQlgq3QG2pHVEhEWAqTQ0AsaEuOuPGAzhHXv/dtjhDQlqpbb59bJH4+JznihCFTITaASI44QRBGQYMQ4uqRWFOOKwFhTRnXRYibFBqENeVMW963aaJGWI22RIgbGR1CiAvuF+KK7Wl3BBv8O0SeuEpHhDhBGHemQq2yomJEXBaIwLLdoHLE+Wt54byo2Fkmvkw+hHJEZI44GeRMMlREXDSCnWxCkZh4Dn3NEUdFxBERbyezEobI1fpxxUwnhpPyrM0EgQO1gaBNbBQRBEEYNMshEaEjQtzY4reIDWg1GfdOCitErrAZyck4VVA54kSIGx1bRsQRqZFGEhFH5YlryRqVIIw7UyHEkRFx4VR8NcGAblDcheJ1NhfiqM8LAkWHsP6icg4Kk4GV5/Cy9QPoXCkktrQJ48AorCmXKltHxOWWRS46zhG/SxAGBRUR1yE2igiCIAwaKiJOhLjxxEpyuBs2LucWkIQy7p0UVoiIuFmxppwqaCFONluNiogQubaKiBtFigvSmlIi4gRh7JkKtcruUDnipuKrCQbERGfpR5sL
cVQEnSBQUBEHFRHiJhZvs/xwSiKpxwHamrJ/Qtxcs2gpSVlPboSyr5Q8ccIwoSyRJSJOEIRRsCxC3MRA5oerybh3kqCtKWUMOk2QQhxhPSgMB8qa8sQ4nFpLGBtrSskRJwhjz8QoETkUrmw/iPzZg5cO41G4Z92xBbeKK9sPQhbeYXwuWxWFvTLstHiDodMd3gLjua75de6172adayHndSxHs63tvihaOS8Ef6uIOD8iwsfXhLh7O7Os8+XafPJSD4oDrDK0urwd70mHV85xzOuWbfHem8W49wI4RSM2ryedNm8wGxE2pmHShbKK0bjrYO4H0AHD7o47l27z3m3d67sT2O3hbpBwK7RYWs+KC0Wx69z/ecs2r8sXVA8blwGAp9ZuZJU7zeb1N109vGfwBTyYVc6dK/aJQZqgXtt8gc9kV/fOuFk4Fp2vUT13ectyyzt84Oj6Y2e6x3Bsd/k2rB2Zt0HNJV4bqXNeo5DVzOu/WubVK4vr7DnH2wjhB+blOm3eULk7z+sT3crmE+g6iu99XLFx9vwi61ythNcn3h3zxhZZZl4nj947xzrX8aDGKrdjnpf7cSYwj0w42OaNPy1l3v8CQGCb1/9U88YIac5rE0KX925/5/huVjkONvP+t9rm49aceR+1y7vGeEf5duuYV/w+9biDvGTzkIa8a3Q9XseRdnl12WGsC6TMsdY19+xnlfPO2notYffRRuFYVPWwkJrP0w936sZlAOCrd53DKnfB3iPGZaKM12+nK7w+UaXmfZsOzerxglPs02bbEVDyNQoP8e4JpwvwF1inKv1dNsKoxgCApG5+Qncvb7PBI/Yc6vmZ09PiPOTMfUt4/K4DRuc6HM8Yff4Ed/tzrHIhY2wNAJqxlsZZfwOAmme+BmcT9SOIElhKk04tqWfDUhr3rvDufxyZj60PqeK5Oodd3H10vmfZLDXvp2zG+jMApBGz/20wrpG33Mpuf7LiHolSKG1+wtzl1f+0uG+q3PmYapFbXGrpCfc+TipTETbmdooDGbFa2H50CbFkqxxxXYmIE0oSEdaUVGSCMBlIfrjxhvLYpyxAuMw1ipPopXrvEepyvSiIza3I7n9heFCWyJR1siAIwqBZCYurJvUogtK8hTphcITN4gaPqCZ9xySxXCEiUCVH3FRBvqd1eU9HRUSsFZ7IEUetJXRHYE3ZCosbYqpiTSkIY890CHFEtJMIcduPOCQWb7ewppQccUJZOoT1V9AVIW5SoXaxjWLwLNBQz8JPUoCxc4xirlGMKlqu9RbiKLFulhD1BGFQkDnixJpSEIQRkNk2mv76RUALGrWYux1dGBRhqzhniaoy7p0kViqUNaXkiJsmKtR7KoL5yIgJa8ogTqFyDa9bXH+OR2BN2QoIIS6SPlgQxp0pEeKKO+8SEVm2HVSEm79mTenFxMI7EUEnCBRtIuKgEsluo0nFT4oTnVgi4saGzLaQ2OuHJ5YGnPTUd9m7SYbahp2CuaJzb2yEEuKo6DpBGBRUJLZExAmCMCrIKB3JEzd2hC0i0qYqfccksUK8a7OSI26qIN/TmsxPR4W2FJmeJIgT0qmFcnQZNK2wuBmv2hEhThDGnakQ4hwqIk6EuG3HVhFxVI64WKwphZJEHrEjSiLiJhYqIi52ZUFinKAmM5QNiCmzzeIC4UotRG73Hg4tS0ScMGJEiBMEYZyg7ClnOiIOjBuVJhFpI5Z3EwUZEdeRiLhpokJYU8YSETdSOlRUXJTATYpri6MR4qiIONksLgjjzlQIcV672BCmIsRtO8gccWtCnEtFxIkQJ5SEsv6SHHGTi+SIG39iyp6yD3niKFvKMvnhVj9XzBE3uyILjsLwCMWaUhCEMWIllIi4SYCKtInFmnKiIKNP2/KuTQsq1wjahGOLvKcjJSKEuLDT3SQibjysKWsSEScIY89UCHFkRFw4FV9NMICOiMvW/X0yiS9irVCODhERF0pE3MRCCXGU8COMDmoy4/UjIo6IYCstxM2INaUwWiQiThCEcYKyyxMhbvygc8RJ3zFJkO+aRMRNDUE7gbUhFXYUOsgdWdMcJaQQFyVkjrhR5JtvUtaUkiNOEMaeiVl5zKFwU+s08mePaRwoHLvHnsdNrdNwqDtrfK77ohnjMsBqgmoOD60dZpXb5y0Zl/EUbyFzKStGApThO236mW15rm65RdECupg/yGlnONieRdQoVvXjdhUH27M43q6yTtfpmk9g0pQ3mAp9nuhjObycSr576gveZVlo8573whHGe5oq1rlaUa1wLOwkyBeKu5BOxo5559OMYpqpK1td3jW6TfNyNnNc2D6jONgtQzWkrRlmUFwkSgMb4drnk9T8Zi6mvDbyYDrPKnd3spNVrpVvXWcpMuaenT1hg1XuOzfsR0f5ANaX794ZYjmi+3Qdlqsj4aFi27ZQqaHd6p0j7rBbPHdtKcZK06ANY7zcOmFOwjPeu60YbYJmjibZ7ZbNG2+FvrldS5e5WKkWeDdFbXH7KSEu8l3sDXjvWrXGa5Q9i9cmL8e937ONHL2X17aiyatc3TpvYdNS5nXy4LL5HAUAmkd541Z7hXFPeM0InDNbrHKuy6tb7QPmY0KbOf7JfF77k/uMMTmnDACEzHKeWbnlevGdruVtZJXe94jxygAAPI83R3EcXt2KM/O23LZ4918xb8qNx7eebz/92E2FY4thBQtd87bkjMqycRkAsPbwvlvgmM+Buzmv/fd38ERkzRjbdRtm0eyNoPiu1TsR7A6grd7jxKTKu/8WYwkinue1rf4iqxiCY7zvFs+Zl9EZb0xuqa3bhFqrOPZo1zzYPcpxzrUZ3PYn9HjrVFXPfEzuWbz2/+4V3nw7Ihy0at0YYVr8zllgwbUznDm3xDrXCmOM7O4qPutaJy7VwYZV8/vPrSPtnDknPWbe/2bmSx2nVI47UOZUZYc3tIbiDX/gMtfunI55PdEOc8IxoUzFFosT9oMnExM2hcJ0ExM7Vvz4RI44ItRfrCmFknSI/GFhV/y3JxU6Ik4iZMcJ0pqSmPSYMt8qWkku1sot9FOfk4g4YZhQ/U47EGtKQRBGw3KVyJ3akX5x3Ki2i6tpnZr0HZNE6thobbCitrRGTaJfpgIqarVTlXd01FARcUGUkNaUyQjWEtoVIiJOrCkFYeyZCiHOk/xfAugdK0GUAFojIOpIJGKtUJKOR+SIE2vKiYUaPMdi7zZWxA4hxPXBmnKuSeSIKynEUZ+bJ36fIAyKkNhUFBHWyYIgCMNgpUJF6YgQN25QQpxYU04eZJ44ed+mgrBJ5ACWd3TkGFlTjiDffJu0ppTN4oJwMkqpO5RS1ymlrlVKXbl2bIdS6pNKqVvW/uaFzTKZDiGOyP8lQtz2I3NspPb6Km3nGm6S0VGTRMcqCBSx6yDf4BfmpymsnGn9I4wUyRE3/kREFGo/hDhKOFssmSNusU5ExDXbgGb6awmCIdQGEImIEwRhVFDCgETEjR+1VlGIa8si/8RBCd8ixE0HlRbheCBRqyOnQwhdQZTAJTb1UvnNB01LIuIEoSzfpbW+RGv96LX/vx7
Ap7XW5wP49Nr/h8ZUCHF+R2wHhVWoqDg/TuETeVXEvlQojVJiTzlF+FQU9Qh2sQmbMzBrylOIiIs9F50N0Udulq968QvCEAiJXa4dieYVBGFErFAROpEIA+NGjYyIk0X+SYN830SImwoqVERcTcZ3o2Yzx62xiYijhDiJiBOEMnwfgCvW/n0FgBcO8+RTIcRREXFxKIuq2xEqyi2Ikk0i4qSOCOXZuAAPiD3lpCIRceNPTAjfQR/et7lTyBG32Wcpu0tBGARUnyNCnCAIo4LKESfWlONHtS2L/NMAFRE325b3bRoIm4TjgYjlIycKqbXFFB6xljCSiDgiYk82iApCAQ3gE0qpq5RSr1k7tldrfWjt3/cB2DvMC5oKIY6MbpBop20JGREXpZvkiJMJiFCeiMgT1w9hQBg+PpUjTiLixgoyR1w6GGvKJcJycjOWCBvL+YYsggjDgcoR1/FloUYQhNFACgMixI0dVESc5J+aPFZCiYibVkKxphxLOpts8h+XiLiu5yC11i/pe2kGtw9zZkGYEByl1JUn/XkN8Zknaa0vA/BcAK9TSj3l5B9qrTVWxbqhMTErjwrAHr9B/iyMix1XfSbCHr+Bg50543Pd25w1LgMAR5ZrrHJf65zLKmc55nUlz1TvDxF4AU9wyFLznSGux+s4Ai9B0/ULx9uLLlSreK8O6zqONGvIcp4e/ZjT7zIuc3H9bta56lbEKreclV9gXl+uXM6kk8k07z5+4/jZrHKNanHy34s84+1UyhMLbSpZr9MBapvX16zKOh1U07xp1oz2AACy4itTCpWb38vUvFqtMsNrf2oBvSOslhPHKw98vh2bT3y+fvQc4zIAcN3S6axyUcpbQMm1eR/w+D0HWOfyreIkpVS5vW2k9eJ1Vitt+Hvp6LOsZN+2o90qHDvu1pB1ytXnxbD4Us8eSpDPl3uRdGB+T+b20GOfQbF01HwsYx/h1Uc15DSboWs+vkiqvP635fEaV7VFU07miPNcNFLeuc6tHGOVe9BpR1nlDsbmebBvqfLq/9EOrwPmtP8A8NCZI8Zl9lWWWee6BvtZ5VoetxM2pxLwrJF2V4ttdBnuOdu8b2sv8MbIVsCbp7iuefufRry21a3x2i3TlKftHcXx6kynA1XiFqmENyet+ry65VjD63BmPN79n6/zIux7zWWrhBB3wNqJxbZ5O3nhzH3GZQDg7B3HWeUsxtrYTS3exvaDHm8NyGeMLZYY07bGXLF/mokilBnaZzVe/c8Z76nNDMiJGXMUAMg8XrktB1yb4DHXqRbird81RQwHFvwqvnDkwcbnmvd57ci+Om+85dq8+V7FMW/LzwwXWec6LeR9t5m5Ylt+pl5EPSOO71yENQccieusc3H7qHbFxUxz/Uu3313CyszWY76aZ37/7z4+Z1wGAFvi6O40r1uZz1uTdFd45bjtXWa+lImUubbIxVvilctdRpvMl8HSk/K+0b9a64Nrfx9RSv0X/v/27j1IsuyuD/z33Js331mVVf2a7pme0cxo9BxJSIwENstjDSwCE4jFjxVGIGzWxMZ6d+3dtXfFeiN2/QcOHCa83g1jYhVYBocxrFfYIFgw1mLZgBFCQvKCkDSa0Qwz090z093VlZXv+zz7R9V0d9X9ZVWeX2Vl3lv1/UQoNJ2dp+/NzHvPPff87u93gPcAeNUYc9la+7Ix5jIA9xu5YzgVGXGBUJoyYtnBM0nKcmtMYzFziRlx5EIq/9UUHgKg4qsJ/QEz4opF+j2k382VVEZyuzX/qFYsTSmUuyRaNC/LcmV1M2NYVpeIVqYvlKZkhk7xtEf5mUKWvSufvvAQKs+306ElnaPMiFu5SFjuqDpJEAgZcfEKSlMCwFgoTymVIyY6i4wxLWNM57X/BvCfAPg8gI8B+ODe2z4I4JeWuV+n4u69Osk/mRI3VtMR0mqJ6eNhjHooBOI48U4OpNKUXCOunIpSToJmk4IL0tp+TqzF+jA/YeGyRlyvlX/vxkiXwUHkoiksvj6pBoBRPglORHRMYmBgysBAkQRxgmqyf9yb+B5CLuNROgOhFCwDcadDa5gf441ayrI1tDBSIK42SRCE+bmEpLqaHBcpENecMBBHtOcSgH9pdu+XKwD+mbX2XxljPg3gnxtjfgjACwD+/DJ36lSMwKpSRpzQadLpNzMjjmvE0TFNqkKQN+Igp4yqXCOu8KTAqLS2n4v2JESQ7i/7MakGCIVze5aeELTbYEYcLYGU2S89fEREtCyjehWJ56GS3bu2NuIY1SRBJKz1SssnZUYMm1U+xFFCYgbqmIG406AprBE3YtbqyoVClbXqNEGlSBlxTSkjTlkvkeiUsdY+B+AdwutbAL55+Xu0q/SlKb04g5/sLyia+gZphYPLs8gpI46BOHIwrglPGwnHFRVfVcisilY0eCaZlBEn/W4uNgZSWUq3NYKk97M0JS1DcyoE4hyCyEREC2eMmBXXYVZcYbRH+bWMhop1tmn1dlia8tRqCgFzlqZcPTkjLkUQ5tdzS2qrmUsYMSOOqHRKH4iTsuHius+nvM4oMSNuZiCOT2rS/KQJzwYz4kqpJmTIsjRlsZxEacrjlqWc9X4G4mgZxIw44QERIqJlkgJx6xNeF4tCyogbCRkUVHwsTXl6tYbCGnE8T1cuEpY7qk6KlhGXL2EqBXaJqDhKH4gLpsL6cHVmNpxVUiCOGXG0CLOCvFQ+NWHwzNKUxSL9HsctTSllxElrvh1GCsRxjThaBnGNOI5jiGjF+mJwIJ+FRavRFkqUjRpce6qMxNKUPNdOBbE0ZZvn6aqFUkbcOEEQCxlxK1ojTsqIazEjjqjQSjXz2Ivzg49gIAdYXnuvZ/Kd5FEGU91FLw51X6fNdNl7aeTezq+6fx8AEA6VA4HI/YLUfEBX07haScX08bVwilqcn3i3TYOqSfGei8+ptveNa087t3lX7YZqWw9V8sf+PAKjC0rvZO5P131quqbaVj/RlUe50Xff3lpjqNpW1U8RbOaPocuNHh558PbMdqNI9yTbaM29XaehO2/GkW4id1BtuTdKdH2dH+j6rczK2wuEgM40CO6+f1a7w7y8te7cBgCMsUe/SbC5vrzgzwvjTVU7D7rP5vsZYqG8Rz2J4fvysZCmR/d1XSkjrt6CGc1/7d7xO/l/dzQG0vmOGW/oPk6IN3X9eC1QPiigGJMohlq7dIcIrLJdNMdxclAc68Z2tW3dDbm9Kn+4VpK/qZ7WA/i+xbN3zqu2NYx1Y7vXd26p2gUmfx09ypPrunETdF0ywkz3e7+1ed25zYVKX7Wt7zn/WVW7yLof/9NMN0YYZ7pj60uTy6p2D7e3ndusvU6XSVLxdB3eJHX/Lp8b6s7t6zu6EyDJ3PutYSf/W3fiCewRp1J2QTdRuF7TBR4C373/AYBYcd1o+Lrr7+PrW6p2a8Hs7+QdlXzfVNnMsFnTZS3+2xuvV7WrVnTf/1s2XnVuM05091+DHd39dtR0/73jgXsfecfm733Xp2PYtTm2P1E+qK5olrSVgzRF/wMAfqi7v6xM3NtVlMfx9vTwY6s5zP
eHNypraAXu/eR6VddHjpRjQs11AwD6kfsckNfUHVvae9KXzEbutVovf75FVR/XJrvvbQe6OZkH2z1Vu8ZGfl7jcXMbT26+cmi7rdDtYVQA2OjorhtSZuE8+r77PibK9Wmtsl2wo7zf89yPyUyZJKvp6wBgclF33tTusDrhUUqfESeVppSeXKCzQSo3ub6Tv9Ge1AKWLyUn0vqDDWHNHio+qcQh14grlkgsTam7+X1NV8yIcwsobwvv32BpSloCKQNbWruUiGiZ5CwdXheLoj4SHlpuMZu6jHak7NMxS1OWnrVojfLBm6FQcpCWayrM/3R28oHOVZWlBOT+XOr3iag4Sh+IqwmlKSOWpjyzpGBJt58foHJ9OHIVCsdMjYG4UpLWiGNpymKRfo/qcUtTDvMTg9uupSmF93ONOFoGltgmoiIaCGvEsVxecUgTshMG4kpp0KgjO/AgcXsawU+P96AarVYtTFDJ9meehIGPmPemKxfW879BW8heXOUDvdOmEIgbH++emYhOVukDcVUxEMeL1lkVCpNS3X5+klR6uoXoMNIxUxcCOlR8VWGNuIg3O4USihlxxzvfuoP8QxmuGXH9RiM3CbI2naLCSRA6YdIacWOOZYhoxfrtfCBunRlxhdEYCoG4NrOpy8h6HgZ1oRQsA9+l1hay4UbMhiuEaWO+cXYcrDIQl79nro/5sDhRkZ2CQJwwocpA3Jk1b0Ycs1/IlZgRJ2QoUPFVhYAO+4RikQKjtWNmxHUXkBGXeR52GvkyXMyKo5NWj4SMuCoDcUS0Wn0hI64zZbm8omBpytOlL5anZCCuzMRAXIvB8iKQMuIkKy1NKWbEcY6KqMjKH4ibsDQl3SOVaeruCBlxLOdEjqSMOK4RV05SQIcZccUiZcRJAVQXG8Iaca6BuFltuqORap+I5iVlxE3qnKghotUSM+K4blVhNBiIO1X6zfzDYOsjnm9l1hoL68O1mBFXBJnvzVV2cpXzCFwjjqh8yh+IY2lKuo8ciJPWiOMNCLmZCv2KtEYlFZ8UiGNGXLFIv8fxM+KE0pRNt9KUgFzOcmPMjDg6WQ0hA3vCjDgiWjFxjThmxBVGQ1jPaNLmtaOsmBF3+sgZcQzEFcU85SkLlxHHQBxRoZVm5tEAeKR+J/f6A2k/91q1ndx9b2jdP2J0Xve19NfyA6N5xJmu4x5G7k9Cp5ku9hrGuu9kMnUf6K83dIPJKPXF0pTS5NX0vjKDaxXd9rbStnObz4ZXVNsa2Juqdld83WfLFG0u+kPVtp5sXVe1+/L6Rec2W2P3SXcA8I3FOMifb7Vpcug5NQ512QoV3/0XqFd0QYrMmqPfJBh69ug3HeDtKG/8+7o+clCXB6FVIaCzlbYQT3b7hariu7y0mb8WzSNVfv/9se56kymuAU/H7ucaANQC3TGZph7Gfv7cqUYJ0lTe/zQ++hiR1ohbWEZcMoRtHr1OnE3cf+9QcR0FgHiO72RRPMXnAoCk7d6PAIDRbQ5TxVhmOtBNhngbmispcLEtB3W7Vnh9Dei2x7ja6am2tR26H/8A8FvXH1e1Cyruayk2A91kwsOd/D3DPLqBLoBwI9pwbtP08pNv86gb3XfS8tz75JZyH9fscieGr0WbS9tWbHV9q2fc+7tuVXc89mr5rJl5JIoxQrSR71c74QRJ4/DPa1Pl+DPWja39RHe9iVL337sf6a4bWzu6+5Sve93zM/8uGOb73Z1GHa9rbqm2lVjdXMIzd86r2u3E7uNdzbkGAI22rr+rKq5tofJBmp1G/rq9vhMC4eHHqT/W/W5Zzf27NJHu3DbK5ZYT3VAGSct9nOYr59IOG6c9nG3nXkvXPVzt9PDV6y84byvMdMfWjdG6qp1WO3A/3/qJ7v5Xqx2EiBo+sHP4+9K6d/fzvDpeU20rUc4JW2FOxh9muD4+/PcMU/d7onY1/2DHPF7su4+RAcBq5kkyXf+TtXQdkLmj6xOMYgyUNnT3ltGa7ppYv6X7bKliCJR0dPtYVqXPiAuENeJilqY8s+bNdAuFgB3RYaRjhmvElY+XZgiS/YOYzABxhdeNIhEz4o5ZmrIrlqbUZMTl7/g3hixNSSerLpRCZnY/Ea2amBE3YUZcUTSljDiuP1VafWGdYp5v5dYc8RwtsnnmDVeZETcWjpUG14gjKrRTGYhLGIg7s6R1vMT3cfKKHInZllwjrnSqSf6aEQYVfYoNnYhIWCPuOKUpgyRBZ7r/ycvMGAwa7k9WbgvlLLtDlqakk1UP88e/dF0iIlqmQTv/6DMDA8UhlShjacryYmnK00cKlkvBFVqNsHF05thKA3FNIRA30WWuEdFylD8QN2FGHN0jreMlvq9WmqqsVBBS8FaaGKVik7IYV7nAMskWnRG3LqwPt9OqI/Pch0FSacqNEQNxdLIaU+GJaQbiiGjFBu18YGB9zEBcUTSYbXOqSBlxPN/KTcqIG7d5jhZFOMf84ioDcZOGlBHHQBxRkZ3KQFzUYCDurJrU5hu0MCOOXIVC8JalKctHCuaEQvYVrZb0mxwnI25DyFjrtXWLSsilKRmIo5MlPfgRcixDRCs2szSlPVvrfRRVcyhkxLV47SirnZZQmpKBuFJjRlyxzZcRt7pp9UlDqNo0iXkNJiqw8gfipNKUDMSdWfNnxPEGhNxIZU+ZEVc+UjBnqlwwnU6OGIg7RkacFCjbVgbipHXlusyIoxMmrRHHjDgiWrUk8DE+MI7yrUX7QDloWgFrUZcy4phtU1pcI+70YUZcsRW9NGVW8XLVvjwL1Cd8YJyoqEqTBmBgsVkZ5l5vhflBfrWV3H3vlWDbeVvvaX3FfQcBxHa5X+coy9fkP0ovVU48JvmJx3mMU/dBRC/R7eMz/QswwXxPftiWQae2e+x8eXhRtb3fvvm4c5tbA933+NVXXlK1q3n5QPU8AkW7S9W+alt1TzdIeOv6y85tXq2vqbbVixoI1vLfSWMaYa02uy7/beXvnWXug7nrt7uqbWmflbKZ+5pqtW3dOmzjq7rj2Pey3GuNNH+zE1V98b0uHu64X2uA3WNLY6vXVrXTiDzdzUUY666JWWYw9eVAXJZCXM/P+IcfyRtjISOuow3E6TPiTNM9mOj5umMznirHJIpOIW7pehLlJQpJqDsm/bb7fjbWdGuvTCLd8XWhORJfb8f58W5jPcGF5ghv7bhfDwHgVq2javfCrTeq2mn0rO57fHlbd73fXJO//6O88/x15zZfGF5Wbev6aF3VrqK4zmnGgwCwWdM9nFDzdA9cPN1zH8tHqbIfUY4XppF74Hy9sdy1nzTHCAD0mw00o/3j+bVggOH67O/Y39I9SPCyv6FqB093naoEinPA6LaVbOnGhF/uXhBfr4UxKun+fYkCH18YP4CH1nuqbX39xjOqdk92bqjaaeYSrk27qm09cf62qt1O5L7e8GjsPo8DAP01YU3G6QSoHn7upsu7bYBJdPd7fqRrF63pzjeruASMtnXn6NXXzb5PPDfJjzuaGxGuNrZxJeg5b0szRwgAFxr5udZ5RIp5CwDwFP3kJNVdN4ax7jtZr04Rz
fGgf1Lz4e99HqPt/60uR6bux5g2g9wD4hvhGNud2efU7bH7PNXFlu4Y0Y6ttwfu9wCpck7e1HXj3bij2161797fZVXlzJ0y/Spt6LaXKb6StH62MjhLnxFXmeYHHUm99B+LlKxn5lr/TVp/iOgwcdXHwdhTEGfwkuMFcGi5akIWI9eIKx7reYgq+Ru7qjIrrjuQMuJ0N9M9ISNuY6S7wSCal/Rk63SOp3SJiE6aVC5vfcQsnVVrjfIPcAybuglpKoZ+U1iTkedaqdWF8rHTNiseFEXRM+IAYNIUylOOmRFHVFSlj1hJa8QxEHe2SWt55d4zZwlLoruMQSiUATvOulW0fFwjrjwWWZ5SXCNOmxHXzLfrco04OmHSQwRcI46IikAKDnDdqtVrjYWSd02WvCuzflMqTbnczFlaLCkQN2EgrjDKEYjL9+sNof8nomIofcSqIgXimlwj7iyT1vLKvYeTV6QgrUFYmzIQVyZS4DTkGnGFJGUuawNxckbc4taI2xiNuSg2nShmxBFRUTEjrpha43xG3KjFQFyZ7UiBOAa9S63BjLhCkx7EPiiurToQx4w4ojIpfyBOKE0Z1xmIO8vmCcQxI440pOyD+pSDnDKRShuyVG0xiRlxygxUMSNOGYgLqwEmB4K3QZqhPc1PeBEtivTQxzwVAIiITpqcEccsnVVrj/IZESxNWW5iRhzPtVKrjRiIK7JSZMQ1pEAcM+KIiqr8gbgJ14ij/aSspYM4eUUaUgCXGXHlUpcy4gI+vFFEiyxN2R3mnxbeVpamBIDtVr6tFOwjWgST2dwi7MB8T+kSEZ00OTjALJ1VEzPiGIgrtVG9itTsX7S8EcXqNZRp9ZgRV2xlCMSNhUxnZsQRFdeJRqyMMR8xxtw0xnz+vtc2jTEfN8Y8s/f/G8fZRmUqlKZscFL1LJtnzZR5suaIDpICuLWQg5wyqQqBuIgZcYW0yNKUG0Jpyl47P3E4L6msZXfEQBydjKoUhKv6sJ4R3k1EtFwsTVlM0hpxLE1ZcsYw8H2K+HGGINyfWJD6BhHnMwtjnkpayYoDcVOWpiQqlZOeffxpAP8AwD+577UPAfgNa+2PGWM+tPfn//Gof8gCiG1+dwMhI25Sq959743YPc43yPLlNeZRN7oJwktBT9Wu64+c27Q8XeksbbtbSce5TZjpDksPu+vzzHOxjGr+3fd/8dYl1faiyH0/k1h3kf7M9YdV7aZj3c1Wq+Ne4sJa3YSg5+XP4Xm84dwt5zbnau7nzP2SZv7ZhXPpCN2qfPNjjG7NqMnLbec2trLc9akqO+7HsvLUhjfVPTMymeYHpd44/z2N/Nq+977hAfdj62pz27kNANR83XXji+MrqnZBy71MxVpTV/JG2yfc6e8GuaSMuGqYiEuxZcnhx4i0RtydRhtQnqO9en6duHO3pvDOH35e+KH7SRBfdm4CALCp7ryp7LjvY/2mMiCkbJa0ddfSieIJ4yTWdVzBncXdlNeEQNz0vjI0r6+/qvp3n2o9p2r32JPufSQA/Padx53b7ES6gPnNgft1FACSVPe7henyHuh44eVzqnbGW944oVbXTQBd3eip2vWn7lk+g21dVrT2e6zU3K/300j34KB2ydJaVTcm6beE0pSTCXDId2WVXaRNlReOqW6DtUvuY6As0+1j7CvHJCO5n/S28/dYd6ot9EYNPD3U3f8GnfxD0PO4GPRV7aqKYzJVPmt+e6q7brQC97H11Qu6+4abgzYGndru2sT3eSDYxnRj9jE+uuY+JwMAlcHyKk1p+4TKWHe+JS338617fqja1qz5rXY/379MWwFCGwAW+GT/9c7bavi6koSvTnTHyLm6bn6lU3GfX3yksaXa1o2wq2rXrYzRXDv6+2w1Q1yu7QAAHm7cUW2r6el+t5tRB8F6vl9+MOnh0Zbu+5qlWVlucE81BlKm+cSKe3QAh45zDpMqpmnrt3SdpLZvNbrLPTLFvGRleLYeLj3RK6u19jcBHOyJ3gfgZ/b++2cAfPdxtlGZCBlxTT5BcpbNt0YcM+LIXSSVppywFEiZSBlxUsCHVi9aaGnKfCDuOKUpe0Jpyu74eIF+olnqk/yNL8cxRFQUfSEjbm3EdatWrS2Uphw2WJqy7KTAd4fnWynVhbKUmofG6OREc5SmTGqrnX8Om8I985hzVERFtYrF1C5Za1/e++9XAOgexwLgxRn8ZH+0NfOANDhb0VTab57137hGHGlwjbjyq0fChDZLUxbSwtaIsxZdoUSWVF5yXtvNfEbcwaeTiRZFyojjOIaIiqLfFDLiWCpv5doTIRDHNeJKbyBloDIQV0pSII7rwxXLPIG4uLqKafV7wlb+mKmPWJqSqKhW2mNYay2AmXmLxpgfNsZ8xhjzmSzNv60S5sstJHUfMAzEnWX3l2uahRNYpBEJx02VgbhSkQI5zIgrprCa78s1i9F3JiGCdP94YVINxH9/XmJG3IgZcXQypIy46RwTA0REy8A14oqpLQRnGIgrPzEQN2QgrowaUiBOCKrQ6syzXh8z4ojIxSoCca8aYy4DwN7/35z1Rmvth621T1lrn/L8fHBNLEvZWO3TCLR607ky4jjAIXehMPHJjLhyEQNxzIgrpKmUESeUFj2KWJbyGNlwALDdEjLixsyIo5MhZ8RxHENExSBnxDEwsGrtSX7NIZamLD+Wpjw9pEAcS1MWy1ylKaurDcRJwdvaiHNUREW1iqjVxwB8cO+/Pwjgl7T/UCAE4uI614c76+aZnJoKJQaJjsLSlOXHjLjykAKkmtKUG0IgrnfMQFyvKWXEMRBHJ4MZcURUZMyIK6aOEAxlRlz59dssTXlaNIb5YDkDccUyX0bciktTChlxLE1JVFwn2mMYY34OwCcBvNEYc80Y80MAfgzAtxpjngHwLXt/VqlMZ5SmpDPtqMmpzABxwOOE3LE0ZflJGVVhwBueIlrUGnFSIG67nZ80dLEtlKbcYGlKOiFcI46IiqzfzF9TmRG3eq2xtEZcPohD5SKVpmRGXDnJa8RVV7AnNMt8a8StuDRli6UpicrkRO/irbXfO+Ovvtn138qsh4/ffvO+197wyiv4AD6177WtoLXvfb2p+2TbKNJNym5vt1XtGq38IHkenjdzeb2ZWrX8UzfzyKxu3b1e3z3roN063kByy+RLht1vWgtwa3zvtxpd7+g2pPhKrO/+mwHAZKw8VRPd7zaM3X+3SkN3sTdePqA+j88+/7Bzm0qQz6Kdx+b67gT7Dazn/m68U8WzvfNiuyTRDcqqd9yfkciUh4gX646RYOjeRruP1R3dMyPTdv6p32CUP97GWR3x4N57166690GXgr5zGwBIMt1n+7o3P6tq5xn3861d0V03bk5118RXX+4CACYQfr9hhngk3KAechh3d/JP5W+3WrCJB29D99nunM9PgnSnI2Ttw/uYrOF+vpmBbkziT3THluIQQaqd19NdEuGPdJ8t3HIfE/pj3bYqY13f+sL2Ru61d2zlj/ktr3X3vc9vXFBt60+0eqp2TzWfU7W7Emw7t4ms7jq6kx4+Fpzl1XhN1e58
MHBus53o9rH2Bt14K1RchDerugcMAqMbb4WZrr97Yu2Wc5vRA7rJzlGia3dr4n5NjFLd8b891GV991/R3RN5oUVmDDx7r1NvhREqdzwkvvwZtPdECHV9ctDTfZfeA+77+cQF9+MRAKoP6M6bTkWeS7iY5Afrjz+6hepDFoNElxn3/9x4q6pdqhzvvu3cy85tap6uj3yp11W1G95U9OXa4z8zuG3z16n67RTDW7P3w1POCWhoxpEAULujaxfrbjdgPffvZDDUPcj33FCeK/iaO/nx1MvVtbvv74fuA+w3dmeu/HOoy80dVbtpqrtuTxTttGOLS1XdffqLk014c8xVvYo13Ax3z8vWjP74KGlFnyMTNqXSlIdnxD3QcB+3Xht3ndsAQKwcy3Sb7pn1qXLeOrqpGzdpe1bNcLd+W7etxpauUx4+pDsmvYr7txKcseeZS72gmlQSTiodR2fL5IjSlFOuq0JKk3r+2GlMdRP5tBq1JH/dmDIjrpAWlRF3EmvE9cSMOJampJPRmApPTHMsQ0QFYT0P/Ya0ThzLU65SUyp7J6wlROXSbwilYHmulVJjnD9Hxy1mxBVJVvEQB4dPm686I24qZMTVmRFHVFjlDsRx8XoSHDU5xckr0pL6l7rQD1FxcY248lhYIE4IkEmBNBdSIG+dgTg6IfUwH4ib1DlRQ0TFIQUH1iYMDqyStEYQA3HlJ63JyKB3ObWEYPmoxXUciyY8ojxlXFt1IE7KiOMcFVFRlToQJy1ez4w4krKW9v09A3GkNBXW5KkJE6RUXPVIuG4wEFdI0tp9i1sj7niBuH6zgczsL7uwNglRSXQlU4gO0wjzEzXS9YiIaFV2hHXimKWzQtaiIQTipAlbKhcx+3TCNeLKqDXKlzFkRlzxREfMMa86Iy5sSmvEcY6KqKhKHYgTM+IYiDvzjs6I4zFCOtKxJWUqUHHJGXGclCgiOSPO/Xw7iUBc5nnipKOUfUd0XA0pI67GiRoiKg5mxBVLbZLAy/avQxbVfKTBaieM6fj6wviTGXHl1BwJGXFtju+K5siMuBUH4uK6j+zAmodBlMGLlQs2EtGJKncgTlgjbnpENhSdfixNSSdFXiOOgbgyqSXC08HMiCukk1wjrteevaD9vKRgHteJo5MgXWeOyv4nIlomZsQVi5QNx7KUpwPPtdODpSnLITxkzJ36BlllxdPqxohZcXVmxREVUskDcUKJMWY7nXlHTU6FVd6EkI6cEcf622XCjLjyWFQgTgqOHTcjDpDXmZOCfkTHJa4Rx4eKiKhAmBFXLPL6cMy0OQ3E0pTjKWCt8G4qsuY4H4hjacriOSwjbtXZcK+ZtoT7Zq4TR1RIpY5azVOaMrUm956j3Lne1e2Qpxv8jOP8jcs8atfdJ2FubujSk21Tt+6NGbtfmMa+bh+zbPe33soOz3QYVmoYju89aWQbyjV9FL+3V1F+trHuVDWZ+/EPANVOvl75UTpN9zYAsDPM30zMw7vpPkiNO7rf+vZeP/JKtJb7u8o4xe1eW2yX9nQDadN1P060v7V/R9cuU8wBK7pjAEBFGdswYf5ZEylwGtnqvvd2q+4TR081n3NuAwDf1tZNUgVG15f4cO+3rqfy8X2U3xi8VdXOe+PuPj700nbu7y7Xe3j3G5/PvZ4dcnBdCvu51x586x14VzMMIl3/k1gP4QUfOPCzv6l1A1uPzg7y9Sbu29sZ6IKG2v4nSxUnaqZ7ritp6cZNZolzTanyuiF0P3OZ3MmPYSr9/Ae+k3TQ33vvb689rtrWONUdIw/W8ufmPFqe+zihbnRP8z5SvbXUdhpZVXeQvL3x0oL3ZLbA6CZxYqsbt95KOqp2Hd99faTY6ibPRpkuW2E7cc/Efmm6odrWZ5OrqnZxT/f928BiUBPKNQ+m8CL5mpI2dOOYykD3u3nK+chUcX1LlMfWuuI4BoAr9V7utYeiO7nXkjXv7ntrynN7mugeAvnKnXOqdtuR+zzJA/WBaltXuz1Vu+cT9987flFfmSG2dYS+j1p6b3xSTVO07mSYVhcbxAmG7mNCo5xaiZVfiVWOtzRdeaqck2lW8gE3AGgLa8Rl6+bu+9++ft15W+NMdwyEke5+o+rpfvCHG/k+6iip8sd+ZnRR1e5SbfceMmvOPg+SmoducG+y4mJV1/8MUt09abuyewzFrXw/tBGOEVfkY3YN7tcbzdw6ALww3FS1q/nu16nxSPc92pbumpjWdMdkcMu9L4l0Q0JEG7p91M7d1XrubRJdSKS0Sp4RxzXiKG9SP3zwMWFGHClJGQjS2j1UXNIaY1LmFa1eJDxhWI3cb/Y6O/kbjcG6bpB+v/6aUBqoz6f/afEaUX4Ch2MZIiqSHWbEFYqUCREKGRNUQsZgwPPtVGgKpSmZEVc80SEZcUlB1t2USlMyI46omEodiKuLpSk5MXHWHTU5xTXiSGtSyw+MG6H8hBsVUy0R1hZlacpCkkp9BI6BuEqconmgPn5mFnOT218TSgP1dU+zEx2mKVxnpOsREdGqMDBQLLWhEIhrMxB3WrAUbPl5aYbGRFi7vMn70qKJGrODbYkyI2rRIpamJCqNYvQaSlJG3PSQpxXobDhqjTiuq0JaYkZcxIy4MpHXiON1o4jiav53cQ3Etfv5ki+jTg2Zf/zhz866MAnCjDg6AXXhOsOMOCIqEmbEFUt9KEzwt3ndOC2kdeLWeb6VSkNYx3HcDBZyj0KLddgacUlB1oiTMp4ZiCMqplL38uIacTVOqJ51se8j8WYf2lNOXpGSNPFZZ2nKUqlLpSkr7BOKaBGlKcWylEImmwYz4mhZpIy4MTPiiKhAmKFTLMyIO914vpVfYyxUO2BZykKKDln+SKrgsgphKz+fUR1znoqoiModiBPXiOOE6plnzKHlJ5kRR1pRUEFm9q9aWktS+KlyVWpaKi/LUD3wW2XGIKoUYwBN+8mlKd2e7OsIgbFFrA8HyGvEMSOOTkKTGXFEVHA7TWHd1DGviasirxHH68ZpsdNo5l5bm/BhsDJpcX240oiE9ddeU5TSlMyIIyqPYvQaSsyIo1nGhwTbmBFHasaIx1aDWXGlUBXWhwsrFeBAcJWKIaoJgbjQNSMuX5pysF5T79P9mBFHy1LnGnFEVHBihs6UgbhVkUtTcp7ktBgIpSmZEVcuTSkQ1+bYrojCQ9aIK0pGHNeIIyqP0ozGMhi8sL2x7zUzsLn3vRBv7HvfZOJ+MfPX8hfFuVjdZG6W6NpF5zLnNkFPF3uNau7bAgCsuwcoWo38xOk8ouTe4XxYRlzc8lGr3bsoXegOVdt7fP22c5tHm1uqbWmNU91gzjP5c+soN8OOalufia+q2o0fcD9OtE8e+P69439aC9Ce7u8jWmmIiZ//rtOq7rzxuu6DpnSkCzBP67p99Ifu36bJdH2dNsxZObCPrXH+s4aVSu59Gi2ju25c8HXff2rdz1EAmCqaaT9bYI6XKSqtETerNGUnkPuDC6N8/z7tBnffnymv2y+P1nCzme/zOjtTjOPZ56Kn2JxVnjd+V/e7pQP3viRpqzYFqxxbWOXvBk2
fHOn6By/U7ePaufwx24rzv2VwOUa3ufveMNUN53/92ptU7bRaVffePPB1/chjHd14K/B02+vH7pm23UA3adrwlfcpCtp+vKncx9TqzrdnJpec2wxT3UMZSaabdIsVn+1O2FJtazTVjf/Ttq5PNrFBryUE4sYTzPrYJtb91mlDt49ZoOuTTeq+n0+/clG1rUyxLQB49yP5a8A3bedfeynYxDPD3X175/qLqm19w7lnVO2e2nhB1e527D7AaPu6uYSv6ur65Irnfkx+WTeMR6Wyu63RA/l+aLPeQ/3RgdhuMtb1CZqHfprXdH1k0tZ9KXFHOU+loZgjAeS5ldY4f5yO29V979UcyzVPF3w5H+jmxLQGqfu46QvDy6ptab+T2O4ey5P67PMgqlbuvg8APtfXzW01fN2MxyTdvW97S+VG7u+GvRqeHsrjIw/ux3KkHP8MI914K0zc72/SsTLEESvHCMr79Ew1dae8cCjp9hGwip+gMjpbD8aXOiNOXLyepSkJhx8HoTC5SzQvqRwYM+LKgevDlYuYEee4RlxLWCNuuKDSlDud/L+zPmBGHC2YteJapIc9cEREtGwz16xSPjhEx9McCZnUTV43TotBS8iIG3EMWiZiRhzLxxZS1Ch+aUqpf29wjTiiQipGr6EkTkyw7CDh8AkqrhFHxyE9ESX1RVQ8tUQIxAUMzBeVvEacWyCuLZSmHC6oNOWOsEbcep+TjrRY1TiFn+0/pqKKj9Qv9RCeiE6ZKAh2y33fJ8gyNKPlZXDSPY1RfszLsnenR7+dD8R1GIgrFekcnXCNuEI6NBAXFKM0pRSIqzMQR1RIpb6Ll7JQGGQh4PDjgE+R03FIGXFNYf0eKp5aLK0Rx/6gqCKpNKWwNuxhTjIQF9YCTA+sS1tJM7TG7A9ocZgNR0RlsdOckRVHSydmxHGS/9ToCxlxDMSVC9eIK49D14grSEbclBlxRKVRjF5DiZMTNMthxwGPEToO6fhhRlw5VMWMOPYHRZUE+SFKkGTw0vnXgjjJQBwA7HSESccBJx1pcRpTYTKVZdiJqID6dQbiiqIhBuJ47TgtWJqy/KRg+ZjB8kI6LCNOquCyCpNm/tiRrgNEtHqlDsQ1pgzEkeywjDhmTdJxSMdPQ1ivkopHXiOOpSkLyxiEws1NJZ6/POWJB+JmlackWpC6kAXKsS4RFZGUEbc+5jVxFZpSaUpO8p8aYiBuyEBcmTAjrjzC+mFrxBUlEMeMOKKyKM0MpGcyvOPS9Xt/TjJUk/2TcZkB3nT1VcCYu69NU/fJimGsm6RLs+XGNY1xX4em6rutr/MaD7o1bzZqY+c2l2s7qm3dijp3/7uzOfvpjzc8dAvNK/e+h6/qvKja3rsbzzm3ueDlJ4Xn4Zuj3yOZWl3D2Lofy7fSlmpb39g9r2r3xckV5za3w7ZqW69O7x1b/lr+XHistoXrl27lXn+x1lVtrx64ld8DgLGwdt084lg3eIyMop9UdpFmomvoT/e3q6ZCacqggoOH++3Q/Vj+N6M3O7cBgGGav5GexzPDi6p2Ueb+e79r/SXVtmKrO7a+cPOBu/89DQLUDqwL9/z187lMtCyT+zp/K3/N++T0UTx9bXcbnqe7tkVPrwEAbps1vAE39/2d9x9q2BrLv0/SmT+b7662e38AQHnVBvyB++/mRbprjb+lvHlVfrjwvPsYyKS6z1bb1rV7pLu978+PvbqVe0/a8va976FmT7Wt54bnVO1e7q+p2g1D9+vURNEGAG5sr6vapYnuenNxY+DcZhzpAqr9QVPVzq+4H//xzXxgZR7BRV3wZaPjft8AALd77uO7tK+c8AwU/TgAxIpjS3Gvdywt5fUm2v1s0rpV7WyErJk/9sxU1/+bGdf7o7Su685t85j7d1JTjOMBYGtbd5/y+9eu5l6rDvL78MmdR9FPd8/pcaI7/l/Xzl+T5tH2dffAGxX3PmGY6uZynhnoxtZ1332y++L6ULWt3mT3HBsKAZu18RSeJ/dP2UD5AE/FvQ+K27p+y4t157ZJlGPQ0L2dPa87jqW5tNYo/29N2sG+9/7B4EHnbb067hz9JkEr0GVPVYzymqigmVsEgIqnmwN9cbQJAEgx+1jZMq277wN25601Wr7u+++Fu336rSB//aiOkrt/f1Cz4r497VxyZUa/dBTN2RZ0dOdoPNH1kTZU3ssqxneJbroVnjIeq+kjAaju02t3ljzeXbHSZsRVw3wnENUr+4JwdHaF9dkdYnTIEy1ER5GeiKo5rltFq1FLhMwSlqYsNHGdOGGtv1m6QpnInlBOUqvXzI+Iu2PdTSKRpCaUPg5rHMcQUfEwI64YTJahJZQ1HjWYbXNa9Nv5c41rxJVLQ8halcoL0upNDykJL92rrsJIKk3JjDiiQipvIG4qZDYwwEJ7osMuljxO6BikCdCaUCaXiqcmrRFXYSCuyMJAON+iOQNx1mJ9KATi2rpsEsl2M/9vdUejhf37RHVhvHvYhAAR0ar0hUDc2pjBgWVrTSN4dv/T5aNaFalfjBJqdHyDVj7rrzOaAvZsZRWUGUtTlse0pGvENSdcI46oiEobiJMyUCI+IUx7okMy4hiwpeNgRlx5cY248hEDcXNmxLXHIYJ0fymMSbWCcIHra0kZcRvMiKMFqk+EfovjGCIqIDkjjtfEZWtPhPVxm4tbH5dWLw4qmBzIxPEzy4n3EmmOGIgri/DQjLhiBOLGQiCuPokZnCcqoPIG4iZCII4TE7TnsEmqw4J0REeRMhHqzIgrBak0pRTooeIQS1POmRHXHeQn/3qdxWXDAUBPyogbMyOOFkd60GPK8S4RFVC/mV8jjhlxy9ceMxB3FgxawvnG8pSlIWXETVoMxBVREvhIfHnqPA6KMbeY+R7CA0FBP7NiZQ0iWq3SBuLE0pSHpAzT2RLNOBYyzyCtlPawpwIQM+I4wCmFmpQRxzXiCu04GXFdqSzlAteHA4BtrhFHJ6wmZcQ12G8RUfH0G1wjrgjak3wwZthgIO606bcZiCuzBjPiSmVWecq4IGvEAXJ5ygazZIkKp7QRCWnim6V66DWzsiOjug8Ys+S9odNEKmvHQFw5iBlxXCOu0ELh5mbeNeKWkhHXyv97G8yIowWSMuKktUqJiFZNKk25xkDc0okZcQzEnTpSRlyHgbhysBbNUf5Bq3GL96VFNas8ZVFKUwJyecrGmIE4oqIpzZ28B4uvWX/+7p8f8bdy72m2o33v0cqsLj4ZGN1kfN3TlbWrG/d22m1pP5sP95rEo0x3o/A588jd/6535M+Z1H1cqvX3vab5HgHgVtpxbnMj2VBtq5fqJo8jqzvFM+serNysDFXb2vR17d7ZfMG5za3qmmpbn/eu3P3vxlp+MLOZjvBgs5d7/dWh+zECANPY/XdLEl2/FY11A35/4D7ozOq6GuVepAueV8b72zXH+XM9tkHufV+6fdF5Wy/2def2Vq+tapf0dU9Mrj0wcG4zTnTbSjPdMTkZ3dvexBOOz8H+9wBAFuePx9bN/HVrq97GpH/fxEWiO7a8xu6xvLWR75vXp2OkDflYD3
ru34l3S3dNTJq6883Pz98d6eA5NDflsgXTC9nRb5IoDkl/qPtsiTLmu1bdP2m9HuUnsW1r//uebF1Tbeuxxi1Vuz9qXDn6TYJboXt/5ykPkhd6uj45C3S/t2YvjdF9tlZLN9E6Dd2v98FFXRCl1VB0JAB8T3du+757u+r55WYvVyqpc5t6oLv/urOTz9aeRzrR3TeY+u5n63fz44X1cHL37/e1aSofYrutuyYOH3X//gHgTes7zm02a7pj61lln3BwvHUpy4/1JmsB1u7rO56+6T7WBYA/fFHX/0vjtHk88tBt5zZrNV0f+ZXb51TtphP3cXIWKSfv43vXqO1K/ppauWYwOJ9/3ZvqxuS1O+7tlNNG8HSnKLxQ99mM4nKTPK7byYP3UvVxBC/bf76HVR8D0wDu+/52onyw9SjDUNdH1nzdD9eu6a73mvkm7ZhwELt/j8D+739cq+Ic8g9b9rzGvvdVlOOYKNNdf5uVe3NTYTP/b2yGY/Qq+THBTuReJSZOdf1WraI7tgZj92PZ85Q3l5lyTkDZt1Z33NtlgfLefqq8t1He3mv68kQ3bC2t0mbEVab5Xzfm2l+0Z9axwPXh6LiksqdVYc1KKp6qmBFXmudRzqTjlabMT4Rtt09+jbiNETPiaHHEjDhWgCCiAtppCaUpR8yIW7aWkAEx4hpxp460JiNLwZZDk2UpS2fW+szSeuarMmnmH7hqMiOOqHBKG4gLJvlAXFIv7cehBYsacsCNwVo6LmkCtCo8GEDFI5WmnHKNuEKT1vCbNxC3IQTipFKSx7HdEtaIG3GNOFqcmvCgB9eII6Ii6gulKTtjlspbtpZQmpKBuNNHOt/WJgzElUFLCMSNWjxHi2zSkAOlRSpNKa4RJ1QEIqLVKm3kqjLJp/zGzeJ0grRaswJu8YwAHdG8pKzKKteIK4Vakh+IRlwjrtDkjLj5biikQNyiM+L6jTrSA+uOdsIQgRD0JdKQri/M7ieiImJGXDG0R8IacQzEnTpck7G8mkMhI67FjLgim8xcI67YGXFcI46oeEobiAuEDBRmxNFr4hmp48yIo+OKmBFXWlIAh6Upiy0Ubm5q0ZylKYXMtEUH4qzniRMh62NmxdFi1IRA3KwF44mIVmnYyAd7OtMQfspx8jIxI+5skEpTrjEDtRSkcoEMxBXbdGYgrjjzi2MhI67OjDiiwilt5ErKiEsYZKE9s0pTSkEUIhfSMSRNlFLxSKUpGYgrNql06LFKU7YXvxJwTyhPucHylLQgzIgjorLIPE8MDnSEwBCdnLYYiOMk/2nTb7A0ZVk1h/lzlIG4YpsViIuD4ozJ5Yw4BuKIiqa8gTgpI65R2o9DCzarBCUz4ui45Iw4BuLKQCpNyTXiik0uTakPxC06Iw4Atpv5f3NjNFr4duhsktaIm3KNOCIqKDlLh8GBZZIy4oZcf+rUkc41loItB2mNuHGbgbgim50RV5yHelmakqgcitNrHCGDh9/afuLun1+/fTP3nqftA/veAwCdivsTeInVBfQavu5pg05FV0Jgs+I+0VfzlvtERKb4LlOYo98kGKb3bjCiqrzdSTXY9z4A+Pidt6i2tz11n9DdCfMD5nncvLmuamcj3bHst9wDS49c2lJt61xdN2FdV5xvNU9XGuf+PmFYzd/IVqcpoizfnbaquoHPdr/r3Ma+qju2gonufAuGunYqVtfs4M9dD/PHdZIEqByI14yf6Tpva9jIZ2nPwwa6dt5Ed24Pn3PvS56+qDu26g3d8Z9N7p1LU5O/Ka1Osn3vAYDqzfz51+3nJyOGozVUb9y7SfES3XFsvXsH5U4tnxF37vYEwcX8v+1F7tuzypGar0xAqPbc99Eoz9FgoGsYr+l+N3/q3q62rTxGlF3kNN1/E12d5K9bg2o99z6NK8G2qt35bl/VbmrdJ5liq3uA6uX1rqrdq+Gaqt2Ves+5jae8uI2z0ztZlylPnPbl5WVc+UZ33dbYSfIZL/P4TPVhVbuXbm6q2nXa9663w04NuL3/7x/0trHT3f9ZtL91P9T1CWaqa9etugc23tJ+WbWtt3Wuq9odPE4eTvN9+xuu3gQevDd2fHZ0QbUt7bXn5qitarc1cr/fDnzd/d5aUzcnk6buY3KvrpuTmdy+933sNPLfzdpkKg5ANOMfAEhr7tcpqy0+oR5L6j5bsvhn82aq+vvvQaUSotN2kHvfRs29woZV9q1xpusj+5HuPlFzDYhS3U1RvaKcp63eG1ukLXl/g1a6731VT/eAdt3XtWvdN9/tr+VPonPRGBfqQ9W/vWobHffjv6bs/3dqumOkF+vuGyLFvaz2flvTjwOAp4zhepp5QuVnK6vSppBVhQnVkGUHaU9cm1WakhlxdDzMiCsvKSMuYmnKQpMy4urRfAPljXE+yN9rLr40pTQR0p2wNCUthlyakv0WERXToC2Uphxx3aplqg+FChAtZlKfNtIaxcw+LYfmkBlxZTOrGkVcoIw4qZ+vjViakqhoShuIkxevL04nSKtlfQ+REIyLeYzQMYUNoVSeUDqMikcKxIUVTkwUWahcIy5IErTD/RkSmTEY1HVPbR6mJ5Sm7ApBQCKN+lSYUOVYhogKatCSAnFcI26Z6iOhpHGb493Tpi8F4iYMepeBVC6Qa8QVWzizNGVxHvSfCqUp61wjjqhwGIijU0taD44ZcXRcSeAh8/anW1eSDH6yvFJFpFNLhesGA3GFJq4RJwRUD1oXMtJ26g1k3uKHPduNfJZdd8yMOFoMKSOO410iKqq+EIhbGzJLZ5lqQkZc2OZ147TpN7geY1mJGXEMxBXarIfg4kIF4oRKMmM+ME5UNKUNxImlemocYNI9UtAtErKZiJwYg1A4tgKWpyy8aiIF4tgnFJn0+8yTEScFwqTMtUWQ/t2NCTPiaDHqYiCODxAQUTHJpSmZEbdMLE15NgyEQNz6ZAov48OhRdccsTRl2UilKaOqDxjlotAnQOrnGyxNSVQ4pQ3EMSOOjiIF3aQsOSJXUtC/JqxbScVSZ2nK0hEz4uYIxC1rfbhZ/y4z4mhRmBFHRGXCjLjVMqlFbZLmXo9avG6cNqnvY1Cv5V5vszxl4TEjrnymwkNwRSpLCcwoTclAHJ1hxpj3GmOeNsY8a4z50Kr35zWlHZGJgbgZC2jS2RQJE1XSa0SuwkYF2N7/2jf+4pcx7O6ffLg91U3894b5mv9HsX1d/+fFuqe4/GU+3Gx1zcyBdmvTfHAk8tknFJm0Rtxjr9zCB/7tJ/e9VhnsvxF64ys3cu2WmRH3+puv4Ht/73dyrxtNvF75yJRVtqssMYboK+eKwlcXux+H0X4fym4LD9h7H87LLIIDZY9T3yAJSvscHRGdclJG3Du+dA1/7ld/f99r2j5yOswHHuZhlOPdR5+/6dzmcn1HtS0PukymSXZvAj8I80G4aasC6xUna4MWp99ooDPdf1P2A//uk+i19o9N/bFu3GAUh6R2/KmlHUtmmlvAF3Ube7C7v0+4dKOfew8z4opNegguDooViAuFQNz67TG+8aNP514fxrpr6TLtRO5zYhVPe
R2NdHNpk75u/XlvqugotQMnJU+ZZ+Ap5gm9/NAl51MPP4E/3rw4979pjPEB/ASAbwVwDcCnjTEfs9Z+wX0PF6s0M5BZZvDp5x65+2c7yA8mP3PrYTwXXNj/omLMaXd0F0Fbm+PoERsucWDs685eM9ZdZEzi/tmaj+QHJvNYa+wfGG35LVw9EC15OrqIT996eN9rt3pt1fbigeLipfz+ESpHtJ5ue+nYvWt4/kuXVdt6XtUKsBXFZ1N+/83N/TOxO34DFzHc99qf+fDnVP82rdZUCPQEI/d+K0l152iyqRss2kDZl2huqGPdZxsPdYHo+yfLIuR/nzdffwU/+rO/6Pzvbm00EZ3bf52u3tZd2+6b88LWev5zPrp1G3/rl39B9W/TGff/Hv7X01qA66Puvtf+dfYW1aa8g08rzOlcTVd6ta14gqOmvAsM5rmjE1yq6cagGr5mhhNARznr6Cnu4DPNjRSAVDkTq/1OAqO8B1PQfrbYul9vtJ/rQmN49JsE/TXdxNz29r17qVe8tdzfv/vzL+Ldn39R9W/T8YWtSq6//6q1a6p/S9snvNJcV7Ubpe7zMp3KcjPCel33yWLtJPgLtY19fx6tB7mHQ//GL/266t+m1Ro0akiy/deXKw33MckDjcGidmkuYaqbUq4oxmmZ8vqbKNu1/HuZi/W1/Hg0rXm4UNt/vY2V24pUkWHg1vTe9Teu5Le9th3iA3/n91T/NlGRfOg7vxdfuTx/IA7AewA8a619DgCMMT8P4H0AVh6IK+0jtfVIqH1eZUYc3SMtqCqllBO5mtR4HJ0Woc/fssikjDitg08HL+zfPaFMOyLJhOMYIiqwftM9KEEna9rmdeO0GgqlYKmcWJqy2EJp2ZuClaacCBlxRGfYgwBeuu/P1/ZeW7nSBuIaUb6uMifH6X7XrmwIr3WXvyN06ly/2F31LtAC3G51EPvFGkDTftc38/241vMXzy/s37rfVruNQa345T3odHj5ki6rgIhoGV64uLnqXaADth/kA0On1UuXFzdOptUZNwP0uwyqFtmtK20k/v6M4JsP5TPAVymsV9Db4MMwdKZUjDGfue9/P7zqHZpHaQNxzTCfETdhRhzd55ff+3aM73ty/LNvv4pnHnNKZSUS/fy3PYVISP2ncvmnX/31gOGaGUV2fXMDH3+bruze/W5srOPX3vm2BexRXlyp4J/+ya87kX+b6KCPfte7Vr0LREQzfeXKBfzm255Y9W7QntQ3+N33P7bq3aAT8gvvfSem1dKsNkMz/Op/+iTSCh8OLbLReh2f/PbH7/458Q0+8WfeuMI9EhiDX/mzJ3O/S1RQibX2qfv+9+H7/u46gKv3/fmhvddWrpxXbWvxf/7pr0cjilEPIzSiGM0wRhiU8+PQyfjy6y/hB37yB/Et/+5L6Hfq+Nh7385Jd1qI33vbo/gLf/uH8C2f+iK6w8nM96WJcv0pxZpc/lR3bGsX1FYu46LbVqr7bLOWVokqFfzew6/HJ17/1mPsFS3LX/lL34/v/vRn8eRL1+BZeY2jw47jl85t4hff805srenWA53Hj7/3O/D5Bx/Cu59/HkGqXNl4Bu25ZrVDIsWSRJ5iPVgAUCwZBgBIlQmImu/SnyrXWm3ovpPq5bH4+rhZxe+8+zF87h0Pi39PRFQIxuAv/7UP4Ht++3N484svz7xue77u4pZGyoubcmndza77enutIF+5Zx7aNQErMy5uk06AL/zHl3Ht7cxSPK3+8E0P4oM//kF8y7//Erp9efwAANORcuCkOG/U923Kc3SJS4Qibeh28nxXXustCXx88e2X8TvfxGB5GXzkf/46fPGpy9h8dYQ/+por+OM3n0y1leP46Pe/Cy8+uoknP3cDQTT7ntRTnKba9QCtVc7BKjqFg+sszivLdPsYa+f7pu7fpfprVN6nK5fphhefzJz7C5sXXJt8GsATxphHsRuAez+Av7Do/dIwdsbguGi8WtU+8pH/1b2h4hiwO7r6zLamHAVozygNX/d7m7Gug9Gc9M1H3BelBYC1hm5h5ls93eRsPFAMaJXfP0JltMRTbk+xOaPdRyVbUXw25fff3Jx9Y3OYcKrrS9KBe3Zv9ZbuHLXKRGKjuTArD0c/Wu4kf6Ko4JPUdR8u2dSNcEykPN8U8162oxyFhcrrhmLwpuoPAFRvKwfP2mUcFLupHQRrJwu80P37r0x052jtjqoZQuWcoqbfatzWfY+TC7rvpPsNrzi3udQcqLblKWe9ztVGqnZtRadcU54Agacbk6fKp1N8RZRXO+mu5Sk6oExzI4Xlfo/Acr9L7WeLrfv1Jsx0k17PjpwnKwAAX9k+p2q3ve1+L1Wt56vbzCPcVpZvU06yvfXNLx39pgMebPZU29L0kQDQ8N2/y6anCxZq+4RXQl1Z5VHqPuDqVHRzAlq92L0U3DDWBcZe2NGVorxzvatqp7lv8JRzAtpu3FeMWwGoPlv0gG5M8s43/rGq3ZWG+7yY9hzV0gZnKopxWqa8/ibKdppxU6zcVqS83kep7l624rmfANuhrsyxNjimuU8Zx7rJrUgZUBtNdH15dMv9u7TKuUxvovv+K2NdXxL0FXM5yqmtL/3t/25srW3N+ntjzHcA+PsAfAAfsdb+qG5Li8UUMiIiIiIiIiIiIiIiIio1a+2vAvjVVe/HQaUKxGVD9+i2JmtA+1RNpkzB9JWR5lSRgZG1dI8a2aruCVXru3+20UD3pGOaKp9QHSlTGzRlF7QZQWPdZ0tbyppmiidktJ9N+2SHhqnqjv8w1D1ZoykxCQCe5vdWPkWi/fY1WWpZVbe1uK1rl1WWl0lnlfuozWzTZC0BQKbJ3FMex9oaM7ah6H8S5ROSLW0dHF0zDc11FNA/Wabh6RIbYFJlJmNP951EXfc24wd020qaus9240X3jJQbVpfFYurKx893dNdETZWEbF2ZNVxRjluVfYkm20Y7JtH2P5VA8/S58loT6Z4s9oLl1b1OtZnl6muie5Paui6zR1sGKlPeS2n6hHCkm4bQZttoM+e/csu99NiXE92a5PGO7sn6ypp7dttDF7Z129LcI0KfpeArxpLafkubJT4MtWUS3MXK7BevrRuoZX33381XVkkIBsrxrnJGM1Xcl2orAD17R1fC8Ctwb6ctlecpKylp5+CM4nyLlaWJ/YpuvBUoxk3hVDlvpL1uK8ckVjFOM8oKcEFNN5aPFPO0Zqg8/pXndlbXXRM9xVyaUf7W2r5VvYSEYjrf1yXpl9Zya8kRERERERERERERERERnREMxBERERERERERERERERGdAAbiiIiIiIiIiIiIiIiIiE4AA3FEREREREREREREREREJ4CBOCIiIiIiIiIiIiIiIqITUFn1DszNAkHPd24W9I1zGy9xbgIAyAL3bQFAfcuq2iV19zhq0lxu7NWL3duMrur2cRLr2pmp+3EFALWb7u1MptoUKmNdu2hdd0zCuncNXqTclO7rBxQfLa0pN6Y8bXzF8Q8AwXB5/RZ03Q9q2+4N07puW9bTHcdW+buZVNNKtzFNHwno+xKr+CqzQHfe
JG3dweVFiuNf+T3Wb+v2UXtsJS33z6b5zQAARtdQc73xp7rvMZjo2k3Oaa9t7k20fWv7RV27HcV1ytZ032Plhu42QNdHAv7U/XdLR4FqW16qO0a0n03VB1ndZ9OOmzTtUuWxFSj6cQBIm7rtGcV52ujp9jFpqZqprhvei7pjZHpeN0iwga5dMHL/cBXFWBcAgqGqGabndMdWiKZ7I2X/07ipG1ykTfeT+8ZzDdW2vEQ7KFFS/GxZVdmPKPt/3bZ036N2vFtVzvpVd9zbVJRjQi9WtlOO06K2+28wjXUX4Mm4q2rnT93bJLpTG1lTe3Opa+aF7t9/pjyOU1+3k5Gimfbc9ifKPkH5/Wvm0hLFtQYAkrSqatfYcr8masfItW1dOz/UXbf9UNFIefmtDnQXt1pP17kOHnL/vcPukscWK8aMOCIiIiIiIiIiIiIiIqITwEAcERERERERERERERER0QlgII6IiIiIiIiIiIiIiIjoBDAQR0RERERERERERERERHQCGIgjIiIiIiIiIiIiIiIiOgErC8QZY95rjHnaGPOsMeZDq9oPIiIiIiIiIiIiIiIiopNQWcVGjTE+gJ8A8K0ArgH4tDHmY9baLxzWLlPsrfXd21S3rHujY/BiXbv2dubcZnRJF3v1Et134kXubbJAt4/Wq6rapY3lfbb6Hd22Mt+o2rV3VM3gh8s7B9K6rl2mOLezQPc9RuuqZghGunbVHffvPxjrfrNqP1W10/QJ1e1Qta24ozu3w03dJW6y6d4HBUPdsVUZ6X63ivIcDdfdP5vR7uNY951oromNLffrIQBUprp2RtcMk3PuHVcW6LZVv6PbSau4BGv7Vj/SHVvtG7p+azpWHP/K3xq6rwTBQPEDDHTbqm3r2nmJrp2mv4s7ui9ScxwD+jG5Sd0/m6+7JKrubQCojsm0qjyQte4o+5KJ+/efNFWbUp/blbF7G+39VzDUnQBpVdeutu2+n83byn5cMY4BgPqWqhkGjyiu21XlmFzZlwc3dO00koayobJP1vTlwVC3rbCrO7mN4lCuKPosADj/B7qby/EV3Q+XKsZ3ned1+5g1dPdtWUXZ39Xd29V7ugvwWDkHF7fd2wTKcat3Rzu4WJ6kpexbe8r5RcUhaZRjZC3tmFBzf9m8oby3V45lYN3bWU+3j4FyvqM20J1wvmIOon5rqtrWsnVecm+T1nXzfWW1qoy49wB41lr7nLU2AvDzAN63on0hIiIiIiIiIiIiIiIiWrhVBeIeBHB/nPTa3mtEREREREREREREREREp8JKSlPOyxjzwwB+GADgFz9VmoiIiIiIiIiIiIiIiOg1q8qIuw7g6n1/fmjvtX2stR+21j5lrX3KeKvaVSIiIiIiIiIiIiIiIiJ3q4pufRrAE8aYR40xVQDvB/CxFe0LERERERERERERERER0cKtpDSltTYxxvxXAH4dgA/gI9baP1rFvhARERERERERERERERGdBGOtXfU+zMUYkwGYzPjrCoBkibtDRKcP+xEiOi72I0S0COxLiOi42I8Q0SKwLyGi4zqJfqRhrS3dOmalCcQdxhjzGWvtU6veDyIqL/YjRHRc7EeIaBHYlxDRcbEfIaJFYF9CRMfFfuSe0kUOiYiIiIiIiIiIiIiIiMqAgTgiIiIiIiIiIiIiIiKiE3BaAnEfXvUOEFHpsR8houNiP0JEi8C+hIiOi/0IES0C+xIiOi72I3tOxRpxREREREREREREREREREVzWjLiiIiIiIiIiIiIiIiIiAql1IE4Y8x7jTFPG2OeNcZ8aNX7Q0TFZ4y5aoz5hDHmC8aYPzLG/NW91zeNMR83xjyz9/8bq95XIio+Y4xvjPmcMeZX9v78qDHmU3tjk//LGFNd9T4SUXEZY7rGmI8aY75kjPmiMeZPcExCRK6MMf/t3r3N540xP2eMqXNMQkSHMcZ8xBhz0xjz+fteE8cgZtf/sdef/IEx5l2r23MiKpIZfcnf3bu/+QNjzL80xnTv+7sf2etLnjbGfNtKdnpFShuIM8b4AH4CwLcDeAuA7zXGvGW1e0VEJZAA+O+ttW8B8LUA/spe3/EhAL9hrX0CwG/s/ZmI6Ch/FcAX7/vz3wHwv1lrXw9gG8APrWSviKgs/ncA/8pa+yYA78Buf8IxCRHNzRjzIID/BsBT1tonAfgA3g+OSYjocD8N4L0HXps1Bvl2AE/s/e+HAfzkkvaRiIrvp5HvSz4O4Elr7dsBfBnAjwDA3vzr+wG8da/NP9yL8ZwJpQ3EAXgPgGettc9ZayMAPw/gfSveJyIqOGvty9baz+799wC7E14PYrf/+Jm9t/0MgO9eyQ4SUWkYYx4C8KcB/NTenw2APwXgo3tvYV9CRDMZY9YBfAOAfwQA1trIWtsDxyRE5K4CoGGMqQBoAngZHJMQ0SGstb8J4M6Bl2eNQd4H4J/YXb8LoGuMubyUHSWiQpP6Emvtv7bWJnt//F0AD+399/sA/Ly1NrTWPg/gWezGeM6EMgfiHgTw0n1/vrb3GhHRXIwxrwPwTgCfAnDJWvvy3l+9AuDSqvaLiErj7wP4HwBke38+B6B334CTYxMiOsyjAG4B+Md7JW5/yhjTAsckROTAWnsdwI8DeBG7AbgdAL8PjkmIyN2sMQjnYIlI6y8B+LW9/z7TfUmZA3FERGrGmDaAXwDw16y1/fv/zlprAdiV7BgRlYIx5jsB3LTW/v6q94WISqsC4F0AftJa+04AIxwoQ8kxCREdZW8Np/dhN7h/BUAL+RJRREROOAYhouMyxvxN7C4R9LOr3pciKHMg7jqAq/f9+aG914iIDmWMCbAbhPtZa+2/2Hv51ddKK+z9/81V7R8RlcLXAfguY8wfY7c89p/C7lpP3b2yUADHJkR0uGsArllrP7X3549iNzDHMQkRufgWAM9ba29Za2MA/wK74xSOSYjI1awxCOdgiciJMeYHAXwngO/bC+wDZ7wvKXMg7tMAnjDGPGqMqWJ3ob+PrXifiKjg9tZw+kcAvmit/Xv3/dXHAHxw778/COCXlr1vRFQe1tofsdY+ZK19HXbHIP/GWvt9AD4B4M/uvY19CRHNZK19BcBLxpg37r30zQC+AI5JiMjNiwC+1hjT3LvXea0v4ZiEiFzNGoN8DMAPmF1fC2DnvhKWRET7GGPei91lPL7LWju+768+BuD9xpiaMeZRAE8A+L1V7OMqmHsByfIxxnwHdtdn8QF8xFr7o6vdIyIqOmPMfwTgtwD8Ie6t6/Q/YXeduH8O4GEALwD489bagwsXExHlGGO+CcBft9Z+pzHmMexmyG0C+ByAD1hrwxXuHhEVmDHmqwD8FIAqgOcA/EXsPizJMQkRzc0Y87cA/GfYLf/0OQD/OXbXXOGYhIhExpifA/BNAM4DeBXA/wLgFyGMQfaC/P8Au2VvxwD+orX2MyvYbSIqmBl9yY8AqAHY2nvb71pr/4u99/9N7K4bl2B3uaBfO/hvnlalDsQRERERERERERERERERFVWZS1MSERERERERERERERERFRYDcUREREREREREREREREQngIE
4IiIiIiIiIiIiIiIiohPAQBwRERERERERERERERHRCWAgjoiIiIiIiIiIiIiIiOgEMBBHRERERES0IsaYrjHmv9z77yvGmI+uep+IiIiIiIhocYy1dtX7QEREREREdCYZY14H4FestU+uel+IiIiIiIho8Sqr3gEiIiIiIqIz7McAPG6M+Q8AngHwZmvtk8aYHwTw3QBaAJ4A8OMAqgC+H0AI4DustXeMMY8D+AkAFwCMAfxla+2Xlv0hiIiIiIiISMbSlERERERERKvzIQBfsdZ+FYC/ceDvngTwPQDeDeBHAYytte8E8EkAP7D3ng8D+K+ttV8N4K8D+IfL2GkiIiIiIiKaDzPiiIiIiIiIiukT1toBgIExZgfAL++9/ocA3m6MaQP4kwD+b2PMa21qy99NIiIiIiIimoWBOCIiIiIiomIK7/vv7L4/Z9i9l/MA9Pay6YiIiIiIiKiAWJqSiIiIiIhodQYAOpqG1to+gOeNMX8OAMyudyxy54iIiIiIiOh4GIgjIiIiIiJaEWvtFoB/b4z5PIC/q/gnvg/ADxlj/j8AfwTgfYvcPyIiIiIiIjoeY61d9T4QERERERERERERERERnTrMiCMiIiIiIiIiIiIiIiI6AQzEEREREREREREREREREZ0ABuKIiIiIiIiIiIiIiIiITgADcUREREREREREREREREQngIE4IiIiIiIiIiIiIiIiohPAQBwRERERERERERERERHRCWAgjoiIiIiIiIiIiIiIiOgEMBBHREREREREREREREREdAL+f+itReBxkDxdAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "execution_count": 31 + } + ], + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": null, + "source": [], + "outputs": [], + "metadata": {} + } + ], + "metadata": { + "orig_nbformat": 4, + "language_info": { + "name": "python", + "version": "3.9.7", + "mimetype": "text/x-python", + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "pygments_lexer": "ipython3", + "nbconvert_exporter": "python", + "file_extension": ".py" + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3.9.1 64-bit ('miniconda3': virtualenv)" + }, + "interpreter": { + "hash": "822ce188d9bce5372c4adbb11364eeb49293228c2224eb55307f4664778e7f56" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file From 8cca3987aa4b7d7f08ce488f75abdfa8519480be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 21 Oct 2021 16:22:12 +0000 Subject: [PATCH 61/64] Update documentation --- docs/source/index.md | 1 + docs/source/models/tacotron1-2.md | 63 +++++++++++++ docs/source/training_a_model.md | 91 +++++++++++++++++-- docs/source/tutorial_for_nervous_beginners.md | 6 +- 4 files changed, 152 insertions(+), 9 deletions(-) create mode 100644 docs/source/models/tacotron1-2.md diff --git a/docs/source/index.md b/docs/source/index.md index d5f77ad4..756cea8e 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -47,6 +47,7 @@ models/glow_tts.md models/vits.md models/forward_tts.md + models/tacotron1-2.md .. toctree:: :maxdepth: 2 diff --git a/docs/source/models/tacotron1-2.md b/docs/source/models/tacotron1-2.md new file mode 100644 index 00000000..90833ecb --- /dev/null +++ b/docs/source/models/tacotron1-2.md @@ -0,0 +1,63 @@ +# 🌮 Tacotron 1 and 2 + +Tacotron is one of the first successful DL-based text-to-mel models and opened up the whole TTS field for more DL research. + +Tacotron mainly is an encoder-decoder model with attention. + +The encoder takes input tokens (characters or phonemes) and the decoder outputs mel-spectrogram* frames. Attention module in-between learns to align the input tokens with the output mel-spectrgorams. + +Tacotron1 and 2 are both built on the same encoder-decoder architecture but they use different layers. Additionally, Tacotron1 uses a Postnet module to convert mel-spectrograms to linear spectrograms with a higher resolution before the vocoder. + +Vanilla Tacotron models are slow at inference due to the auto-regressive* nature that prevents the model to process all the inputs in parallel. One trick is to use a higher “reduction rate” that helps the model to predict multiple frames at once. That is, reduction rate 2 reduces the number of decoder iterations by half. + +Tacotron also uses a Prenet module with Dropout that projects the model’s previous output before feeding it to the decoder again. The paper and most of the implementations use the Dropout layer even in inference and they report the attention fails or the voice quality degrades otherwise. But the issue with that, you get a slightly different output speech every time you run the model. + +Tsraining the attention is notoriously problematic in Tacoron models. Especially, in inference, for some input sequences, the alignment fails and causes the model to produce unexpected results. There are many different methods proposed to improve the attention. + +After hundreds of experiments, @ 🐸TTS we suggest Double Decoder Consistency that leads to the most robust model performance. 
+
+If you have limited VRAM, you can try using the Guided Attention Loss or the Dynamic Convolutional Attention instead. You can also combine the two.
+
+
+## Important resources & papers
+- Tacotron: https://arxiv.org/abs/1703.10135
+- Tacotron2: https://arxiv.org/abs/1712.05884
+- Double Decoder Consistency: https://coqui.ai/blog/tts/solving-attention-problems-of-tts-models-with-double-decoder-consistency
+- Guided Attention Loss: https://arxiv.org/abs/1710.08969
+- Forward & Backward Decoder: https://arxiv.org/abs/1907.09006
+- Forward Attention: https://arxiv.org/abs/1807.06736
+- Gaussian Attention: https://arxiv.org/abs/1910.10288
+- Dynamic Convolutional Attention: https://arxiv.org/pdf/1910.10288.pdf
+
+
+## BaseTacotron
+```{eval-rst}
+.. autoclass:: TTS.tts.models.base_tacotron.BaseTacotron
+    :members:
+```
+
+## Tacotron
+```{eval-rst}
+.. autoclass:: TTS.tts.models.tacotron.Tacotron
+    :members:
+```
+
+## Tacotron2
+```{eval-rst}
+.. autoclass:: TTS.tts.models.tacotron2.Tacotron2
+    :members:
+```
+
+## TacotronConfig
+```{eval-rst}
+.. autoclass:: TTS.tts.configs.tacotron_config.TacotronConfig
+    :members:
+```
+
+## Tacotron2Config
+```{eval-rst}
+.. autoclass:: TTS.tts.configs.tacotron2_config.Tacotron2Config
+    :members:
+```
+
+
diff --git a/docs/source/training_a_model.md b/docs/source/training_a_model.md
index deb94e85..3e781461 100644
--- a/docs/source/training_a_model.md
+++ b/docs/source/training_a_model.md
@@ -1,18 +1,19 @@
 # Training a Model
 
-1. Decide what model you want to use.
+1. Decide the model you want to use.
 
     Each model has a different set of pros and cons that define the run-time efficiency and the voice quality. It is up to you to decide what model serves your needs. Other than referring to the papers, one easy way is to test the 🐸TTS community models and see how fast and good each of the models is. Or you can start a discussion on our communication channels.
 
-2. Understand the configuration, its fields and values of your model.
+2. Understand the configuration, its fields and values.
 
    For instance, if you want to train a `Tacotron` model then see the `TacotronConfig` class and make sure you understand it.
 
-3. Go to the recipes and check the recipe of your target model.
+3. Check the recipes.
 
-    Recipes do not promise perfect models but they provide a good start point for `Nervous Beginners`. A recipe script for
-    `GlowTTS` using `LJSpeech` dataset looks like below. Let's be creative and call this `train_glowtts.py`.
+    Recipes are located under `TTS/recipes/`. They do not promise perfect models but they provide a good starting point for
+    `Nervous Beginners`.
+    A recipe for `GlowTTS` using the `LJSpeech` dataset looks like below. Let's be creative and call this `train_glowtts.py`.
 
     ```python
     # train_glowtts.py
 
     import os
 
     from TTS.trainer import Trainer, TrainingArgs
-    from TTS.tts.configs import BaseDatasetConfig, GlowTTSConfig
+    from TTS.tts.configs.shared_configs import BaseDatasetConfig
+    from TTS.tts.configs.glow_tts_config import GlowTTSConfig
     from TTS.tts.datasets import load_tts_samples
     from TTS.tts.models.glow_tts import GlowTTS
     from TTS.utils.audio import AudioProcessor
@@ -183,3 +185,80 @@
 8. Return to step 1 and repeat for training a `vocoder` model.
 
 In the example above, we trained a `GlowTTS` model, but the same workflow applies to all the other 🐸TTS models.
+
+
+# Multi-speaker Training
+
+Training a multi-speaker model is mostly the same as training a single-speaker model.
+You need to specify a couple of configuration parameters, instantiate a `SpeakerManager` and pass it to the model.
+
+The configuration parameters define whether you want to train the model with a speaker-embedding layer or pre-computed
+d-vectors. To use d-vectors, you first need to compute them with the `SpeakerEncoder`.
+
+The same Glow-TTS model above can be trained on the multi-speaker VCTK dataset with the script below.
+
+```python
+import os
+
+from TTS.config.shared_configs import BaseAudioConfig
+from TTS.trainer import Trainer, TrainingArgs
+from TTS.tts.configs.shared_configs import BaseDatasetConfig
+from TTS.tts.configs.glow_tts_config import GlowTTSConfig
+from TTS.tts.datasets import load_tts_samples
+from TTS.tts.models.glow_tts import GlowTTS
+from TTS.tts.utils.speakers import SpeakerManager
+from TTS.utils.audio import AudioProcessor
+
+# define dataset config for VCTK
+output_path = os.path.dirname(os.path.abspath(__file__))
+dataset_config = BaseDatasetConfig(name="vctk", meta_file_train="", path=os.path.join(output_path, "../VCTK/"))
+
+# init audio processing config
+audio_config = BaseAudioConfig(sample_rate=22050, do_trim_silence=True, trim_db=23.0)
+
+# init training config
+config = GlowTTSConfig(
+    audio=audio_config,
+    batch_size=64,
+    eval_batch_size=16,
+    num_loader_workers=4,
+    num_eval_loader_workers=4,
+    run_eval=True,
+    test_delay_epochs=-1,
+    epochs=1000,
+    text_cleaner="phoneme_cleaners",
+    use_phonemes=True,
+    phoneme_language="en-us",
+    phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
+    print_step=25,
+    print_eval=False,
+    mixed_precision=True,
+    output_path=output_path,
+    datasets=[dataset_config],
+    use_speaker_embedding=True,
+)
+
+# init audio processor
+ap = AudioProcessor(**config.audio.to_dict())
+
+# load training samples
+train_samples, eval_samples = load_tts_samples(dataset_config, eval_split=True)
+
+# ONLY FOR MULTI-SPEAKER: init speaker manager for multi-speaker training
+speaker_manager = SpeakerManager()
+speaker_manager.set_speaker_ids_from_data(train_samples + eval_samples)
+config.num_speakers = speaker_manager.num_speakers
+
+# init model
+model = GlowTTS(config, speaker_manager)
+
+# init the trainer and 🚀
+trainer = Trainer(
+    TrainingArgs(),
+    config,
+    output_path,
+    model=model,
+    train_samples=train_samples,
+    eval_samples=eval_samples,
+    training_assets={"audio_processor": ap},
+)
+trainer.fit()
+```
diff --git a/docs/source/tutorial_for_nervous_beginners.md b/docs/source/tutorial_for_nervous_beginners.md
index dc5e9a6c..828314ad 100644
--- a/docs/source/tutorial_for_nervous_beginners.md
+++ b/docs/source/tutorial_for_nervous_beginners.md
@@ -29,10 +29,10 @@ each line.
     import os
 
     # GlowTTSConfig: all model related values for training, validating and testing.
-    from TTS.tts.configs import GlowTTSConfig
+    from TTS.tts.configs.glow_tts_config import GlowTTSConfig
 
     # BaseDatasetConfig: defines name, formatter and path of the dataset.
-    from TTS.tts.configs import BaseDatasetConfig
+    from TTS.tts.configs.shared_configs import BaseDatasetConfig
 
     # init_training: Initialize and setup the training environment.
     # Trainer: Where the ✨️ happens.
@@ -79,7 +79,7 @@ each line.
 
     # Initiate the Trainer.
     # Trainer provides a generic API to train all the 🐸TTS models with all its perks like mixed-precision training,
-    # distributed training etc.
+    # distributed training, etc.
    trainer = Trainer(
        TrainingArgs(),
        config,

From 9e483fb4f01fe46fc9f78190458d340d9a26831b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 21 Oct 2021 16:22:33 +0000
Subject: [PATCH 62/64] Update ljspeech download

---
 recipes/ljspeech/download_ljspeech.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/recipes/ljspeech/download_ljspeech.sh b/recipes/ljspeech/download_ljspeech.sh
index 14ef058d..9468988a 100644
--- a/recipes/ljspeech/download_ljspeech.sh
+++ b/recipes/ljspeech/download_ljspeech.sh
@@ -10,5 +10,5 @@ tar -xjf LJSpeech-1.1.tar.bz2
 shuf LJSpeech-1.1/metadata.csv > LJSpeech-1.1/metadata_shuf.csv
 head -n 12000 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_train.csv
 tail -n 1100 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_val.csv
-mv LJSpeech-1.1 $RUN_DIR/
+mv LJSpeech-1.1 $RUN_DIR/recipes/ljspeech/
 rm LJSpeech-1.1.tar.bz2
\ No newline at end of file

From d9c291b06c90b19b5c02921c98b08a99b3610c4c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3=B6lge?=
Date: Thu, 21 Oct 2021 16:25:06 +0000
Subject: [PATCH 63/64] Update .gitignore

---
 .gitignore | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index 95939d32..2bfd0906 100644
--- a/.gitignore
+++ b/.gitignore
@@ -124,6 +124,15 @@ version.py
 # jupyter dummy files
 core
 
+# ignore local datasets
+recipes/WIP/*
+recipes/ljspeech/LJSpeech-1.1/*
+recipes/vctk/VCTK/*
+VCTK-Corpus-removed-silence/*
+
+# ignore training logs
+trainer_*_log.txt
+
 # files used internally for dev, test etc.
 tests/outputs/*
 tests/train_outputs/*
@@ -134,9 +143,6 @@ notebooks/data/*
 TTS/tts/layers/glow_tts/monotonic_align/core.c
 .vscode-upload.json
 temp_build/*
-recipes/WIP/*
-recipes/ljspeech/LJSpeech-1.1/*
-recipes/ljspeech/tacotron2-DDC/LJSpeech-1.1/*
 events.out*
 old_configs/*
 model_importers/*

From 25759d6a619918efa3a0d84e1f1143533da44183 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eren=20G=C3%B6lge?=
Date: Thu, 21 Oct 2021 17:07:41 +0000
Subject: [PATCH 64/64] Split tests

---
 .github/workflows/{main.yml => aux_tests.yml} |  7 +--
 .github/workflows/style_check.yml             | 50 +++++++++++++++++++
 .github/workflows/tts_tests.yml               | 49 ++++++++++++++++++
 .github/workflows/vocoder_tests.yml           | 49 ++++++++++++++++++
 Makefile                                      |  9 ++++
 .../__init__.py}                              |  0
 tests/{ => aux_tests}/model_manager.py        |  0
 tests/{ => aux_tests}/test_audio_processor.py |  0
 .../test_extract_tts_spectrograms.py          |  0
 tests/{ => aux_tests}/test_speaker_encoder.py |  0
 .../test_speaker_encoder_train.py             |  0
 tests/{ => aux_tests}/test_speaker_manager.py |  0
 tests/aux_tests/test_stft_torch.py            |  0
 tests/{ => aux_tests}/test_text_processing.py |  0
 14 files changed, 159 insertions(+), 5 deletions(-)
 create mode 100644 .github/workflows/style_check.yml
 create mode 100644 .github/workflows/tts_tests.yml
 create mode 100644 .github/workflows/vocoder_tests.yml
 rename .github/workflows/{main.yml => aux_tests.yml} (93%)
 rename tests/{test_stft_torch.py => aux_tests/__init__.py} (100%)
 rename tests/{ => aux_tests}/model_manager.py (100%)
 rename tests/{ => aux_tests}/test_audio_processor.py (100%)
 rename tests/{ => aux_tests}/test_extract_tts_spectrograms.py (100%)
 rename tests/{ => aux_tests}/test_speaker_encoder.py (100%)
 rename tests/{ => aux_tests}/test_speaker_encoder_train.py (100%)
 rename tests/{ => aux_tests}/test_speaker_manager.py (100%)
 create mode 100644 tests/aux_tests/test_stft_torch.py
 rename tests/{ => aux_tests}/test_text_processing.py (100%)

diff --git a/.github/workflows/main.yml
b/.github/workflows/aux_tests.yml similarity index 93% rename from .github/workflows/main.yml rename to .github/workflows/aux_tests.yml index 68be9274..d5fe1bb3 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/aux_tests.yml @@ -1,4 +1,4 @@ -name: CI +name: aux-tests on: push: @@ -45,8 +45,5 @@ jobs: run: | python3 -m pip install .[all] python3 setup.py egg_info - - name: Lint check - run: | - make lint - name: Unit tests - run: make test + run: make test_aux diff --git a/.github/workflows/style_check.yml b/.github/workflows/style_check.yml new file mode 100644 index 00000000..4a30c26d --- /dev/null +++ b/.github/workflows/style_check.yml @@ -0,0 +1,50 @@ +name: style-check + +on: + push: + branches: + - main + pull_request: + types: [opened, synchronize, reopened] +jobs: + check_skip: + runs-on: ubuntu-latest + if: "! contains(github.event.head_commit.message, '[ci skip]')" + steps: + - run: echo "${{ github.event.head_commit.message }}" + + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [3.9] + experimental: [false] + steps: + - uses: actions/checkout@v2 + - uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/setup.py') }} + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + architecture: x64 + - name: check OS + run: cat /etc/os-release + - name: Install dependencies + run: | + sudo apt update + sudo apt install -y git make + sudo apt install -y python3-wheel gcc + make system-deps + - name: Upgrade pip + run: python3 -m pip install --upgrade pip + - name: Install TTS + run: | + python3 -m pip install .[all] + python3 setup.py egg_info + - name: Lint check + run: | + make lint \ No newline at end of file diff --git a/.github/workflows/tts_tests.yml b/.github/workflows/tts_tests.yml new file mode 100644 index 00000000..d05dca90 --- /dev/null +++ b/.github/workflows/tts_tests.yml @@ -0,0 +1,49 @@ +name: tts-tests + +on: + push: + branches: + - main + pull_request: + types: [opened, synchronize, reopened] +jobs: + check_skip: + runs-on: ubuntu-latest + if: "! 
contains(github.event.head_commit.message, '[ci skip]')" + steps: + - run: echo "${{ github.event.head_commit.message }}" + + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [3.6, 3.7, 3.8, 3.9] + experimental: [false] + steps: + - uses: actions/checkout@v2 + - uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/setup.py') }} + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + architecture: x64 + - name: check OS + run: cat /etc/os-release + - name: Install dependencies + run: | + sudo apt update + sudo apt install -y git make + sudo apt install -y python3-wheel gcc + make system-deps + - name: Upgrade pip + run: python3 -m pip install --upgrade pip + - name: Install TTS + run: | + python3 -m pip install .[all] + python3 setup.py egg_info + - name: Unit tests + run: make test_tts diff --git a/.github/workflows/vocoder_tests.yml b/.github/workflows/vocoder_tests.yml new file mode 100644 index 00000000..69e74dbf --- /dev/null +++ b/.github/workflows/vocoder_tests.yml @@ -0,0 +1,49 @@ +name: vocoder-tests + +on: + push: + branches: + - main + pull_request: + types: [opened, synchronize, reopened] +jobs: + check_skip: + runs-on: ubuntu-latest + if: "! contains(github.event.head_commit.message, '[ci skip]')" + steps: + - run: echo "${{ github.event.head_commit.message }}" + + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: [3.6, 3.7, 3.8, 3.9] + experimental: [false] + steps: + - uses: actions/checkout@v2 + - uses: actions/cache@v1 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/setup.py') }} + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + architecture: x64 + - name: check OS + run: cat /etc/os-release + - name: Install dependencies + run: | + sudo apt update + sudo apt install -y git make + sudo apt install -y python3-wheel gcc + make system-deps + - name: Upgrade pip + run: python3 -m pip install --upgrade pip + - name: Install TTS + run: | + python3 -m pip install .[all] + python3 setup.py egg_info + - name: Unit tests + run: make test_vocoder diff --git a/Makefile b/Makefile index bffed7ff..c2091ca0 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,15 @@ test_all: ## run tests and don't stop on an error. test: ## run tests. nosetests -x --with-cov -cov --cover-erase --cover-package TTS tests --nologcapture --with-id + +test_vocoder: ## run vocoder tests. + nosetests tests.vocoder_tests -x --with-cov -cov --cover-erase --cover-package TTS tests.vocoder_tests --nologcapture --with-id + +test_tts: ## run tts tests. + nosetests tests.tts_tests -x --with-cov -cov --cover-erase --cover-package TTS tests.tts_tests --nologcapture --with-id + +test_aux: ## run aux tests. + nosetests tests.aux_tests -x --with-cov -cov --cover-erase --cover-package TTS tests.aux_tests --nologcapture --with-id ./run_bash_tests.sh test_failed: ## only run tests failed the last time. 
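
With the new targets in place, the split suites can be exercised individually. A quick usage sketch (assuming a working local dev install; each target simply wraps the nosetests invocation shown in the Makefile above):

```bash
make test_tts      # run only the tests under tests/tts_tests
make test_vocoder  # run only the tests under tests/vocoder_tests
make test_aux      # run the tests under tests/aux_tests, then ./run_bash_tests.sh
```
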
diff --git a/tests/test_stft_torch.py b/tests/aux_tests/__init__.py similarity index 100% rename from tests/test_stft_torch.py rename to tests/aux_tests/__init__.py diff --git a/tests/model_manager.py b/tests/aux_tests/model_manager.py similarity index 100% rename from tests/model_manager.py rename to tests/aux_tests/model_manager.py diff --git a/tests/test_audio_processor.py b/tests/aux_tests/test_audio_processor.py similarity index 100% rename from tests/test_audio_processor.py rename to tests/aux_tests/test_audio_processor.py diff --git a/tests/test_extract_tts_spectrograms.py b/tests/aux_tests/test_extract_tts_spectrograms.py similarity index 100% rename from tests/test_extract_tts_spectrograms.py rename to tests/aux_tests/test_extract_tts_spectrograms.py diff --git a/tests/test_speaker_encoder.py b/tests/aux_tests/test_speaker_encoder.py similarity index 100% rename from tests/test_speaker_encoder.py rename to tests/aux_tests/test_speaker_encoder.py diff --git a/tests/test_speaker_encoder_train.py b/tests/aux_tests/test_speaker_encoder_train.py similarity index 100% rename from tests/test_speaker_encoder_train.py rename to tests/aux_tests/test_speaker_encoder_train.py diff --git a/tests/test_speaker_manager.py b/tests/aux_tests/test_speaker_manager.py similarity index 100% rename from tests/test_speaker_manager.py rename to tests/aux_tests/test_speaker_manager.py diff --git a/tests/aux_tests/test_stft_torch.py b/tests/aux_tests/test_stft_torch.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_text_processing.py b/tests/aux_tests/test_text_processing.py similarity index 100% rename from tests/test_text_processing.py rename to tests/aux_tests/test_text_processing.py