diff --git a/.pylintrc b/.pylintrc
index afc22d3c..6e9f953e 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -64,6 +64,11 @@ disable=missing-docstring,
         too-many-public-methods,
         too-many-lines,
         bare-except,
+        ## for avoiding weird p3.6 CI linter error
+        ## TODO: see later if we can remove this
+        assigning-non-slot,
+        unsupported-assignment-operation,
+        ## end
         line-too-long,
         fixme,
         wrong-import-order,
diff --git a/TTS/speaker_encoder/speaker_encoder_config.py b/TTS/speaker_encoder/speaker_encoder_config.py
index e830a0f5..8212acc7 100644
--- a/TTS/speaker_encoder/speaker_encoder_config.py
+++ b/TTS/speaker_encoder/speaker_encoder_config.py
@@ -1,5 +1,5 @@
 from dataclasses import asdict, dataclass, field
-from typing import List
+from typing import Dict, List
 
 from coqpit import MISSING
 
@@ -14,7 +14,7 @@ class SpeakerEncoderConfig(BaseTrainingConfig):
     audio: BaseAudioConfig = field(default_factory=BaseAudioConfig)
     datasets: List[BaseDatasetConfig] = field(default_factory=lambda: [BaseDatasetConfig()])
     # model params
-    model_params: dict = field(
+    model_params: Dict = field(
         default_factory=lambda: {
             "model_name": "lstm",
             "input_dim": 80,
@@ -25,9 +25,9 @@ class SpeakerEncoderConfig(BaseTrainingConfig):
         }
     )
 
-    audio_augmentation: dict = field(default_factory=lambda: {})
+    audio_augmentation: Dict = field(default_factory=lambda: {})
 
-    storage: dict = field(
+    storage: Dict = field(
         default_factory=lambda: {
             "sample_from_storage_p": 0.66,  # the probability with which we'll sample from the DataSet in-memory storage
             "storage_size": 15,  # the size of the in-memory storage with respect to a single batch
diff --git a/TTS/vocoder/configs/univnet_config.py b/TTS/vocoder/configs/univnet_config.py
index 85662831..67f324cf 100644
--- a/TTS/vocoder/configs/univnet_config.py
+++ b/TTS/vocoder/configs/univnet_config.py
@@ -1,4 +1,5 @@
 from dataclasses import dataclass, field
+from typing import Dict
 
 from TTS.vocoder.configs.shared_configs import BaseGANVocoderConfig
 
@@ -95,7 +96,7 @@ class UnivnetConfig(BaseGANVocoderConfig):
     # model specific params
     discriminator_model: str = "univnet_discriminator"
     generator_model: str = "univnet_generator"
-    generator_model_params: dict = field(
+    generator_model_params: Dict = field(
         default_factory=lambda: {
             "in_channels": 64,
             "out_channels": 1,
@@ -120,7 +121,7 @@ class UnivnetConfig(BaseGANVocoderConfig):
 
     # loss weights - overrides
     stft_loss_weight: float = 2.5
-    stft_loss_params: dict = field(
+    stft_loss_params: Dict = field(
         default_factory=lambda: {
             "n_ffts": [1024, 2048, 512],
             "hop_lengths": [120, 240, 50],
@@ -132,7 +133,7 @@ class UnivnetConfig(BaseGANVocoderConfig):
     hinge_G_loss_weight: float = 0
     feat_match_loss_weight: float = 0
     l1_spec_loss_weight: float = 0
-    l1_spec_loss_params: dict = field(
+    l1_spec_loss_params: Dict = field(
         default_factory=lambda: {
             "use_mel": True,
             "sample_rate": 22050,
@@ -152,7 +153,7 @@ class UnivnetConfig(BaseGANVocoderConfig):
     # lr_scheduler_gen_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
     lr_scheduler_disc: str = None  # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
     # lr_scheduler_disc_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
-    optimizer_params: dict = field(default_factory=lambda: {"betas": [0.5, 0.9], "weight_decay": 0.0})
+    optimizer_params: Dict = field(default_factory=lambda: {"betas": [0.5, 0.9], "weight_decay": 0.0})
     steps_to_start_discriminator: int = 200000
 
     def __post_init__(self):
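
Context for the pattern above: per the new .pylintrc comment, pylint on the Python 3.6 CI run appears to mis-handle dataclass fields annotated with the builtin `dict`, raising spurious `assigning-non-slot` / `unsupported-assignment-operation` errors; annotating with `typing.Dict` is equivalent at runtime and avoids the false positives. A minimal, self-contained sketch of the pattern, with a hypothetical `ExampleConfig` standing in for the repo's coqpit-based configs:

    from dataclasses import dataclass, field
    from typing import Dict


    @dataclass
    class ExampleConfig:
        # `Dict` instead of the builtin `dict`: identical at runtime, but it
        # sidesteps the p3.6 CI linter false positives named in .pylintrc above.
        # `default_factory` gives each instance its own mutable copy, since
        # mutable defaults are not allowed directly on dataclass fields.
        model_params: Dict = field(
            default_factory=lambda: {
                "model_name": "lstm",
                "input_dim": 80,
            }
        )


    config = ExampleConfig()
    config.model_params["input_dim"] = 64  # per-instance dict, safe to mutate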