Fix linter issues for p3.6

Eren Gölge 2021-08-30 15:13:04 +00:00
parent 738eee0cf9
commit 6782d3eab7
3 changed files with 14 additions and 8 deletions

.pylintrc

@@ -64,6 +64,11 @@ disable=missing-docstring,
         too-many-public-methods,
         too-many-lines,
         bare-except,
+        ## for avoiding weird p3.6 CI linter error
+        ## TODO: see later if we can remove this
+        assigning-non-slot,
+        unsupported-assignment-operation,
+        ## end
         line-too-long,
         fixme,
         wrong-import-order,
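
Note: the two newly disabled checks are pylint's assigning-non-slot (E0237) and unsupported-assignment-operation (E1137), which the older pylint/astroid pinned on the Python 3.6 CI apparently raises as false positives on dataclass fields. A minimal sketch of the pattern that trips them (hypothetical repro, not code from this repo); both assignments are perfectly valid at runtime:

from dataclasses import dataclass, field
from typing import Dict

@dataclass
class Config:
    params: Dict = field(default_factory=lambda: {"dim": 80})

cfg = Config()
cfg.params = {"dim": 64}  # old pylint may flag assigning-non-slot here
cfg.params["dim"] = 64    # ...and unsupported-assignment-operation here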

TTS/speaker_encoder/speaker_encoder_config.py

@@ -1,5 +1,5 @@
 from dataclasses import asdict, dataclass, field
-from typing import List
+from typing import Dict, List
 
 from coqpit import MISSING
 
@@ -14,7 +14,7 @@ class SpeakerEncoderConfig(BaseTrainingConfig):
     audio: BaseAudioConfig = field(default_factory=BaseAudioConfig)
     datasets: List[BaseDatasetConfig] = field(default_factory=lambda: [BaseDatasetConfig()])
     # model params
-    model_params: dict = field(
+    model_params: Dict = field(
         default_factory=lambda: {
             "model_name": "lstm",
             "input_dim": 80,
@@ -25,9 +25,9 @@ class SpeakerEncoderConfig(BaseTrainingConfig):
         }
     )
 
-    audio_augmentation: dict = field(default_factory=lambda: {})
+    audio_augmentation: Dict = field(default_factory=lambda: {})
 
-    storage: dict = field(
+    storage: Dict = field(
         default_factory=lambda: {
             "sample_from_storage_p": 0.66,  # the probability with which we'll sample from the DataSet in-memory storage
             "storage_size": 15,  # the size of the in-memory storage with respect to a single batch

TTS/vocoder/configs/univnet_config.py

@@ -1,4 +1,5 @@
 from dataclasses import dataclass, field
+from typing import Dict
 
 from TTS.vocoder.configs.shared_configs import BaseGANVocoderConfig
 
@@ -95,7 +96,7 @@ class UnivnetConfig(BaseGANVocoderConfig):
     # model specific params
     discriminator_model: str = "univnet_discriminator"
     generator_model: str = "univnet_generator"
-    generator_model_params: dict = field(
+    generator_model_params: Dict = field(
         default_factory=lambda: {
             "in_channels": 64,
             "out_channels": 1,
@@ -120,7 +121,7 @@ class UnivnetConfig(BaseGANVocoderConfig):
 
     # loss weights - overrides
     stft_loss_weight: float = 2.5
-    stft_loss_params: dict = field(
+    stft_loss_params: Dict = field(
         default_factory=lambda: {
             "n_ffts": [1024, 2048, 512],
             "hop_lengths": [120, 240, 50],
@@ -132,7 +133,7 @@ class UnivnetConfig(BaseGANVocoderConfig):
     hinge_G_loss_weight: float = 0
     feat_match_loss_weight: float = 0
     l1_spec_loss_weight: float = 0
-    l1_spec_loss_params: dict = field(
+    l1_spec_loss_params: Dict = field(
         default_factory=lambda: {
             "use_mel": True,
             "sample_rate": 22050,
@@ -152,7 +153,7 @@ class UnivnetConfig(BaseGANVocoderConfig):
     # lr_scheduler_gen_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
     lr_scheduler_disc: str = None  # one of the schedulers from https:#pytorch.org/docs/stable/optim.html
     # lr_scheduler_disc_params: dict = field(default_factory=lambda: {"gamma": 0.999, "last_epoch": -1})
-    optimizer_params: dict = field(default_factory=lambda: {"betas": [0.5, 0.9], "weight_decay": 0.0})
+    optimizer_params: Dict = field(default_factory=lambda: {"betas": [0.5, 0.9], "weight_decay": 0.0})
     steps_to_start_discriminator: int = 200000
 
     def __post_init__(self):
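
Note: after the change the Dict-typed fields still behave as plain dicts. A hedged usage sketch, assuming the TTS package from this repo is installed and the import path below is current:

from TTS.vocoder.configs.univnet_config import UnivnetConfig

config = UnivnetConfig()
print(config.generator_model_params["in_channels"])  # 64, per the defaults above
# Mutating a Dict-typed field no longer trips the py3.6 CI linter:
config.optimizer_params["weight_decay"] = 1e-4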