mirror of https://github.com/coqui-ai/TTS.git
Corrected Code Style
This commit is contained in:
parent d0f34b2fd9
commit 2deccb6eb3
@@ -3,7 +3,7 @@ from typing import Tuple
 import torch
 import torch.nn as nn  # pylint: disable=consider-using-from-import
 import torch.nn.functional as F
-import torch.nn.utils.parametrize as parametrize
+from torch.nn.utils import parametrize
 
 from TTS.tts.layers.delightful_tts.kernel_predictor import KernelPredictor
 
@@ -1,5 +1,5 @@
 import torch.nn as nn  # pylint: disable=consider-using-from-import
-import torch.nn.utils.parametrize as parametrize
+from torch.nn.utils import parametrize
 
 
 class KernelPredictor(nn.Module):
@@ -37,7 +37,9 @@ class KernelPredictor(nn.Module):
         kpnet_bias_channels = conv_out_channels * conv_layers  # l_b
 
         self.input_conv = nn.Sequential(
-            nn.utils.parametrizations.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)),
+            nn.utils.parametrizations.weight_norm(
+                nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)
+            ),
             getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
         )
 
@@ -1,6 +1,6 @@
 import torch
-import torch.nn.utils.parametrize as parametrize
 from torch import nn
+from torch.nn.utils import parametrize
 
 
 @torch.jit.script
@@ -44,7 +44,9 @@ class KernelPredictor(torch.nn.Module):
         kpnet_bias_channels = conv_out_channels * conv_layers  # l_b
 
         self.input_conv = nn.Sequential(
-            nn.utils.parametrizations.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)),
+            nn.utils.parametrizations.weight_norm(
+                nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)
+            ),
             getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
         )
 
@@ -314,7 +316,9 @@ class UnivNetGenerator(nn.Module):
                )
            )
 
-        self.conv_pre = nn.utils.parametrizations.weight_norm(nn.Conv1d(noise_dim, channel_size, 7, padding=3, padding_mode="reflect"))
+        self.conv_pre = nn.utils.parametrizations.weight_norm(
+            nn.Conv1d(noise_dim, channel_size, 7, padding=3, padding_mode="reflect")
+        )
 
         self.conv_post = nn.Sequential(
             nn.LeakyReLU(lReLU_slope),
@@ -11,7 +11,9 @@ class ResStack(nn.Module):
             resstack += [
                 nn.LeakyReLU(0.2),
                 nn.ReflectionPad1d(dilation),
-                nn.utils.parametrizations.weight_norm(nn.Conv1d(channel, channel, kernel_size=kernel, dilation=dilation)),
+                nn.utils.parametrizations.weight_norm(
+                    nn.Conv1d(channel, channel, kernel_size=kernel, dilation=dilation)
+                ),
                 nn.LeakyReLU(0.2),
                 nn.ReflectionPad1d(padding),
                 nn.utils.parametrizations.weight_norm(nn.Conv1d(channel, channel, kernel_size=1)),
@@ -3,7 +3,7 @@ from typing import List
 import numpy as np
 import torch
 import torch.nn.functional as F
-import torch.nn.utils.parametrize as parametrize
+from torch.nn.utils import parametrize
 
 from TTS.vocoder.layers.lvc_block import LVCBlock
 
@@ -6,10 +6,10 @@ import torch
 from coqpit import Coqpit
 from torch import nn
 from torch.nn.utils.parametrizations import weight_norm
+from torch.nn.utils.parametrize import remove_parametrizations
 from torch.utils.data import DataLoader
 from torch.utils.data.distributed import DistributedSampler
 from trainer.trainer_utils import get_optimizer, get_scheduler
-from torch.nn.utils.parametrize import remove_parametrizations
 
 from TTS.utils.io import load_fsspec
 from TTS.vocoder.datasets import WaveGradDataset
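Usage note (not part of the commit): the diff standardizes on PyTorch's parametrization-based API, i.e. torch.nn.utils.parametrizations.weight_norm together with torch.nn.utils.parametrize, in place of the deprecated torch.nn.utils.weight_norm. A minimal sketch of that API, assuming PyTorch >= 2.1; the channel sizes below are illustrative only:

import torch
from torch import nn
from torch.nn.utils import parametrize
from torch.nn.utils.parametrizations import weight_norm

# Register weight normalization as a parametrization on the conv's weight
# (illustrative channel sizes, not taken from the commit).
conv = weight_norm(nn.Conv1d(80, 64, 5, padding=2, bias=True))
assert parametrize.is_parametrized(conv)

x = torch.randn(1, 80, 100)
y = conv(x)  # the normalized weight is recomputed from its parametrized factors

# Fold the current normalized weight back into a plain nn.Parameter,
# e.g. before export or inference.
parametrize.remove_parametrizations(conv, "weight")
assert not parametrize.is_parametrized(conv)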