mirror of https://github.com/coqui-ai/TTS.git

refactor: remove duplicate get_padding

parent c30fb0f56b
commit 4bd3df2607
@@ -10,10 +10,6 @@ from TTS.tts.utils.helpers import sequence_mask
 LRELU_SLOPE = 0.1
 
 
-def get_padding(kernel_size, dilation=1):
-    return int((kernel_size * dilation - dilation) / 2)
-
-
 class TextEncoder(nn.Module):
     def __init__(
         self,
@@ -9,16 +9,13 @@ from torch.nn.utils.parametrizations import weight_norm
 from torch.nn.utils.parametrize import remove_parametrizations
 
 from TTS.utils.io import load_fsspec
+from TTS.vocoder.models.hifigan_generator import get_padding
 
 logger = logging.getLogger(__name__)
 
 LRELU_SLOPE = 0.1
 
 
-def get_padding(k, d):
-    return int((k * d - d) / 2)
-
-
 class ResBlock1(torch.nn.Module):
     """Residual Block Type 1. It has 3 convolutional layers in each convolutional block.
 
@@ -19,10 +19,11 @@ from TTS.tts.utils.speakers import SpeakerManager
 from TTS.utils.io import load_fsspec
 from TTS.vc.configs.freevc_config import FreeVCConfig
 from TTS.vc.models.base_vc import BaseVC
-from TTS.vc.modules.freevc.commons import get_padding, init_weights
+from TTS.vc.modules.freevc.commons import init_weights
 from TTS.vc.modules.freevc.mel_processing import mel_spectrogram_torch
 from TTS.vc.modules.freevc.speaker_encoder.speaker_encoder import SpeakerEncoder as SpeakerEncoderEx
 from TTS.vc.modules.freevc.wavlm import get_wavlm
+from TTS.vocoder.models.hifigan_generator import get_padding
 
 logger = logging.getLogger(__name__)
 
@@ -12,10 +12,6 @@ def init_weights(m: torch.nn.Module, mean: float = 0.0, std: float = 0.01) -> None:
         m.weight.data.normal_(mean, std)
 
 
-def get_padding(kernel_size, dilation=1):
-    return int((kernel_size * dilation - dilation) / 2)
-
-
 def intersperse(lst, item):
     result = [item] * (len(lst) * 2 + 1)
     result[1::2] = lst
@@ -7,7 +7,8 @@ from torch.nn.utils.parametrize import remove_parametrizations
 
 import TTS.vc.modules.freevc.commons as commons
 from TTS.tts.layers.generic.normalization import LayerNorm2
-from TTS.vc.modules.freevc.commons import get_padding, init_weights
+from TTS.vc.modules.freevc.commons import init_weights
+from TTS.vocoder.models.hifigan_generator import get_padding
 
 LRELU_SLOPE = 0.1
 
@@ -3,6 +3,8 @@ import torch
 from torch import nn
 from torch.nn import functional as F
 
+from TTS.vocoder.models.hifigan_generator import get_padding
+
 LRELU_SLOPE = 0.1
 
 
@@ -29,7 +31,6 @@ class DiscriminatorP(torch.nn.Module):
     def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
         super().__init__()
         self.period = period
-        get_padding = lambda k, d: int((k * d - d) / 2)
         norm_f = nn.utils.spectral_norm if use_spectral_norm else nn.utils.parametrizations.weight_norm
         self.convs = nn.ModuleList(
             [
@@ -15,8 +15,8 @@ logger = logging.getLogger(__name__)
 LRELU_SLOPE = 0.1
 
 
-def get_padding(k, d):
-    return int((k * d - d) / 2)
+def get_padding(kernel_size: int, dilation: int = 1) -> int:
+    return int((kernel_size * dilation - dilation) / 2)
 
 
 class ResBlock1(torch.nn.Module):
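For reference, the retained helper computes "same" padding for a dilated 1D convolution with an odd kernel: the effective kernel size is (kernel_size - 1) * dilation + 1, and half of (effective - 1) keeps the output length equal to the input length at stride 1. The snippet below is a minimal sketch, not part of this commit or the TTS test suite; the standalone copy of get_padding and the tensor sizes are illustrative only.

import torch
from torch import nn


def get_padding(kernel_size: int, dilation: int = 1) -> int:
    # Same formula as the retained helper in TTS.vocoder.models.hifigan_generator:
    # half of (effective_kernel - 1), where effective_kernel = (kernel_size - 1) * dilation + 1.
    return int((kernel_size * dilation - dilation) / 2)


# Illustrative check: at stride 1 with an odd kernel, the padded
# convolution preserves the time dimension.
x = torch.randn(1, 16, 100)  # (batch, channels, time)
for kernel_size, dilation in [(3, 1), (5, 1), (3, 3), (7, 5)]:
    conv = nn.Conv1d(16, 16, kernel_size, dilation=dilation, padding=get_padding(kernel_size, dilation))
    assert conv(x).shape == x.shape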