mirror of https://github.com/coqui-ai/TTS.git
chore(freevc): remove duplicate DDSConv and ElementwiseAffine
They already exist as TTS.tts.layers.vits.stochastic_duration_predictor.DilatedDepthSeparableConv and TTS.tts.layers.vits.stochastic_duration_predictor.ElementwiseAffine.
This commit is contained in:
parent
9f80e043e4
commit
d65bcf65bb
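If any remaining FreeVC code still needs these layers, it can import the shared VITS implementations named in the commit message instead of keeping local copies. A minimal sketch; whether the FreeVC modules actually add such an import is not shown in this diff, and the shared classes are assumed to be drop-in replacements:

from TTS.tts.layers.vits.stochastic_duration_predictor import (
    DilatedDepthSeparableConv,  # stands in for the removed DDSConv
    ElementwiseAffine,          # stands in for the removed duplicate ElementwiseAffine
)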
@@ -45,48 +45,6 @@ class ConvReluNorm(nn.Module):
         return x * x_mask
 
 
-class DDSConv(nn.Module):
-    """
-    Dialted and Depth-Separable Convolution
-    """
-
-    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
-        super().__init__()
-        self.channels = channels
-        self.kernel_size = kernel_size
-        self.n_layers = n_layers
-        self.p_dropout = p_dropout
-
-        self.drop = nn.Dropout(p_dropout)
-        self.convs_sep = nn.ModuleList()
-        self.convs_1x1 = nn.ModuleList()
-        self.norms_1 = nn.ModuleList()
-        self.norms_2 = nn.ModuleList()
-        for i in range(n_layers):
-            dilation = kernel_size**i
-            padding = (kernel_size * dilation - dilation) // 2
-            self.convs_sep.append(
-                nn.Conv1d(channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding)
-            )
-            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
-            self.norms_1.append(LayerNorm2(channels))
-            self.norms_2.append(LayerNorm2(channels))
-
-    def forward(self, x, x_mask, g=None):
-        if g is not None:
-            x = x + g
-        for i in range(self.n_layers):
-            y = self.convs_sep[i](x * x_mask)
-            y = self.norms_1[i](y)
-            y = F.gelu(y)
-            y = self.convs_1x1[i](y)
-            y = self.norms_2[i](y)
-            y = F.gelu(y)
-            y = self.drop(y)
-            x = x + y
-        return x * x_mask
-
-
 class WN(torch.nn.Module):
     def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
         super(WN, self).__init__()
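For context, the removed DDSConv stacks depthwise-separable Conv1d blocks whose dilation grows as kernel_size**i, each followed by LayerNorm, GELU, a 1x1 convolution, dropout, and a residual connection, with every step gated by a frame mask. A small usage sketch written against the call pattern shown above; it imports the retained DilatedDepthSeparableConv and assumes it keeps the same positional constructor arguments (channels, kernel_size, n_layers, p_dropout) and forward signature (x, x_mask, g=None); tensor sizes are illustrative:

import torch
from TTS.tts.layers.vits.stochastic_duration_predictor import DilatedDepthSeparableConv

# 4 layers with kernel_size=3 give dilations 1, 3, 9, 27; the padding formula keeps the frame axis fixed.
conv = DilatedDepthSeparableConv(192, 3, 4, 0.1)  # positional args: channels, kernel_size, layers, dropout

x = torch.randn(2, 192, 100)    # (batch, channels, frames)
x_mask = torch.ones(2, 1, 100)  # 1.0 for real frames, 0.0 for padding
x_mask[1, :, 60:] = 0.0         # second item is only 60 frames long

y = conv(x, x_mask)             # residual stack output, same shape as x
g = torch.randn(2, 192, 1)      # optional conditioning, broadcast-added to x before the stack
y_cond = conv(x, x_mask, g=g)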
@@ -303,24 +261,6 @@ class Flip(nn.Module):
             return x
 
 
-class ElementwiseAffine(nn.Module):
-    def __init__(self, channels):
-        super().__init__()
-        self.channels = channels
-        self.m = nn.Parameter(torch.zeros(channels, 1))
-        self.logs = nn.Parameter(torch.zeros(channels, 1))
-
-    def forward(self, x, x_mask, reverse=False, **kwargs):
-        if not reverse:
-            y = self.m + torch.exp(self.logs) * x
-            y = y * x_mask
-            logdet = torch.sum(self.logs * x_mask, [1, 2])
-            return y, logdet
-        else:
-            x = (x - self.m) * torch.exp(-self.logs) * x_mask
-            return x
-
-
 class ResidualCouplingLayer(nn.Module):
     def __init__(
         self,
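The removed ElementwiseAffine is the simplest normalizing-flow step: a learned per-channel shift m and log-scale logs applied under the frame mask, with the log-determinant summed over masked positions and an exact inverse in the reverse branch. A round-trip sketch against the interface shown above; it imports the retained ElementwiseAffine from the path named in the commit message and assumes it behaves identically, with illustrative tensor sizes:

import torch
from TTS.tts.layers.vits.stochastic_duration_predictor import ElementwiseAffine

layer = ElementwiseAffine(192)            # m and logs are zero-initialised, so the flow starts as identity
x = torch.randn(2, 192, 100)              # (batch, channels, frames)
x_mask = torch.ones(2, 1, 100)            # 1.0 for real frames, 0.0 for padding

y, logdet = layer(x, x_mask)              # y = (m + exp(logs) * x) * x_mask
x_back = layer(y, x_mask, reverse=True)   # (y - m) * exp(-logs) * x_mask recovers x

print(torch.allclose(x * x_mask, x_back, atol=1e-6))  # True: the flow is invertible
print(logdet.shape)                                   # torch.Size([2]): one log-det per batch item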