Attention convolution padding correction for TF "SAME"
parent c52d3f16f9
commit fd830c6416
@@ -42,7 +42,7 @@ class LocationSensitiveAttention(nn.Module):
         super(LocationSensitiveAttention, self).__init__()
         self.kernel_size = kernel_size
         self.filters = filters
-        padding = int((kernel_size - 1) / 2)
+        padding = [(kernel_size - 1) // 2, (kernel_size - 1) // 2]
         self.loc_conv = nn.Conv1d(
             2,
             filters,
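The list form of `padding` makes the left and right pad explicit, which is what TensorFlow's "SAME" scheme needs: a stride-1 convolution keeps its output length only if the total pad is `kernel_size - 1`, and for even kernels that total cannot be split evenly, so the extra element goes on the right (`[(k - 1) // 2, k // 2]`, as in the CBHG hunk below). For odd kernel sizes the two sides coincide, which is presumably why this hunk pads symmetrically. A minimal sketch of the idea, with an explicit padding layer in front of the convolution; `same_pad_conv1d` is a hypothetical helper for illustration, not a function from the repository:

import torch
import torch.nn as nn

def same_pad_conv1d(in_channels, out_channels, kernel_size):
    # Hypothetical helper: TF "SAME" at stride 1 means a total pad of
    # kernel_size - 1, with the odd leftover element padded on the right.
    left = (kernel_size - 1) // 2
    right = kernel_size // 2
    return nn.Sequential(
        nn.ConstantPad1d((left, right), 0.0),
        nn.Conv1d(in_channels, out_channels, kernel_size, padding=0),
    )

x = torch.randn(1, 2, 50)  # (batch, channels, time)
for k in (3, 4, 5):
    # output length matches input length for odd and even kernels alike
    assert same_pad_conv1d(2, 8, k)(x).shape[-1] == x.shape[-1]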
@@ -136,7 +136,7 @@ class CBHG(nn.Module):
             padding=[(k - 1) // 2, k // 2],
             activation=self.relu) for k in range(1, K + 1)
         ])
-        # max pooling of conv bank, padding with nn.functional
+        # max pooling of conv bank, with padding
         # TODO: try average pooling OR larger kernel size
         self.max_pool1d = nn.Sequential(
             nn.ConstantPad1d([0, 1], value=0),
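The `nn.ConstantPad1d([0, 1], value=0)` in front of the pool is the same "SAME" trick applied to pooling: a kernel-2, stride-1 max pool shortens the sequence by one frame, and a single zero frame padded on the right restores the original length. A small sketch, assuming the kernel-2/stride-1 pool that a one-frame pad compensates for (shapes are illustrative):

import torch
import torch.nn as nn

# right-pad by one zero frame, then pool; the time dimension is preserved
max_pool1d = nn.Sequential(
    nn.ConstantPad1d((0, 1), value=0),
    nn.MaxPool1d(kernel_size=2, stride=1, padding=0),
)

x = torch.randn(4, 128, 37)  # (batch, channels, time)
assert max_pool1d(x).shape == x.shape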