Attention convolution padding correction for TF "SAME"

Author: Eren
Date: 2018-09-19 14:16:21 +02:00
Parent: c52d3f16f9
Commit: fd830c6416
2 changed files with 2 additions and 2 deletions

@@ -42,7 +42,7 @@ class LocationSensitiveAttention(nn.Module):
         super(LocationSensitiveAttention, self).__init__()
         self.kernel_size = kernel_size
         self.filters = filters
-        padding = int((kernel_size - 1) / 2)
+        padding = [(kernel_size - 1) // 2, (kernel_size - 1) // 2]
         self.loc_conv = nn.Conv1d(
             2,
             filters,
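The switch from a single int to an explicit [left, right] pair is what lets the layer reproduce TensorFlow's "SAME" padding, which with stride 1 keeps the output length equal to the input length. A minimal sketch of that rule in PyTorch (illustrative names only, not the repo's code; the pair is presumably applied through a separate padding layer, since nn.Conv1d itself only accepts a single symmetric padding value):

import torch
import torch.nn as nn

def tf_same_conv1d(in_channels, out_channels, kernel_size):
    # TF "SAME" with stride 1 pads kernel_size - 1 steps in total,
    # placing the extra step on the right when kernel_size is even.
    left, right = (kernel_size - 1) // 2, kernel_size // 2
    return nn.Sequential(
        nn.ConstantPad1d((left, right), 0.0),
        nn.Conv1d(in_channels, out_channels, kernel_size, stride=1, padding=0))

x = torch.randn(1, 2, 50)            # (batch, channels, time)
for k in (31, 32):                   # odd and even kernel sizes
    assert tf_same_conv1d(2, 4, k)(x).shape[-1] == x.shape[-1]

For an odd kernel_size, left and right are equal, which is why the attention padding above reduces to [(kernel_size - 1) // 2, (kernel_size - 1) // 2].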

@@ -136,7 +136,7 @@ class CBHG(nn.Module):
                 padding=[(k - 1) // 2, k // 2],
                 activation=self.relu) for k in range(1, K + 1)
         ])
-        # max pooling of conv bank, padding with nn.functional
+        # max pooling of conv bank, with padding
         # TODO: try average pooling OR larger kernel size
         self.max_pool1d = nn.Sequential(
             nn.ConstantPad1d([0, 1], value=0),
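The conv-bank padding [(k - 1) // 2, k // 2] is the general odd/even form of the same "SAME" rule, and the ConstantPad1d([0, 1], value=0) in front of the pool plays the same role there: assuming the pool uses kernel_size=2 and stride=1 (the call itself is cut off in this hunk), one extra step on the right keeps the pooled sequence at its original length. A small self-contained check of that assumption:

import torch
import torch.nn as nn

# Assumed pooling parameters: kernel_size=2, stride=1 (not visible in the hunk).
max_pool1d = nn.Sequential(
    nn.ConstantPad1d([0, 1], value=0),   # pad one step on the right only
    nn.MaxPool1d(kernel_size=2, stride=1))

x = torch.randn(1, 128, 50)                    # (batch, channels, time)
assert max_pool1d(x).shape[-1] == x.shape[-1]  # length preserved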