From 66f8d0e26063cb28be3497808cc3a494df0160eb Mon Sep 17 00:00:00 2001
From: Eren Golge
Date: Tue, 22 Jan 2019 18:18:21 +0100
Subject: [PATCH] Attention bias changed

---
 layers/attention.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/layers/attention.py b/layers/attention.py
index 93d0d5a1..cb2b0f80 100644
--- a/layers/attention.py
+++ b/layers/attention.py
@@ -52,9 +52,9 @@ class LocationSensitiveAttention(nn.Module):
                 stride=1,
                 padding=0,
                 bias=False))
-        self.loc_linear = nn.Linear(filters, attn_dim, bias=False)
-        self.query_layer = nn.Linear(query_dim, attn_dim, bias=False)
-        self.annot_layer = nn.Linear(annot_dim, attn_dim, bias=False)
+        self.loc_linear = nn.Linear(filters, attn_dim, bias=True)
+        self.query_layer = nn.Linear(query_dim, attn_dim, bias=True)
+        self.annot_layer = nn.Linear(annot_dim, attn_dim, bias=True)
         self.v = nn.Linear(attn_dim, 1, bias=True)
         self.processed_annots = None
         # self.init_layers()
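
For context, below is a minimal, self-contained sketch of how these projection layers typically enter the attention energy in location-sensitive attention (Chorowski et al., 2015). The layer names (loc_linear, query_layer, annot_layer, v) and the new bias=True settings follow the diff; the forward pass, tensor shapes, argument names (query_dim, annot_dim, attn_dim, filters), and the "same" convolution padding are assumptions for illustration, not a copy of the project's implementation (the diff context shows the repo's conv using padding=0 with its own handling).

    import torch
    import torch.nn as nn

    class LocationSensitiveAttention(nn.Module):
        """Minimal sketch of location-sensitive attention.
        Layer names follow the patch; other details are illustrative assumptions."""

        def __init__(self, query_dim, annot_dim, attn_dim, filters=32, kernel_size=31):
            super().__init__()
            # 1-D conv over the previous attention weights ("location" features).
            # "Same" padding is used here so the sketch stays shape-consistent.
            self.loc_conv = nn.Conv1d(1, filters, kernel_size,
                                      stride=1, padding=(kernel_size - 1) // 2,
                                      bias=False)
            # Projections into the shared attention space; the patch enables the
            # bias terms on these three layers (bias=True instead of bias=False).
            self.loc_linear = nn.Linear(filters, attn_dim, bias=True)
            self.query_layer = nn.Linear(query_dim, attn_dim, bias=True)
            self.annot_layer = nn.Linear(annot_dim, attn_dim, bias=True)
            self.v = nn.Linear(attn_dim, 1, bias=True)

        def forward(self, query, annots, prev_attn):
            # query:     (B, query_dim)   decoder state
            # annots:    (B, T, annot_dim) encoder outputs
            # prev_attn: (B, T)           attention weights from the previous step
            loc_feats = self.loc_conv(prev_attn.unsqueeze(1)).transpose(1, 2)  # (B, T, filters)
            energies = self.v(torch.tanh(
                self.query_layer(query).unsqueeze(1)   # (B, 1, attn_dim)
                + self.annot_layer(annots)             # (B, T, attn_dim)
                + self.loc_linear(loc_feats)           # (B, T, attn_dim)
            )).squeeze(-1)                             # (B, T)
            return torch.softmax(energies, dim=-1)

Since the three projected terms are summed inside the tanh, their bias vectors add up to a single effective offset; enabling all three biases therefore mainly changes the parameterization and optimization dynamics rather than what the attention can express.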