From 5ffc0543b76858ee5a25fc60c3de9d0369e43dd5 Mon Sep 17 00:00:00 2001
From: Enno Hermann
Date: Thu, 21 Nov 2024 13:06:20 +0100
Subject: [PATCH] refactor(bark): remove custom layer norm

Pytorch LayerNorm supports bias=False since version 2.1
---
 TTS/tts/layers/bark/model.py | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

diff --git a/TTS/tts/layers/bark/model.py b/TTS/tts/layers/bark/model.py
index 68c50dbd..54a9cece 100644
--- a/TTS/tts/layers/bark/model.py
+++ b/TTS/tts/layers/bark/model.py
@@ -12,18 +12,6 @@ from torch import nn
 from torch.nn import functional as F
 
 
-class LayerNorm(nn.Module):
-    """LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False"""
-
-    def __init__(self, ndim, bias):
-        super().__init__()
-        self.weight = nn.Parameter(torch.ones(ndim))
-        self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
-
-    def forward(self, x):
-        return F.layer_norm(x, self.weight.shape, self.weight, self.bias, 1e-5)
-
-
 class CausalSelfAttention(nn.Module):
     def __init__(self, config):
         super().__init__()
@@ -119,9 +107,9 @@ class MLP(nn.Module):
 class Block(nn.Module):
     def __init__(self, config, layer_idx):
         super().__init__()
-        self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
+        self.ln_1 = nn.LayerNorm(config.n_embd, bias=config.bias)
         self.attn = CausalSelfAttention(config)
-        self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
+        self.ln_2 = nn.LayerNorm(config.n_embd, bias=config.bias)
         self.mlp = MLP(config)
         self.layer_idx = layer_idx
 
@@ -158,7 +146,7 @@ class GPT(nn.Module):
                 wpe=nn.Embedding(config.block_size, config.n_embd),
                 drop=nn.Dropout(config.dropout),
                 h=nn.ModuleList([Block(config, idx) for idx in range(config.n_layer)]),
-                ln_f=LayerNorm(config.n_embd, bias=config.bias),
+                ln_f=nn.LayerNorm(config.n_embd, bias=config.bias),
             )
         )
         self.lm_head = nn.Linear(config.n_embd, config.output_vocab_size, bias=False)
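
Note (not part of the patch): a minimal sketch of the equivalence the refactor relies on, comparing the removed custom LayerNorm against nn.LayerNorm with bias=False. It assumes PyTorch >= 2.1 (where nn.LayerNorm accepts bias=False, as the commit message states); the CustomLayerNorm name, the 16-dim embedding size, and the input shapes are made up for illustration.

import torch
from torch import nn
from torch.nn import functional as F

# Copy of the custom layer norm removed by this patch.
class CustomLayerNorm(nn.Module):
    def __init__(self, ndim, bias):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(ndim))
        self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None

    def forward(self, x):
        return F.layer_norm(x, self.weight.shape, self.weight, self.bias, 1e-5)

torch.manual_seed(0)
x = torch.randn(2, 8, 16)  # (batch, seq, n_embd); illustrative shapes only

old_ln = CustomLayerNorm(16, bias=False)
new_ln = nn.LayerNorm(16, bias=False)  # bias kwarg requires PyTorch >= 2.1

# Both use eps=1e-5, initialize weight to ones, and have no bias term,
# so their untrained outputs match elementwise.
print(torch.allclose(old_ln(x), new_ln(x)))  # True

With bias=True the two also match at initialization (zero bias), so swapping in nn.LayerNorm does not change the module's behavior, only removes the redundant custom class.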