mirror of https://github.com/coqui-ai/TTS.git
refactor(bark): remove custom layer norm
PyTorch's nn.LayerNorm has supported bias=False since version 2.1, so the custom wrapper is no longer needed.
This commit is contained in:
parent 705551c60c
commit 5ffc0543b7
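For context, a minimal sketch (not part of this commit) of the behavior the removed helper class emulated; the hidden size 768 is just an example value:

    import torch
    from torch import nn

    # Requires torch >= 2.1; older versions reject the bias keyword.
    ln = nn.LayerNorm(768, bias=False)
    assert ln.bias is None            # no bias parameter is registered
    assert ln.weight.shape == (768,)  # the scale parameter is still there

    x = torch.randn(2, 16, 768)
    y = ln(x)                         # normalizes over the last dimension
    print(y.shape)                    # torch.Size([2, 16, 768])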
@@ -12,18 +12,6 @@ from torch import nn
 from torch.nn import functional as F
 
 
-class LayerNorm(nn.Module):
-    """LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False"""
-
-    def __init__(self, ndim, bias):
-        super().__init__()
-        self.weight = nn.Parameter(torch.ones(ndim))
-        self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
-
-    def forward(self, x):
-        return F.layer_norm(x, self.weight.shape, self.weight, self.bias, 1e-5)
-
-
 class CausalSelfAttention(nn.Module):
     def __init__(self, config):
         super().__init__()
@@ -119,9 +107,9 @@ class MLP(nn.Module):
 class Block(nn.Module):
     def __init__(self, config, layer_idx):
         super().__init__()
-        self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
+        self.ln_1 = nn.LayerNorm(config.n_embd, bias=config.bias)
         self.attn = CausalSelfAttention(config)
-        self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
+        self.ln_2 = nn.LayerNorm(config.n_embd, bias=config.bias)
         self.mlp = MLP(config)
         self.layer_idx = layer_idx
 
@@ -158,7 +146,7 @@ class GPT(nn.Module):
                 wpe=nn.Embedding(config.block_size, config.n_embd),
                 drop=nn.Dropout(config.dropout),
                 h=nn.ModuleList([Block(config, idx) for idx in range(config.n_layer)]),
-                ln_f=LayerNorm(config.n_embd, bias=config.bias),
+                ln_f=nn.LayerNorm(config.n_embd, bias=config.bias),
             )
         )
         self.lm_head = nn.Linear(config.n_embd, config.output_vocab_size, bias=False)
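For reference, a quick equivalence sketch (assuming torch >= 2.1; OldLayerNorm is a copy of the class deleted above, and 768 stands in for config.n_embd): nn.LayerNorm uses the same default eps (1e-5), the same weight/bias parameter names, and the same ones/zeros initialization, so outputs and checkpoint keys should match the removed implementation.

    import torch
    from torch import nn
    from torch.nn import functional as F

    class OldLayerNorm(nn.Module):
        """Copy of the custom LayerNorm removed in this commit."""
        def __init__(self, ndim, bias):
            super().__init__()
            self.weight = nn.Parameter(torch.ones(ndim))
            self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None

        def forward(self, x):
            return F.layer_norm(x, self.weight.shape, self.weight, self.bias, 1e-5)

    x = torch.randn(4, 32, 768)                # 768 is just an example n_embd
    for bias in (True, False):
        old = OldLayerNorm(768, bias=bias)
        new = nn.LayerNorm(768, bias=bias)     # default eps=1e-5 matches the old code
        new.load_state_dict(old.state_dict())  # parameter names (weight/bias) line up
        assert torch.allclose(old(x), new(x))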