refactor(tortoise): remove unused do_checkpoint arguments

The arguments are assigned to instance attributes but never read anywhere, so removing them does not change behavior.
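For context: in PyTorch, a flag like this would normally gate a call to torch.utils.checkpoint in forward() to trade recomputation for activation memory. None of these modules ever make that call; the constructors only store the flag, which is why it is dead code. Below is a minimal sketch of what the flag would control if it were wired up. It is illustrative only: CheckpointedBlock and its layers are invented for the example and are not code from this repository.

    import torch.nn as nn
    from torch.utils.checkpoint import checkpoint

    class CheckpointedBlock(nn.Module):
        """Hypothetical module showing what do_checkpoint would gate if it were read."""

        def __init__(self, channels, do_checkpoint=True):
            super().__init__()
            self.do_checkpoint = do_checkpoint
            self.body = nn.Sequential(nn.Conv1d(channels, channels, 3, padding=1), nn.SiLU())

        def forward(self, x):
            if self.do_checkpoint and self.training:
                # Recompute activations in the backward pass instead of storing them.
                return checkpoint(self.body, x, use_reentrant=False)
            return self.body(x)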
Author: Enno Hermann
Date:   2024-11-21 12:40:12 +01:00
Parent: 4ba83f42ab
Commit: 705551c60c
4 changed files with 2 additions and 13 deletions

@@ -93,12 +93,10 @@ class AttentionBlock(nn.Module):
         channels,
         num_heads=1,
         num_head_channels=-1,
-        do_checkpoint=True,
         relative_pos_embeddings=False,
     ):
         super().__init__()
         self.channels = channels
-        self.do_checkpoint = do_checkpoint
         if num_head_channels == -1:
             self.num_heads = num_heads
         else:

@@ -175,7 +175,6 @@ class ConditioningEncoder(nn.Module):
         embedding_dim,
         attn_blocks=6,
         num_attn_heads=4,
-        do_checkpointing=False,
         mean=False,
     ):
         super().__init__()
@@ -185,7 +184,6 @@ class ConditioningEncoder(nn.Module):
             attn.append(AttentionBlock(embedding_dim, num_attn_heads))
         self.attn = nn.Sequential(*attn)
         self.dim = embedding_dim
-        self.do_checkpointing = do_checkpointing
         self.mean = mean
 
     def forward(self, x):

@@ -16,7 +16,6 @@ class ResBlock(nn.Module):
         up=False,
         down=False,
         kernel_size=3,
-        do_checkpoint=True,
     ):
         super().__init__()
         self.channels = channels
@@ -24,7+23,6 @@ class ResBlock(nn.Module):
         self.out_channels = out_channels or channels
         self.use_conv = use_conv
         self.use_scale_shift_norm = use_scale_shift_norm
-        self.do_checkpoint = do_checkpoint
         padding = 1 if kernel_size == 3 else 2
 
         self.in_layers = nn.Sequential(
@@ -92,14 +90,14 @@ class AudioMiniEncoder(nn.Module):
         self.layers = depth
         for l in range(depth):
             for r in range(resnet_blocks):
-                res.append(ResBlock(ch, dropout, do_checkpoint=False, kernel_size=kernel_size))
+                res.append(ResBlock(ch, dropout, kernel_size=kernel_size))
             res.append(Downsample(ch, use_conv=True, out_channels=ch * 2, factor=downsample_factor))
             ch *= 2
         self.res = nn.Sequential(*res)
         self.final = nn.Sequential(normalization(ch), nn.SiLU(), nn.Conv1d(ch, embedding_dim, 1))
         attn = []
         for a in range(attn_blocks):
-            attn.append(AttentionBlock(embedding_dim, num_attn_heads, do_checkpoint=False))
+            attn.append(AttentionBlock(embedding_dim, num_attn_heads))
         self.attn = nn.Sequential(*attn)
         self.dim = embedding_dim
 

@@ -196,31 +196,26 @@ class DiffusionTts(nn.Module):
                 model_channels * 2,
                 num_heads,
                 relative_pos_embeddings=True,
-                do_checkpoint=False,
             ),
             AttentionBlock(
                 model_channels * 2,
                 num_heads,
                 relative_pos_embeddings=True,
-                do_checkpoint=False,
             ),
             AttentionBlock(
                 model_channels * 2,
                 num_heads,
                 relative_pos_embeddings=True,
-                do_checkpoint=False,
             ),
             AttentionBlock(
                 model_channels * 2,
                 num_heads,
                 relative_pos_embeddings=True,
-                do_checkpoint=False,
             ),
             AttentionBlock(
                 model_channels * 2,
                 num_heads,
                 relative_pos_embeddings=True,
-                do_checkpoint=False,
             ),
         )
         self.unconditioned_embedding = nn.Parameter(torch.randn(1, model_channels, 1))
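One consequence for downstream code: Python rejects removed keyword arguments outright, so any external caller still passing the flag now fails with a TypeError rather than having it silently ignored. A hypothetical call site, with argument values invented for illustration:

    block = AttentionBlock(256, num_heads=4)  # fine: the remaining signature is unchanged
    block = AttentionBlock(256, num_heads=4, do_checkpoint=False)
    # TypeError: __init__() got an unexpected keyword argument 'do_checkpoint'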