mirror of https://github.com/coqui-ai/TTS.git
Data loader bug fix 2
This commit is contained in:
parent 3c084177c6
commit 1ff8d6d2b7
@@ -125,6 +125,7 @@ class LJSpeechDataset(Dataset):
            mel = torch.FloatTensor(mel)
            mel_lengths = torch.LongTensor(mel_lengths)
            stop_targets = torch.FloatTensor(stop_targets)

            return text, text_lenghts, linear, mel, mel_lengths, stop_targets, item_idxs[0]

        raise TypeError(("batch must contain tensors, numbers, dicts or lists;\
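This hunk closes out the dataset's collate step, where padded numpy arrays become torch tensors. A minimal standalone sketch of that conversion with hypothetical batch shapes (the hunk shows only the tail of the function, so the surrounding setup below is an assumption):

import numpy as np
import torch

B, num_mels, T = 2, 80, 15                      # hypothetical batch shapes
mel = np.random.rand(B, num_mels, T).astype(np.float32)
mel_lengths = [12, 15]                          # real frame counts per item
stop_targets = np.zeros((B, T), dtype=np.float32)
for i, l in enumerate(mel_lengths):
    stop_targets[i, l - 1:] = 1.0               # mark frames at/after the end

mel = torch.FloatTensor(mel)                    # floats for the spectrogram loss
mel_lengths = torch.LongTensor(mel_lengths)     # ints, usable for masking
stop_targets = torch.FloatTensor(stop_targets)  # floats for a BCE-style stop loss
print(mel.shape, mel_lengths.tolist(), stop_targets.sum(1).tolist())

The integer dtype for mel_lengths matters because the lengths are presumably consumed as indices when building loss masks, while mel and stop_targets feed float losses.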
@@ -256,9 +256,12 @@ class Decoder(nn.Module):
        greedy = not self.training

        if memory is not None:
            print(memory.shape)

            # Grouping multiple frames if necessary
            if memory.size(-1) == self.memory_dim:
                memory = memory.view(B, memory.size(1) // self.r, -1)
                print(memory.shape)
            assert memory.size(-1) == self.memory_dim * self.r,\
                " !! Dimension mismatch {} vs {} * {}".format(memory.size(-1),
                                                              self.memory_dim, self.r)
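The grouping trick works because r consecutive frames sit contiguously along the time axis, so a plain view concatenates each group of r frames into one decoder step. A standalone check with hypothetical sizes (B, T, memory_dim, and r here are stand-ins for the module's attributes):

import torch

B, T, memory_dim, r = 2, 6, 80, 2       # hypothetical sizes
memory = torch.randn(B, T, memory_dim)  # (batch, time, mel bins)

# Group every r consecutive frames into one decoder step:
# (B, T, memory_dim) -> (B, T // r, memory_dim * r)
grouped = memory.view(B, memory.size(1) // r, -1)

assert grouped.size(-1) == memory_dim * r
# frames 0 and 1 end up concatenated inside grouped step 0
assert torch.equal(grouped[0, 0, :memory_dim], memory[0, 0])
assert torch.equal(grouped[0, 0, memory_dim:], memory[0, 1])
print(grouped.shape)                    # torch.Size([2, 3, 160])

This is also why T must be an exact multiple of r, which is what the padding fixes below guarantee.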
train.py
@@ -82,7 +82,7 @@ def train(model, criterion, data_loader, optimizer, epoch):
        linear_input = data[2]
        mel_input = data[3]
        mel_lengths = data[4]

        current_step = num_iter + args.restore_step + epoch * len(data_loader) + 1

        # setup lr
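current_step folds the resume offset and completed epochs into one global counter, so LR scheduling and logging see a monotonically increasing step across restarts. A toy trace with hypothetical values standing in for args.restore_step and len(data_loader):

epoch_len = 100        # hypothetical len(data_loader)
restore_step = 250     # hypothetical args.restore_step after resuming
current_step = 0
for epoch in range(2):
    for num_iter in range(epoch_len):
        current_step = num_iter + restore_step + epoch * epoch_len + 1
print(current_step)    # 450: 99 + 250 + 1 * 100 + 1, continuous across epochs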
@@ -17,13 +17,13 @@ def prepare_data(inputs):
def _pad_tensor(x, length):
    _pad = 0
    assert x.ndim == 2
-    return np.pad(x, [[0, 0], [0, length - x.shape[1]]], mode='constant', constant_values=_pad)
+    x = np.pad(x, [[0, 0], [0, length - x.shape[1]]], mode='constant', constant_values=_pad)
+    return x


def prepare_tensor(inputs, out_steps):
    max_len = max((x.shape[1] for x in inputs)) + 1  # zero-frame
    remainder = max_len % out_steps
-    return np.stack([_pad_tensor(x, max_len + remainder) for x in inputs])
+    return np.stack([_pad_tensor(x, max_len + (out_steps - remainder)) for x in inputs])


def _pad_stop_target(x, length):
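The fix is easiest to see with concrete numbers: adding the remainder itself does not round up to a multiple of out_steps, while adding the complement does. A minimal sketch with hypothetical toy shapes:

import numpy as np

def _pad_tensor(x, length):
    # pad the time axis (axis 1) with zeros up to `length` frames
    return np.pad(x, [[0, 0], [0, length - x.shape[1]]],
                  mode='constant', constant_values=0)

out_steps = 5                                  # reduction factor
max_len = 13                                   # longest item + 1 zero-frame
remainder = max_len % out_steps                # 3

old_len = max_len + remainder                  # 16 -> 16 % 5 == 1, still ragged
new_len = max_len + (out_steps - remainder)    # 15 -> 15 % 5 == 0, as required

batch = [np.random.rand(80, t) for t in (7, 12)]   # hypothetical 80-bin mels
padded = np.stack([_pad_tensor(x, new_len) for x in batch])
print(old_len % out_steps, new_len % out_steps, padded.shape)   # 1 0 (2, 80, 15)

One edge the diff leaves alone: when remainder is already 0, the fixed expression still pads a full extra out_steps frames rather than none.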
@@ -35,7 +35,7 @@ def _pad_stop_target(x, length):
def prepare_stop_target(inputs, out_steps):
    max_len = max((x.shape[0] for x in inputs)) + 1  # zero-frame
    remainder = max_len % out_steps
-    return np.stack([_pad_stop_target(x, max_len + remainder) for x in inputs])
+    return np.stack([_pad_stop_target(x, max_len + (out_steps - remainder)) for x in inputs])


def pad_per_step(inputs, pad_len):
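prepare_stop_target gets the same rounding fix, applied along axis 0 since stop targets are one value per frame. A toy usage sketch; padding with 1.0 is an assumption here (the body of _pad_stop_target is not shown in the hunk), on the convention that padded frames should read as "stopped":

import numpy as np

stop_targets = [np.zeros(7), np.zeros(12)]     # toy 1-D per-frame targets
out_steps = 5

max_len = max(x.shape[0] for x in stop_targets) + 1   # 13, incl. zero-frame
remainder = max_len % out_steps                       # 3
pad_len = max_len + (out_steps - remainder)           # 15, a multiple of 5

padded = np.stack([np.pad(x, (0, pad_len - x.shape[0]),
                          mode='constant', constant_values=1.0)  # assumed pad value
                   for x in stop_targets])
print(padded.shape, padded[:, -1])             # (2, 15) [1. 1.]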