mirror of https://github.com/coqui-ai/TTS.git
Lr update
commit 235ce071c6
parent 7304978be3
@@ -52,6 +52,8 @@ class LJSpeechDataset(Dataset):
        keys = list()

        text = [d['text'] for d in batch]
        text_lenghts = [len(x) for x in text]
        max_text_len = np.max(text_lenghts)
        wav = [d['wav'] for d in batch]

        # PAD sequences with largest length of the batch
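The hunk above gathers the pieces of a batch collate step: text strings, their lengths, and raw waveforms, before padding everything to the longest item in the batch. A minimal, self-contained sketch of that padding logic follows; the function name and the exact field layout of each batch item are assumptions for illustration, not the repository's actual collate_fn.

    import numpy as np

    def collate_sketch(batch, outputs_per_step=5):
        # Gather fields from each dataset item; the 'text'/'wav' keys mirror the hunk above.
        text = [d['text'] for d in batch]
        wav = [d['wav'] for d in batch]

        text_lenghts = [len(x) for x in text]
        max_text_len = np.max(text_lenghts)

        # Pad each text sequence with zeros up to the longest one in the batch.
        text = np.stack([np.pad(x, (0, max_text_len - len(x)), mode='constant')
                         for x in text])

        # Pad waveforms to the batch maximum, rounded up to a multiple of
        # outputs_per_step so the decoder can emit several frames per step.
        max_wav_len = max(len(w) for w in wav)
        pad = (-max_wav_len) % outputs_per_step
        wav = np.stack([np.pad(w, (0, max_wav_len + pad - len(w)), mode='constant')
                        for w in wav])

        return text, np.array(text_lenghts), wav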
train.py | 4 ++--
@@ -97,7 +97,7 @@ def main(args):
     else:
         criterion = nn.L1Loss()
 
-    n_priority_freq = int(3000 / (c.sample_rate * 0.5) * c.num_freq)
+    n_priority_freq = int(3000 / (c.sample_rate * 0.5) * c.num_freq)
 
     #lr_scheduler = ReduceLROnPlateau(optimizer, factor=c.lr_decay,
     #                                 patience=c.lr_patience, verbose=True)
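For context, n_priority_freq counts how many linear-spectrogram bins sit below 3 kHz: 3000 / (sample_rate / 2) gives the fraction of the frequency axis, multiplied by num_freq bins. A common way such a value is used, sketched below with assumed example values for sample_rate and num_freq, is to weight that low-frequency band extra in the reconstruction loss; the exact loss wiring in this repository is not shown in the diff and is an assumption here.

    import torch
    import torch.nn as nn

    sample_rate = 22050   # assumed example value
    num_freq = 1025       # assumed example value
    n_priority_freq = int(3000 / (sample_rate * 0.5) * num_freq)

    criterion = nn.L1Loss()

    def linear_loss(linear_output, linear_target):
        # Full-band L1 plus an extra L1 term over the bins below ~3 kHz,
        # so errors in the perceptually important band are penalised twice.
        return 0.5 * criterion(linear_output, linear_target) + \
               0.5 * criterion(linear_output[:, :, :n_priority_freq],
                               linear_target[:, :, :n_priority_freq])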
@@ -121,7 +121,7 @@ n_priority_freq = int(3000 / (c.sample_rate * 0.5) * c.num_freq)
             current_step = i + args.restore_step + epoch * len(dataloader) + 1
 
             # setup lr
-            current_lr = lr_decay(init_lr, current_step)
+            current_lr = lr_decay(c.lr, current_step)
             for params_group in optimizer.param_groups:
                 params_group['lr'] = current_lr
 
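The change above switches the schedule's base rate from a local init_lr to the configured c.lr. What lr_decay itself computes is not part of this diff; the sketch below shows one plausible step-based schedule (warmup followed by inverse-square-root decay) purely as an assumption, together with how the training loop applies the result to every optimizer parameter group.

    import numpy as np

    def lr_decay(init_lr, global_step, warmup_steps=4000.0):
        # Linear warmup for the first warmup_steps, then decay ~ 1/sqrt(step);
        # the constant and the shape are assumptions, not this repo's code.
        step = global_step + 1.0
        return init_lr * warmup_steps**0.5 * np.minimum(step * warmup_steps**-1.5,
                                                        step**-0.5)

    # Applied each iteration, as in the hunk above:
    #   current_lr = lr_decay(c.lr, current_step)
    #   for params_group in optimizer.param_groups:
    #       params_group['lr'] = current_lr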