mirror of https://github.com/coqui-ai/TTS.git
Update synthesis.py
This commit is contained in:
parent 35fc7270ff
commit d0c27a9661
@@ -193,14 +193,15 @@ def synthesis(
     # convert outputs to numpy
     # plot results
     wav = None
-    if hasattr(model, "END2END") and model.END2END:
-        wav = model_outputs.squeeze(0)
-    else:
+    model_outputs = model_outputs.squeeze()
+    if model_outputs.ndim == 2:  # [T, C_spec]
         if use_griffin_lim:
             wav = inv_spectrogram(model_outputs, model.ap, CONFIG)
             # trim silence
             if do_trim_silence:
                 wav = trim_silence(wav, model.ap)
+    else:  # [T,]
+        wav = model_outputs
     return_dict = {
         "wav": wav,
         "alignments": alignments,
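The hunk above replaces the old `hasattr(model, "END2END")` flag check with a shape check on the output itself: after squeezing, a 2-D output ([T, C_spec]) is treated as a spectrogram and optionally inverted with Griffin-Lim, while a 1-D output ([T,]) is taken to be a finished waveform from an end-to-end model. A minimal sketch of the new branching, with a hypothetical stub standing in for inv_spectrogram (the real helper takes the model's AudioProcessor and the config, which sit outside this hunk; the 256-sample hop in the stub is arbitrary):

import numpy as np

def inv_spectrogram_stub(spec):
    # hypothetical stand-in: map a [T, C_spec] spectrogram to a waveform
    return np.zeros(spec.shape[0] * 256)

def to_wav(model_outputs, use_griffin_lim=True):
    # mirrors the new shape-based branching in synthesis()
    wav = None
    model_outputs = model_outputs.squeeze()
    if model_outputs.ndim == 2:  # [T, C_spec]: a spectrogram
        if use_griffin_lim:
            wav = inv_spectrogram_stub(model_outputs)
            # the real code can also run trim_silence(wav, model.ap) here
    else:  # [T,]: already a waveform
        wav = model_outputs
    return wav

print(to_wav(np.zeros((1, 50, 80))).shape)  # (12800,) via the stub
print(to_wav(np.zeros((1, 22050))).shape)   # (22050,) passed through

One caveat of the new form: squeeze() with no argument drops every size-1 axis, so a degenerate one-frame [1, 1, C_spec] spectrogram would collapse to 1-D and take the waveform branch, whereas the old squeeze(0) removed only the batch dimension.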
@@ -42,31 +42,3 @@ def gradual_training_scheduler(global_step, config):
         if global_step * num_gpus >= values[0]:
             new_values = values
     return new_values[1], new_values[2]
-
-
-def lr_decay(init_lr, global_step, warmup_steps):
-    r"""from https://github.com/r9y9/tacotron_pytorch/blob/master/train.py
-    It is only being used by the Speaker Encoder trainer."""
-    warmup_steps = float(warmup_steps)
-    step = global_step + 1.0
-    lr = init_lr * warmup_steps**0.5 * np.minimum(step * warmup_steps**-1.5, step**-0.5)
-    return lr
-
-
-# pylint: disable=dangerous-default-value
-def set_weight_decay(model, weight_decay, skip_list={"decoder.attention.v", "rnn", "lstm", "gru", "embedding"}):
-    """
-    Skip biases, BatchNorm parameters, rnns.
-    and attention projection layer v
-    """
-    decay = []
-    no_decay = []
-    for name, param in model.named_parameters():
-        if not param.requires_grad:
-            continue
-
-        if len(param.shape) == 1 or any((skip_name in name for skip_name in skip_list)):
-            no_decay.append(param)
-        else:
-            decay.append(param)
-    return [{"params": no_decay, "weight_decay": 0.0}, {"params": decay, "weight_decay": weight_decay}]
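Both deleted helpers were self-contained: lr_decay is the Noam-style schedule from r9y9/tacotron_pytorch (linear warmup up to warmup_steps, then inverse-square-root decay), and set_weight_decay splits a model's parameters into two optimizer groups so that 1-D tensors (biases, norm weights) and anything whose name matches the skip list are exempt from weight decay. A sketch of how they wire into a PyTorch optimizer; the toy model and hyperparameter values are illustrative, and the original's mutable-default skip_list is swapped for a tuple:

import numpy as np
import torch

def lr_decay(init_lr, global_step, warmup_steps):
    # Noam schedule: ramps to init_lr at step == warmup_steps, then ~ step**-0.5
    warmup_steps = float(warmup_steps)
    step = global_step + 1.0
    return init_lr * warmup_steps**0.5 * np.minimum(step * warmup_steps**-1.5, step**-0.5)

def set_weight_decay(model, weight_decay, skip_list=("decoder.attention.v", "rnn", "lstm", "gru", "embedding")):
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if len(param.shape) == 1 or any(skip_name in name for skip_name in skip_list):
            no_decay.append(param)  # biases, norm parameters, skipped modules
        else:
            decay.append(param)
    return [{"params": no_decay, "weight_decay": 0.0},
            {"params": decay, "weight_decay": weight_decay}]

class ToyModel(torch.nn.Module):  # illustrative only
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 8)  # weight decays, bias does not
        self.lstm = torch.nn.LSTM(8, 8)      # "lstm" is in skip_list: no decay

optimizer = torch.optim.Adam(set_weight_decay(ToyModel(), weight_decay=1e-6), lr=1e-3)

# lr is tiny at step 0, peaks at warmup_steps, then decays
for step in (0, 2000, 4000, 40000):
    print(step, lr_decay(1e-3, step, warmup_steps=4000))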