mirror of https://github.com/coqui-ai/TTS.git
add start char but remove end char
This commit is contained in:
parent d6307fbb7f
commit f96945443e
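In plain terms: with this commit the ID sequences built by the text utilities start with the start-of-sequence character '^', and the end-of-sequence character '~' is no longer appended. A schematic of the returned list before and after the change (a sketch only; the numeric ids are placeholders, not values from the real symbol tables):

# Placeholder illustration of the returned sequence shape (ids are made up):
# before this commit:  sequence == [11, 42, 7, ..., _symbol_to_id['~']]
# after this commit:   sequence == [_phonemes_to_id['^'], 11, 42, 7, ...]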
@@ -31,6 +31,7 @@ def synthesis(model, text, CONFIG, use_cuda, ap, truncated=False):
     chars_var = torch.from_numpy(seq).unsqueeze(0)
     if use_cuda:
         chars_var = chars_var.cuda()
+    # chars_var = chars_var[:-1]
     if truncated:
         decoder_output, postnet_output, alignments, stop_tokens = model.inference_truncated(
             chars_var.long())
@@ -44,8 +44,7 @@ def phoneme_to_sequence(text, cleaner_names, language):
     '''
     TODO: This ignores punctuations
     '''
-    # sequence = [_phonemes_to_id['^']]
-    sequence = []
+    sequence = [_phonemes_to_id['^']]
     clean_text = _clean_text(text, cleaner_names)
     phonemes = text2phone(clean_text, language)
     # print(phonemes.replace('|', ''))
@@ -54,7 +53,7 @@ def phoneme_to_sequence(text, cleaner_names, language):
     for phoneme in phonemes.split('|'):
         sequence += _phoneme_to_sequence(phoneme)
     # Append EOS char
-    sequence.append(_phonemes_to_id['~'])
+    # sequence.append(_phonemes_to_id['~'])
     return sequence
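Read together, the two phoneme_to_sequence hunks leave the function reading roughly as below. This is only a sketch assembled from the context lines shown above: any lines falling between the hunks are omitted, and the helpers (_clean_text, text2phone, _phoneme_to_sequence) and the _phonemes_to_id table are defined elsewhere in this module.

def phoneme_to_sequence(text, cleaner_names, language):
    '''
    TODO: This ignores punctuations
    '''
    # Start the sequence with the start char instead of an empty list.
    sequence = [_phonemes_to_id['^']]
    clean_text = _clean_text(text, cleaner_names)
    phonemes = text2phone(clean_text, language)
    # print(phonemes.replace('|', ''))
    for phoneme in phonemes.split('|'):
        sequence += _phoneme_to_sequence(phoneme)
    # Append EOS char -- disabled by this commit.
    # sequence.append(_phonemes_to_id['~'])
    return sequence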
@@ -82,7 +81,7 @@ def text_to_sequence(text, cleaner_names):
         List of integers corresponding to the symbols in the text
     '''
     sequence = []
-    # sequence = [_phonemes_to_id['^']]
+    sequence = [_phonemes_to_id['^']]
     # Check for curly braces and treat their contents as ARPAbet:
     while len(text):
         m = _curly_re.match(text)
@@ -95,7 +94,7 @@ def text_to_sequence(text, cleaner_names):
         text = m.group(3)
 
     # Append EOS token
-    sequence.append(_symbol_to_id['~'])
+    # sequence.append(_symbol_to_id['~'])
     return sequence
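For completeness, a rough usage sketch of the new text_to_sequence behaviour. The call below is hypothetical: the cleaner name is only an example, and the exact ids depend on the symbol tables defined elsewhere in this module. Note that, as written in the diff, text_to_sequence now takes its start id from _phonemes_to_id rather than _symbol_to_id.

# Hypothetical usage, not part of the commit:
seq = text_to_sequence("hello world.", ["english_cleaners"])
# The sequence now begins with the start char id, and the EOS id
# (_symbol_to_id['~']) is no longer appended at the end.
assert seq[0] == _phonemes_to_id['^']
assert seq[-1] != _symbol_to_id['~']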