mirror of https://github.com/coqui-ai/TTS.git
Merge pull request #44 from idiap/phoneme-cleaners
Add multilingual phoneme cleaner
commit bd9b21d946
@@ -3,6 +3,7 @@
 # TODO: pick the cleaner for languages dynamically
 
 import re
+from typing import Optional
 
 from anyascii import anyascii
 
@@ -17,35 +18,38 @@ from .french.abbreviations import abbreviations_fr
 _whitespace_re = re.compile(r"\s+")
 
 
-def expand_abbreviations(text, lang="en"):
+def expand_abbreviations(text: str, lang: str = "en") -> str:
     if lang == "en":
         _abbreviations = abbreviations_en
     elif lang == "fr":
         _abbreviations = abbreviations_fr
+    else:
+        msg = f"Language {lang} not supported in expand_abbreviations"
+        raise ValueError(msg)
     for regex, replacement in _abbreviations:
         text = re.sub(regex, replacement, text)
     return text
 
 
-def lowercase(text):
+def lowercase(text: str) -> str:
     return text.lower()
 
 
-def collapse_whitespace(text):
+def collapse_whitespace(text: str) -> str:
     return re.sub(_whitespace_re, " ", text).strip()
 
 
-def convert_to_ascii(text):
+def convert_to_ascii(text: str) -> str:
     return anyascii(text)
 
 
-def remove_aux_symbols(text):
+def remove_aux_symbols(text: str) -> str:
     text = re.sub(r"[\<\>\(\)\[\]\"]+", "", text)
     return text
 
 
-def replace_symbols(text, lang="en"):
-    """Replace symbols based on the lenguage tag.
+def replace_symbols(text: str, lang: Optional[str] = "en") -> str:
+    """Replace symbols based on the language tag.
 
     Args:
         text:
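Previously a call with an unsupported language left _abbreviations unbound, so the loop below it failed with an opaque UnboundLocalError; the new else branch fails fast with a clear message. A minimal sketch of the behaviour (import path taken from the test file in this commit; the exact expansion text is indicative):

    from TTS.tts.utils.text.cleaners import expand_abbreviations

    print(expand_abbreviations("Dr. Smith"))  # "doctor Smith", via the English table
    try:
        expand_abbreviations("Hallo", lang="de")  # no German abbreviation table
    except ValueError as err:
        print(err)  # Language de not supported in expand_abbreviations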
@@ -77,14 +81,14 @@ def replace_symbols(text, lang="en"):
     return text
 
 
-def basic_cleaners(text):
+def basic_cleaners(text: str) -> str:
     """Basic pipeline that lowercases and collapses whitespace without transliteration."""
     text = lowercase(text)
     text = collapse_whitespace(text)
     return text
 
 
-def transliteration_cleaners(text):
+def transliteration_cleaners(text: str) -> str:
     """Pipeline for non-English text that transliterates to ASCII."""
     # text = convert_to_ascii(text)
     text = lowercase(text)
@@ -92,7 +96,7 @@ def transliteration_cleaners(text):
     return text
 
 
-def basic_german_cleaners(text):
+def basic_german_cleaners(text: str) -> str:
     """Pipeline for German text"""
     text = lowercase(text)
     text = collapse_whitespace(text)
@@ -100,7 +104,7 @@ def basic_german_cleaners(text):
 
 
 # TODO: elaborate it
-def basic_turkish_cleaners(text):
+def basic_turkish_cleaners(text: str) -> str:
     """Pipeline for Turkish text"""
     text = text.replace("I", "ı")
     text = lowercase(text)
@@ -108,7 +112,7 @@ def basic_turkish_cleaners(text):
     return text
 
 
-def english_cleaners(text):
+def english_cleaners(text: str) -> str:
     """Pipeline for English text, including number and abbreviation expansion."""
     # text = convert_to_ascii(text)
     text = lowercase(text)
@@ -121,8 +125,12 @@ def english_cleaners(text):
     return text
 
 
-def phoneme_cleaners(text):
-    """Pipeline for phonemes mode, including number and abbreviation expansion."""
+def phoneme_cleaners(text: str) -> str:
+    """Pipeline for phonemes mode, including number and abbreviation expansion.
+
+    NB: This cleaner converts numbers into English words, for other languages
+    use multilingual_phoneme_cleaners().
+    """
     text = en_normalize_numbers(text)
     text = expand_abbreviations(text)
     text = replace_symbols(text)
@@ -131,7 +139,15 @@ def phoneme_cleaners(text):
     return text
 
 
-def french_cleaners(text):
+def multilingual_phoneme_cleaners(text: str) -> str:
+    """Pipeline for phonemes mode, including number and abbreviation expansion."""
+    text = replace_symbols(text, lang=None)
+    text = remove_aux_symbols(text)
+    text = collapse_whitespace(text)
+    return text
+
+
+def french_cleaners(text: str) -> str:
     """Pipeline for French text. There is no need to expand numbers, phonemizer already does that"""
     text = expand_abbreviations(text, lang="fr")
     text = lowercase(text)
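Unlike phoneme_cleaners, the new multilingual_phoneme_cleaners skips en_normalize_numbers and expand_abbreviations entirely: digits are left for the language-aware phonemizer to verbalize, and only symbols and whitespace are normalized (the copied docstring overstates what it does). A quick sketch, reusing the assertions from the test file added further down:

    from TTS.tts.utils.text.cleaners import multilingual_phoneme_cleaners, phoneme_cleaners

    assert phoneme_cleaners("1") == "one"                       # digits become English words
    assert multilingual_phoneme_cleaners("1:") == "1,"          # digits kept, ":" -> ","
    assert multilingual_phoneme_cleaners("(Hello)") == "Hello"  # aux symbols stripped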
@@ -141,7 +157,7 @@ def french_cleaners(text):
     return text
 
 
-def portuguese_cleaners(text):
+def portuguese_cleaners(text: str) -> str:
     """Basic pipeline for Portuguese text. There is no need to expand abbreviation and
     numbers, phonemizer already does that"""
     text = lowercase(text)
@@ -157,7 +173,7 @@ def chinese_mandarin_cleaners(text: str) -> str:
     return text
 
 
-def multilingual_cleaners(text):
+def multilingual_cleaners(text: str) -> str:
     """Pipeline for multilingual text"""
     text = lowercase(text)
     text = replace_symbols(text, lang=None)
@@ -166,7 +182,7 @@ def multilingual_cleaners(text):
     return text
 
 
-def no_cleaners(text):
+def no_cleaners(text: str) -> str:
     # remove newline characters
     text = text.replace("\n", "")
     return text
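The recipe hunks below reference cleaners by name in their configs. The name-to-function resolution is an attribute lookup on this cleaners module; a hedged sketch of the mechanism (resolve_cleaner is illustrative, not the library's actual helper):

    from TTS.tts.utils.text import cleaners

    def resolve_cleaner(name: str):
        # e.g. name = "multilingual_phoneme_cleaners" from a recipe config
        return getattr(cleaners, name)

    clean = resolve_cleaner("multilingual_phoneme_cleaners")
    assert clean(" (Bonjour)  le  monde ") == "Bonjour le monde"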
@@ -30,7 +30,7 @@ config = AlignTTSConfig(
     run_eval=True,
     test_delay_epochs=-1,
     epochs=1000,
-    text_cleaner="phoneme_cleaners",
+    text_cleaner="multilingual_phoneme_cleaners",
     use_phonemes=False,
     phoneme_language="de",
     phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
@@ -40,7 +40,7 @@ config = GlowTTSConfig(
     run_eval=True,
     test_delay_epochs=-1,
     epochs=1000,
-    text_cleaner="phoneme_cleaners",
+    text_cleaner="multilingual_phoneme_cleaners",
     use_phonemes=True,
     phoneme_language="de",
     phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
@@ -45,7 +45,7 @@ config = SpeedySpeechConfig(
     test_delay_epochs=-1,
     epochs=1000,
     min_audio_len=11050,  # need to up min_audio_len to avoid speedy speech error
-    text_cleaner="phoneme_cleaners",
+    text_cleaner="multilingual_phoneme_cleaners",
     use_phonemes=True,
     phoneme_language="de",
     phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
@@ -49,7 +49,7 @@ config = Tacotron2Config(  # This is the config that is saved for the future use
     gradual_training=[[0, 6, 64], [10000, 4, 32], [50000, 3, 32], [100000, 2, 32]],
     double_decoder_consistency=True,
     epochs=1000,
-    text_cleaner="phoneme_cleaners",
+    text_cleaner="multilingual_phoneme_cleaners",
     use_phonemes=True,
     phoneme_language="de",
     phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
@@ -40,7 +40,7 @@ config = VitsConfig(
     run_eval=True,
     test_delay_epochs=-1,
     epochs=1000,
-    text_cleaner="phoneme_cleaners",
+    text_cleaner="multilingual_phoneme_cleaners",
     use_phonemes=True,
     phoneme_language="de",
     phoneme_cache_path=os.path.join(output_path, "phoneme_cache"),
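All five German recipes above make the same one-line swap: the English-only phoneme_cleaners was verbalizing numbers in English inside otherwise German (phoneme_language="de") pipelines. A trimmed sketch of the resulting pattern, showing only the relevant fields:

    from TTS.tts.configs.vits_config import VitsConfig

    config = VitsConfig(
        text_cleaner="multilingual_phoneme_cleaners",  # was "phoneme_cleaners"
        use_phonemes=True,
        phoneme_language="de",
        # ...remaining fields as in the full recipe...
    )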
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-from TTS.tts.utils.text.cleaners import english_cleaners, phoneme_cleaners
+from TTS.tts.utils.text.cleaners import english_cleaners, multilingual_phoneme_cleaners, phoneme_cleaners
 
 
 def test_time() -> None:
@@ -19,3 +19,8 @@ def test_currency() -> None:
 def test_expand_numbers() -> None:
     assert phoneme_cleaners("-1") == "minus one"
     assert phoneme_cleaners("1") == "one"
+
+
+def test_multilingual_phoneme_cleaners() -> None:
+    assert multilingual_phoneme_cleaners("(Hello)") == "Hello"
+    assert multilingual_phoneme_cleaners("1:") == "1,"