Jupyter Notebook for phoneme coverage analysis
This Jupyter notebook checks the dataset configured in config.json for phoneme coverage. As mentioned at https://github.com/mozilla/TTS/wiki/Dataset#what-makes-a-good-dataset, good phoneme coverage is recommended.
Most parameters are taken from the config.json file in the Mozilla TTS repo, so please ensure it is configured correctly for your dataset. This notebook reuses lots of existing code from the TTS repo to ensure future compatibility.
The analysis logic is based on feedback and code snippets from the amazing Neil Stoker (https://discourse.mozilla.org/u/nmstoker/summary), so all credit goes to him.
I provide this notebook without any warranty, but it's hopefully useful for your dataset analysis.
Happy TTS'ing :-)
Thorsten Müller
In [1]:
# set some vars
TTS_PATH = "/home/thorsten/___dev/tts/thorstenMueller"
CONFIG_FILE = "/home/thorsten/___dev/tts/thorstenMueller/TTS/config.json"
CHARS_TO_REMOVE = ".,:!?'"
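If you want to verify up front that your config.json contains the fields this notebook reads, a minimal sanity check could look like the sketch below. This cell is not part of the original notebook and assumes the file is plain JSON without comments; otherwise just rely on TTS's load_config further down.
In [ ]:
# optional sanity check (sketch): make sure config.json defines the keys used below
# assumption: config.json is plain JSON without comments
import json

REQUIRED_KEYS = ["run_name", "run_description", "datasets", "phoneme_language",
                 "text_cleaner", "enable_eos_bos_chars"]

with open(CONFIG_FILE) as f:
    cfg = json.load(f)

missing = [key for key in REQUIRED_KEYS if key not in cfg]
print("Missing config keys: " + (", ".join(missing) if missing else "none"))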
In [2]:
cd $TTS_PATH
/home/thorsten/___dev/tts/thorstenMueller
In [3]:
# import stuff
from TTS.utils.text import *
from TTS.datasets.preprocess import load_meta_data
from TTS.utils.io import load_config
from tqdm import tqdm
from matplotlib import pylab as plt

# extra imports not included in requirements.txt
import collections
import operator
In [4]:
# Load config.json properties
CONFIG = load_config(CONFIG_FILE)

# Load some properties from config.json
CONFIG_METADATA = load_meta_data(CONFIG.datasets)[0]
CONFIG_DATASET = CONFIG.datasets[0]
CONFIG_PHONEME_LANGUAGE = CONFIG.phoneme_language
CONFIG_TEXT_CLEANER = CONFIG.text_cleaner
CONFIG_ENABLE_EOS_BOS_CHARS = CONFIG.enable_eos_bos_chars

# Will be printed on generated output graph
CONFIG_RUN_NAME = CONFIG.run_name
CONFIG_RUN_DESC = CONFIG.run_description
In [5]:
# print some debug information on loaded config values
print(" > Run name: " + CONFIG_RUN_NAME + " (" + CONFIG_RUN_DESC + ")")
print(" > Dataset files: " + str(len(CONFIG_METADATA)))
print(" > Phoneme language: " + CONFIG_PHONEME_LANGUAGE)
print(" > Used text cleaner: " + CONFIG_TEXT_CLEANER)
print(" > Enable eos bos chars: " + str(CONFIG_ENABLE_EOS_BOS_CHARS))
 > Run name: thorsten-de (github.com/thorstenMueller/deep-learning-german-tts)
 > Dataset files: 110
 > Phoneme language: de
 > Used text cleaner: phoneme_cleaners
 > Enable eos bos chars: False
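The extraction loop below relies on the raw text being the first element of each metadata entry. If you are unsure how your preprocessor formats its entries, a quick peek (not in the original notebook) can confirm this:
In [ ]:
# optional: inspect the first metadata entry; the loop below assumes the raw text is at index 0
print(CONFIG_METADATA[0])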
In [6]:
# Get phonemes from metadata
phonemes = []

for phrase in tqdm(CONFIG_METADATA):
    if len(phrase[0]) > 0:
        tmpPhrase = phrase[0].rstrip('\n')
        for removeChar in CHARS_TO_REMOVE:
            tmpPhrase = tmpPhrase.replace(removeChar, "")
        seq = phoneme_to_sequence(tmpPhrase, [CONFIG_TEXT_CLEANER], CONFIG_PHONEME_LANGUAGE, CONFIG_ENABLE_EOS_BOS_CHARS)
        text = sequence_to_phoneme(seq)
        text = text.replace(" ", "")
        phonemes.append(text)
 75%|███████▌  | 83/110 [00:05<00:01, 15.05it/s]
[WARNING] fount 1 utterances containing language switches on lines 1
[WARNING] extra phones may appear in the "de" phoneset
[WARNING] language switch flags have been kept (applying "keep-flags" policy)
100%|██████████| 110/110 [00:07<00:00, 14.99it/s]
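To see what this conversion does for a single phrase, you can run the same helpers on a sentence of your own. This is an illustration only, not part of the original notebook, and the example sentence is made up:
In [ ]:
# illustration only: phonemize a single (made-up) example sentence with the same helpers as above
example = "Das ist ein Beispielsatz."
example_seq = phoneme_to_sequence(example, [CONFIG_TEXT_CLEANER], CONFIG_PHONEME_LANGUAGE, CONFIG_ENABLE_EOS_BOS_CHARS)
print(sequence_to_phoneme(example_seq))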
In [7]:
s = "" phonemeString = s.join(phonemes) d = {} collections._count_elements(d, phonemeString) sorted_d = dict(sorted(d.items(), key=operator.itemgetter(1),reverse=True)) # remove useless keys sorted_d.pop(' ', None) sorted_d.pop('ˈ', None) phonemesSum = len(phonemeString.replace(" ","")) print("Dataset contains " + str(len(sorted_d)) + " different ipa phonemes.") print("Dataset consists of " + str(phonemesSum) + " phonemes")
Dataset contains 39 different ipa phonemes.
Dataset consists of 2620 phonemes
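As a side note, collections._count_elements is a private CPython helper. An equivalent variant using the public collections.Counter API (a sketch, expected to give the same counts) would be:
In [ ]:
# equivalent counting with the public Counter API instead of the private _count_elements helper
counter = collections.Counter(phonemeString)
counter.pop(' ', None)
counter.pop('ˈ', None)
print("Counter finds " + str(len(counter)) + " different ipa phonemes.")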
In [8]:
print("5 rarest phonemes") rareList = dict(sorted(sorted_d.items(), key=operator.itemgetter(1), reverse=False)[:5]) for key, value in rareList.items(): print(key + " --> " + str(value) + " occurrences")
5 rarest phonemes
y --> 1 occurrences
ø --> 2 occurrences
( --> 2 occurrences
) --> 2 occurrences
j --> 4 occurrences
In [9]:
# create plot from analysis result
x = []
y = []

for key, value in sorted_d.items():
    x.append(key)
    y.append(value)

plt.figure(figsize=(50, 50))
plt.title("Phoneme coverage for " + CONFIG_RUN_NAME + " (" + CONFIG_RUN_DESC + ")", fontsize=50)
plt.xticks(fontsize=50)
plt.yticks(fontsize=50)
plt.barh(x, y, align='center', alpha=1.0)
plt.gca().invert_yaxis()
plt.ylabel('phoneme', fontsize=50)
plt.xlabel('occurrences', fontsize=50)

for i, v in enumerate(y):
    plt.text(v + 2, i - .2, str(v), fontsize=20)
    plt.text(v + 2, i + .2, "(" + str(round(100 / phonemesSum * v, 2)) + "%)", fontsize=20)

plt.show()
In [ ]: