coqui-tts/notebooks/dataset_analysis/CheckSpectrograms.ipynb

In [ ]:
%matplotlib inline

from TTS.utils.audio import AudioProcessor
from TTS.tts.utils.visual import plot_spectrogram
from TTS.config import load_config

import IPython.display as ipd
import glob
In [ ]:
from TTS.config.shared_configs import BaseAudioConfig
CONFIG = BaseAudioConfig()
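
If you already have a trained-model config, you can start from its audio settings instead of the library defaults; a minimal sketch, assuming a config.json at a path of your choosing (this is what the `load_config` import above is for):

In [ ]:
# Optional: start from an existing model config instead of the BaseAudioConfig defaults.
# "/path/to/config.json" is a placeholder; point it at your own config file.
# CONFIG = load_config("/path/to/config.json").audio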

✍️ Set these values

In [ ]:
data_path = "/root/wav48_silence_trimmed/"
file_ext = ".flac"

Read audio files

In [ ]:
file_paths = glob.glob(data_path + f"/**/*{file_ext}", recursive=True)

# Change this to the index of the desired file listed below
sample_file_index = 10

SAMPLE_FILE_PATH = file_paths[sample_file_index]

print("File list, by index:")
dict(enumerate(file_paths))

✍️ Set Audio Processor

Play with the AP parameters until you find a good fit with the synthesized speech below.

The default values come from the BaseAudioConfig created above (or from your config.json, if you loaded one instead), so you only need to modify the values below that you'd like to tune.

In [ ]:
tune_params={
 'num_mels': 80,          # In general, you don't need to change this. 
 'fft_size': 2400,        # In general, you don't need to change this.
 'frame_length_ms': 50, 
 'frame_shift_ms': 12.5,
 'sample_rate': 48000,    # This must match the sample rate of the dataset.
 'hop_length': None,       # In general, you don't need to change this.
 'win_length': 1024,      # In general, you don't need to change this.
 'preemphasis': 0.98,     # In general, 0 gives better voice recovery but makes training harder. If your model does not train, try 0.97 - 0.99.
 'min_level_db': -100,
 'ref_level_db': 0,       # The reference level dB; increase it until all background noise is removed from the spectrogram, then lower it again while the synthesized speech below still sounds clean.
 'power': 1.5,            # Change this value and listen to the synthesized voice. 1.2 - 1.5 are reasonable values.
 'griffin_lim_iters': 60, # Quality does not improve for values > 60
 'mel_fmin': 0.0,         # Adjust this and check mel-spectrogram-based voice synthesis below.
 'mel_fmax': 8000.0,      # Adjust this and check mel-spectrogram-based voice synthesis below.
 'do_trim_silence': True  # If your dataset has silence at the beginning or end, this trims it. Check AP.load_wav() below to see if it makes any difference for the loaded audio file.
}

# These options have to be forced off in order to avoid errors about the
# pre-calculated norm stats not matching the options being tuned.
reset={
 'signal_norm': True,  # check this if you want to test normalization parameters.
 'stats_path': None,
 'symmetric_norm': False,
 'max_norm': 1,
 'clip_norm': True,
}

# Override select parts of loaded config with parameters above
tuned_config = CONFIG.copy()
tuned_config.update(reset)
tuned_config.update(tune_params)

AP = AudioProcessor(**tuned_config)

Check audio loading

In [ ]:
wav = AP.load_wav(SAMPLE_FILE_PATH)
ipd.Audio(data=wav, rate=AP.sample_rate) 
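
A quick sanity check on the loaded waveform can catch sample-rate or silence-trimming problems early; a minimal sketch, assuming `wav` is the 1-D NumPy array returned by `AP.load_wav()` above:

In [ ]:
# Duration and peak amplitude of the loaded (and possibly silence-trimmed) waveform.
print(f"Duration: {len(wav) / AP.sample_rate:.2f} s")
print(f"Peak amplitude: {abs(wav).max():.3f}")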

Generate Mel-Spectrogram and Re-synthesis with GL

In [ ]:
AP.power = 1.5
In [ ]:
mel = AP.melspectrogram(wav)
print("Max:", mel.max())
print("Min:", mel.min())
print("Mean:", mel.mean())
plot_spectrogram(mel.T, AP, output_fig=True)

wav_gen = AP.inv_melspectrogram(mel)
ipd.Audio(wav_gen, rate=AP.sample_rate)
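
As a rough consistency check, the number of mel frames should track `len(wav) / hop_length`; a minimal sketch, assuming `AP` exposes the effective `hop_length` it derived from `frame_shift_ms`:

In [ ]:
# Frame-count sanity check: mel has shape (num_mels, num_frames).
print("Mel shape:", mel.shape)
print("Approx. expected frames:", len(wav) // AP.hop_length + 1)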

Generate Linear-Spectrogram and Re-synthesis with GL

In [ ]:
spec = AP.spectrogram(wav)
print("Max:", spec.max())
print("Min:", spec.min())
print("Mean:", spec.mean())
plot_spectrogram(spec.T, AP, output_fig=True)

wav_gen = AP.inv_spectrogram(spec)
ipd.Audio(wav_gen, rate=AP.sample_rate)
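
If the GL resynthesis from the linear spectrogram sounds noticeably better than the mel-based one above, revisit `mel_fmin` / `mel_fmax`. A quick length check confirms both reconstructions line up with the source (assumes `wav` and `wav_gen` from the cells above):

In [ ]:
print("Source samples:", len(wav), "| GL reconstruction samples:", len(wav_gen))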

Compare values for a certain parameter

Optimize your parameters by comparing different values for one parameter at a time.

In [ ]:
from librosa import display
from matplotlib import pylab as plt
import IPython
plt.rcParams['figure.figsize'] = (20.0, 16.0)

def compare_values(attribute, values):
    """
    attribute (str): the name of the AudioProcessor attribute you'd like to test.
    values (list): list of values to compare.
    """
    file = SAMPLE_FILE_PATH
    wavs = []
    for idx, val in enumerate(values):
        # Set the attribute directly on the AudioProcessor instance.
        setattr(AP, attribute, val)
        wav = AP.load_wav(file)
        spec = AP.spectrogram(wav)
        spec_norm = AP.denormalize(spec.T)
        plt.subplot(len(values), 2, 2*idx + 1)
        plt.imshow(spec_norm.T, aspect="auto", origin="lower")
        #         plt.colorbar()
        plt.tight_layout()
        wav_gen = AP.inv_spectrogram(spec)
        wavs.append(wav_gen)
        plt.subplot(len(values), 2, 2*idx + 2)
        display.waveshow(wav, alpha=0.5)
        display.waveshow(wav_gen, alpha=0.25)
        plt.title("{}={}".format(attribute, val))
        plt.tight_layout()
    
    wav = AP.load_wav(file)
    print(" > Ground-truth")
    IPython.display.display(IPython.display.Audio(wav, rate=AP.sample_rate))
    
    for idx, wav_gen in enumerate(wavs):
        val = values[idx]
        print(" > {} = {}".format(attribute, val))
        IPython.display.display(IPython.display.Audio(wav_gen, rate=AP.sample_rate))
In [ ]:
compare_values("preemphasis", [0, 0.5, 0.97, 0.98, 0.99])
In [ ]:
compare_values("ref_level_db", [2, 5, 10, 15, 20, 25, 30, 35, 40])