mirror of https://github.com/coqui-ai/TTS.git
Update Kokoro recipe
This commit is contained in:
parent
c4a5a73f18
commit
88f3255962
|
@@ -1,22 +1,23 @@
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# take the script's parent directory to prefix all the output paths.
|
# take the script's parent directory to prefix all the output paths.
|
||||||
RUN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
RUN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
||||||
|
CORPUS=kokoro-speech-v1_1-tiny
|
||||||
echo $RUN_DIR
|
echo $RUN_DIR
|
||||||
# download LJSpeech dataset
|
if [ \! -d $RUN_DIR/$CORPUS ] ; then
|
||||||
wget http://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
|
echo "$RUN_DIR/$CORPUS doesn't exist."
|
||||||
# extract
|
echo "Follow the instruction of https://github.com/kaiidams/Kokoro-Speech-Dataset to make the corpus."
|
||||||
tar -xjf LJSpeech-1.1.tar.bz2
|
exit 1
|
||||||
|
fi
|
||||||
# create train-val splits
|
# create train-val splits
|
||||||
shuf LJSpeech-1.1/metadata.csv > LJSpeech-1.1/metadata_shuf.csv
|
shuf $RUN_DIR/$CORPUS/metadata.csv > $RUN_DIR/$CORPUS/metadata_shuf.csv
|
||||||
head -n 12000 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_train.csv
|
head -n 8000 $RUN_DIR/$CORPUS/metadata_shuf.csv > $RUN_DIR/$CORPUS/metadata_train.csv
|
||||||
tail -n 1100 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_val.csv
|
tail -n 812 $RUN_DIR/$CORPUS/metadata_shuf.csv > $RUN_DIR/$CORPUS/metadata_val.csv
|
||||||
mv LJSpeech-1.1 $RUN_DIR/
|
|
||||||
rm LJSpeech-1.1.tar.bz2
|
|
||||||
# compute dataset mean and variance for normalization
|
# compute dataset mean and variance for normalization
|
||||||
python TTS/bin/compute_statistics.py $RUN_DIR/tacotron2-DDC.json $RUN_DIR/scale_stats.npy --data_path $RUN_DIR/LJSpeech-1.1/wavs/
|
python TTS/bin/compute_statistics.py $RUN_DIR/tacotron2-DDC.json $RUN_DIR/scale_stats.npy --data_path $RUN_DIR/$CORPUS/wavs/
|
||||||
# training ....
|
# training ....
|
||||||
# change the GPU id if needed
|
# change the GPU id if needed
|
||||||
CUDA_VISIBLE_DEVICES="0" python TTS/bin/train_tacotron.py --config_path $RUN_DIR/tacotron2-DDC.json \
|
CUDA_VISIBLE_DEVICES="0" python TTS/bin/train_tacotron.py --config_path $RUN_DIR/tacotron2-DDC.json \
|
||||||
--coqpit.output_path $RUN_DIR \
|
--coqpit.output_path $RUN_DIR \
|
||||||
--coqpit.datasets.0.path $RUN_DIR/LJSpeech-1.1/ \
|
--coqpit.datasets.0.path $RUN_DIR/$CORPUS \
|
||||||
--coqpit.audio.stats_path $RUN_DIR/scale_stats.npy \
|
--coqpit.audio.stats_path $RUN_DIR/scale_stats.npy \
|
||||||
|
--coqpit.phoneme_cache_path $RUN_DIR/phoneme_cache \
|
|
@@ -2,7 +2,7 @@
|
||||||
"datasets": [
|
"datasets": [
|
||||||
{
|
{
|
||||||
"name": "kokoro",
|
"name": "kokoro",
|
||||||
"path": "./kokoro-speech-v1_1-tiny/",
|
"path": "DEFINE THIS",
|
||||||
"meta_file_train": "metadata.csv",
|
"meta_file_train": "metadata.csv",
|
||||||
"meta_file_val": null
|
"meta_file_val": null
|
||||||
}
|
}
|
||||||
|
@@ -106,8 +106,8 @@
|
||||||
"max_seq_len": 153,
|
"max_seq_len": 153,
|
||||||
"compute_input_seq_cache": false,
|
"compute_input_seq_cache": false,
|
||||||
"use_noise_augment": true,
|
"use_noise_augment": true,
|
||||||
"output_path": "./Models/Kokoro/",
|
"output_path": "DEFINE THIS",
|
||||||
"phoneme_cache_path": "./phoneme_cache/",
|
"phoneme_cache_path": "DEFINE THIS",
|
||||||
"use_phonemes": true,
|
"use_phonemes": true,
|
||||||
"phoneme_language": "ja-jp",
|
"phoneme_language": "ja-jp",
|
||||||
"characters": {
|
"characters": {
|
||||||
|
|
Loading…
Reference in New Issue