From 5afd3d980b740a7e93fc8e754dc9564cb73b5656 Mon Sep 17 00:00:00 2001
From: Eren Golge
Date: Mon, 8 Apr 2019 09:56:16 +0200
Subject: [PATCH] config update

---
 .compute               |  4 ++--
 config.json            |  2 +-
 config_cluster.json    | 18 +++++++++---------
 datasets/preprocess.py |  3 +--
 4 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/.compute b/.compute
index 990886fa..c559bcab 100644
--- a/.compute
+++ b/.compute
@@ -3,5 +3,5 @@ ls ${SHARED_DIR}/data/
 pip3 install https://download.pytorch.org/whl/cu100/torch-1.0.1.post2-cp36-cp36m-linux_x86_64.whl
 yes | apt-get install espeak
 python3 setup.py develop
-python3 distribute.py --config_path config_cluster.json --data_path ${SHARED_DIR}/data/Blizzard/Nancy/ --restore_path ${USER_DIR}/best_model_4467.pth.tar
-# python3 distribute.py --config_path config_cluster.json --data_path ${SHARED_DIR}/data/Blizzard/Nancy/
\ No newline at end of file
+# python3 distribute.py --config_path config_cluster.json --data_path ${SHARED_DIR}/data/Blizzard/Nancy/ --restore_path ${USER_DIR}/best_model_4467.pth.tar
+python3 distribute.py --config_path config_cluster.json --data_path ${USER_DIR}/Mozilla/
\ No newline at end of file
diff --git a/config.json b/config.json
index e6cc8a53..c6cc5d05 100644
--- a/config.json
+++ b/config.json
@@ -60,7 +60,7 @@
     "meta_file_val": "prompts_val.data",  // DATASET-RELATED: metafile for evaluation dataloader.
     "dataset": "nancy",  // DATASET-RELATED: one of TTS.dataset.preprocessors depending on your target dataset. Use "tts_cache" for pre-computed dataset by extract_features.py
     "min_seq_len": 0,  // DATASET-RELATED: minimum text length to use in training
-    "max_seq_len": 120,  // DATASET-RELATED: maximum text length
+    "max_seq_len": 150,  // DATASET-RELATED: maximum text length
     "output_path": "/media/erogol/data_ssd/Data/models/ljspeech_models/",  // DATASET-RELATED: output path for all training outputs.
     "num_loader_workers": 8,  // number of training data loader processes. Don't set it too big. 4-8 are good values.
     "num_val_loader_workers": 4,  // number of evaluation data loader processes.
diff --git a/config_cluster.json b/config_cluster.json
index 0af1d556..4f4248f3 100644
--- a/config_cluster.json
+++ b/config_cluster.json
@@ -1,12 +1,12 @@
 {
-    "run_name": "nancy-bn",
-    "run_description": "Finetune 4467. No ending character ^. and no starting character",
+    "run_name": "mozilla-fattn",
+    "run_description": "Mozilla with 0 batch group size and fattn",
 
     "audio":{
         // Audio processing parameters
         "num_mels": 80,  // size of the mel spec frame.
         "num_freq": 1025,  // number of stft frequency levels. Size of the linear spectrogram frame.
-        "sample_rate": 16000,  // DATASET-RELATED: wav sample-rate. If different than the original data, it is resampled.
+        "sample_rate": 22050,  // DATASET-RELATED: wav sample-rate. If different than the original data, it is resampled.
         "frame_length_ms": 50,  // stft window length in ms.
         "frame_shift_ms": 12.5,  // stft window hop-length in ms.
         "preemphasis": 0.98,  // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no pre-emphasis.
@@ -41,9 +41,9 @@
     "memory_size": 5,  // ONLY TACOTRON - memory queue size used to queue network predictions to feed autoregressive connection. Useful if r < 5.
     "attention_norm": "softmax",  // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron.
     "prenet_type": "original",  // ONLY TACOTRON2 - "original" or "bn".
-    "use_forward_attn": false,  // ONLY TACOTRON2 - if it uses forward attention. In general, it aligns faster.
+ "use_forward_attn": true, // ONLY TACOTRON2 - if it uses forward attention. In general, it aligns faster. - "batch_size": 32, // Batch size for training. Lower values than 32 might cause hard to learn attention. + "batch_size": 16, // Batch size for training. Lower values than 32 might cause hard to learn attention. "eval_batch_size":16, "r": 1, // Number of frames to predict for step. "wd": 0.000001, // Weight decay weight. @@ -51,16 +51,16 @@ "save_step": 1000, // Number of training steps expected to save traning stats and checkpoints. "print_step": 10, // Number of steps to log traning on console. "tb_model_param_stats": true, // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging. - "batch_group_size": 8, //Number of batches to shuffle after bucketing. + "batch_group_size": 0, //Number of batches to shuffle after bucketing. - "run_eval": true, + "run_eval": false, "test_delay_epochs": 10, //Until attention is aligned, testing only wastes computation time. "data_path": "/media/erogol/data_ssd/Data/LJSpeech-1.1", // DATASET-RELATED: can overwritten from command argument "meta_file_train": "prompts_train.data", // DATASET-RELATED: metafile for training dataloader. "meta_file_val": "prompts_val.data", // DATASET-RELATED: metafile for evaluation dataloader. - "dataset": "nancy", // DATASET-RELATED: one of TTS.dataset.preprocessors depending on your target dataset. Use "tts_cache" for pre-computed dataset by extract_features.py + "dataset": "mozilla", // DATASET-RELATED: one of TTS.dataset.preprocessors depending on your target dataset. Use "tts_cache" for pre-computed dataset by extract_features.py "min_seq_len": 0, // DATASET-RELATED: minimum text length to use in training - "max_seq_len": 120, // DATASET-RELATED: maximum text length + "max_seq_len": 150, // DATASET-RELATED: maximum text length "output_path": "../keep/", // DATASET-RELATED: output path for all training outputs. "num_loader_workers": 8, // number of training data loader processes. Don't set it too big. 4-8 are good values. "num_val_loader_workers": 4, // number of evaluation data loader processes. diff --git a/datasets/preprocess.py b/datasets/preprocess.py index b4237c60..6d6c0833 100644 --- a/datasets/preprocess.py +++ b/datasets/preprocess.py @@ -45,11 +45,10 @@ def tweb(root_path, meta_file): def mozilla(root_path, meta_files): """Normalizes Mozilla meta data files to TTS format""" import glob - meta_files = glob.glob(root_path + "**/batch*.txt", recursive=True) + meta_files = glob.glob(root_path + "/**/batch*.txt", recursive=True) folders = [os.path.dirname(f.strip()) for f in meta_files] items = [] for idx, meta_file in enumerate(meta_files): - print(" | > {}".format(meta_file)) folder = folders[idx] txt_file = os.path.join(root_path, meta_file) with open(txt_file, 'r') as ttf: