mirror of https://github.com/coqui-ai/TTS.git
fix pylint once again
parent 80f5e39e56
commit d158ec0806
@@ -28,7 +28,6 @@ from TTS.utils.generic_utils import (
 )
 from TTS.vocoder.datasets.wavernn_dataset import WaveRNNDataset
 from TTS.vocoder.datasets.preprocess import (
-    find_feat_files,
     load_wav_data,
     load_wav_feat_data
 )

@@ -1,7 +1,6 @@
 import torch
 import numpy as np
 from torch.utils.data import Dataset
-from multiprocessing import Manager
 
 
 class WaveRNNDataset(Dataset):

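Both hunks above drop an import that is not used anywhere else in the diff, which is the pattern pylint's unused-import check (W0611) flags and what the commit title refers to. A minimal sketch of running just that check locally, assuming pylint is installed and that the second hunk is TTS/vocoder/datasets/wavernn_dataset.py (suggested by the import in the first hunk):

# Sketch: run only pylint's unused-import check on the touched module.
# Note: pylint.lint.Run() calls sys.exit() when it finishes.
from pylint.lint import Run

Run([
    "--disable=all",            # turn every check off ...
    "--enable=unused-import",   # ... then re-enable just W0611
    "TTS/vocoder/datasets/wavernn_dataset.py",  # assumed path of the second hunk
])
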
@@ -17,7 +17,7 @@ def test_wavernn():
         feat_dims=80,
         compute_dims=128,
         res_out_dims=128,
-        res_blocks=10,
+        num_res_blocks=10,
         hop_length=256,
         sample_rate=22050,
     )

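The only change in this test is renaming the res_blocks keyword to num_res_blocks so the call matches the model constructor. A small, generic way to catch this kind of keyword drift; everything below is a sketch, and fake_wavernn is a stand-in rather than the repo's class:

import inspect

def unknown_keywords(fn, kwargs):
    """Return the keyword names in `kwargs` that `fn` does not accept."""
    params = inspect.signature(fn).parameters
    if any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return set()  # fn takes **kwargs, so any name is accepted
    return set(kwargs) - set(params)

# Stand-in constructor; the real WaveRNN signature is an assumption here.
def fake_wavernn(num_res_blocks=10, feat_dims=80, compute_dims=128):
    pass

print(unknown_keywords(fake_wavernn, {"res_blocks": 10}))      # {'res_blocks'}: stale name
print(unknown_keywords(fake_wavernn, {"num_res_blocks": 10}))  # set(): up to date
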
@@ -23,7 +23,7 @@ test_quant_feat_path = os.path.join(test_data_path, "quant")
 ok_ljspeech = os.path.exists(test_data_path)
 
 
-def wavernn_dataset_case(batch_size, seq_len, hop_len, pad, mode, num_workers):
+def wavernn_dataset_case(batch_size, seq_len, hop_len, pad, mode, mulaw, num_workers):
     """ run dataloader with given parameters and check conditions """
     ap = AudioProcessor(**C.audio)
 

@@ -42,6 +42,7 @@ def wavernn_dataset_case(batch_size, seq_len, hop_len, pad, mode, num_workers):
                              hop_len=hop_len,
                              pad=pad,
                              mode=mode,
+                             mulaw=mulaw
                              )
     # sampler = DistributedSampler(dataset) if num_gpus > 1 else None
     loader = DataLoader(dataset,

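The new mulaw flag is threaded into the dataset constructor. In WaveRNN-style pipelines this flag usually selects mu-law companding of the target waveform before it is quantized, when mode is a bit depth (9 or 10 in the tests below). A minimal sketch of that transform, using the textbook mu-law formula rather than this repo's exact helper:

import numpy as np

def mulaw_encode(wav, n_classes):
    """Mu-law compand a [-1, 1] waveform and quantize it into n_classes labels."""
    mu = float(n_classes) - 1.0
    # standard mu-law companding: sign(x) * ln(1 + mu*|x|) / ln(1 + mu)
    companded = np.sign(wav) * np.log1p(mu * np.abs(wav)) / np.log1p(mu)
    # map [-1, 1] onto integer labels [0, n_classes - 1]
    return np.floor((companded + 1.0) / 2.0 * mu + 0.5).astype(np.int64)

def mulaw_decode(labels, n_classes):
    """Invert mulaw_encode back to an approximate [-1, 1] waveform."""
    mu = float(n_classes) - 1.0
    companded = 2.0 * labels.astype(np.float64) / mu - 1.0
    return np.sign(companded) / mu * ((1.0 + mu) ** np.abs(companded) - 1.0)

# e.g. mode=10 means 10-bit targets, i.e. 2 ** 10 = 1024 classes
x = np.linspace(-1.0, 1.0, 5)
labels = mulaw_encode(x, 2 ** 10)
print(labels, mulaw_decode(labels, 2 ** 10))
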
@@ -78,13 +79,13 @@ def wavernn_dataset_case(batch_size, seq_len, hop_len, pad, mode, num_workers):
 def test_parametrized_wavernn_dataset():
     ''' test dataloader with different parameters '''
     params = [
-        [16, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, 10, 0],
+        [16, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, 10, True, 0],
-        [16, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, "mold", 4],
+        [16, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, "mold", False, 4],
-        [1, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, 9, 0],
+        [1, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, 9, False, 0],
-        [1, C.audio['hop_length'], C.audio['hop_length'], 2, 10, 0],
+        [1, C.audio['hop_length'], C.audio['hop_length'], 2, 10, True, 0],
-        [1, C.audio['hop_length'], C.audio['hop_length'], 2, "mold", 0],
+        [1, C.audio['hop_length'], C.audio['hop_length'], 2, "mold", False, 0],
-        [1, C.audio['hop_length'] * 5, C.audio['hop_length'], 4, 10, 2],
+        [1, C.audio['hop_length'] * 5, C.audio['hop_length'], 4, 10, False, 2],
-        [1, C.audio['hop_length'] * 5, C.audio['hop_length'], 2, "mold", 0],
+        [1, C.audio['hop_length'] * 5, C.audio['hop_length'], 2, "mold", False, 0],
     ]
     for param in params:
         print(param)
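Each parameter row gains a boolean in the sixth position so it lines up with the new mulaw argument of wavernn_dataset_case. The loop presumably unpacks each row positionally; a sketch of that continuation (not the repo's exact test body):

# hypothetical continuation of the loop above
# assumed order: batch_size, seq_len, hop_len, pad, mode, mulaw, num_workers
for param in params:
    print(param)
    wavernn_dataset_case(*param)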