mirror of https://github.com/coqui-ai/TTS.git
test(freevc): remove unused code
commit d488441b75
parent fce3137e0d
@@ -22,15 +22,12 @@ BATCH_SIZE = 3
 
 class TestFreeVC(unittest.TestCase):
     def _create_inputs(self, config, batch_size=2):
-        input_dummy = torch.rand(batch_size, 30 * config.audio["hop_length"]).to(device)
-        input_lengths = torch.randint(100, 30 * config.audio["hop_length"], (batch_size,)).long().to(device)
-        input_lengths[-1] = 30 * config.audio["hop_length"]
         spec = torch.rand(batch_size, 30, config.audio["filter_length"] // 2 + 1).to(device)
         mel = torch.rand(batch_size, 30, config.audio["n_mel_channels"]).to(device)
         spec_lengths = torch.randint(20, 30, (batch_size,)).long().to(device)
         spec_lengths[-1] = spec.size(2)
         waveform = torch.rand(batch_size, spec.size(2) * config.audio["hop_length"]).to(device)
-        return input_dummy, input_lengths, mel, spec, spec_lengths, waveform
+        return mel, spec, spec_lengths, waveform
 
     @staticmethod
     def _create_inputs_inference():
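Note: for readability, this is how the trimmed _create_inputs helper reads after the hunk above, assembled from its context and added lines (it is not an additional change):

def _create_inputs(self, config, batch_size=2):
    # Random spec/mel tensors, per-item spec lengths, and a matching waveform,
    # all sized from the audio config and moved to the test device.
    spec = torch.rand(batch_size, 30, config.audio["filter_length"] // 2 + 1).to(device)
    mel = torch.rand(batch_size, 30, config.audio["n_mel_channels"]).to(device)
    spec_lengths = torch.randint(20, 30, (batch_size,)).long().to(device)
    spec_lengths[-1] = spec.size(2)
    waveform = torch.rand(batch_size, spec.size(2) * config.audio["hop_length"]).to(device)
    return mel, spec, spec_lengths, waveform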
@@ -38,15 +35,6 @@ class TestFreeVC(unittest.TestCase):
         target_wav = torch.rand(16000)
         return source_wav, target_wav
 
-    @staticmethod
-    def _check_parameter_changes(model, model_ref):
-        count = 0
-        for param, param_ref in zip(model.parameters(), model_ref.parameters()):
-            assert (param != param_ref).any(), "param {} with shape {} not updated!! \n{}\n{}".format(
-                count, param.shape, param, param_ref
-            )
-            count += 1
-
     def test_methods(self):
         config = FreeVCConfig()
         model = FreeVC(config).to(device)
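Note: the deleted _check_parameter_changes helper implemented a common pattern for asserting that a training step updates every weight: snapshot the model, run one optimizer step, then compare parameters. It was unused in this file, hence the removal above. A minimal, model-agnostic sketch of that pattern; the train_step call in the usage comment is hypothetical and not part of this test file:

def assert_all_parameters_updated(model, model_ref):
    # Fail if any parameter is identical to its counterpart in the pre-step snapshot.
    for i, (param, param_ref) in enumerate(zip(model.parameters(), model_ref.parameters())):
        assert (param != param_ref).any(), f"param {i} with shape {param.shape} not updated"


# Usage sketch:
#   model_ref = copy.deepcopy(model)  # snapshot before training
#   train_step(model)                 # hypothetical: one forward/backward/optimizer step
#   assert_all_parameters_updated(model, model_ref)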
@@ -69,7 +57,7 @@ class TestFreeVC(unittest.TestCase):
         model.train()
         print(" > Num parameters for FreeVC model:%s" % (count_parameters(model)))
 
-        _, _, mel, spec, spec_lengths, waveform = self._create_inputs(config, batch_size)
+        mel, spec, spec_lengths, waveform = self._create_inputs(config, batch_size)
 
         wavlm_vec = model.extract_wavlm_features(waveform)
         wavlm_vec_lengths = torch.ones(batch_size, dtype=torch.long)
@@ -86,7 +74,7 @@ class TestFreeVC(unittest.TestCase):
         model = FreeVC(config).to(device)
         model.eval()
 
-        _, _, mel, _, _, waveform = self._create_inputs(config, batch_size)
+        mel, _, _, waveform = self._create_inputs(config, batch_size)
 
         wavlm_vec = model.extract_wavlm_features(waveform)
         wavlm_vec_lengths = torch.ones(batch_size, dtype=torch.long)
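Note: a standalone sanity-check sketch of the updated call path. The import paths and the device/BATCH_SIZE setup are assumptions that mirror the test module; the model and feature-extraction calls come from the hunks above.

import torch

from TTS.vc.configs.freevc_config import FreeVCConfig  # assumed import path
from TTS.vc.models.freevc import FreeVC  # assumed import path

device = "cuda:0" if torch.cuda.is_available() else "cpu"  # assumed to mirror the test setup
BATCH_SIZE = 3

config = FreeVCConfig()
model = FreeVC(config).to(device)
model.eval()

# A batch of random waveforms, sized from the audio config as in _create_inputs.
waveform = torch.rand(BATCH_SIZE, 30 * config.audio["hop_length"]).to(device)

# Extract WavLM content features and build their lengths, as the updated test does.
wavlm_vec = model.extract_wavlm_features(waveform)
wavlm_vec_lengths = torch.ones(BATCH_SIZE, dtype=torch.long)
print(wavlm_vec.shape, wavlm_vec_lengths)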