From 77bfb881d74e753e25e2b40f8fa389620d169eca Mon Sep 17 00:00:00 2001
From: SanjaESC
Date: Sun, 12 Jul 2020 14:07:44 +0200
Subject: [PATCH] tacotrongst test + test fixes

---
 mozilla_voice_tts/tts/models/tacotron2.py | 12 ++---
 tests/outputs/dummy_model_config.json     |  1 +
 tests/test_tacotron2_model.py             | 56 +++++++++++++++++++++++
 3 files changed, 63 insertions(+), 6 deletions(-)

diff --git a/mozilla_voice_tts/tts/models/tacotron2.py b/mozilla_voice_tts/tts/models/tacotron2.py
index 7effdb3d..18eb17d1 100644
--- a/mozilla_voice_tts/tts/models/tacotron2.py
+++ b/mozilla_voice_tts/tts/models/tacotron2.py
@@ -95,14 +95,14 @@ class Tacotron2(TacotronAbstract):
         if self.num_speakers > 1:
             embedded_speakers = self.speaker_embedding(speaker_ids)[:, None]
             embedded_speakers = embedded_speakers.repeat(1, encoder_outputs.size(1), 1)
-            if hasattr(self, 'gst'):
+            if self.gst:
                 # B x gst_dim
                 encoder_outputs, embedded_gst = self.compute_gst(encoder_outputs, mel_specs)
                 encoder_outputs = torch.cat([encoder_outputs, embedded_gst, embedded_speakers], dim=-1)
             else:
                 encoder_outputs = torch.cat([encoder_outputs, embedded_speakers], dim=-1)
         else:
-            if hasattr(self, 'gst'):
+            if self.gst:
                 # B x gst_dim
                 encoder_outputs, embedded_gst = self.compute_gst(encoder_outputs, mel_specs)
                 encoder_outputs = torch.cat([encoder_outputs, embedded_gst], dim=-1)
@@ -140,14 +140,14 @@ class Tacotron2(TacotronAbstract):
         if self.num_speakers > 1:
             embedded_speakers = self.speaker_embedding(speaker_ids)[:, None]
             embedded_speakers = embedded_speakers.repeat(1, encoder_outputs.size(1), 1)
-            if hasattr(self, 'gst'):
+            if self.gst:
                 # B x gst_dim
                 encoder_outputs, embedded_gst = self.compute_gst(encoder_outputs, style_mel)
                 encoder_outputs = torch.cat([encoder_outputs, embedded_gst, embedded_speakers], dim=-1)
             else:
                 encoder_outputs = torch.cat([encoder_outputs, embedded_speakers], dim=-1)
         else:
-            if hasattr(self, 'gst'):
+            if self.gst:
                 # B x gst_dim
                 encoder_outputs, embedded_gst = self.compute_gst(encoder_outputs, style_mel)
                 encoder_outputs = torch.cat([encoder_outputs, embedded_gst], dim=-1)
@@ -170,14 +170,14 @@ class Tacotron2(TacotronAbstract):
         if self.num_speakers > 1:
             embedded_speakers = self.speaker_embedding(speaker_ids)[:, None]
             embedded_speakers = embedded_speakers.repeat(1, encoder_outputs.size(1), 1)
-            if hasattr(self, 'gst'):
+            if self.gst:
                 # B x gst_dim
                 encoder_outputs, embedded_gst = self.compute_gst(encoder_outputs, style_mel)
                 encoder_outputs = torch.cat([encoder_outputs, embedded_gst, embedded_speakers], dim=-1)
             else:
                 encoder_outputs = torch.cat([encoder_outputs, embedded_speakers], dim=-1)
         else:
-            if hasattr(self, 'gst'):
+            if self.gst:
                 # B x gst_dim
                 encoder_outputs, embedded_gst = self.compute_gst(encoder_outputs, style_mel)
                 encoder_outputs = torch.cat([encoder_outputs, embedded_gst], dim=-1)
diff --git a/tests/outputs/dummy_model_config.json b/tests/outputs/dummy_model_config.json
index bf46be1c..b032f191 100644
--- a/tests/outputs/dummy_model_config.json
+++ b/tests/outputs/dummy_model_config.json
@@ -99,3 +99,4 @@
     }
 }
 
+
diff --git a/tests/test_tacotron2_model.py b/tests/test_tacotron2_model.py
index d4d5eb86..c6d08160 100644
--- a/tests/test_tacotron2_model.py
+++ b/tests/test_tacotron2_model.py
@@ -75,6 +75,62 @@ class TacotronTrainTest(unittest.TestCase):
                     count, param.shape, param, param_ref)
                 count += 1
 
+
+class TacotronGSTTrainTest(unittest.TestCase):
+    def test_train_step(self):
+        input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
+        input_lengths = torch.randint(100, 128, (8, )).long().to(device)
+        input_lengths = torch.sort(input_lengths, descending=True)[0]
+        mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
+        mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
+        mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
+        mel_lengths[0] = 30
+        stop_targets = torch.zeros(8, 30, 1).float().to(device)
+        speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
+
+        for idx in mel_lengths:
+            stop_targets[:, int(idx.item()):, 0] = 1.0
+
+        stop_targets = stop_targets.view(input_dummy.shape[0],
+                                         stop_targets.size(1) // c.r, -1)
+        stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
+
+        criterion = MSELossMasked(seq_len_norm=False).to(device)
+        criterion_st = nn.BCEWithLogitsLoss().to(device)
+        model = Tacotron2(num_chars=24,
+                          gst=True,
+                          r=c.r,
+                          num_speakers=5).to(device)
+        model.train()
+        model_ref = copy.deepcopy(model)
+        count = 0
+        for param, param_ref in zip(model.parameters(),
+                                    model_ref.parameters()):
+            assert (param - param_ref).sum() == 0, param
+            count += 1
+        optimizer = optim.Adam(model.parameters(), lr=c.lr)
+        for i in range(5):
+            mel_out, mel_postnet_out, align, stop_tokens = model.forward(
+                input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids)
+            assert torch.sigmoid(stop_tokens).data.max() <= 1.0
+            assert torch.sigmoid(stop_tokens).data.min() >= 0.0
+            optimizer.zero_grad()
+            loss = criterion(mel_out, mel_spec, mel_lengths)
+            stop_loss = criterion_st(stop_tokens, stop_targets)
+            loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
+            loss.backward()
+            optimizer.step()
+        # check parameter changes
+        count = 0
+        for param, param_ref in zip(model.parameters(),
+                                    model_ref.parameters()):
+            # ignore pre-higway layer since it works conditional
+            # if count not in [145, 59]:
+            assert (param != param_ref).any(
+            ), "param {} with shape {} not updated!! \n{}\n{}".format(
+                count, param.shape, param, param_ref)
+            count += 1
+
 class TacotronGSTTrainTest(unittest.TestCase):
     @staticmethod
     def test_train_step():
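
Note on the guard change above: the base class appears to always assign the `gst` attribute in `__init__`, so `hasattr(self, 'gst')` is true even when GST is disabled in the config, and the GST branch would be taken unconditionally. Testing the attribute's truthiness keys the branch off the actual flag. A minimal standalone sketch of the difference (the `Model` class below is hypothetical, not part of the patch):

    class Model:
        def __init__(self, gst=False):
            # the flag is always assigned, even when GST is disabled
            self.gst = gst

    m = Model(gst=False)
    print(hasattr(m, 'gst'))  # True  -- old guard would still enter the GST branch
    print(bool(m.gst))        # False -- new guard skips it as intended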