mirror of https://github.com/coqui-ai/TTS.git

tflite inference for melgan models

parent 2d596aa140
commit b2cc256dab
@@ -109,3 +109,20 @@ class MelganGenerator(tf.keras.models.Model):
     def build_inference(self):
         x = tf.random.uniform((1, self.in_channels, 4), dtype=tf.float32)
         self(x, training=False)
+
+    @tf.function(
+        experimental_relax_shapes=True,
+        input_signature=[
+            tf.TensorSpec([1, None, None], dtype=tf.float32),
+        ],)
+    def inference_tflite(self, c):
+        c = tf.transpose(c, perm=[0, 2, 1])
+        c = tf.expand_dims(c, 2)
+        # FIXME: TF had no replicate padding as in Torch
+        # c = tf.pad(c, [[0, 0], [self.inference_padding, self.inference_padding], [0, 0], [0, 0]], "REFLECT")
+        o = c
+        for layer in self.model_layers:
+            o = layer(o)
+        # o = self.model_layers(c)
+        o = tf.transpose(o, perm=[0, 3, 2, 1])
+        return o[:, :, 0, :]
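Because inference_tflite is wrapped in tf.function with a fixed input_signature, a concrete function can be traced from it and passed to the TFLite converter. A minimal conversion sketch follows; it assumes `model` is a MelganGenerator instance with weights already loaded (checkpoint loading is omitted), and the output path "melgan.tflite" is just an example:

import tensorflow as tf

# Assumes `model` is a built MelganGenerator with weights restored.
model.build_inference()

# Trace the fixed-signature tf.function into a concrete function.
concrete_fn = model.inference_tflite.get_concrete_function()

# Convert the concrete function to a TFLite flatbuffer.
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn])
converter.optimizations = [tf.lite.Optimize.DEFAULT]  # optional post-training optimization
tflite_model = converter.convert()

with open("melgan.tflite", "wb") as f:  # hypothetical output path
    f.write(tflite_model)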
@@ -30,11 +30,6 @@ class MultibandMelganGenerator(MelganGenerator):
     def pqmf_synthesis(self, x):
         return self.pqmf_layer.synthesis(x)
 
-    # def call(self, c, training=False):
-    #     if training:
-    #         raise NotImplementedError()
-    #     return self.inference(c)
-
     def inference(self, c):
         c = tf.transpose(c, perm=[0, 2, 1])
         c = tf.expand_dims(c, 2)
@@ -46,3 +41,20 @@ class MultibandMelganGenerator(MelganGenerator):
         o = tf.transpose(o, perm=[0, 3, 2, 1])
         o = self.pqmf_layer.synthesis(o[:, :, 0, :])
         return o
+
+    @tf.function(
+        experimental_relax_shapes=True,
+        input_signature=[
+            tf.TensorSpec([1, 80, None], dtype=tf.float32),
+        ],)
+    def inference_tflite(self, c):
+        c = tf.transpose(c, perm=[0, 2, 1])
+        c = tf.expand_dims(c, 2)
+        # FIXME: TF had no replicate padding as in Torch
+        # c = tf.pad(c, [[0, 0], [self.inference_padding, self.inference_padding], [0, 0], [0, 0]], "REFLECT")
+        o = c
+        for layer in self.model_layers:
+            o = layer(o)
+        o = tf.transpose(o, perm=[0, 3, 2, 1])
+        o = self.pqmf_layer.synthesis(o[:, :, 0, :])
+        return o
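On the inference side, the converted model can be driven with the TFLite Interpreter. Since the time axis of the input spectrogram is declared as None in the input_signature, the input tensor has to be resized per utterance before allocation. A rough usage sketch, assuming the flatbuffer was saved as "melgan.tflite" and an 80-band mel input of shape [1, 80, T] (the random input here is only a stand-in for a real spectrogram):

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="melgan.tflite")  # hypothetical path
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Dummy mel spectrogram with shape [batch, num_mels, time]; 80 bands assumed.
mel = np.random.rand(1, 80, 50).astype(np.float32)

# The time dimension is dynamic, so resize the input before allocating tensors.
interpreter.resize_tensor_input(input_details[0]["index"], mel.shape)
interpreter.allocate_tensors()

interpreter.set_tensor(input_details[0]["index"], mel)
interpreter.invoke()
waveform = interpreter.get_tensor(output_details[0]["index"])
print(waveform.shape)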