mirror of https://github.com/coqui-ai/TTS.git
new requirements
commit b116bdaefa
parent 3e4305f878
```diff
@@ -19,3 +19,5 @@ soundfile
 nose==1.3.7
 cardboardlint==1.3.0
 pylint==2.5.3
+fuzzywuzzy
+gdown
```
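For context on the two additions: fuzzywuzzy provides fuzzy string matching and gdown downloads files from Google Drive shares. A minimal usage sketch, with a placeholder Drive file id that is not from this commit:

```python
from fuzzywuzzy import fuzz
import gdown

# Fuzzy string similarity, scored 0-100.
print(fuzz.ratio('tts_models/en/ljspeech', 'tts_model/en/ljspeech'))

# Fetch a file from a Google Drive share; FILE_ID is a placeholder.
gdown.download('https://drive.google.com/uc?id=FILE_ID',
               'checkpoint.pth.tar', quiet=False)
```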
setup.py (+2 lines)
```diff
@@ -98,6 +98,8 @@ requirements = {
         "nose==1.3.7",
         "cardboardlint==1.3.0",
         "pylint==2.5.3",
+        'fuzzywuzzy',
+        'gdown'
     ],
     'pip_install':[
         'tensorflow>=2.2.0',
```
New file (path not shown in this view): a script that converts a Tensorflow MelGAN vocoder checkpoint to a TF-Lite binary. The original header comment said "Tacotron2" and the help strings referred to a torch-to-TF conversion; both contradict the imports and the `convert_melgan_to_tflite` call, so they are corrected here.

```diff
@@ -0,0 +1,34 @@
+# Convert a Tensorflow MelGAN vocoder model to a TF-Lite binary
+
+import argparse
+
+from TTS.utils.io import load_config
+from TTS.utils.text.symbols import symbols, phonemes
+from TTS.vocoder.tf.utils.generic_utils import setup_generator
+from TTS.vocoder.tf.utils.io import load_checkpoint
+from TTS.vocoder.tf.utils.tflite import convert_melgan_to_tflite
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--tf_model',
+                    type=str,
+                    help='Path to the Tensorflow model checkpoint to be converted.')
+parser.add_argument('--config_path',
+                    type=str,
+                    help='Path to the config file of the model.')
+parser.add_argument('--output_path',
+                    type=str,
+                    help='Path for the TF-Lite output binary.')
+args = parser.parse_args()
+
+# load the config
+CONFIG = load_config(args.config_path)
+
+# load the model
+model = setup_generator(CONFIG)
+model.build_inference()
+model = load_checkpoint(model, args.tf_model)
+
+# convert and save the TF-Lite model
+tflite_model = convert_melgan_to_tflite(model, output_path=args.output_path)
```
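A hedged example of invoking the new script. The script filename and the checkpoint/config paths below are placeholders, since this view does not show where the file lives:

```python
# Hypothetical invocation; all filenames are placeholders, not from the commit.
import subprocess

subprocess.run([
    'python', 'convert_melgan_tflite.py',
    '--tf_model', 'tf_melgan_checkpoint.pkl',
    '--config_path', 'config.json',
    '--output_path', 'melgan.tflite',
], check=True)
```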
New file (the import in the script above suggests it lives at TTS/vocoder/tf/utils/tflite.py): helpers that convert a MelGAN model to TF-Lite and load the resulting binary. The converter traces the model's `inference_tflite` tf.function, allows fallback to full TF ops via SELECT_TF_OPS, and skips quantization (empty `optimizations`). A typo in the save-branch comment ("same model binary if outputpath") is fixed.

```diff
@@ -0,0 +1,31 @@
+import tensorflow as tf
+
+
+def convert_melgan_to_tflite(model,
+                             output_path=None,
+                             experimental_converter=True):
+    """Convert a Tensorflow MelGAN model to TFLite. Save a binary file if
+    output_path is provided, else return the TFLite model."""
+
+    concrete_function = model.inference_tflite.get_concrete_function()
+    converter = tf.lite.TFLiteConverter.from_concrete_functions(
+        [concrete_function])
+    converter.experimental_new_converter = experimental_converter
+    converter.optimizations = []
+    converter.target_spec.supported_ops = [
+        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
+    ]
+    tflite_model = converter.convert()
+    print(f'TFLite model size is {len(tflite_model) / (1024.0 * 1024.0)} MB.')
+    if output_path is not None:
+        # save the model binary when an output path is provided
+        with open(output_path, 'wb') as f:
+            f.write(tflite_model)
+        return None
+    return tflite_model
+
+
+def load_tflite_model(tflite_path):
+    tflite_model = tf.lite.Interpreter(model_path=tflite_path)
+    tflite_model.allocate_tensors()
+    return tflite_model
```
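And a hedged sketch of running the converted binary through the standard TF-Lite interpreter API, which `load_tflite_model` wraps. The file path and the `[1, frames, 80]` mel shape are assumptions for illustration, not taken from the commit:

```python
import numpy as np
import tensorflow as tf

# Placeholder path to a binary produced by convert_melgan_to_tflite.
interpreter = tf.lite.Interpreter(model_path='melgan.tflite')

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# If the model declares a dynamic time axis, pin it before allocating tensors;
# [1, 64, 80] (batch, frames, mel bins) is an assumed shape for illustration.
interpreter.resize_tensor_input(input_details[0]['index'], [1, 64, 80])
interpreter.allocate_tensors()

# Feed a dummy mel spectrogram; a real caller passes frames from the TTS model.
mel = np.random.rand(1, 64, 80).astype(np.float32)
interpreter.set_tensor(input_details[0]['index'], mel)
interpreter.invoke()
waveform = interpreter.get_tensor(output_details[0]['index'])
print(waveform.shape)
```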