Merge pull request #26 from idiap/server-output

fix(server): ensure logging output gets actually shown
Enno Hermann 2024-05-26 09:08:27 +01:00 committed by GitHub
commit 642cbd472f
5 changed files with 27 additions and 36 deletions
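
The underlying issue: `logging.getLogger("TTS").setLevel(logging.INFO)` only sets a level. If no handler is attached to the `"TTS"` logger, INFO records fall through to Python's last-resort handler, which emits WARNING and above only, so informational output never reaches the console. The diffs below replace that call with `setup_logger(...)` from `TTS.utils.generic_utils`. A minimal sketch of what such a helper plausibly does — the signature mirrors the call sites in this commit, but the body is an illustrative assumption, not the actual implementation:

```python
import logging
import sys


def setup_logger(name, level=logging.INFO, *, screen=False, formatter=None):
    """Attach a console handler so log records are actually emitted.

    Sketch only: signature taken from the call sites in this commit
    (setup_logger("TTS", level=logging.INFO, screen=True, formatter=ConsoleFormatter()));
    the real implementation in TTS.utils.generic_utils may differ.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)  # setting the level alone is not enough...
    if screen:
        handler = logging.StreamHandler(sys.stdout)  # ...a handler must emit the records
        if formatter is not None:
            handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger


# Without a handler, this INFO message would be dropped by the last-resort
# handler (WARNING and above only); with one attached, it is printed.
setup_logger("TTS", level=logging.INFO, screen=True)
logging.getLogger("TTS").info("model loaded")
```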


@@ -59,7 +59,7 @@ body:
 You can either run `TTS/bin/collect_env_info.py`
 ```bash
-wget https://raw.githubusercontent.com/coqui-ai/TTS/main/TTS/bin/collect_env_info.py
+wget https://raw.githubusercontent.com/idiap/coqui-ai-TTS/main/TTS/bin/collect_env_info.py
 python collect_env_info.py
 ```


@@ -1,8 +1,8 @@
 blank_issues_enabled: false
 contact_links:
   - name: CoquiTTS GitHub Discussions
-    url: https://github.com/coqui-ai/TTS/discussions
+    url: https://github.com/idiap/coqui-ai-TTS/discussions
     about: Please ask and answer questions here.
   - name: Coqui Security issue disclosure
-    url: mailto:info@coqui.ai
+    url: mailto:enno.hermann@gmail.com
     about: Please report security vulnerabilities here.


@@ -5,11 +5,3 @@ Welcome to the 🐸TTS project! We are excited to see your interest, and appreci
 This repository is governed by the Contributor Covenant Code of Conduct. For more details, see the [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) file.

 In order to make a good pull request, please see our [CONTRIBUTING.md](CONTRIBUTING.md) file.
-
-Before accepting your pull request, you will be asked to sign a [Contributor License Agreement](https://cla-assistant.io/coqui-ai/TTS).
-
-This [Contributor License Agreement](https://cla-assistant.io/coqui-ai/TTS):
-
-- Protects you, Coqui, and the users of the code.
-- Does not change your rights to use your contributions for any purpose.
-- Does not change the license of the 🐸TTS project. It just makes the terms of your contribution clearer and lets us know you are OK to contribute.


@@ -29,6 +29,8 @@ import zipfile
 import soundfile as sf

+from TTS.utils.generic_utils import ConsoleFormatter, setup_logger
+
 logger = logging.getLogger(__name__)

 SUBSETS = {
@@ -214,7 +216,7 @@ def processor(directory, subset, force_process):
 if __name__ == "__main__":
-    logging.getLogger("TTS").setLevel(logging.INFO)
+    setup_logger("TTS", level=logging.INFO, screen=True, formatter=ConsoleFormatter())
     if len(sys.argv) != 4:
         print("Usage: python prepare_data.py save_directory user password")
         sys.exit()


@@ -1,4 +1,7 @@
 #!flask/bin/python
+
+"""TTS demo server."""
+
 import argparse
 import io
 import json
@@ -13,27 +16,23 @@ from urllib.parse import parse_qs
 try:
     from flask import Flask, render_template, render_template_string, request, send_file
 except ImportError as e:
-    raise ImportError("Server requires requires flask, use `pip install coqui-tts[server]`.") from e
+    msg = "Server requires requires flask, use `pip install coqui-tts[server]`"
+    raise ImportError(msg) from e

 from TTS.config import load_config
+from TTS.utils.generic_utils import ConsoleFormatter, setup_logger
 from TTS.utils.manage import ModelManager
 from TTS.utils.synthesizer import Synthesizer

 logger = logging.getLogger(__name__)
-logging.getLogger("TTS").setLevel(logging.INFO)
+setup_logger("TTS", level=logging.INFO, screen=True, formatter=ConsoleFormatter())


-def create_argparser():
-    def convert_boolean(x):
-        return x.lower() in ["true", "1", "yes"]
-
+def create_argparser() -> argparse.ArgumentParser:
     parser = argparse.ArgumentParser()
     parser.add_argument(
         "--list_models",
-        type=convert_boolean,
-        nargs="?",
-        const=True,
-        default=False,
+        action="store_true",
         help="list available pre-trained tts and vocoder models.",
     )
     parser.add_argument(
@@ -61,9 +60,13 @@ def create_argparser():
     parser.add_argument("--vocoder_config_path", type=str, help="Path to vocoder model config file.", default=None)
     parser.add_argument("--speakers_file_path", type=str, help="JSON file for multi-speaker model.", default=None)
     parser.add_argument("--port", type=int, default=5002, help="port to listen on.")
-    parser.add_argument("--use_cuda", type=convert_boolean, default=False, help="true to use CUDA.")
-    parser.add_argument("--debug", type=convert_boolean, default=False, help="true to enable Flask debug mode.")
-    parser.add_argument("--show_details", type=convert_boolean, default=False, help="Generate model detail page.")
+    parser.add_argument("--use_cuda", action=argparse.BooleanOptionalAction, default=False, help="true to use CUDA.")
+    parser.add_argument(
+        "--debug", action=argparse.BooleanOptionalAction, default=False, help="true to enable Flask debug mode."
+    )
+    parser.add_argument(
+        "--show_details", action=argparse.BooleanOptionalAction, default=False, help="Generate model detail page."
+    )

     return parser
@@ -73,10 +76,6 @@ args = create_argparser().parse_args()
 path = Path(__file__).parent / "../.models.json"
 manager = ModelManager(path)

-if args.list_models:
-    manager.list_models()
-    sys.exit()
-
 # update in-use models to the specified released models.
 model_path = None
 config_path = None
@@ -171,14 +170,12 @@ def index():
 def details():
     if args.config_path is not None and os.path.isfile(args.config_path):
         model_config = load_config(args.config_path)
-    else:
-        if args.model_name is not None:
-            model_config = load_config(config_path)
+    elif args.model_name is not None:
+        model_config = load_config(config_path)

     if args.vocoder_config_path is not None and os.path.isfile(args.vocoder_config_path):
         vocoder_config = load_config(args.vocoder_config_path)
-    else:
-        if args.vocoder_name is not None:
-            vocoder_config = load_config(vocoder_config_path)
+    elif args.vocoder_name is not None:
+        vocoder_config = load_config(vocoder_config_path)
     else:
         vocoder_config = None
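
A note on the argparse changes in `create_argparser`: the custom `convert_boolean` type (which required explicit values such as `--use_cuda true`) is replaced by standard actions, with `--list_models` becoming a plain `store_true` flag and the remaining booleans using `argparse.BooleanOptionalAction` (Python 3.9+), which auto-generates paired `--flag`/`--no-flag` options. A small standalone illustration of that behaviour, separate from the server code:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--list_models", action="store_true")
parser.add_argument("--use_cuda", action=argparse.BooleanOptionalAction, default=False)

# BooleanOptionalAction generates both "--use_cuda" and "--no-use_cuda".
print(parser.parse_args(["--use_cuda"]))     # Namespace(list_models=False, use_cuda=True)
print(parser.parse_args(["--no-use_cuda"]))  # Namespace(list_models=False, use_cuda=False)
print(parser.parse_args(["--list_models"]))  # Namespace(list_models=True, use_cuda=False)
```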