mirror of https://github.com/coqui-ai/TTS.git
Fix console output issue in the `tts` function of the `Synthesizer` class
Fix Console Printing Issue in the Synthesizer Class

This update addresses an issue with the `tts` function in the `Synthesizer` class where console output, including the array of split sentences, the processing time, and the real-time factor, was printed during execution. This caused unwanted console output when the function is run in a thread.

Changes made:
- Added a boolean `SuppresPrintStatements` parameter to suppress the console output generated by the `tts` function.
- Ensured that print statements are only triggered when explicitly required, preventing cluttered console output during threaded execution.

This modification improves the usability of the `Synthesizer` class when integrated into threaded applications and enhances the overall user experience by minimizing unnecessary console output.
This commit is contained in:
parent dbf1a08a0d
commit fb1cec38f4
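For context, a minimal usage sketch of the new flag from a worker thread. It assumes the high-level wrapper in `TTS.api`; the model name below is only an example, and the flag itself is the `SuppresPrintStatements` parameter added by this commit:

import threading

from TTS.api import TTS

# Load a model through the high-level API (example model name).
tts = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC")
results = {}

def synthesize(text):
    # SuppresPrintStatements=True (added by this commit) keeps the worker thread
    # from printing the split sentences, processing time and real-time factor.
    results["wav"] = tts.tts(text=text, SuppresPrintStatements=True)

worker = threading.Thread(target=synthesize, args=("Hello from a background thread.",))
worker.start()
worker.join()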
@@ -88,7 +88,7 @@ class TTS(nn.Module):
     @property
     def is_multi_speaker(self):
-        if hasattr(self.synthesizer.tts_model, "speaker_manager") and self.synthesizer.tts_model.speaker_manager:
+        if hasattr(self.synthesizer.tts_model, "speaker_Smanager") and self.synthesizer.tts_model.speaker_manager:
             return self.synthesizer.tts_model.speaker_manager.num_speakers > 1
         return False
@@ -243,6 +243,7 @@ class TTS(nn.Module):
         emotion: str = None,
         speed: float = None,
         split_sentences: bool = True,
+        SuppresPrintStatements: bool = False,
         **kwargs,
     ):
         """Convert text to speech.
@@ -267,6 +268,9 @@ class TTS(nn.Module):
                 Split text into sentences, synthesize them separately and concatenate the file audio.
                 Setting it False uses more VRAM and possibly hit model specific text length or VRAM limits. Only
                 applicable to the 🐸TTS models. Defaults to True.
+            SuppresPrintStatements (bool, optional):
+                Suppress all print statements so that, when the function is run in a thread, nothing is
+                printed to the terminal. Setting it to True suppresses the print statements. Defaults to False.
             kwargs (dict, optional):
                 Additional arguments for the model.
         """
@@ -283,6 +287,7 @@ class TTS(nn.Module):
             style_text=None,
             reference_speaker_name=None,
             split_sentences=split_sentences,
+            SuppresPrintStatements=SuppresPrintStatements,
             **kwargs,
         )
         return wav

@@ -265,6 +265,7 @@ class Synthesizer(nn.Module):
         reference_wav=None,
         reference_speaker_name=None,
         split_sentences: bool = True,
+        SuppresPrintStatements: bool = False,
         **kwargs,
     ) -> List[int]:
         """🐸 TTS magic. Run all the models and generate speech.
@@ -279,6 +280,7 @@ class Synthesizer(nn.Module):
             reference_wav ([type], optional): reference waveform for voice conversion. Defaults to None.
             reference_speaker_name ([type], optional): speaker id of reference waveform. Defaults to None.
             split_sentences (bool, optional): split the input text into sentences. Defaults to True.
+            SuppresPrintStatements (bool, optional): suppress the print statements. Defaults to False.
             **kwargs: additional arguments to pass to the TTS model.
         Returns:
             List[int]: [description]
@@ -294,9 +296,11 @@ class Synthesizer(nn.Module):
         if text:
             sens = [text]
             if split_sentences:
-                print(" > Text splitted to sentences.")
+                if not SuppresPrintStatements:
+                    print(" > Text splitted to sentences.")
                 sens = self.split_into_sentences(text)
-            print(sens)
+            if not SuppresPrintStatements:
+                print(sens)
 
         # handle multi-speaker
         if "voice_dir" in kwargs:
@@ -420,7 +424,8 @@ class Synthesizer(nn.Module):
                             self.vocoder_config["audio"]["sample_rate"] / self.tts_model.ap.sample_rate,
                         ]
                         if scale_factor[1] != 1:
-                            print(" > interpolating tts model output.")
+                            if not SuppresPrintStatements:
+                                print(" > interpolating tts model output.")
                             vocoder_input = interpolate_vocoder_input(scale_factor, vocoder_input)
                         else:
                             vocoder_input = torch.tensor(vocoder_input).unsqueeze(0)  # pylint: disable=not-callable
@@ -484,7 +489,8 @@ class Synthesizer(nn.Module):
                             self.vocoder_config["audio"]["sample_rate"] / self.tts_model.ap.sample_rate,
                         ]
                         if scale_factor[1] != 1:
-                            print(" > interpolating tts model output.")
+                            if not SuppresPrintStatements:
+                                print(" > interpolating tts model output.")
                             vocoder_input = interpolate_vocoder_input(scale_factor, vocoder_input)
                         else:
                             vocoder_input = torch.tensor(vocoder_input).unsqueeze(0)  # pylint: disable=not-callable
@@ -500,6 +506,7 @@ class Synthesizer(nn.Module):
         # compute stats
         process_time = time.time() - start_time
         audio_time = len(wavs) / self.tts_config.audio["sample_rate"]
-        print(f" > Processing time: {process_time}")
-        print(f" > Real-time factor: {process_time / audio_time}")
+        if not SuppresPrintStatements:
+            print(f" > Processing time: {process_time}")
+            print(f" > Real-time factor: {process_time / audio_time}")
         return wavs
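The same flag is also available when driving the `Synthesizer` class directly. A minimal sketch, assuming a local checkpoint and config; the file paths and the constructor keywords (`tts_checkpoint`, `tts_config_path`, `use_cuda`) are illustrative assumptions, not part of this commit:

from TTS.utils.synthesizer import Synthesizer

# Placeholder paths; substitute a real model checkpoint and config file.
synthesizer = Synthesizer(
    tts_checkpoint="/path/to/model.pth",
    tts_config_path="/path/to/config.json",
    use_cuda=False,
)

# With SuppresPrintStatements=True the sentence list, processing time and
# real-time factor are no longer printed by Synthesizer.tts.
wav = synthesizer.tts("This call produces no console output.", SuppresPrintStatements=True)
synthesizer.save_wav(wav, "output.wav")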
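As a design note, a similar effect can be obtained without a dedicated flag by temporarily redirecting standard output around the call; a standard-library sketch (not part of this commit):

import contextlib
import io

def call_silently(fn, *args, **kwargs):
    # Send anything the callee prints to an in-memory buffer instead of the terminal.
    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        return fn(*args, **kwargs)

# Example: wav = call_silently(synthesizer.tts, "Hello world.")

Note that `contextlib.redirect_stdout` swaps the process-wide `sys.stdout`, so it is not isolated between concurrently running threads; that is one argument for an explicit per-call parameter like the one introduced in this commit.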