mirror of https://github.com/coqui-ai/TTS.git

glow-tts module renaming updates

commit 15e6ab3912
parent 0ffe91b21d
@@ -13,30 +13,30 @@ import torch
 from torch.utils.data import DataLoader
 from torch.nn.parallel import DistributedDataParallel as DDP

-from mozilla_voice_tts.tts.datasets.preprocess import load_meta_data
-from mozilla_voice_tts.tts.datasets.TTSDataset import MyDataset
-from mozilla_voice_tts.tts.layers.losses import GlowTTSLoss
-from mozilla_voice_tts.utils.console_logger import ConsoleLogger
-from mozilla_voice_tts.tts.utils.distribute import (DistributedSampler,
-                                                     init_distributed,
-                                                     reduce_tensor)
-from mozilla_voice_tts.tts.utils.generic_utils import check_config, setup_model
-from mozilla_voice_tts.tts.utils.io import save_best_model, save_checkpoint
-from mozilla_voice_tts.tts.utils.measures import alignment_diagonal_score
-from mozilla_voice_tts.tts.utils.speakers import (get_speakers,
-                                                   load_speaker_mapping,
-                                                   save_speaker_mapping)
-from mozilla_voice_tts.tts.utils.synthesis import synthesis
-from mozilla_voice_tts.tts.utils.text.symbols import make_symbols, phonemes, symbols
-from mozilla_voice_tts.tts.utils.visual import plot_alignment, plot_spectrogram
-from mozilla_voice_tts.utils.audio import AudioProcessor
-from mozilla_voice_tts.utils.generic_utils import (
-    KeepAverage, count_parameters, create_experiment_folder, get_git_branch,
-    remove_experiment_folder, set_init_dict)
-from mozilla_voice_tts.utils.io import copy_config_file, load_config
-from mozilla_voice_tts.utils.radam import RAdam
-from mozilla_voice_tts.utils.tensorboard_logger import TensorboardLogger
-from mozilla_voice_tts.utils.training import (NoamLR, adam_weight_decay,
-                                               check_update,
-                                               gradual_training_scheduler,
-                                               set_weight_decay,
+from TTS.tts.datasets.preprocess import load_meta_data
+from TTS.tts.datasets.TTSDataset import MyDataset
+from TTS.tts.layers.losses import GlowTTSLoss
+from TTS.utils.console_logger import ConsoleLogger
+from TTS.tts.utils.distribute import (DistributedSampler,
+                                       init_distributed,
+                                       reduce_tensor)
+from TTS.tts.utils.generic_utils import check_config, setup_model
+from TTS.tts.utils.io import save_best_model, save_checkpoint
+from TTS.tts.utils.measures import alignment_diagonal_score
+from TTS.tts.utils.speakers import (get_speakers,
+                                     load_speaker_mapping,
+                                     save_speaker_mapping)
+from TTS.tts.utils.synthesis import synthesis
+from TTS.tts.utils.text.symbols import make_symbols, phonemes, symbols
+from TTS.tts.utils.visual import plot_alignment, plot_spectrogram
+from TTS.utils.audio import AudioProcessor
+from TTS.utils.generic_utils import (
+    KeepAverage, count_parameters, create_experiment_folder, get_git_branch,
+    remove_experiment_folder, set_init_dict)
+from TTS.utils.io import copy_config_file, load_config
+from TTS.utils.radam import RAdam
+from TTS.utils.tensorboard_logger import TensorboardLogger
+from TTS.utils.training import (NoamLR, adam_weight_decay,
+                                 check_update,
+                                 gradual_training_scheduler,
+                                 set_weight_decay,
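Since this hunk only swaps the package prefix from mozilla_voice_tts to TTS, a quick way to check that no path was missed is to try importing each renamed module. A minimal, hypothetical smoke test (the list below covers only a few of the paths touched above, and assumes the renamed package is installed; it is not part of the commit):

    # Hypothetical smoke test for the renamed import paths.
    import importlib

    RENAMED_MODULES = [
        "TTS.tts.datasets.preprocess",
        "TTS.tts.layers.losses",
        "TTS.tts.utils.distribute",
        "TTS.utils.audio",
        "TTS.utils.radam",
    ]

    for path in RENAMED_MODULES:
        importlib.import_module(path)  # raises ModuleNotFoundError if the rename missed this path
    print("all renamed glow-tts imports resolve")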
@@ -2,8 +2,8 @@ import torch
 from torch import nn
 from torch.nn import functional as F

-from mozilla_voice_tts.tts.utils.generic_utils import sequence_mask
-from mozilla_voice_tts.tts.layers.glow_tts.glow import InvConvNear, CouplingBlock, ActNorm
+from TTS.tts.utils.generic_utils import sequence_mask
+from TTS.tts.layers.glow_tts.glow import InvConvNear, CouplingBlock, ActNorm


 def squeeze(x, x_mask=None, num_sqz=2):
@@ -2,10 +2,10 @@ import math
 import torch
 from torch import nn

-from mozilla_voice_tts.tts.layers.glow_tts.transformer import Transformer
-from mozilla_voice_tts.tts.utils.generic_utils import sequence_mask
-from mozilla_voice_tts.tts.layers.glow_tts.glow import ConvLayerNorm, LayerNorm
-from mozilla_voice_tts.tts.layers.glow_tts.duration_predictor import DurationPredictor
+from TTS.tts.layers.glow_tts.transformer import Transformer
+from TTS.tts.utils.generic_utils import sequence_mask
+from TTS.tts.layers.glow_tts.glow import ConvLayerNorm, LayerNorm
+from TTS.tts.layers.glow_tts.duration_predictor import DurationPredictor


 class GatedConvBlock(nn.Module):
@@ -1,8 +1,8 @@
 import numpy as np
 import torch
 from torch.nn import functional as F
-from mozilla_voice_tts.tts.utils.generic_utils import sequence_mask
-from mozilla_voice_tts.tts.layers.glow_tts.monotonic_align.core import maximum_path_c
+from TTS.tts.utils.generic_utils import sequence_mask
+from TTS.tts.layers.glow_tts.monotonic_align.core import maximum_path_c


 def convert_pad_shape(pad_shape):
[File diff suppressed because it is too large]
@@ -5,7 +5,7 @@ import torch
 from torch import nn
 from torch.nn import functional as F

-from mozilla_voice_tts.tts.layers.glow_tts.glow import LayerNorm
+from TTS.tts.layers.glow_tts.glow import LayerNorm


 class RelativePositionMultiHeadAttention(nn.Module):
@@ -3,10 +3,10 @@ import torch
 from torch import nn
 from torch.nn import functional as F

-from mozilla_voice_tts.tts.layers.glow_tts.encoder import Encoder
-from mozilla_voice_tts.tts.layers.glow_tts.decoder import Decoder
-from mozilla_voice_tts.tts.utils.generic_utils import sequence_mask
-from mozilla_voice_tts.tts.layers.glow_tts.monotonic_align import maximum_path, generate_path
+from TTS.tts.layers.glow_tts.encoder import Encoder
+from TTS.tts.layers.glow_tts.decoder import Decoder
+from TTS.tts.utils.generic_utils import sequence_mask
+from TTS.tts.layers.glow_tts.monotonic_align import maximum_path, generate_path


 class GlowTts(nn.Module):
@@ -49,7 +49,7 @@ def to_camel(text):
 def setup_model(num_chars, num_speakers, c, speaker_embedding_dim=None):
     print(" > Using model: {}".format(c.model))
     MyModel = importlib.import_module('TTS.tts.models.' + c.model.lower())
-    MyModel = getattr(MyModel, c.model)
+    MyModel = getattr(MyModel, to_camel(c.model))
     if c.model.lower() in "tacotron":
         model = MyModel(num_chars=num_chars,
                         num_speakers=num_speakers,
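The functional change in this hunk is the getattr lookup: the config names the model in snake_case (e.g. glow_tts), the module file follows that name, but the class inside is CamelCase (GlowTts, as in the hunk above), so getattr(MyModel, c.model) would not find it. A rough sketch of the resolution path, with a stand-in to_camel helper (the real one is defined earlier in this file and may differ in detail):

    import importlib
    import re

    def to_camel(text):
        # Stand-in for the repo's helper: 'glow_tts' -> 'GlowTts', 'tacotron2' -> 'Tacotron2'.
        return re.sub(r'(^|_)(\w)', lambda m: m.group(2).upper(), text)

    def resolve_model_class(model_name):
        # Mirrors the setup_model lookup: lowercase module path, CamelCase class name.
        module = importlib.import_module('TTS.tts.models.' + model_name.lower())
        return getattr(module, to_camel(model_name))

    # e.g. resolve_model_class('glow_tts') returns TTS.tts.models.glow_tts.GlowTts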
@@ -5,8 +5,8 @@ import pickle as pickle_tts
 class RenamingUnpickler(pickle_tts.Unpickler):
     """Overload default pickler to solve module renaming problem"""
     def find_class(self, module, name):
-        if 'mozilla_voice_tts' in module :
-            module = module.replace('mozilla_voice_tts', 'TTS')
+        if 'TTS' in module :
+            module = module.replace('TTS', 'TTS')
         return super().find_class(module, name)

 class AttrDict(dict):
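Note that the blanket search-and-replace also rewrote this unpickler's own string literals, so module.replace('TTS', 'TTS') is now a no-op. If the goal is still to load checkpoints pickled under the old mozilla_voice_tts namespace, the old prefix presumably has to stay on the left-hand side of the replacement; a minimal sketch (the load_legacy helper and file handling are illustrative, not from the repo):

    import pickle

    class RenamingUnpickler(pickle.Unpickler):
        """Map the old package prefix to the new one so legacy pickles still resolve."""
        def find_class(self, module, name):
            if 'mozilla_voice_tts' in module:
                module = module.replace('mozilla_voice_tts', 'TTS')
            return super().find_class(module, name)

    def load_legacy(path):
        # Handles plain pickle payloads; torch checkpoints would need this class
        # plugged in through torch.load's pickle_module argument instead.
        with open(path, 'rb') as f:
            return RenamingUnpickler(f).load()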