mirror of https://github.com/coqui-ai/TTS.git
load model checkpoint on CPU, set 'r' for all models now that gradual training is enabled for all
parent ca49ae8b5e
commit e2e92b63d5
@@ -53,15 +53,14 @@ class Synthesizer(object):
             num_speakers = 0
         self.tts_model = setup_model(self.input_size, num_speakers=num_speakers, c=self.tts_config)
         # load model state
-        map_location = None if use_cuda else torch.device('cpu')
-        cp = torch.load(tts_checkpoint, map_location=map_location)
+        cp = torch.load(tts_checkpoint, map_location=torch.device('cpu'))
         # load the model
         self.tts_model.load_state_dict(cp['model'])
         if use_cuda:
             self.tts_model.cuda()
         self.tts_model.eval()
         self.tts_model.decoder.max_decoder_steps = 3000
-        if 'r' in cp and self.tts_config.model in ["Tacotron", "TacotronGST"]:
+        if 'r' in cp:
             self.tts_model.decoder.set_r(cp['r'])
 
     def load_wavernn(self, lib_path, model_path, model_file, model_config, use_cuda):
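For context, a minimal sketch of the loading pattern the new code follows: the checkpoint is always mapped to CPU so checkpoints trained on GPU load on CPU-only hosts, and the stored reduction factor 'r' is applied whenever the checkpoint carries one, since gradual training is now enabled for all models. The helper name, the checkpoint path argument, and the assumption that the model exposes decoder.set_r() are illustrative, not part of this commit.

import torch

def load_checkpoint_on_cpu(model, checkpoint_path, use_cuda=False):
    # Hypothetical helper illustrating the commit's pattern; checkpoint_path
    # and the decoder.set_r() attribute are assumptions about the model.
    # Map the checkpoint to CPU so GPU-trained checkpoints load anywhere.
    cp = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    model.load_state_dict(cp['model'])
    if use_cuda:
        model.cuda()
    model.eval()
    # Apply the stored reduction factor for any model that saved one,
    # now that gradual training applies to all models.
    if 'r' in cp:
        model.decoder.set_r(cp['r'])
    return model

With this pattern the same checkpoint file loads unchanged on both CPU and GPU hosts; calling model.cuda() afterwards moves the already-loaded weights onto the GPU.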