diff --git a/TTS/bin/compute_embeddings.py b/TTS/bin/compute_embeddings.py
index 9f5b0b49..702e015e 100644
--- a/TTS/bin/compute_embeddings.py
+++ b/TTS/bin/compute_embeddings.py
@@ -1,5 +1,6 @@
 import argparse
 import os
+import torch
 from argparse import RawTextHelpFormatter
 
 from tqdm import tqdm
@@ -28,11 +29,12 @@ parser.add_argument(
     type=str,
     help="Path to dataset config file.",
 )
-parser.add_argument("output_path", type=str, help="path for output speakers.json and/or speakers.npy.")
+parser.add_argument("output_path", type=str, help="path for output .json file.")
 parser.add_argument(
-    "--old_file", type=str, help="Previous speakers.json file, only compute for new audios.", default=None
+    "--old_file", type=str, help="Previous .json file, only compute for new audios.", default=None
 )
 parser.add_argument("--use_cuda", type=bool, help="flag to set cuda.", default=True)
+parser.add_argument("--use_predicted_label", type=bool, help="If True and predicted label is available with will use it.", default=False)
 parser.add_argument("--eval", type=bool, help="compute eval.", default=True)
 
 args = parser.parse_args()
@@ -52,10 +54,10 @@ encoder_manager = SpeakerManager(
 class_name_key = encoder_manager.encoder_config.class_name_key
 
 # compute speaker embeddings
-speaker_mapping = {}
+class_mapping = {}
 for idx, wav_file in enumerate(tqdm(wav_files)):
     if isinstance(wav_file, dict):
-        class_name = wav_file[class_name_key]
+        class_name = wav_file.get(class_name_key)
         wav_file = wav_file["audio_file"]
     else:
         class_name = None
@@ -68,20 +70,40 @@ for idx, wav_file in enumerate(tqdm(wav_files)):
         # extract the embedding
         embedd = encoder_manager.compute_embedding_from_clip(wav_file)
 
-    # create speaker_mapping if target dataset is defined
-    speaker_mapping[wav_file_name] = {}
-    speaker_mapping[wav_file_name]["name"] = class_name
-    speaker_mapping[wav_file_name]["embedding"] = embedd
+    if args.use_predicted_label:
+        map_classid_to_classname = getattr(encoder_manager.encoder_config, "map_classid_to_classname", None)
+        if encoder_manager.encoder_criterion is not None and map_classid_to_classname is not None:
+            embedding = torch.FloatTensor(embedd).unsqueeze(0)
+            if encoder_manager.use_cuda:
+                embedding = embedding.cuda()
 
-if speaker_mapping:
-    # save speaker_mapping if target dataset is defined
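+            # predict the class id with the criterion's softmax layer and map it back to a class name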
+            class_id = encoder_manager.encoder_criterion.softmax.inference(embedding).item()
+            class_name = map_classid_to_classname[str(class_id)]
+        else:
+            raise RuntimeError(
+                " [!] use_predicted_label is enabled but predicted labels are not available !!"
+            )
+
+    # create class_mapping if target dataset is defined
+    class_mapping[wav_file_name] = {}
+    class_mapping[wav_file_name]["name"] = class_name
+    class_mapping[wav_file_name]["embedding"] = embedd
+
+if class_mapping:
+    # save class_mapping if target dataset is defined
     if ".json" not in args.output_path:
-        mapping_file_path = os.path.join(args.output_path, "speakers.json")
+        if class_name_key == "speaker_name":
+            mapping_file_path = os.path.join(args.output_path, "speakers.json")
+        else:
+            mapping_file_path = os.path.join(args.output_path, "emotions.json")
     else:
         mapping_file_path = args.output_path
 
     os.makedirs(os.path.dirname(mapping_file_path), exist_ok=True)
 
     # pylint: disable=W0212
-    encoder_manager._save_json(mapping_file_path, speaker_mapping)
-    print("Speaker embeddings saved at:", mapping_file_path)
+    encoder_manager._save_json(mapping_file_path, class_mapping)
+    print("Embeddings saved at:", mapping_file_path)
diff --git a/TTS/bin/synthesize.py b/TTS/bin/synthesize.py
index ce9975a4..ac0bda1d 100755
--- a/TTS/bin/synthesize.py
+++ b/TTS/bin/synthesize.py
@@ -227,6 +227,7 @@ If you don't specify any models, then it uses LJSpeech based English model.
     model_path = None
     config_path = None
     speakers_file_path = None
+    emotions_file_path = None
     language_ids_file_path = None
     vocoder_path = None
     vocoder_config_path = None
diff --git a/TTS/utils/synthesizer.py b/TTS/utils/synthesizer.py
index d9fe9ecf..fed55a09 100644
--- a/TTS/utils/synthesizer.py
+++ b/TTS/utils/synthesizer.py
@@ -259,9 +259,10 @@ class Synthesizer(object):
 
         # handle emotion
         emotion_embedding, emotion_id = None, None
-        if self.tts_emotions_file or hasattr(self.tts_model.emotion_manager, "ids"):
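+        # only use emotion info if the model actually has an emotion manager with ids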
+        if self.tts_emotions_file or (getattr(self.tts_model, "emotion_manager", None) and getattr(self.tts_model.emotion_manager, "ids", None)):
             if emotion_name and isinstance(emotion_name, str):
-                if getattr(self.tts_config, "use_external_emotions_embeddings", False) or getattr(self.tts_config.model_args, "use_external_emotions_embeddings", False):
+                if getattr(self.tts_config, "use_external_emotions_embeddings", False) or (getattr(self.tts_config, "model_args", None) and getattr(self.tts_config.model_args, "use_external_emotions_embeddings", False)):
                     # get the average speaker embedding from the saved embeddings.
                     emotion_embedding = self.tts_model.emotion_manager.get_mean_embedding(emotion_name, num_samples=None, randomize=False)
                     emotion_embedding = np.array(emotion_embedding)[None, :]  # [1 x embedding_dim]