Delete trainer related code

Eren Gölge 2022-02-03 15:35:52 +01:00
parent 38314194e7
commit a013566d15
5 changed files with 0 additions and 1518 deletions

File diff suppressed because it is too large.

TTS/utils/logging/__init__.py

@@ -1,24 +0,0 @@
from TTS.utils.logging.console_logger import ConsoleLogger
from TTS.utils.logging.tensorboard_logger import TensorboardLogger
from TTS.utils.logging.wandb_logger import WandbLogger


def init_dashboard_logger(config):
    if config.dashboard_logger == "tensorboard":
        dashboard_logger = TensorboardLogger(config.output_log_path, model_name=config.model)

    elif config.dashboard_logger == "wandb":
        project_name = config.model
        if config.project_name:
            project_name = config.project_name

        dashboard_logger = WandbLogger(
            project=project_name,
            name=config.run_name,
            config=config,
            entity=config.wandb_entity,
        )

    dashboard_logger.add_text("model-config", f"<pre>{config.to_json()}</pre>", 0)
    return dashboard_logger

TTS/utils/logging/console_logger.py

@@ -1,105 +0,0 @@
import datetime

from TTS.utils.io import AttrDict

tcolors = AttrDict(
    {
        "OKBLUE": "\033[94m",
        "HEADER": "\033[95m",
        "OKGREEN": "\033[92m",
        "WARNING": "\033[93m",
        "FAIL": "\033[91m",
        "ENDC": "\033[0m",
        "BOLD": "\033[1m",
        "UNDERLINE": "\033[4m",
    }
)


class ConsoleLogger:
    def __init__(self):
        # TODO: color code for value changes
        # use these to compare values between iterations
        self.old_train_loss_dict = None
        self.old_epoch_loss_dict = None
        self.old_eval_loss_dict = None

    # pylint: disable=no-self-use
    def get_time(self):
        now = datetime.datetime.now()
        return now.strftime("%Y-%m-%d %H:%M:%S")

    def print_epoch_start(self, epoch, max_epoch, output_path=None):
        print(
            "\n{}{} > EPOCH: {}/{}{}".format(tcolors.UNDERLINE, tcolors.BOLD, epoch, max_epoch, tcolors.ENDC),
            flush=True,
        )
        if output_path is not None:
            print(f" --> {output_path}")

    def print_train_start(self):
        print(f"\n{tcolors.BOLD} > TRAINING ({self.get_time()}) {tcolors.ENDC}")

    def print_train_step(self, batch_steps, step, global_step, loss_dict, avg_loss_dict):
        indent = " | > "
        print()
        log_text = "{} --> STEP: {}/{} -- GLOBAL_STEP: {}{}\n".format(
            tcolors.BOLD, step, batch_steps, global_step, tcolors.ENDC
        )
        for key, value in loss_dict.items():
            if f"avg_{key}" in avg_loss_dict.keys():
                # print the avg value if given
                if isinstance(value, float) and round(value, 5) == 0:
                    # do not round the number if it is zero when rounded
                    log_text += "{}{}: {} ({})\n".format(indent, key, value, avg_loss_dict[f"avg_{key}"])
                else:
                    # print the rounded value
                    log_text += "{}{}: {:.5f} ({:.5f})\n".format(indent, key, value, avg_loss_dict[f"avg_{key}"])
            else:
                if isinstance(value, float) and round(value, 5) == 0:
                    log_text += "{}{}: {} \n".format(indent, key, value)
                else:
                    log_text += "{}{}: {:.5f} \n".format(indent, key, value)
        print(log_text, flush=True)

    # pylint: disable=unused-argument
    def print_train_epoch_end(self, global_step, epoch, epoch_time, print_dict):
        indent = " | > "
        log_text = f"\n{tcolors.BOLD} --> TRAIN PERFORMANCE -- EPOCH TIME: {epoch_time:.2f} sec -- GLOBAL_STEP: {global_step}{tcolors.ENDC}\n"
        for key, value in print_dict.items():
            log_text += "{}{}: {:.5f}\n".format(indent, key, value)
        print(log_text, flush=True)

    def print_eval_start(self):
        print(f"\n{tcolors.BOLD} > EVALUATION {tcolors.ENDC}\n")

    def print_eval_step(self, step, loss_dict, avg_loss_dict):
        indent = " | > "
        log_text = f"{tcolors.BOLD} --> STEP: {step}{tcolors.ENDC}\n"
        for key, value in loss_dict.items():
            # print the avg value if given
            if f"avg_{key}" in avg_loss_dict.keys():
                log_text += "{}{}: {:.5f} ({:.5f})\n".format(indent, key, value, avg_loss_dict[f"avg_{key}"])
            else:
                log_text += "{}{}: {:.5f} \n".format(indent, key, value)
        print(log_text, flush=True)

    def print_epoch_end(self, epoch, avg_loss_dict):
        indent = " | > "
        log_text = "\n {}--> EVAL PERFORMANCE{}\n".format(tcolors.BOLD, tcolors.ENDC)
        for key, value in avg_loss_dict.items():
            # print the avg value if given
            color = ""
            sign = "+"
            diff = 0
            if self.old_eval_loss_dict is not None and key in self.old_eval_loss_dict:
                diff = value - self.old_eval_loss_dict[key]
                if diff < 0:
                    color = tcolors.OKGREEN
                    sign = ""
                elif diff > 0:
                    color = tcolors.FAIL
                    sign = "+"
            log_text += "{}{}:{} {:.5f} {}({}{:.5f})\n".format(indent, key, color, value, tcolors.ENDC, sign, diff)
        self.old_eval_loss_dict = avg_loss_dict
        print(log_text, flush=True)
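The one stateful behavior here is `print_epoch_end`: each metric is colored by its change since the previous epoch's `avg_loss_dict` (green when it dropped, red when it rose). A standalone sketch of just that coloring rule, independent of the deleted class, with the same ANSI codes as `tcolors` above:

OKGREEN, FAIL, ENDC = "\033[92m", "\033[91m", "\033[0m"

def format_metric(key, value, old_value=None):
    """Render one metric line, colored by its change since the previous epoch."""
    color, sign, diff = "", "+", 0.0
    if old_value is not None:
        diff = value - old_value
        color = OKGREEN if diff < 0 else FAIL if diff > 0 else ""
        sign = "" if diff < 0 else "+"  # a negative diff carries its own minus sign
    return f" | > {key}:{color} {value:.5f} {ENDC}({sign}{diff:.5f})"

print(format_metric("avg_loss", 0.41230, old_value=0.45710))  # green: improved
print(format_metric("avg_loss", 0.47420, old_value=0.45710))  # red: regressed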

TTS/utils/logging/tensorboard_logger.py

@@ -1,79 +0,0 @@
import traceback

from tensorboardX import SummaryWriter


class TensorboardLogger(object):
    def __init__(self, log_dir, model_name):
        self.model_name = model_name
        self.writer = SummaryWriter(log_dir)

    def model_weights(self, model, step):
        layer_num = 1
        for name, param in model.named_parameters():
            if param.numel() == 1:
                self.writer.add_scalar("layer{}-{}/value".format(layer_num, name), param.max(), step)
            else:
                self.writer.add_scalar("layer{}-{}/max".format(layer_num, name), param.max(), step)
                self.writer.add_scalar("layer{}-{}/min".format(layer_num, name), param.min(), step)
                self.writer.add_scalar("layer{}-{}/mean".format(layer_num, name), param.mean(), step)
                self.writer.add_scalar("layer{}-{}/std".format(layer_num, name), param.std(), step)
                self.writer.add_histogram("layer{}-{}/param".format(layer_num, name), param, step)
                self.writer.add_histogram("layer{}-{}/grad".format(layer_num, name), param.grad, step)
            layer_num += 1

    def dict_to_tb_scalar(self, scope_name, stats, step):
        for key, value in stats.items():
            self.writer.add_scalar("{}/{}".format(scope_name, key), value, step)

    def dict_to_tb_figure(self, scope_name, figures, step):
        for key, value in figures.items():
            self.writer.add_figure("{}/{}".format(scope_name, key), value, step)

    def dict_to_tb_audios(self, scope_name, audios, step, sample_rate):
        for key, value in audios.items():
            if value.dtype == "float16":
                value = value.astype("float32")
            try:
                self.writer.add_audio("{}/{}".format(scope_name, key), value, step, sample_rate=sample_rate)
            except RuntimeError:
                traceback.print_exc()

    def train_step_stats(self, step, stats):
        self.dict_to_tb_scalar(f"{self.model_name}_TrainIterStats", stats, step)

    def train_epoch_stats(self, step, stats):
        self.dict_to_tb_scalar(f"{self.model_name}_TrainEpochStats", stats, step)

    def train_figures(self, step, figures):
        self.dict_to_tb_figure(f"{self.model_name}_TrainFigures", figures, step)

    def train_audios(self, step, audios, sample_rate):
        self.dict_to_tb_audios(f"{self.model_name}_TrainAudios", audios, step, sample_rate)

    def eval_stats(self, step, stats):
        self.dict_to_tb_scalar(f"{self.model_name}_EvalStats", stats, step)

    def eval_figures(self, step, figures):
        self.dict_to_tb_figure(f"{self.model_name}_EvalFigures", figures, step)

    def eval_audios(self, step, audios, sample_rate):
        self.dict_to_tb_audios(f"{self.model_name}_EvalAudios", audios, step, sample_rate)

    def test_audios(self, step, audios, sample_rate):
        self.dict_to_tb_audios(f"{self.model_name}_TestAudios", audios, step, sample_rate)

    def test_figures(self, step, figures):
        self.dict_to_tb_figure(f"{self.model_name}_TestFigures", figures, step)

    def add_text(self, title, text, step):
        self.writer.add_text(title, text, step)

    def log_artifact(self, file_or_dir, name, artifact_type, aliases=None):  # pylint: disable=W0613, R0201
        yield

    def flush(self):
        self.writer.flush()

    def finish(self):
        self.writer.close()
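Everything in this class is a thin namespacing layer over tensorboardX: each public method prefixes a scope like f"{model_name}_TrainIterStats" and forwards a dict to the `SummaryWriter`. A minimal sketch of the same pattern against tensorboardX directly; it assumes `tensorboardX` is installed, and the log dir, scope name, step, and values are illustrative:

from tensorboardX import SummaryWriter

writer = SummaryWriter("runs/my_model")  # the same writer the deleted class wrapped

# dict_to_tb_scalar above boils down to this loop:
stats = {"loss": 0.412, "grad_norm": 1.73}
for key, value in stats.items():
    writer.add_scalar(f"tacotron2_TrainIterStats/{key}", value, global_step=100)

writer.flush()
writer.close()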

TTS/utils/logging/wandb_logger.py

@@ -1,111 +0,0 @@
# pylint: disable=W0613
import traceback
from pathlib import Path

try:
    import wandb
    from wandb import finish, init  # pylint: disable=W0611
except ImportError:
    wandb = None


class WandbLogger:
    def __init__(self, **kwargs):
        if not wandb:
            raise Exception("install wandb using `pip install wandb` to use WandbLogger")

        self.run = None
        self.run = wandb.init(**kwargs) if not wandb.run else wandb.run
        self.model_name = self.run.config.model
        self.log_dict = {}

    def model_weights(self, model):
        layer_num = 1
        for name, param in model.named_parameters():
            if param.numel() == 1:
                self.dict_to_scalar("weights", {"layer{}-{}/value".format(layer_num, name): param.max()})
            else:
                self.dict_to_scalar("weights", {"layer{}-{}/max".format(layer_num, name): param.max()})
                self.dict_to_scalar("weights", {"layer{}-{}/min".format(layer_num, name): param.min()})
                self.dict_to_scalar("weights", {"layer{}-{}/mean".format(layer_num, name): param.mean()})
                self.dict_to_scalar("weights", {"layer{}-{}/std".format(layer_num, name): param.std()})
                self.log_dict["weights/layer{}-{}/param".format(layer_num, name)] = wandb.Histogram(param)
                self.log_dict["weights/layer{}-{}/grad".format(layer_num, name)] = wandb.Histogram(param.grad)
            layer_num += 1

    def dict_to_scalar(self, scope_name, stats):
        for key, value in stats.items():
            self.log_dict["{}/{}".format(scope_name, key)] = value

    def dict_to_figure(self, scope_name, figures):
        for key, value in figures.items():
            self.log_dict["{}/{}".format(scope_name, key)] = wandb.Image(value)

    def dict_to_audios(self, scope_name, audios, sample_rate):
        for key, value in audios.items():
            if value.dtype == "float16":
                value = value.astype("float32")
            try:
                self.log_dict["{}/{}".format(scope_name, key)] = wandb.Audio(value, sample_rate=sample_rate)
            except RuntimeError:
                traceback.print_exc()

    def log(self, log_dict, prefix="", flush=False):
        for key, value in log_dict.items():
            self.log_dict[prefix + key] = value
        if flush:  # for cases where you don't want to accumulate data
            self.flush()

    def train_step_stats(self, step, stats):
        self.dict_to_scalar(f"{self.model_name}_TrainIterStats", stats)

    def train_epoch_stats(self, step, stats):
        self.dict_to_scalar(f"{self.model_name}_TrainEpochStats", stats)

    def train_figures(self, step, figures):
        self.dict_to_figure(f"{self.model_name}_TrainFigures", figures)

    def train_audios(self, step, audios, sample_rate):
        self.dict_to_audios(f"{self.model_name}_TrainAudios", audios, sample_rate)

    def eval_stats(self, step, stats):
        self.dict_to_scalar(f"{self.model_name}_EvalStats", stats)

    def eval_figures(self, step, figures):
        self.dict_to_figure(f"{self.model_name}_EvalFigures", figures)

    def eval_audios(self, step, audios, sample_rate):
        self.dict_to_audios(f"{self.model_name}_EvalAudios", audios, sample_rate)

    def test_audios(self, step, audios, sample_rate):
        self.dict_to_audios(f"{self.model_name}_TestAudios", audios, sample_rate)

    def test_figures(self, step, figures):
        self.dict_to_figure(f"{self.model_name}_TestFigures", figures)

    def add_text(self, title, text, step):
        pass

    def flush(self):
        if self.run:
            wandb.log(self.log_dict)
        self.log_dict = {}

    def finish(self):
        if self.run:
            self.run.finish()

    def log_artifact(self, file_or_dir, name, artifact_type, aliases=None):
        if not self.run:
            return
        name = "_".join([self.run.id, name])
        artifact = wandb.Artifact(name, type=artifact_type)
        data_path = Path(file_or_dir)
        if data_path.is_dir():
            artifact.add_dir(str(data_path))
        elif data_path.is_file():
            artifact.add_file(str(data_path))
        self.run.log_artifact(artifact, aliases=aliases)
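Unlike `TensorboardLogger`, which writes immediately, this class accumulates everything in `self.log_dict` and only ships it in `flush()`, so scalars, figures, and audios from one training step land in a single `wandb.log` call and share an x-axis. A minimal sketch of that accumulate-then-flush pattern with plain wandb; it assumes you are logged in to wandb, and the project and run names are illustrative:

import wandb

run = wandb.init(project="tacotron2", name="baseline-run")

# Accumulate, then log once per step -- the pattern the deleted class used.
log_dict = {}
log_dict["tacotron2_TrainIterStats/loss"] = 0.412
log_dict["tacotron2_TrainIterStats/grad_norm"] = 1.73

wandb.log(log_dict)  # one call per step keeps all metrics on the same step axis
run.finish()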