# Assumes `numpy as np` and a `round_sigfigs` helper are already imported / in scope.
# These are the metrics we'll pass along, and their new names
train_metrics = {"train_loss", "ups", "wps", "gnorm", "clip"}
valid_metrics = {"valid_loss"}
metrics = train_metrics if self.is_training else valid_metrics
m = {k: self.trainer.meters[k].avg for k in metrics}
# additionally output perplexity. note that fairseq models use base 2
# in cross_entropy:
# github.com/pytorch/fairseq/blob/master/fairseq/criterions/cross_entropy.py#L55
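# For example, a loss of 4.5 (bits per token) corresponds to ppl = 2 ** 4.5 ≈ 22.6,
# whereas e ** 4.5 ≈ 90.0, so np.exp2 (not np.exp) is the right conversion here.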
if "train_loss" in m:
m["train_ppl"] = np.exp2(m["train_loss"])
if "valid_loss" in m:
m["ppl"] = np.exp2(m["valid_loss"])
for k, v in m.items():
    # clean up: round to 4 significant figures and convert any tensors to floats
    m[k] = round_sigfigs(v, 4)
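
# For reference, a minimal sketch of a round_sigfigs-style helper, assuming it
# rounds to a given number of significant figures and converts zero-dim tensors
# to floats (an illustrative stand-in, not the real implementation):
import math

def round_sigfigs_sketch(x, sigfigs=4):
    # Convert zero-dim tensors (anything exposing .item()) to plain Python floats.
    if hasattr(x, "item"):
        x = x.item()
    if x == 0:
        return 0.0
    # Round to the requested number of significant figures.
    return round(x, -int(math.floor(math.log10(abs(x)))) + (sigfigs - 1))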