if self.__tb_dir is not None:
    # Summaries for any problem type
    tf.summary.scalar("train/loss", cost)
    tf.summary.scalar("train/learning_rate", self.__learning_rate)
    tf.summary.scalar("train/l2_loss", tf.reduce_mean(l2_cost))
    # Visualize the first layer's filters as an image grid in TensorBoard.
    filter_summary = self.__getWeightsAsImage(self.__firstLayer().weights)
    tf.summary.image("filters/first", filter_summary)

    # Summaries for classification problems
    if self.__problem_type == definitions.ProblemType.CLASSIFICATION:
        tf.summary.scalar("train/accuracy", accuracy)
        tf.summary.scalar("test/accuracy", test_accuracy)
        tf.summary.histogram("train/class_predictions", class_predictions)
        tf.summary.histogram("test/class_predictions", test_class_predictions)

    # Summaries for regression
    if self.__problem_type == definitions.ProblemType.REGRESSION:
        tf.summary.scalar("test/loss", test_cost)

    # Summaries for each layer (only layers that expose a name attribute)
    for layer in self.__layers:
        if hasattr(layer, "name"):
            tf.summary.histogram("weights/" + layer.name, layer.weights)
            tf.summary.histogram("biases/" + layer.name, layer.biases)
            tf.summary.histogram("activations/" + layer.name, layer.activations)

    # Merge every summary registered above and open a writer on the
    # TensorBoard log directory, attaching the current graph.
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(self.__tb_dir, self.__session.graph)

# Either load the network parameters from a checkpoint file or start training
if self.__load_from_saved is not False:
    self.__log("Loading from checkpoint file...")
    saver = tf.train.Saver()
    # NOTE(review): assumes self.__load_from_saved holds a checkpoint
    # directory path accepted by tf.train.latest_checkpoint — confirm.
    saver.restore(self.__session, tf.train.latest_checkpoint(self.__load_from_saved))

self.__initializeQueueRunners()
# --- After change: per-layer summaries routed to the "custom_summaries" collection ---
# Summaries for each layer, registered in the dedicated "custom_summaries"
# collection instead of the default summary collection.
for layer in self.__layers:
    if hasattr(layer, "name"):
        tf.summary.histogram("weights/" + layer.name, layer.weights, collections=["custom_summaries"])
        tf.summary.histogram("biases/" + layer.name, layer.biases, collections=["custom_summaries"])
        tf.summary.histogram("activations/" + layer.name, layer.activations, collections=["custom_summaries"])

# NOTE(review): merge_all(key=...) merges ONLY the named collection, so any
# summaries left in the default collection are excluded here — confirm that
# is intended.
merged = tf.summary.merge_all(key="custom_summaries")