if self.grad_clip is None:
    clip_gvs = gvs
else:
    clip_gvs = [(tf.clip_by_value(g, -self.grad_clip, self.grad_clip), v) for g, v in gvs]
self.step_op = self.opt.apply_gradients(clip_gvs)
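For context on why this block changed: opt.compute_gradients can return (None, variable) pairs for variables that receive no gradient (batch norm is the case called out in the new version), and passing None to tf.clip_by_value raises. A minimal plain-Python sketch of the None-skipping idea, using hypothetical stand-ins (fake_gvs, clip) for the gradient/variable pairs and the clipping op:

# Plain-Python stand-ins for (gradient, variable) pairs; one gradient is None,
# as compute_gradients can return for variables disconnected from the loss.
fake_gvs = [(0.7, "conv1/kernel"), (None, "bn1/moving_mean"), (-2.3, "final/bias")]

def clip(g, lo, hi):
    # scalar stand-in for tf.clip_by_value
    return max(lo, min(hi, g))

grad_clip = 1.0

# The original comprehension clips every gradient, so it would call clip(None, ...)
# and raise as soon as any pair carries a None gradient:
# clipped = [(clip(g, -grad_clip, grad_clip), v) for g, v in fake_gvs]

# The None-aware version passes (None, variable) pairs through untouched:
clipped = [(g, v) if g is None else (clip(g, -grad_clip, grad_clip), v) for g, v in fake_gvs]
print(clipped)  # [(0.7, 'conv1/kernel'), (None, 'bn1/moving_mean'), (-1.0, 'final/bias')]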
//////////////////////////////////////////////////////////////////////////////////////////////////////
After Change
if self.target_space == "integer":
    # move negatives into exponential space and align positives
    # clipping prevents overflow that TF dislikes
    self.preds_op = tf.select(self.preds_op > 0, self.preds_op + 1, tf.exp(tf.clip_by_value(self.preds_op, -50, 50)))
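    # (tf.select computes both branch tensors, so the clip also keeps exp() from
    # overflowing on large positive predictions; very negative predictions are
    # floored at exp(-50) rather than underflowing to 0, which would make the
    # tf.log below return -inf. The two pieces meet at 0 + 1 == exp(0) == 1, so
    # the transform is continuous and strictly positive, as a Poisson rate must be.)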
    # Poisson loss
    self.loss_op = tf.nn.log_poisson_loss(tf.log(self.preds_op), self.targets_op, compute_full_loss=True)
    self.loss_op = tf.reduce_mean(self.loss_op)
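    # (log_poisson_loss is handed log(preds), so the per-element loss above is the
    # Poisson negative log-likelihood preds - targets * log(preds); with
    # compute_full_loss=True it also adds a Stirling approximation of log(targets!).)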
else:
    # clip targets
    if self.target_space == "positive":
        self.targets_op = tf.nn.relu(self.targets_op)
    # take the squared difference
    sq_diff = tf.squared_difference(self.preds_op, self.targets_op)
    # set NaNs to zero
    # sq_diff = tf.boolean_mask(sq_diff, tf.logical_not(self.targets_na[:,tstart:tend]))
    # take the mean
    self.loss_op = tf.reduce_mean(sq_diff, name="r2_loss") + norm_stabilizer
# track
tf.scalar_summary("loss", self.loss_op)

# define optimization
if self.optimization == "adam":
    self.opt = tf.train.AdamOptimizer(self.learning_rate, beta1=self.adam_beta1, beta2=self.adam_beta2, epsilon=self.adam_eps)
else:
    print("Cannot recognize optimization algorithm %s" % self.optimization)
    exit(1)
# clip gradients
self.gvs = self.opt.compute_gradients(self.loss_op)
if self.grad_clip is not None:
    # self.gvs = [(tf.clip_by_value(g, -self.grad_clip, self.grad_clip), v) for g, v in self.gvs]
    # batch norm introduces None gradients that we have to dodge
    clip_gvs = []
    for i in range(len(self.gvs)):
        g, v = self.gvs[i]
        if g is None:
            clip_gvs.append(self.gvs[i])
        else:
            clip_gvs.append((tf.clip_by_value(g, -self.grad_clip, self.grad_clip), v))
else:
    # no clipping requested; pass the gradients through unchanged
    clip_gvs = self.gvs

# apply gradients
self.step_op = self.opt.apply_gradients(clip_gvs)
//////////////////////////////////////////////////////////////////////////////////////////////////////
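As a quick, standalone check of the integer branch above, here is a minimal NumPy sketch separate from the model code; to_rate and the sample values are hypothetical:

# Demonstrates the piecewise transform used above: positives are shifted by +1 and
# negatives pass through a clipped exp, so every raw prediction maps to a strictly
# positive Poisson rate and the two pieces meet at 1 when the raw value is 0.
import numpy as np

def to_rate(x, clip=50.0):
    x = np.asarray(x, dtype=np.float64)
    return np.where(x > 0, x + 1.0, np.exp(np.clip(x, -clip, clip)))

raw = np.array([-100.0, -2.0, 0.0, 0.5, 3.0])
rate = to_rate(raw)
print(rate)  # approx [1.9e-22, 0.135, 1.0, 1.5, 4.0], all strictly positive

# Poisson negative log-likelihood against an integer target, matching the
# rate - target * log(rate) term that log_poisson_loss computes from log(rate):
target = 2.0
print(rate - target * np.log(rate))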