# TODO(petebu): Consider the case when all updates are None.
if update is not None:
  optimizer_utils.check_same_dtype(update, parameter)
  m, v = self._get_or_create_moments(parameter)
  learning_rate = tf.cast(self.learning_rate, update.dtype.base_dtype)
  beta1 = tf.cast(self.beta1, update.dtype.base_dtype)
  beta2 = tf.cast(self.beta2, update.dtype.base_dtype)
  epsilon = tf.cast(self.epsilon, update.dtype.base_dtype)
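For context, the per-parameter moments and the cast hyperparameters above feed into the standard bias-corrected Adam step. The sketch below is a minimal standalone version of that step, assuming `parameter`, `m`, and `v` are tf.Variables and `step` has already been cast to the update dtype; it is not the library's actual implementation.

import tensorflow as tf

def adam_step(parameter, update, m, v, step, learning_rate, beta1, beta2, epsilon):
  # Update the biased first and second moment estimates in place.
  m.assign(beta1 * m + (1.0 - beta1) * update)
  v.assign(beta2 * v + (1.0 - beta2) * tf.square(update))
  # Bias-correct the moments, then take the parameter step.
  m_hat = m / (1.0 - tf.pow(beta1, step))
  v_hat = v / (1.0 - tf.pow(beta2, step))
  parameter.assign_sub(learning_rate * m_hat / (tf.sqrt(v_hat) + epsilon))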
After Change
# Raises ValueError if updates and parameters have different lengths, or have inconsistent types.
optimizer_utils.check_updates_parameters(updates, parameters)
self._initialize(parameters)
self.step.assign_add(1)
for update, parameter, m, v in zip(updates, parameters, self.m, self.v):
  # TODO(petebu): Add support for sparse tensors.
  # TODO(petebu): Consider caching learning_rate cast.
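As an illustration of the up-front validation the new path adds, the sketch below guesses at what optimizer_utils.check_updates_parameters checks, based only on the error description above; the real helper may differ. Per-element dtype consistency is still checked inside the loop by check_same_dtype.

def check_updates_parameters(updates, parameters):
  # Sketch of the validation described above; the real optimizer_utils helper may differ.
  if len(updates) != len(parameters):
    raise ValueError(
        "`updates` and `parameters` must have the same length, got {} and {}."
        .format(len(updates), len(parameters)))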