self.iterations = shared_scalar(0)
def get_updates(self, params, regularizers, constraints, cost):
    """Build the Theano update pairs for one SGD step with (Nesterov) momentum.

    Parameters
    ----------
    params : list of shared variables to be updated.
    regularizers, constraints : per-parameter regularizers/constraints,
        forwarded to ``self.get_gradients``.
    cost : scalar expression to minimize.

    Returns
    -------
    list of (shared_variable, new_value_expression) pairs suitable for
    ``theano.function(updates=...)``: one pair for the iteration counter,
    and for each parameter one pair for its momentum accumulator and one
    for the parameter itself.
    """
    grads, params = self.get_gradients(cost, params, regularizers, constraints)
    # Time-based decay: effective lr shrinks as 1 / (1 + decay * t).
    lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations))
    updates = [(self.iterations, self.iterations + 1.)]
    for p, g in zip(params, grads):
        # BUGFIX: the trailing "// momentum" / "// velocity" annotations were
        # C-style comments; in Python `//` is floor division, so line 7
        # floor-divided by an undefined name. They are restored as `#` comments.
        m = shared_zeros(p.get_value().shape)  # momentum accumulator
        v = self.momentum * m - lr * g  # velocity
        updates.append((m, v))
        if self.nesterov:
            # Nesterov look-ahead: take the momentum step, then a gradient
            # step evaluated at the shifted point.
            new_p = p + self.momentum * v - lr * g
        else:
            new_p = p + v
        updates.append((p, new_p))
    return updates
class RMSprop(Optimizer):