# Initialize the loss trace if a callback is requested
if callable(self.callback):
    self.loss_trace = list()
# Iterative updates
for t in range(0, self.max_iter):
    if self.solver == "batch-gradient":
        grad = _grad_L2loss(self.distr,
                            alpha, self.Tau,
                            reg_lambda, X, y, self.eta,
                            beta)
        # Declare convergence once the relative gradient norm
        # falls below tol / lr
        if t > 1:
            if np.linalg.norm(grad) / np.linalg.norm(beta) < tol / lr:
                msg = ("\tConverged in {0:d} iterations".format(t))
                logger.info(msg)
                break
        beta = beta - self.learning_rate * grad
    elif self.solver == "cdfast":
        beta_old = deepcopy(beta)
        beta, z = \
            self._cdfast(X, y, z, ActiveSet, beta, reg_lambda)
        # Declare convergence once the relative change in beta
        # falls below tol / lr
        if t > 1:
            if (np.linalg.norm(beta - beta_old) /
                    np.linalg.norm(beta_old) < tol / lr):
                msg = ("\tConverged in {0:d} iterations".format(t))
                logger.info(msg)
                break
    # Apply the proximal operator to the weights; the intercept beta[0]
    # is not penalized (see the soft-thresholding sketch after this block)
    beta[1:] = self._prox(beta[1:], reg_lambda * alpha)
    # Update the active set for coordinate descent
    if self.solver == "cdfast":
        ActiveSet[beta == 0] = 0
        ActiveSet[0] = 1.
    # Compute and save the loss if a callback is requested
    if callable(self.callback):
        self.loss_trace.append(self.callback(self.distr, alpha,
                                             self.Tau, reg_lambda,
                                             X, y, self.eta,
                                             self.group, beta))

# Update the estimated variables
self.beta0_ = beta[0]
self.beta_ = beta[1:]
self.ynull_ = np.mean(y)
return self
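The `self._prox(beta[1:], reg_lambda * alpha)` call above is the sparsifying step of the update. As a point of reference, here is a minimal sketch of an L1 (soft-thresholding) proximal operator; the helper name `soft_threshold` and the plain-lasso form are assumptions for illustration, since the actual `_prox` may also handle group-sparse penalties.

import numpy as np

def soft_threshold(beta, thresh):
    # Illustrative L1 proximal operator: shrink each coefficient toward
    # zero by `thresh` and clip at zero (assumes a plain lasso penalty;
    # a group penalty would need a block-wise variant)
    return np.sign(beta) * np.maximum(np.abs(beta) - thresh, 0.0)

# Example: with thresh = reg_lambda * alpha = 0.1, coefficients smaller
# than 0.1 in magnitude are zeroed out
print(soft_threshold(np.array([0.05, -0.3, 0.2]), 0.1))  # [ 0.  -0.2  0.1]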
After Change
# Compute and save the loss if a callback is requested
# (see the example callback sketched below)
if callable(self.callback):
    self.callback(beta)

# Update the estimated variables
self.beta0_ = beta[0]
self.beta_ = beta[1:]
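With this change the callback receives only the current coefficient vector `beta`, so computing and storing any monitored quantity (loss, coefficient norm, sparsity) is now the callback's responsibility rather than the solver's. Below is a minimal sketch of such a callback; the name `record_beta` and the idea of wiring it up through the estimator's `callback` attribute are assumptions for illustration, since the constructor is not shown in this excerpt.

import numpy as np

coef_trace = []   # filled in by the callback each time it fires

def record_beta(beta):
    # Store a copy of the current coefficients and report their L2 norm;
    # any per-iteration diagnostic could be computed here instead
    coef_trace.append(np.array(beta, copy=True))
    print("call {0:d}: ||beta|| = {1:.4f}".format(len(coef_trace),
                                                  np.linalg.norm(beta)))

Because the callback no longer receives the loss arguments (distr, alpha, Tau, reg_lambda, X, y, eta, group), any loss evaluation it performs has to close over that data itself.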