a68ad3bda020d9ade2b33c58a3f1406de41b682c,pyglmnet/pyglmnet.py,GLM,fit,#GLM#Any#Any#,661
Before Change
z = beta[0] + np.dot(X, beta[1:])  # cache the linear predictor
# Initialize loss accumulators
L, DL = list(), list()
for t in range(0, self.max_iter):
    if self.solver == "batch-gradient":
        grad = _grad_L2loss(self.distr,
                            alpha, self.Tau,
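As an aside, here is a minimal sketch of what a penalized-loss gradient of this shape could compute for a Gaussian GLM with a Tikhonov (Tau) penalty. The function name, signature, and scaling below are assumptions for illustration, not pyglmnet's actual _grad_L2loss.

import numpy as np

def grad_l2loss_gaussian(X, y, beta, alpha, Tau, reg_lambda):
    # Illustrative only: gradient of the mean squared error plus the L2
    # (Tikhonov) part of an elastic-net penalty on the coefficients.
    n_samples = X.shape[0]
    z = beta[0] + np.dot(X, beta[1:])            # linear predictor
    grad_beta0 = np.sum(z - y) / n_samples       # intercept gradient
    grad_beta = np.dot(X.T, z - y) / n_samples   # coefficient gradient
    grad_beta += reg_lambda * (1 - alpha) * np.dot(Tau.T, np.dot(Tau, beta[1:]))
    return np.concatenate(([grad_beta0], grad_beta))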
After Change
        beta = beta - self.learning_rate * grad
    elif self.solver == "cdfast":
        beta_old = deepcopy(beta)
        beta, z = \
            self._cdfast(X, y, z, ActiveSet, beta, reg_lambda)
    if t > 1:
        if np.linalg.norm(beta - beta_old) / \
                np.linalg.norm(beta_old) < tol / lr:
            msg = ("\tConverged in {0:d} iterations".format(t))
            logger.info(msg)
            break
    # Apply proximal operator
    beta[1:] = self._prox(beta[1:], reg_lambda * alpha)
    # Update active set
    if self.solver == "cdfast":
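For reference, a minimal self-contained sketch of the two operations the updated loop relies on: an elementwise soft-thresholding proximal operator for the L1 part of the penalty, and the relative-change convergence test. The names soft_threshold and has_converged are illustrative stand-ins, not pyglmnet's self._prox or its fit internals.

import numpy as np

def soft_threshold(beta, thresh):
    # prox of thresh * ||beta||_1: shrink each coefficient toward zero
    return np.sign(beta) * np.maximum(np.abs(beta) - thresh, 0.0)

def has_converged(beta, beta_old, tol, learning_rate):
    # stop once the relative change in beta drops below tol / learning_rate
    return (np.linalg.norm(beta - beta_old) /
            np.linalg.norm(beta_old)) < tol / learning_rate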
In pattern: SUPERPATTERN
Frequency: 4
Non-data size: 4
Instances
Project Name: glm-tools/pyglmnet
Commit Name: a68ad3bda020d9ade2b33c58a3f1406de41b682c
Time: 2018-09-05
Author: pavan.ramkumar@gmail.com
File Name: pyglmnet/pyglmnet.py
Class Name: GLM
Method Name: fit
Project Name: jhfjhfj1/autokeras
Commit Name: aa212fc6e72f394396ecde431a86c55e89ae194e
Time: 2018-08-03
Author: jin@tamu.edu
File Name: autokeras/search.py
Class Name: BayesianSearcher
Method Name: maximize_acq
Project Name: glm-tools/pyglmnet
Commit Name: 4367785c9131771d2dd80b45d4bbb4ca00bd24ac
Time: 2018-09-08
Author: pavan.ramkumar@gmail.com
File Name: pyglmnet/pyglmnet.py
Class Name: GLM
Method Name: fit