# less susceptible to overfitting.
#
for train_idx, holdout_idx in kfold.split(X, y, groups):
instance = clone(regr)
if sample_weight is None:
instance.fit(X[train_idx], y[train_idx])
else:
instance.fit(X[train_idx], y[train_idx],
# --- After Change ---
# Advantage of this complex approach is that data points we're
# predicting have not been trained on by the algorithm, so it's
# less susceptible to overfitting.
if sample_weight is None:
fit_params = None
else:
fit_params = dict(sample_weight=sample_weight)
meta_features = np.column_stack([cross_val_predict(
regr, X, y, groups=groups, cv=kfold,
n_jobs=self.n_jobs, fit_params=fit_params,
pre_dispatch=self.pre_dispatch)