0fb529227b2754e195ad9b65af236a528ebf7f20,src/skmultiflow/evaluation/evaluate_prequential.py,EvaluatePrequential,_train_and_test,#EvaluatePrequential#,212

Before Change



            X, y = self.stream.next_sample(self.pretrain_size)

            for i in range(self.n_models):
                if self._task_type != constants.REGRESSION and \
                   self._task_type != constants.MULTI_TARGET_REGRESSION:
                    self.model[i].partial_fit(X=X, y=y, classes=self.stream.target_values)
                else:
                    self.model[i].partial_fit(X=X, y=y)
            self.global_sample_count += self.pretrain_size
            first_run = False
        else:
            logging.info("No pre-training.")

        update_count = 0
        logging.info("Evaluating...")
        while ((self.global_sample_count < self.max_samples) & (end_time - start_time < self.max_time)
               & (self.stream.has_more_samples())):
            try:
                X, y = self.stream.next_sample(self.batch_size)

                if X is not None and y is not None:
                    # Test
                    prediction = [[] for _ in range(self.n_models)]
                    for i in range(self.n_models):
                        try:
                            prediction[i].extend(self.model[i].predict(X))
                        except TypeError:
                            raise TypeError("Unexpected prediction value from {}"
                                            .format(type(self.model[i]).__name__))
                    self.global_sample_count += self.batch_size

                    for j in range(self.n_models):
                        for i in range(len(prediction[0])):
                            if self._task_type == constants.CLASSIFICATION:
                                self.mean_eval_measurements[j].add_result(y[i], prediction[j][i])
                                self.current_eval_measurements[j].add_result(y[i], prediction[j][i])
                            else:
                                self.mean_eval_measurements[j].add_result(y[i], prediction[j][i])
                                self.current_eval_measurements[j].add_result(y[i], prediction[j][i])
                    self._check_progress(logging, n_samples)

                    # Train
                    if first_run:

After Change



            return self.model

    def _train_and_test(self):
        """Method to control the prequential evaluation.

        Returns
        -------
        BaseClassifier extension or list of BaseClassifier extensions
            The trained classifiers.

        Notes
        -----
        The classifier parameter should be an extension from the BaseClassifier. In
        the future, when BaseRegressor is created, it could be an extension from that
        class as well.
        """
        logging.basicConfig(format="%(message)s", level=logging.INFO)
        start_time = timer()
        end_time = timer()
        logging.info("Prequential Evaluation")
        logging.info("Evaluating %s target(s).", str(self.stream.n_targets))

        n_samples = self.stream.n_remaining_samples()
        if n_samples == -1 or n_samples > self.max_samples:
            n_samples = self.max_samples

        first_run = True
        if self.pretrain_size > 0:
            logging.info("Pre-training on %s samples.", str(self.pretrain_size))

            X, y = self.stream.next_sample(self.pretrain_size)

            for i in range(self.n_models):
                if self._task_type == constants.CLASSIFICATION:
                    self.model[i].partial_fit(X=X, y=y, classes=self.stream.target_values)
                elif self._task_type == constants.MULTI_TARGET_CLASSIFICATION:
                    self.model[i].partial_fit(X=X, y=y, classes=unique(self.stream.target_values))
                else:
                    self.model[i].partial_fit(X=X, y=y)
            self.global_sample_count += self.pretrain_size
            first_run = False
        else:
            logging.info("No pre-training.")

        update_count = 0
        logging.info("Evaluating...")
        while ((self.global_sample_count < self.max_samples) & (end_time - start_time < self.max_time)
               & (self.stream.has_more_samples())):
            try:
                X, y = self.stream.next_sample(self.batch_size)

                if X is not None and y is not None:
                    # Test
                    prediction = [[] for _ in range(self.n_models)]
                    for i in range(self.n_models):
                        try:
                            prediction[i].extend(self.model[i].predict(X))
                        except TypeError:
                            raise TypeError("Unexpected prediction value from {}"
                                            .format(type(self.model[i]).__name__))
                    self.global_sample_count += self.batch_size

                    for j in range(self.n_models):
                        for i in range(len(prediction[0])):
                            self.mean_eval_measurements[j].add_result(y[i], prediction[j][i])
                            self.current_eval_measurements[j].add_result(y[i], prediction[j][i])
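The new MULTI_TARGET_CLASSIFICATION branch passes unique(self.stream.target_values) rather than the raw label matrix. Assuming unique here is numpy's np.unique (its import lies outside this snippet), it collapses the per-target label lists into a single flattened label set:

    import numpy as np

    # Hypothetical label matrix: one row of admissible labels per target.
    target_values = [[0, 1, 2], [0, 1, 2]]
    print(np.unique(target_values))   # [0 1 2] -- one flattened label set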
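_train_and_test is the internal driver behind the public evaluate call. A minimal usage sketch against the scikit-multiflow API of this era (the stream and learner choices are illustrative, not part of the commit):

    from skmultiflow.data import SEAGenerator
    from skmultiflow.trees import HoeffdingTree
    from skmultiflow.evaluation import EvaluatePrequential

    stream = SEAGenerator()
    stream.prepare_for_use()   # required by stream classes of this period

    evaluator = EvaluatePrequential(pretrain_size=200,
                                    max_samples=10000,
                                    batch_size=1)
    # evaluate() pre-trains, then interleaves test-then-train as in the loop above.
    evaluator.evaluate(stream=stream, model=HoeffdingTree())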
In pattern: SUPERPATTERN

Frequency: 4

Non-data size: 29

Instances


Project Name: scikit-multiflow/scikit-multiflow
Commit Name: 0fb529227b2754e195ad9b65af236a528ebf7f20
Time: 2018-10-06
Author: jacob.montiel@gmail.com
File Name: src/skmultiflow/evaluation/evaluate_prequential.py
Class Name: EvaluatePrequential
Method Name: _train_and_test


Project Name: scikit-multiflow/scikit-multiflow
Commit Name: 56a6fb1d87f757e01096e1dfedd4339f4f9ad9f6
Time: 2018-10-08
Author: jacob.montiel@gmail.com
File Name: src/skmultiflow/evaluation/evaluate_prequential.py
Class Name: EvaluatePrequential
Method Name: _train_and_test


Project Name: scikit-multiflow/scikit-multiflow
Commit Name: 0fb529227b2754e195ad9b65af236a528ebf7f20
Time: 2018-10-06
Author: jacob.montiel@gmail.com
File Name: src/skmultiflow/evaluation/evaluate_holdout.py
Class Name: EvaluateHoldout
Method Name: _periodic_holdout


Project Name: scikit-multiflow/scikit-multiflow
Commit Name: 56a6fb1d87f757e01096e1dfedd4339f4f9ad9f6
Time: 2018-10-08
Author: jacob.montiel@gmail.com
File Name: src/skmultiflow/evaluation/evaluate_holdout.py
Class Name: EvaluateHoldout
Method Name: _periodic_holdout