ac04dcf9ced65fdd2cafc5c967400cabf32d3c6a,tensorforce/tests/test_quickstart_example.py,TestQuickstartExample,test_example,#TestQuickstartExample#,34

Before Change


            env = OpenAIGym("CartPole-v0")

            # Create a Trust Region Policy Optimization agent
            agent = PPOAgent(config=Configuration(
                log_level="info",
                batch_size=4096,

                gae_lambda=0.97,
                learning_rate=0.001,
                entropy_penalty=0.01,
                epochs=5,
                optimizer_batch_size=512,
                loss_clipping=0.2,
                states=env.states,
                actions=env.actions,
                network=layered_network_builder([
                    dict(type="dense", size=32, activation="tanh"),
                    dict(type="dense", size=32, activation="tanh")
                ])
            ))
            runner = Runner(agent=agent, environment=env)

            def episode_finished(r):

After Change


        for _ in xrange(3):
            # Create an OpenAIgym environment
            env = OpenAIGym("CartPole-v0")
            config = Configuration(
                batch_size=4096,
                # Agent
                preprocessing=None,
                exploration=None,
                reward_preprocessing=None,
                # BatchAgent
                keep_last_timestep=True,  # not documented!
                # PPOAgent
                step_optimizer=dict(
                    type="adam",
                    learning_rate=1e-3
                ),
                optimization_steps=10,
                # Model
                scope="ppo",
                discount=0.99,
                # DistributionModel
                distributions=None,  # not documented!!!
                entropy_regularization=0.01,
                # PGModel
                baseline_mode=None,
                baseline=None,
                baseline_optimizer=None,
                gae_lambda=None,
                normalize_rewards=False,
                # PGLRModel
                likelihood_ratio_clipping=0.2,
                # Logging
                log_level="info",
                # TensorFlow Summaries
                summary_logdir=None,
                summary_labels=["total-loss"],
                summary_frequency=1,
                # Distributed
                distributed=False,
                device=None
            )

            network_spec = [
                dict(type="dense", size=32, activation="tanh"),
                dict(type="dense", size=32, activation="tanh")
            ]
In pattern: SUPERPATTERN

Frequency: 4

Non-data size: 3

Instances


Project Name: reinforceio/tensorforce
Commit Name: ac04dcf9ced65fdd2cafc5c967400cabf32d3c6a
Time: 2017-10-15
Author: mi.schaarschmidt@gmail.com
File Name: tensorforce/tests/test_quickstart_example.py
Class Name: TestQuickstartExample
Method Name: test_example


Project Name: automl/auto-sklearn
Commit Name: 33463c7acd0eae8a063fa9659cd4f8774c8e80fc
Time: 2016-12-20
Author: feurerm@informatik.uni-freiburg.de
File Name: test/test_pipeline/test_classification.py
Class Name: SimpleClassificationPipelineTest
Method Name: test_predict_batched_sparse


Project Name: automl/auto-sklearn
Commit Name: 33463c7acd0eae8a063fa9659cd4f8774c8e80fc
Time: 2016-12-20
Author: feurerm@informatik.uni-freiburg.de
File Name: test/test_pipeline/test_classification.py
Class Name: SimpleClassificationPipelineTest
Method Name: test_predict_proba_batched_sparse


Project Name: reinforceio/tensorforce
Commit Name: 863b8dee69df21ff479b0f28422f2bf2b14f05bd
Time: 2017-10-15
Author: mi.schaarschmidt@gmail.com
File Name: examples/quickstart.py
Class Name:
Method Name: