a0d0c18fcde09f06be9ca1556052cf8064df4ac6,examples/reinforcement_learning/tutorial_A3C.py,,,#,52

Before Change


GLOBAL_RUNNING_R = []  # per-episode moving-average rewards collected across workers
GLOBAL_EP = 0  # will increase during training, stop training when it >= MAX_GLOBAL_EP

# NOTE(review): GAME is defined elsewhere in this file — presumably a continuous-control
# env id, since action_space.shape is indexed below; confirm against the full source.
env = gym.make(GAME)

N_S = env.observation_space.shape[0]  # state (observation) dimension
N_A = env.action_space.shape[0]  # action dimension (continuous action space)

# Reshape the per-dimension action bounds to (1, N_A) so they broadcast
# against batched action tensors of shape (batch, N_A).
A_BOUND = [env.action_space.low, env.action_space.high]
A_BOUND[0] = A_BOUND[0].reshape(1, N_A)
A_BOUND[1] = A_BOUND[1].reshape(1, N_A)
# print(A_BOUND)

After Change


# A3C training hyperparameters and shared (cross-worker) bookkeeping globals.
ENTROPY_BETA = 0.005  # factor for entropy-boosted exploration
LR_A = 0.00005  # learning rate for actor
LR_C = 0.0001  # learning rate for critic
GLOBAL_RUNNING_R = []  # per-episode moving-average rewards collected across workers
GLOBAL_EP = 0  # will increase during training, stop training when it >= MAX_GLOBAL_EP


# ==================  Asynchronous Advantage Actor Critic (A3C)  ==================
Italian Trulli
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 9

Instances


Project Name: tensorlayer/tensorlayer
Commit Name: a0d0c18fcde09f06be9ca1556052cf8064df4ac6
Time: 2019-06-09
Author: 1402434478@qq.com
File Name: examples/reinforcement_learning/tutorial_A3C.py
Class Name:
Method Name:


Project Name: PacktPublishing/Deep-Reinforcement-Learning-Hands-On
Commit Name: a0113631d7e9d6ec65531457ce03e06c902a4d0b
Time: 2017-10-17
Author: max.lapan@gmail.com
File Name: ch06/01_dqn_pong.py
Class Name:
Method Name:


Project Name: PacktPublishing/Deep-Reinforcement-Learning-Hands-On
Commit Name: 3bb0fd78af4f9dbddf7c01dde71844a283099650
Time: 2017-10-16
Author: max.lapan@gmail.com
File Name: ch06/01_dqn_pong.py
Class Name:
Method Name: