Commit Name: 7ec9efd5cf6a479e8c5d85dcc950f464fd15b134
File Name: tf_agents/agents/reinforce/reinforce_agent.py
Class Name: ReinforceAgent
Method Name: total_loss
Method Signature: #ReinforceAgent#Any#Any#Any#Any#
Line Number: 260

Before Change


          value_preds, returns, num_episodes, weights)
      total_loss += value_estimation_loss

    with tf.name_scope("Losses/"):
      tf.compat.v2.summary.scalar(
          name="policy_gradient_loss",
          data=policy_gradient_loss,
          step=self.train_step_counter)
      tf.compat.v2.summary.scalar(
          name="entropy_regularization_loss",
          data=entropy_regularization_loss,
          step=self.train_step_counter)
      if self._baseline:
        tf.compat.v2.summary.scalar(
            name="value_estimation_loss",
            data=value_estimation_loss,
            step=self.train_step_counter)
      tf.compat.v2.summary.scalar(
          name="total_loss", data=total_loss, step=self.train_step_counter)

    return tf_agent.LossInfo(total_loss, ())

  def policy_gradient_loss(self,
                           actions_distribution,
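
For reference, the "Before Change" version logs each loss with its own tf.compat.v2.summary.scalar call under a shared "Losses/" name scope. A minimal, self-contained sketch of that pattern follows; the writer path, step variable, and loss values are hypothetical stand-ins for what the agent actually computes.

import tensorflow as tf

# Hypothetical stand-ins for the agent's computed losses and step counter.
writer = tf.summary.create_file_writer("/tmp/reinforce_demo")
train_step_counter = tf.Variable(0, dtype=tf.int64)
policy_gradient_loss = tf.constant(0.5)
entropy_regularization_loss = tf.constant(0.01)
total_loss = policy_gradient_loss + entropy_regularization_loss

with writer.as_default():
  with tf.name_scope("Losses/"):
    tf.compat.v2.summary.scalar(
        name="policy_gradient_loss",
        data=policy_gradient_loss,
        step=train_step_counter)
    tf.compat.v2.summary.scalar(
        name="entropy_regularization_loss",
        data=entropy_regularization_loss,
        step=train_step_counter)
    tf.compat.v2.summary.scalar(
        name="total_loss", data=total_loss, step=train_step_counter)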

After Change


                  network_regularization_loss +
                  entropy_regularization_loss)

    losses_dict = {
        "policy_gradient_loss": policy_gradient_loss,
        "policy_network_regularization_loss": network_regularization_loss,
        "entropy_regularization_loss": entropy_regularization_loss,
        "value_estimation_loss": 0.0,
        "value_network_regularization_loss": 0.0,
    }

    value_estimation_loss = None
    if self._baseline:
      value_estimation_loss = self.value_estimation_loss(
          value_preds, returns, num_episodes, weights)
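      # tf.nn.scale_regularization_loss sums the given regularization losses
      # and scales the sum for the active tf.distribute strategy (dividing by
      # the number of replicas).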
      value_network_regularization_loss = tf.nn.scale_regularization_loss(
          self._value_network.losses)
      total_loss += value_estimation_loss + value_network_regularization_loss
      losses_dict["value_estimation_loss"] = value_estimation_loss
      losses_dict["value_network_regularization_loss"] = (
          value_network_regularization_loss)

    loss_info_extra = ReinforceAgentLossInfo(**losses_dict)

    losses_dict["total_loss"] = total_loss  # Total loss not in loss_info_extra.

    common.summarize_scalar_dict(losses_dict,
                                 self.train_step_counter,
                                 name_scope="Losses/")
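
The refactor funnels all of the scalars through tf_agents' common.summarize_scalar_dict utility instead of writing one summary call per loss. As a rough sketch (an assumption about the helper's behavior, not a copy of its source), the utility can be thought of as:

import tensorflow as tf

def summarize_scalar_dict(name_data, step, name_scope="Losses/"):
  # Emit one scalar summary per (name, value) entry under the given
  # name scope, skipping entries whose value is None.
  if name_data:
    with tf.name_scope(name_scope):
      for name, data in name_data.items():
        if data is not None:
          tf.compat.v2.summary.scalar(name=name, data=data, step=step)

Driving both the summaries and the ReinforceAgentLossInfo extra from the same losses_dict keeps the summary names and the loss-info fields from drifting apart, which was easy to do with the hand-written per-scalar calls.
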
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 7

Instances


Project Name: tensorflow/agents
Commit Name: 7ec9efd5cf6a479e8c5d85dcc950f464fd15b134
Time: 2020-02-28
Author: no-reply@google.com
File Name: tf_agents/agents/reinforce/reinforce_agent.py
Class Name: ReinforceAgent
Method Name: total_loss


Project Name: tensorflow/agents
Commit Name: 452cf41746dd7c4572b6e6766185431bce7f5ee1
Time: 2020-05-01
Author: no-reply@google.com
File Name: tf_agents/networks/lstm_encoding_network.py
Class Name: LSTMEncodingNetwork
Method Name: call


Project Name: tensorflow/agents
Commit Name: 452cf41746dd7c4572b6e6766185431bce7f5ee1
Time: 2020-05-01
Author: no-reply@google.com
File Name: tf_agents/agents/ddpg/critic_rnn_network.py
Class Name: CriticRnnNetwork
Method Name: call