aef2eb58d25f63103f37c0aff2dfc3ccdc691d81,basenji/rnn.py,RNN,build,#RNN#Any#,15

Before Change


        if self.grad_clip is None:
            clip_gvs = gvs
        else:
            clip_gvs = [(tf.clip_by_value(g, -self.grad_clip, self.grad_clip), v) for g, v in gvs]
        self.step_op = self.opt.apply_gradients(clip_gvs)

        //////////////////////////////////////////////////////////////////////////////////////////////////////

After Change


        if self.target_space == "integer":
            # move negatives into exponential space and align positives
            #  clipping the negatives prevents overflow that TF dislikes
            self.preds_op = tf.select(self.preds_op > 0, self.preds_op + 1, tf.exp(tf.clip_by_value(self.preds_op,-50,50)))

            # Poisson loss
            self.loss_op = tf.nn.log_poisson_loss(tf.log(self.preds_op), self.targets_op, compute_full_loss=True)
            self.loss_op = tf.reduce_mean(self.loss_op)

        else:
            # clip targets
            if self.target_space == "positive":
                self.targets_op = tf.nn.relu(self.targets_op)

            # take square difference
            sq_diff = tf.squared_difference(self.preds_op, self.targets_op)

            # set NaN's to zero
            # sq_diff = tf.boolean_mask(sq_diff, tf.logical_not(self.targets_na[:,tstart:tend]))

            # take the mean
            self.loss_op = tf.reduce_mean(sq_diff, name="r2_loss") + norm_stabilizer

        # track
        tf.scalar_summary("loss", self.loss_op)

        # define optimization
        if self.optimization == "adam":
            self.opt = tf.train.AdamOptimizer(self.learning_rate, beta1=self.adam_beta1, beta2=self.adam_beta2, epsilon=self.adam_eps)
        else:
            print("Cannot recognize optimization algorithm %s" % self.optimization)
            exit(1)

        # clip gradients
        self.gvs = self.opt.compute_gradients(self.loss_op)
        if self.grad_clip is not None:
            # self.gvs = [(tf.clip_by_value(g, -self.grad_clip, self.grad_clip), v) for g, v in self.gvs]

            # batch norm introduces these None values that we have to dodge
            clip_gvs = []
            for i in range(len(self.gvs)):
                g,v = self.gvs[i]
                if g is None:
                    clip_gvs.append(self.gvs[i])
                else:
                    clip_gvs.append((tf.clip_by_value(g, -self.grad_clip, self.grad_clip), v))

        # apply gradients
        self.step_op = self.opt.apply_gradients(clip_gvs)
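
The None-dodging loop above can be read as a small standalone helper. The sketch below is only illustrative (the name clip_gradients is hypothetical, not part of basenji); it applies the same element-wise clip and additionally returns the gradient list unchanged when no clip value is set:

import tensorflow as tf

def clip_gradients(gvs, grad_clip):
    """Clip each gradient to [-grad_clip, grad_clip], passing
    (None, var) pairs through untouched (e.g. from batch norm)."""
    if grad_clip is None:
        return gvs
    clipped = []
    for g, v in gvs:
        if g is None:
            clipped.append((g, v))
        else:
            clipped.append((tf.clip_by_value(g, -grad_clip, grad_clip), v))
    return clipped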


        //////////////////////////////////////////////////////////////////////////////////////////////////////
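
For the integer-target branch, a minimal sketch of the same prediction transform and Poisson loss in current TensorFlow might read as follows. The function name poisson_loss is hypothetical; tf.where stands in for the deprecated tf.select, and tf.nn.log_poisson_loss now takes targets before log_input:

import tensorflow as tf

def poisson_loss(preds, targets):
    # map raw predictions to a strictly positive Poisson rate:
    # positives are shifted by 1, negatives go through a clipped exp
    rate = tf.where(preds > 0,
                    preds + 1.0,
                    tf.exp(tf.clip_by_value(preds, -50.0, 50.0)))
    # full Poisson negative log-likelihood (includes the Stirling term)
    loss = tf.nn.log_poisson_loss(targets, tf.math.log(rate),
                                  compute_full_loss=True)
    return tf.reduce_mean(loss)

The +1 shift and the clipped exp keep the rate strictly positive, so the log never sees zero.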
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 3

Instances


Project Name: calico/basenji
Commit Name: aef2eb58d25f63103f37c0aff2dfc3ccdc691d81
Time: 2016-11-04
Author: drk@calicolabs.com
File Name: basenji/rnn.py
Class Name: RNN
Method Name: build


Project Name: tensorflow/cleverhans
Commit Name: fcf1e2a18e72dd42280f04a50fce3eb4e0b20fcf
Time: 2018-04-13
Author: kurakin@google.com
File Name: cleverhans/attacks.py
Class Name: BasicIterativeMethod
Method Name: generate


Project Name: keras-team/keras
Commit Name: cc0e60c1012b7c72eeb5ea0c41b8a2045177ae5e
Time: 2016-07-19
Author: francois.chollet@gmail.com
File Name: keras/backend/tensorflow_backend.py
Class Name:
Method Name: relu