d533214900cea56a4ec0be87577e97d3807b6bc5,keras_ssd_loss.py,SSD_Loss,compute_loss,#SSD_Loss#Any#Any#,89

Before Change


        """
        batch_size = tf.shape(y_pred)[0] # tf.int32
        n_boxes = tf.shape(y_pred)[1] # tf.int32
        depth = tf.shape(y_pred)[2] # tf.int32

        # 1: Compute the losses for class and box predictions for each default box
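The lines above read the tensor dimensions with tf.shape(), which returns int32 scalar tensors resolved at run time (unlike the static .get_shape()). A minimal sketch of that behavior, assuming TensorFlow 1.x and a hypothetical placeholder:

    import numpy as np
    import tensorflow as tf  # assumes TensorFlow 1.x, matching the API used in this file

    # `y_pred` stands in for the model output; its shape is only known at run time.
    y_pred = tf.placeholder(tf.float32, shape=(None, None, None))
    batch_size = tf.shape(y_pred)[0]  # a scalar int32 tensor, resolved dynamically

    with tf.Session() as sess:
        print(sess.run(batch_size, feed_dict={y_pred: np.zeros((2, 8, 14))}))  # -> 2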

After Change


        positives = tf.to_float(tf.reduce_max(y_true[:,:,1:-8], axis=-1)) # Tensor of shape (batch_size, n_boxes)

        # Count the number of positive boxes (classes 1 to n) in y_true across the whole batch
        n_positive = tf.reduce_sum(positives)

        # Now mask all negative boxes and sum up the losses for the positive boxes PER batch item
        # (Keras loss functions must output one scalar loss value PER batch item, rather than just
        # one scalar for the entire batch, which is why we're not summing across all axes)
        pos_class_loss = tf.reduce_sum(classification_loss * positives, axis=-1) # Tensor of shape (batch_size,)
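How the masking and the per-item reduction interact can be seen on a toy batch (hypothetical numbers, TensorFlow 1.x): multiplying by the `positives` mask zeroes out the negative boxes, and `axis=-1` keeps one loss value per batch item, as Keras requires.

    import tensorflow as tf  # TensorFlow 1.x

    # Hypothetical toy batch: 2 items, 3 boxes each.
    classification_loss = tf.constant([[0.5, 0.2, 0.9],
                                       [0.1, 0.4, 0.3]])
    positives = tf.constant([[1., 0., 1.],   # boxes 0 and 2 are positive in item 0
                             [0., 1., 0.]])  # box 1 is positive in item 1
    pos_class_loss = tf.reduce_sum(classification_loss * positives, axis=-1)

    with tf.Session() as sess:
        print(sess.run(pos_class_loss))  # -> [1.4 0.4], one scalar per batch item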

        # Compute the classification loss for the negative default boxes (if there are any)

        # First, compute the classification loss for all negative boxes
        neg_class_loss_all = classification_loss * negatives # Tensor of shape (batch_size, n_boxes)
        n_neg_losses = tf.count_nonzero(neg_class_loss_all, dtype=tf.int32) # The number of non-zero loss entries in `neg_class_loss_all`
        # What's the point of `n_neg_losses`? For the next step, which will be to compute which negative boxes enter the classification
        # loss, we don't just want to know how many negative ground truth boxes there are, but for how many of those there actually is
        # a positive (i.e. non-zero) loss. This is necessary because `tf.nn.top_k()` in the function below will pick the top k boxes with
        # the highest losses no matter what, even if it receives a vector where all losses are zero. In the unlikely event that all negative
        # classification losses ARE actually zero, this behavior might lead to `tf.nn.top_k()` returning the indices of positive
        # boxes, leading to an incorrect negative classification loss computation, and hence an incorrect overall loss computation.
        # We therefore need to make sure that `n_negative_keep`, which assumes the role of the `k` argument in `tf.nn.top_k()`,
        # is at most the number of negative boxes for which there is a positive classification loss.
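The pitfall described in that comment is easy to reproduce (a sketch, TensorFlow 1.x): tf.nn.top_k() returns k indices no matter what, even from an all-zero vector, so without the `n_neg_losses` cap those indices could point at positive boxes.

    import tensorflow as tf  # TensorFlow 1.x

    # tf.nn.top_k always returns k indices, even for an all-zero input:
    all_zero_losses = tf.constant([0., 0., 0., 0.])
    values, indices = tf.nn.top_k(all_zero_losses, k=2)

    with tf.Session() as sess:
        print(sess.run(indices))  # e.g. [0 1]: indices come back even though every loss is zero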

        # Compute the number of negative examples we want to account for in the loss
        # We'll keep at most `self.neg_pos_ratio` times the number of positives in `y_true`, but at least `self.n_neg_min` (unless `n_neg_losses` is smaller)
        n_negative_keep = tf.minimum(tf.maximum(self.neg_pos_ratio * tf.to_int32(n_positive), self.n_neg_min), n_neg_losses)
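Plugging hypothetical numbers into the clamp makes its effect concrete: with `neg_pos_ratio` of 3, 10 positives, `n_neg_min` of 0, and only 25 negative boxes with a non-zero loss, min(max(3 * 10, 0), 25) = 25 boxes are kept.

    import tensorflow as tf  # TensorFlow 1.x

    # Hypothetical values: 10 positives, neg_pos_ratio=3, n_neg_min=0,
    # but only 25 negative boxes with a non-zero loss.
    n_negative_keep = tf.minimum(tf.maximum(3 * 10, 0), 25)  # min(max(30, 0), 25)

    with tf.Session() as sess:
        print(sess.run(n_negative_keep))  # -> 25, capped by the number of usable negative losses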

        # In the unlikely case when either (1) there are no negative ground truth boxes at all
        # or (2) the classification loss for all negative boxes is zero, return zero as the `neg_class_loss`
        def f1():
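The excerpt cuts off at `def f1():`. The usual way this pattern is completed (a sketch under assumptions, not necessarily the author's exact code) is a pair of branch functions handed to tf.cond, with `f1` returning a zero loss per batch item:

    import tensorflow as tf  # TensorFlow 1.x

    batch_size = 2                 # hypothetical
    n_neg_losses = tf.constant(0)  # hypothetical: no non-zero negative losses

    def f1():
        return tf.zeros([batch_size])   # assumed branch: zero negative loss per batch item

    def f2():
        return tf.constant([0.7, 0.3])  # hypothetical stand-in for the real top-k negative loss

    neg_class_loss = tf.cond(tf.equal(n_neg_losses, tf.constant(0)), f1, f2)

    with tf.Session() as sess:
        print(sess.run(neg_class_loss))  # -> [0. 0.], the f1 branch is taken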
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 5

Instances


Project Name: pierluigiferrari/ssd_keras
Commit Name: d533214900cea56a4ec0be87577e97d3807b6bc5
Time: 2017-04-06
Author: pierluigi.ferrari@gmx.com
File Name: keras_ssd_loss.py
Class Name: SSD_Loss
Method Name: compute_loss


Project Name: GPflow/GPflow
Commit Name: 1d3e25c3ad4835ee298675f557e4c78bc8501c74
Time: 2017-03-06
Author: james.hensman@gmail.com
File Name: GPflow/ekernels.py
Class Name: RBF
Method Name: eKxz


Project Name: OpenNMT/OpenNMT-tf
Commit Name: 4d49910b3f0696102f813fb5ba451b934a4a579c
Time: 2021-03-25
Author: guillaumekln@users.noreply.github.com
File Name: opennmt/utils/losses.py
Class Name:
Method Name: cross_entropy_sequence_loss