rpn_cls_prob = prediction_dict["rpn_cls_prob"]
rpn_cls_score = prediction_dict["rpn_cls_score"]
rpn_cls_target = prediction_dict["rpn_cls_target"]
rpn_bbox_target = prediction_dict["rpn_bbox_target"]
rpn_bbox_pred = prediction_dict["rpn_bbox_pred"]
# First, we need to calculate the classification loss over `rpn_cls_prob`
# and `rpn_cls_target`, ignoring all anchors where `rpn_cls_target == -1`.
# For the classification loss we use the log loss of two classes, so we need to:
# - filter out the ignored entries of `rpn_cls_prob` (we need to reshape
#   both labels and prob),
# - transform positive and negative `rpn_cls_target` to the same shape as
#   `rpn_cls_prob`,
# - then compute the per-anchor cross entropy (here with
#   `tf.nn.softmax_cross_entropy_with_logits`, which for two classes is
#   equivalent to a log loss) and get back a tensor.
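# Illustrative example (assumed values, not part of the computation below):
# with rpn_cls_target = [1, -1, 0], only the first and third anchors survive
# the filtering, and their one-hot targets become [[0, 1], [1, 0]].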
with self._enter_variable_scope():
    with tf.name_scope("RPNLoss"):
        # Flatten the (already flat) tensor so it can be used as a
        # boolean mask filter.
        rpn_cls_target = tf.cast(tf.reshape(
            rpn_cls_target, [-1]), tf.int32, name="rpn_cls_target")
        # Transform to a boolean tensor that is True only for anchors that
        # are not ignored (i.e. label != -1).
        labels_not_ignored = tf.not_equal(
            rpn_cls_target, -1, name="labels_not_ignored")
        # Now we only have the labels we are going to compare with the
        # cls probability.
        labels = tf.boolean_mask(rpn_cls_target, labels_not_ignored)
        cls_prob = tf.boolean_mask(rpn_cls_prob, labels_not_ignored)
        cls_score = tf.boolean_mask(rpn_cls_score, labels_not_ignored)
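        # After masking, only anchors labeled as background (0) or
        # foreground (1) remain, so `labels`, `cls_prob` and `cls_score`
        # share the same first dimension.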
        # We need to transform `labels` to `cls_prob` shape:
        # convert [1, 0] to [[0, 1], [1, 0]].
        cls_target = tf.one_hot(labels, depth=2)
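        # `tf.nn.softmax_cross_entropy_with_logits` applies the softmax
        # itself, so it receives the raw scores (`cls_score`) rather than
        # the already-softmaxed `cls_prob`.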
        cross_entropy_per_anchor = tf.nn.softmax_cross_entropy_with_logits(
            labels=cls_target, logits=cls_score
        )
        prediction_dict["cross_entropy_per_anchor"] = cross_entropy_per_anchor
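        # Note (not shown in this fragment): the scalar classification loss
        # is typically obtained later by reducing this per-anchor tensor,
        # e.g. tf.reduce_mean(cross_entropy_per_anchor).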
        # Finally, we need to calculate the regression loss over
        # `rpn_bbox_target` and `rpn_bbox_pred`.
        # Since `rpn_bbox_target` is obtained from AnchorTargetLayer then we