        tf.layers.separable_conv2d, depth_multiplier=1)
else:
    conv2d_op = functools.partial(
        tf.layers.conv2d,
        kernel_initializer=tf.random_normal_initializer(stddev=0.01))
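# Shared 3x3 class-net conv for layer i: zero-initialized bias, no activation
# here (batch norm + ReLU follow immediately below).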
features = conv2d_op(
    features,
    self._num_filters,
    kernel_size=(3, 3),
    bias_initializer=tf.zeros_initializer(),
    activation=None,
    padding="same",
    name="class-" + str(i))
# The convolution layers in the class net are shared among all levels, but
# each level has its own batch normalization to capture the statistical
# differences among the levels.
features = self._batch_norm_relu(features, is_training=is_training,
                                 name="class-%d-%d" % (i, level))
if self._use_separable_conv:
    conv2d_op = functools.partial(
        tf.layers.separable_conv2d, depth_multiplier=1)
else:
    conv2d_op = functools.partial(
        tf.layers.conv2d,
        kernel_initializer=tf.random_normal_initializer(stddev=1e-5))
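# Final 3x3 prediction layer: num_classes * anchors_per_location class logits
# per spatial position. Its kernel initializer uses a much smaller stddev (1e-5)
# than the 0.01 used for the shared convs above.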
classes = conv2d_op(
    features,
    self._num_classes * self._anchors_per_location,
    kernel_size=(3, 3),