# Cloud ML requires dynamic batch sizes, so set shape to [None].
input_jpeg_str = tf.placeholder(tf.string, shape=[None])
# For each instance in the batch, calculate the bottleneck.
# back_prop=False: no gradients flow through the per-image bottleneck
# extraction — presumably the feature extractor's weights are fixed
# (TODO confirm against ml_utils.get_bottleneck_tensor).
bottleneck_tensor = tf.map_fn(
    ml_utils.get_bottleneck_tensor,
    input_jpeg_str,
    back_prop=False,
    dtype=tf.float32)
# Reduce dimension in bottleneck - we want to feed a BATCH_SIZE x 2048 tensor
# to the dense layer, to produce a BATCH_SIZE x NUMBER_OF_CLASSES tensor.
# (map_fn yields BATCH_SIZE x 1 x 2048 here; squeeze drops axis 1 —
# NOTE(review): assumes each per-image bottleneck has a leading 1-dim.)
bottleneck_tensor = tf.squeeze(bottleneck_tensor, [1])
_, normalized_tensor, prediction_index = _create_dense_and_softmax_layers(
    bottleneck_tensor, class_count)
# --- After Change ---
# score is the score for the predicted label.
# Build the input graph with fixed (non-trainable) weights: img_bytes is the
# serialized-image input tensor and bottleneck_tensor its extracted features.
img_bytes, bottleneck_tensor = ml_utils.create_fixed_weight_input_graph()
# Dense + softmax head over the bottleneck features; normalized_tensor holds
# the per-class scores and prediction_index the argmax class index
# (NOTE(review): inferred from names — confirm against
# _create_dense_and_softmax_layers).
_, normalized_tensor, prediction_index = _create_dense_and_softmax_layers(
    bottleneck_tensor, class_count)
# Get the prediction (label) for a given tensor index.