@ONNXConverter.register_handler("GlobalMaxPool")
def _convert_global_max_pool(converter: ONNXConverter, onnx_op: INodeProto):
    # FIXME: It's possible to support this in the current version of webdnn
    raise NotImplementedError("[ONNXConverter] Operator \"GlobalMaxPool\" is not supported yet.")


@ONNXConverter.register_handler("BatchNormalization")
def _convert_batch_normalization(converter: ONNXConverter, onnx_op: INodeProto):
After Change
@ONNXConverter.register_handler("GlobalMaxPool")
def _convert_global_max_pool(converter: ONNXConverter, onnx_op: INodeProto):
    x = converter.get_variable(onnx_op.input[0])
    if x.ndim == 4:
        x.order.unify(OrderNCHW)

    # Collapse all spatial axes into a single new axis, then take the max over it.
    reduction_size = mul(x.shape[2:])
    reduction_axis = Axis()
    x = x.reshape([x.shape[0], x.shape[1], reduction_size],
                  Order([x.order.axes[0], x.order.axes[1], reduction_axis]))
    y, = Max(None, axis=reduction_axis)(x)

    converter.set_variable(onnx_op.output[0], y)
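
For intuition, the same reshape-then-reduce idea can be checked in plain NumPy. This is only an illustrative sketch, not webdnn code; the helper name global_max_pool_reference is hypothetical, and unlike the ONNX spec (whose GlobalMaxPool output keeps the pooled spatial dims as size 1) it returns an (N, C) array.

import numpy as np

def global_max_pool_reference(x: np.ndarray) -> np.ndarray:
    # x: (N, C, H, W). Collapse every spatial axis into one, then reduce over it,
    # mirroring reduction_size = mul(x.shape[2:]) and Max(axis=reduction_axis) above.
    n, c = x.shape[:2]
    reduction_size = int(np.prod(x.shape[2:]))
    return x.reshape(n, c, reduction_size).max(axis=2)

x = np.random.rand(2, 3, 5, 5).astype(np.float32)
y = global_max_pool_reference(x)
assert y.shape == (2, 3)
assert np.allclose(y, x.max(axis=(2, 3)))
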
@ONNXConverter.register_handler("BatchNormalization")