# Fetch the test image, then compile the scripted PyTorch model with TVM
# and run both frameworks on the same input so their detections can be compared.
download(img_url, img)

# Single RGB image in NCHW layout; in_size is defined earlier in the file.
input_shape = (1, 3, in_size, in_size)
target = "llvm"  # compile for CPU
input_name = "input0"
shape_list = [(input_name, input_shape)]
score_threshold = 0.9  # boxes scoring at or above this count as "valid"

# Import the TorchScript model into Relay.
scripted_model = generate_jit_model(1)
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)

# Compile for the Relay VM. FoldScaleAxis is explicitly disabled here —
# presumably it interferes with this model's import; confirm against TVM docs.
with tvm.transform.PassContext(opt_level=3, disabled_pass=["FoldScaleAxis"]):
    vm_exec = relay.vm.compile(mod, target=target, params=params)

ctx = tvm.cpu()
vm = VirtualMachine(vm_exec, ctx)

# Run PyTorch first (needs a torch tensor), then hand the same data to TVM
# as a NumPy array.
data = process_image(img)
pt_res = scripted_model(data)
data = data.detach().numpy()
vm.set_input("main", **{input_name: data})
tvm_res = vm.run()

# Note: due to accumulated numerical error, we can't directly compare results
# with pytorch output. Some boxes might have a quite tiny difference in score
# and the order can become different. We just measure how many valid boxes
# there are for the input image.
pt_scores = pt_res[1].detach().numpy().tolist()
# NOTE(review): .asnumpy() is deprecated in newer TVM releases in favor of
# .numpy() — keep as-is to match the TVM version this file targets; confirm.
tvm_scores = tvm_res[1].asnumpy().tolist()
num_pt_valid_scores = num_tvm_valid_scores = 0
for score in pt_scores:
if score >= score_threshold: