from tvm.contrib.download import download_testdata
from scipy.special import softmax
# device = torch.device("cpu")
model = getattr(torchvision.models, model_name)(pretrained=True)
model = model.eval()
# We grab the TorchScripted model via tracing
input_shape = [1, 3, 224, 224]
input_data = torch.randn(input_shape)
scripted_model = torch.jit.trace(model, input_data).eval()
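# Optional sanity check (not part of the original flow): the traced module
# should reproduce the eager model's output on the tracing input.
with torch.no_grad():
    assert torch.allclose(model(input_data), scripted_model(input_data), atol=1e-5)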
from PIL import Image
img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))
# Preprocess the image and convert to tensor
from torchvision import transforms
my_preprocess = transforms.Compose([
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
img = my_preprocess(img)
img = np.expand_dims(img, 0)
######################################################################
# Import the graph to Relay
# -------------------------
# Convert PyTorch graph to Relay graph. The input name can be arbitrary.
input_name = "input0"
shape_list = [(input_name, img.shape)]
mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
######################################################################
# Relay Build
# -----------
# Compile the graph to llvm target with given input specification.
with tvm.transform.PassContext(opt_level=7):
    lib = relay.build(mod, target=target, target_host=target_host, params=params)
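# Optionally, the compiled artifact can be serialized and reloaded later,
# e.g. when deployment happens on another machine. This is a minimal
# illustrative sketch; the temp dir and file name are arbitrary choices,
# not part of the original flow.
from tvm.contrib import utils
temp = utils.tempdir()
lib_path = temp.relpath("deploy_lib.tar")
lib.export_library(lib_path)
loaded_lib = tvm.runtime.load_module(lib_path)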
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now we can try deploying the compiled model on target.
from tvm.contrib import graph_executor
m = graph_executor.GraphModule(lib["default"](dev))
# tvm_t0 = time.process_time()
dtype = "float32"
tvm_time_spent = []
for i in range(n_warmup + n_time):
    tvm_t0 = time.time()
    m.set_input(input_name, tvm.nd.array(img.astype(dtype)))
    m.run()
    tvm_output = m.get_output(0)
    tvm_time_spent.append(time.time() - tvm_t0)
# tvm_t1 = time.process_time()
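# As a cross-check on the hand-rolled timing loop above, TVM's runtime also
# provides a built-in time evaluator. This is an optional sketch reusing the
# module `m` and device `dev` from above; `number=n_time` is an assumption.
ftimer = m.module.time_evaluator("run", dev, number=n_time)
print("TVM time_evaluator mean (ms): {:.3f}".format(ftimer().mean * 1000))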
#####################################################################
# Look up synset name
# -------------------
# Look up prediction top 1 index in 1000 class synset.
"https://raw.githubusercontent.com/Cadene/",
"pretrained-models.pytorch/master/data/",
synset_name = "imagenet_synsets.txt"
synset_path = download_testdata(synset_url, synset_name, module="data")
with open(synset_path) as f:
    synsets = f.readlines()
synsets = [x.strip() for x in synsets]
splits = [line.split(" ") for line in synsets]
key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits}
"https://raw.githubusercontent.com/Cadene/",
"pretrained-models.pytorch/master/data/",
class_name = "imagenet_classes.txt"
class_path = download_testdata(class_url, class_name, module="data")
with open(class_path) as f:
    class_id_to_key = f.readlines()
class_id_to_key = [x.strip() for x in class_id_to_key]
# Get top-1 result for TVM
top1_tvm = np.argmax(tvm_output.asnumpy()[0])
tvm_class_key = class_id_to_key[top1_tvm]
# Convert input to PyTorch variable and get PyTorch result for comparison
# torch_t0 = time.process_time()
# torch.set_num_threads(1)
torch_time_spent = []
with torch.no_grad():
    for i in range(n_warmup + n_time):
        torch_t0 = time.time()
        torch_img = torch.from_numpy(img)
        output = model(torch_img)
        torch_time_spent.append(time.time() - torch_t0)
# Get top-1 result for PyTorch
top1_torch = np.argmax(output.numpy())
torch_class_key = class_id_to_key[top1_torch]
# torch_t1 = time.process_time()
# tvm_time = tvm_t1 - tvm_t0
# torch_time = torch_t1 - torch_t0
tvm_time = np.mean(tvm_time_spent[n_warmup:]) * 1000
torch_time = np.mean(torch_time_spent[n_warmup:]) * 1000
tvm_output_prob = softmax(tvm_output.asnumpy())
output_prob = softmax(output.numpy())
print("Relay top-1 id: {}, class name: {}, class probality: {}".format(top1_tvm, key_to_classname[tvm_class_key], tvm_output_prob[0][top1_tvm]))
print("Torch top-1 id: {}, class name: {}, class probality: {}".format(top1_torch, key_to_classname[torch_class_key], output_prob[0][top1_torch]))
print('Relay time(ms): {:.3f}'.format(tvm_time))
print('Torch time(ms): {:.3f}'.format(torch_time))
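# Beyond top-1, a top-5 listing from the TVM probabilities can make the
# comparison easier to eyeball. This is an illustrative addition, not part
# of the original benchmark output.
top5_tvm = np.argsort(tvm_output_prob[0])[-5:][::-1]
for rank, idx in enumerate(top5_tvm, start=1):
    print("Relay top-{}: {} ({:.4f})".format(rank, key_to_classname[class_id_to_key[idx]], tvm_output_prob[0][idx]))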