import argparse

import numpy as np
import cv2 as cv

# Check OpenCV version
opencv_python_version = lambda str_version: tuple(map(int, (str_version.split("."))))
assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \
       "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python"

from mobilenet import MobileNet

# Valid combinations of backends and targets
backend_target_pairs = [
    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA],
    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA_FP16],
    [cv.dnn.DNN_BACKEND_TIMVX,  cv.dnn.DNN_TARGET_NPU],
    [cv.dnn.DNN_BACKEND_CANN,   cv.dnn.DNN_TARGET_NPU]
]

parser = argparse.ArgumentParser(description='Demo for MobileNet V1 & V2.')
parser.add_argument('--input', '-i', type=str,
                    help='Usage: Set input path to a certain image, omit if using camera.')
parser.add_argument('--model', '-m', type=str, default='image_classification_mobilenetv1_2022apr.onnx',
                    help='Usage: Set model type, defaults to image_classification_mobilenetv1_2022apr.onnx (v1).')
parser.add_argument('--backend_target', '-bt', type=int, default=0,
                    help='''Choose one of the backend-target pairs to run this demo:
                        {:d}: (default) OpenCV implementation + CPU,
                        {:d}: CUDA + GPU (CUDA),
                        {:d}: CUDA + GPU (CUDA FP16),
                        {:d}: TIM-VX + NPU,
                        {:d}: CANN + NPU
                    '''.format(*[x for x in range(len(backend_target_pairs))]))
parser.add_argument('--top_k', type=int, default=1,
                    help='Usage: Get top k predictions.')
args = parser.parse_args()

if __name__ == '__main__':
    backend_id = backend_target_pairs[args.backend_target][0]
    target_id = backend_target_pairs[args.backend_target][1]

    top_k = args.top_k

    # Instantiate MobileNet
    model = MobileNet(modelPath=args.model, topK=top_k, backendId=backend_id, targetId=target_id)

    # Read image and take a 224x224 center crop from a 256x256 resized copy
    image = cv.imread(args.input)
    image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    image = cv.resize(image, dsize=(256, 256))
    image = image[16:240, 16:240, :]

    # Inference
    result = model.infer(image)

    # Print result
    print('label: {}'.format(result))
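
# ---------------------------------------------------------------------------
# For reference, below is a minimal sketch of a classifier wrapper like the
# MobileNet class imported from mobilenet.py above. It is an illustration
# only, not the shipped implementation: the class name (MobileNetSketch), the
# ImageNet mean/std values, and the plain index output are assumptions; the
# real wrapper may also map class indices to human-readable labels.
# ---------------------------------------------------------------------------
class MobileNetSketch:
    def __init__(self, modelPath, topK=1,
                 backendId=cv.dnn.DNN_BACKEND_OPENCV,
                 targetId=cv.dnn.DNN_TARGET_CPU):
        self._top_k = topK
        self._model = cv.dnn.readNet(modelPath)      # load the ONNX model
        self._model.setPreferableBackend(backendId)
        self._model.setPreferableTarget(targetId)
        # Assumed ImageNet statistics (RGB order)
        self._mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
        self._std = np.array([0.229, 0.224, 0.225], dtype=np.float32)

    def infer(self, image):
        # Normalize the 224x224 RGB crop and convert HWC -> NCHW
        blob = (image.astype(np.float32) / 255.0 - self._mean) / self._std
        blob = np.ascontiguousarray(blob.transpose(2, 0, 1)[np.newaxis, ...])
        self._model.setInput(blob)
        scores = self._model.forward().flatten()
        # Return indices of the top-k highest scores
        return np.argsort(scores)[::-1][:self._top_k]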