From cc3259ac12d62b26e565619a9c5f93987f433547 Mon Sep 17 00:00:00 2001 From: Zhiqiang Wang Date: Sat, 30 Oct 2021 17:21:47 +0800 Subject: [PATCH] Fix the type casting bug in labels (#221) * Fix CMake configs for APPLE * Fix the type annotation error in classId * Fix the version parameter in export_model.py * Revert other types temporarily * Update README.md for macOS support --- deployment/onnxruntime/CMakeLists.txt | 23 +++++++++++++++-------- deployment/onnxruntime/README.md | 5 ++--- deployment/onnxruntime/main.cpp | 2 +- tools/export_model.py | 19 +++++++++++++++++-- 4 files changed, 35 insertions(+), 14 deletions(-) diff --git a/deployment/onnxruntime/CMakeLists.txt b/deployment/onnxruntime/CMakeLists.txt index 02ad10d9..e85f3409 100644 --- a/deployment/onnxruntime/CMakeLists.txt +++ b/deployment/onnxruntime/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.0) +cmake_minimum_required(VERSION 3.18.0) project(yolort_onnx) option(ONNXRUNTIME_DIR "Path to built ONNX Runtime directory." 
STRING) @@ -11,15 +11,22 @@ add_executable(yolort_onnx main.cpp) set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) -target_include_directories(yolort_onnx PRIVATE "${ONNXRUNTIME_DIR}/include") -# link_directories("${ONNXRUNTIME_DIR}/lib") +if (APPLE) + target_include_directories(yolort_onnx PRIVATE "${ONNXRUNTIME_DIR}/include/onnxruntime/core/session") +else() + target_include_directories(yolort_onnx PRIVATE "${ONNXRUNTIME_DIR}/include") +endif() target_link_libraries(yolort_onnx ${OpenCV_LIBS}) if (WIN32) - target_link_libraries(yolort_onnx "${ONNXRUNTIME_DIR}/lib/onnxruntime.lib") -endif(WIN32) + target_link_libraries(yolort_onnx "${ONNXRUNTIME_DIR}/lib/onnxruntime.lib") +endif() -if (UNIX) - target_link_libraries(yolort_onnx "${ONNXRUNTIME_DIR}/lib/libonnxruntime.so") -endif(UNIX) +if (APPLE) + target_link_libraries(yolort_onnx "${ONNXRUNTIME_DIR}/lib/libonnxruntime.dylib") +endif() + +if (UNIX AND NOT APPLE) + target_link_libraries(yolort_onnx "${ONNXRUNTIME_DIR}/lib/libonnxruntime.so") +endif() diff --git a/deployment/onnxruntime/README.md b/deployment/onnxruntime/README.md index 7bbf9a68..fc726277 100644 --- a/deployment/onnxruntime/README.md +++ b/deployment/onnxruntime/README.md @@ -4,7 +4,7 @@ The ONNXRuntime inference for `yolort`, both GPU and CPU are supported. ## Dependencies -- Ubuntu 20.04 / Windows 10 +- Ubuntu 20.04 / Windows 10 / macOS - ONNXRuntime 1.7 + - OpenCV 4.5 + - CUDA 11 \[Optional\] @@ -40,7 +40,6 @@ The `ONNX` model exported with `yolort` differs from the official one in the fol ```bash python tools/export_model.py [--checkpoint_path path/to/custom/best.pt] - [--simplify] ``` And then, you can find that a new pair of ONNX models ("best.onnx" and "best.sim.onnx") has been generated in the directory of "best.pt". 
@@ -50,7 +49,7 @@ The `ONNX` model exported with `yolort` differs from the official one in the fol ```python from yolort.runtime import PredictorORT - detector = PredictorORT("best.sim.onnx") + detector = PredictorORT("best.onnx") img_path = "bus.jpg" scores, class_ids, boxes = detector.run_on_image(img_path) ``` diff --git a/deployment/onnxruntime/main.cpp b/deployment/onnxruntime/main.cpp index afad7212..940e4bec 100644 --- a/deployment/onnxruntime/main.cpp +++ b/deployment/onnxruntime/main.cpp @@ -147,7 +147,7 @@ void YOLOv5Detector::preprocessing(cv::Mat &image, float* blob) std::vector YOLOv5Detector::postprocessing(cv::Mat& image, std::vector& outputTensors) { const auto* scoresTensor = outputTensors[0].GetTensorData(); - const auto* classIdsTensor = outputTensors[1].GetTensorData(); + const auto* classIdsTensor = outputTensors[1].GetTensorData(); const auto* boxesTensor = outputTensors[2].GetTensorData(); size_t count = outputTensors[0].GetTensorTypeAndShapeInfo().GetElementCount(); diff --git a/tools/export_model.py b/tools/export_model.py index 8067db1d..243f87f9 100644 --- a/tools/export_model.py +++ b/tools/export_model.py @@ -33,6 +33,13 @@ def get_parser(): type=float, help="Score threshold used for postprocessing the detections.", ) + parser.add_argument( + "--version", + type=str, + default="r6.0", + help="Upstream version released by the ultralytics/yolov5, Possible " + "values are ['r3.1', 'r4.0', 'r6.0']. 
Default: 'r6.0'.", + ) parser.add_argument( "--export_friendly", action="store_true", @@ -132,7 +139,11 @@ def cli_main(): } input_names = ["images_tensors"] output_names = ["scores", "labels", "boxes"] - model = YOLO.load_from_yolov5(checkpoint_path, score_thresh=args.score_thresh) + model = YOLO.load_from_yolov5( + checkpoint_path, + score_thresh=args.score_thresh, + version=args.version, + ) model.eval() else: # input data @@ -146,7 +157,11 @@ def cli_main(): } input_names = ["images_tensors"] output_names = ["scores", "labels", "boxes"] - model = YOLOv5.load_from_yolov5(checkpoint_path, score_thresh=args.score_thresh) + model = YOLOv5.load_from_yolov5( + checkpoint_path, + score_thresh=args.score_thresh, + version=args.version, + ) model.eval() # export ONNX models