Support using onnxruntime 1.16.0 with CUDA 11.4 on Jetson Orin NX (Linux arm64 GPU). (#1630)

* Support using onnxruntime 1.16.0 with CUDA 11.4 on Jetson Orin NX.

The pre-built onnxruntime libs are provided by the community
and were built with the following command:

```bash
./build.sh --build_shared_lib --config Release --update \
  --build --parallel --use_cuda \
  --cuda_home /usr/local/cuda \
  --cudnn_home /usr/lib/aarch64-linux-gnu 2>&1 | tee my-log.txt
```

See also microsoft/onnxruntime#11226
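Before pointing sherpa-onnx at the resulting libs, it is worth checking that the CUDA execution provider was actually built and that its CUDA/cuDNN dependencies resolve. A minimal sketch, assuming onnxruntime's default build output directory (`build/Linux/Release`); the exact paths are not shown in the log above:

```bash
# Sketch: sanity-check the freshly built onnxruntime GPU libs.
# Assumes onnxruntime's default build directory; adjust the path
# if you passed --build_dir to build.sh.
cd build/Linux/Release

# The shared lib and the CUDA execution provider lib should both exist.
ls -l libonnxruntime.so* libonnxruntime_providers_cuda.so

# All CUDA/cuDNN dependencies should resolve (no "not found" entries).
ldd libonnxruntime_providers_cuda.so | grep -iE 'cuda|cudnn|not found'
```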

---

Info about the board:

```
Model: NVIDIA Orin NX T801-16GB - Jetpack 5.1.4 [L4T 35.6.0]
```

```
nvidia@nvidia-desktop:~/Downloads$ head -n 1 /etc/nv_tegra_release
# R35 (release), REVISION: 6.0, GCID: 37391689, BOARD: t186ref, EABI: aarch64, DATE: Wed Aug 28 09:12:27 UTC 2024

nvidia@nvidia-desktop:~/Downloads$ uname -r
5.10.216-tegra

nvidia@nvidia-desktop:~/Downloads$ lsb_release -i -r
Distributor ID:	Ubuntu
Release:	20.04

nvidia@nvidia-desktop:~/Downloads$ nvcc -V
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2022 NVIDIA Corporation
Built on Wed_Sep_21_10:43:33_PDT_2022
Cuda compilation tools, release 11.8, V11.8.89
Build cuda_11.8.r11.8/compiler.31833905_0

nvidia@nvidia-desktop:~/Downloads$ dpkg -l libcudnn8
Desired=Unknown/Install/Remove/Purge/Hold
| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend
|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)
||/ Name           Version              Architecture Description
+++-==============-====================-============-=================================
ii  libcudnn8      8.6.0.166-1+cuda11.4 arm64        cuDNN runtime libraries

nvidia@nvidia-desktop:~/Downloads$ dpkg -l tensorrt
Desired=Unknown/Install/Remove/Purge/Hold
| Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend
|/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad)
||/ Name           Version            Architecture Description
+++-==============-==================-============-=================================
ii  tensorrt       8.5.2.2-1+cuda11.4 arm64        Meta package for TensorRT
```
csukuangfj authored Dec 19, 2024
1 parent 5cc60de commit 70ee779
Showing 6 changed files with 73 additions and 21 deletions.
21 changes: 15 additions & 6 deletions .github/workflows/aarch64-linux-gnu-shared.yaml
@@ -34,12 +34,20 @@ concurrency:
jobs:
aarch64_linux_gnu_shared:
runs-on: ${{ matrix.os }}
name: aarch64 shared GPU ${{ matrix.gpu }}
name: aarch64 shared GPU ${{ matrix.gpu }} ${{ matrix.onnxruntime_version }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
gpu: [ON, OFF]
include:
- os: ubuntu-latest
gpu: ON
onnxruntime_version: "1.11.0"
- os: ubuntu-latest
gpu: ON
onnxruntime_version: "1.16.0"
- os: ubuntu-latest
gpu: OFF
onnxruntime_version: ""

steps:
- uses: actions/checkout@v4
@@ -62,7 +70,7 @@ jobs:
if: steps.cache-qemu.outputs.cache-hit != 'true'
run: |
sudo apt-get update
sudo apt-get install autoconf automake autotools-dev ninja-build
sudo apt-get install autoconf automake autotools-dev ninja-build libglib2.0-dev
- name: checkout-qemu
if: steps.cache-qemu.outputs.cache-hit != 'true'
@@ -159,6 +167,7 @@ jobs:
export BUILD_SHARED_LIBS=ON
export SHERPA_ONNX_ENABLE_GPU=${{ matrix.gpu }}
export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=${{ matrix.onnxruntime_version }}
./build-aarch64-linux-gnu.sh
@@ -199,7 +208,7 @@ jobs:
if [[ ${{ matrix.gpu }} == OFF ]]; then
dst=${dst}-cpu
else
dst=${dst}-gpu
dst=${dst}-gpu-onnxruntime-${{ matrix.onnxruntime_version }}
fi
mkdir $dst
@@ -223,7 +232,7 @@ jobs:
- uses: actions/upload-artifact@v4
with:
name: sherpa-onnx-linux-aarch64-shared-gpu-${{ matrix.gpu }}
name: sherpa-onnx-linux-aarch64-shared-gpu-${{ matrix.gpu }}-onnxruntime-${{ matrix.onnxruntime_version }}
path: sherpa-onnx-*linux-aarch64-shared*.tar.bz2

# https://huggingface.co/docs/hub/spaces-github-actions
2 changes: 1 addition & 1 deletion .github/workflows/aarch64-linux-gnu-static.yaml
@@ -61,7 +61,7 @@ jobs:
if: steps.cache-qemu.outputs.cache-hit != 'true'
run: |
sudo apt-get update
sudo apt-get install autoconf automake autotools-dev ninja-build
sudo apt-get install autoconf automake autotools-dev ninja-build libglib2.0-dev
- name: checkout-qemu
if: steps.cache-qemu.outputs.cache-hit != 'true'
2 changes: 1 addition & 1 deletion .github/workflows/arm-linux-gnueabihf.yaml
@@ -62,7 +62,7 @@ jobs:
if: steps.cache-qemu.outputs.cache-hit != 'true'
run: |
sudo apt-get update
sudo apt-get install autoconf automake autotools-dev ninja-build
sudo apt-get install autoconf automake autotools-dev ninja-build libglib2.0-dev
- name: checkout-qemu
if: steps.cache-qemu.outputs.cache-hit != 'true'
3 changes: 3 additions & 0 deletions CMakeLists.txt
@@ -46,6 +46,9 @@ option(SHERPA_ONNX_USE_PRE_INSTALLED_ONNXRUNTIME_IF_AVAILABLE "True to use pre-i
option(SHERPA_ONNX_ENABLE_SANITIZER "Whether to enable ubsan and asan" OFF)
option(SHERPA_ONNX_BUILD_C_API_EXAMPLES "Whether to enable C API examples" ON)

set(SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION "1.11.0" CACHE STRING "Used only for Linux ARM64 GPU. If you use Jetson nano b01, then please set it to 1.11.0. If you use Jetson Orin NX, then set it to 1.16.0")


set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")
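If you configure CMake yourself instead of going through build-aarch64-linux-gnu.sh, the new cache variable can be passed directly with -D, as the warning message further down also suggests. A minimal sketch for a native build on a Jetson Orin NX: SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION is the cache variable introduced here, SHERPA_ONNX_ENABLE_GPU already exists in the project, and the build directory, build type, and make invocation are assumptions.

```bash
# Minimal sketch: native GPU configure/build on Jetson Orin NX (CUDA 11.4).
mkdir -p build
cd build

cmake \
  -DCMAKE_BUILD_TYPE=Release \
  -DBUILD_SHARED_LIBS=ON \
  -DSHERPA_ONNX_ENABLE_GPU=ON \
  -DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.16.0 \
  ..

make -j$(nproc)
```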
32 changes: 27 additions & 5 deletions build-aarch64-linux-gnu.sh
@@ -1,4 +1,25 @@
#!/usr/bin/env bash
#
# Usage of this file
#
# (1) Build CPU version of sherpa-onnx
# ./build-aarch64-linux-gnu.sh
#
# (2) Build GPU version of sherpa-onnx
#
# (a) Make sure your board has NVIDIA GPU(s)
#
# (b) For Jetson Nano B01 (using CUDA 10.2)
#
# export SHERPA_ONNX_ENABLE_GPU=ON
# export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.11.0
# ./build-aarch64-linux-gnu.sh
#
# (c) For Jetson Orin NX (using CUDA 11.4)
#
# export SHERPA_ONNX_ENABLE_GPU=ON
# export SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.16.0
# ./build-aarch64-linux-gnu.sh

if command -v aarch64-none-linux-gnu-gcc &> /dev/null; then
ln -svf $(which aarch64-none-linux-gnu-gcc) ./aarch64-linux-gnu-gcc
@@ -47,18 +68,18 @@ fi
if [[ x"$SHERPA_ONNX_ENABLE_GPU" == x"" ]]; then
# By default, use CPU
SHERPA_ONNX_ENABLE_GPU=OFF

# If you use GPU, then please make sure you have NVIDIA GPUs on your board.
# It uses onnxruntime 1.11.0.
#
# Tested on Jetson Nano B01
fi

if [[ x"$SHERPA_ONNX_ENABLE_GPU" == x"ON" ]]; then
# Build shared libs if building GPU is enabled.
BUILD_SHARED_LIBS=ON
fi

if [[ x"$SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION" == x"" ]]; then
# Used only when SHERPA_ONNX_ENABLE_GPU is ON
SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION="1.11.0"
fi

cmake \
-DBUILD_PIPER_PHONMIZE_EXE=OFF \
-DBUILD_PIPER_PHONMIZE_TESTS=OFF \
@@ -75,6 +96,7 @@ cmake \
-DSHERPA_ONNX_ENABLE_JNI=OFF \
-DSHERPA_ONNX_ENABLE_C_API=ON \
-DSHERPA_ONNX_ENABLE_WEBSOCKET=ON \
-DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=$SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION \
-DCMAKE_TOOLCHAIN_FILE=../toolchains/aarch64-linux-gnu.toolchain.cmake \
..

34 changes: 26 additions & 8 deletions cmake/onnxruntime-linux-aarch64-gpu.cmake
@@ -18,19 +18,37 @@ if(NOT SHERPA_ONNX_ENABLE_GPU)
message(FATAL_ERROR "This file is for NVIDIA GPU only. Given SHERPA_ONNX_ENABLE_GPU: ${SHERPA_ONNX_ENABLE_GPU}")
endif()

set(onnxruntime_URL "https://github.com/csukuangfj/onnxruntime-libs/releases/download/v1.11.0/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2")
set(onnxruntime_URL2 "https://hf-mirror.com/csukuangfj/onnxruntime-libs/resolve/main/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2")
set(onnxruntime_HASH "SHA256=36eded935551e23aead09d4173bdf0bd1e7b01fdec15d77f97d6e34029aa60d7")
message(WARNING "\
SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION: ${SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION}
If you use Jetson nano b01, then please pass
-DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.11.0
to cmake (You need to make sure CUDA 10.2 is available on your board).
If you use Jetson Orin NX, then please pass
-DSHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION=1.16.0
to cmake (You need to make sure CUDA 11.4 is available on your board).
")

set(v ${SHERPA_ONNX_LINUX_ARM64_GPU_ONNXRUNTIME_VERSION})

set(onnxruntime_URL "https://github.com/csukuangfj/onnxruntime-libs/releases/download/v${v}/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2")
set(onnxruntime_URL2 "https://hf-mirror.com/csukuangfj/onnxruntime-libs/resolve/main/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2")

if(v STREQUAL "1.11.0")
set(onnxruntime_HASH "SHA256=36eded935551e23aead09d4173bdf0bd1e7b01fdec15d77f97d6e34029aa60d7")
else()
set(onnxruntime_HASH "SHA256=4c09d5acf2c2682b4eab1dc2f1ad98fc1fde5f5f1960063e337983ba59379a4b")
endif()

# If you don't have access to the Internet,
# please download onnxruntime to one of the following locations.
# You can add more if you want.
set(possible_file_locations
$ENV{HOME}/Downloads/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
${CMAKE_SOURCE_DIR}/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
${CMAKE_BINARY_DIR}/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
/tmp/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
/star-fj/fangjun/download/github/onnxruntime-linux-aarch64-gpu-1.11.0.tar.bz2
$ENV{HOME}/Downloads/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
${CMAKE_SOURCE_DIR}/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
${CMAKE_BINARY_DIR}/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
/tmp/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
/star-fj/fangjun/download/github/onnxruntime-linux-aarch64-gpu-${v}.tar.bz2
)

foreach(f IN LISTS possible_file_locations)
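For machines without Internet access, the comment above lists the local paths that are searched before any download is attempted. A sketch of pre-fetching the 1.16.0 archive into one of them and checking it against the SHA256 recorded in this file; the URL and hash are taken from the diff, and the availability of wget and sha256sum on the host is assumed:

```bash
# Sketch: pre-download the onnxruntime 1.16.0 GPU archive for offline builds.
# $HOME/Downloads is one of the listed possible_file_locations.
cd ~/Downloads

wget https://github.com/csukuangfj/onnxruntime-libs/releases/download/v1.16.0/onnxruntime-linux-aarch64-gpu-1.16.0.tar.bz2

# Verify against the 1.16.0 hash hard-coded in onnxruntime-linux-aarch64-gpu.cmake.
echo "4c09d5acf2c2682b4eab1dc2f1ad98fc1fde5f5f1960063e337983ba59379a4b  onnxruntime-linux-aarch64-gpu-1.16.0.tar.bz2" \
  | sha256sum -c -
```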
