Open3

onnxruntime-gpu v1.16.0 + CUDA 11.8 + TensorRT 8.5.3 のビルド (RTX3070)

PINTOPINTO
  • libgtest で下記のエラーが発生する
/usr/bin/ld: /usr/local/lib/libgtest.a(gtest-all.cc.o): relocation R_X86_64_PC32 against symbol `stderr@@GLIBC_2.2.5' can not be used when making a shared object; recompile with -fPIC
### build gtest
# Choose a googletest release to build. 1.11.0 matches the package that
# Ubuntu 22.04 (the host OS) ships, so we mirror that version here:
# https://packages.ubuntu.com/search?keywords=googletest
GTEST_VERSION="1.11.0"
wget "https://github.com/google/googletest/archive/release-${GTEST_VERSION}.tar.gz" \
  && tar xzf "release-${GTEST_VERSION}.tar.gz" \
  && cd "googletest-release-${GTEST_VERSION}"

# Build and install googletest with -fPIC so the static libgtest.a can be
# linked into shared objects — this fixes the
# "relocation R_X86_64_PC32 against symbol `stderr@@GLIBC_2.2.5'" ld error
# quoted above.
# mkdir -p makes the step idempotent on re-runs; -j"$(nproc)" parallelizes
# the build (same parallelism the onnxruntime build below already uses).
mkdir -p build \
  && cd build \
  && cmake .. -DCMAKE_CXX_FLAGS="-fPIC" \
  && make -j"$(nproc)" \
  && sudo make install \
  && cd ../..


### build onnxruntime
# Fetch the sources at the tagged v1.16.0 release.
git clone --branch v1.16.0 https://github.com/microsoft/onnxruntime.git \
  && cd onnxruntime

# Confirm the installed TensorRT packages (expecting 8.5.3 + CUDA 11.8,
# as shown in the transcript below).
dpkg -l | grep "TensorRT"

ii  graphsurgeon-tf        8.5.3-1+cuda11.8   amd64 GraphSurgeon for TensorRT package
ii  libnvinfer-bin         8.5.3-1+cuda11.8   amd64 TensorRT binaries
ii  libnvinfer-dev         8.5.3-1+cuda11.8   amd64 TensorRT development libraries and headers
ii  libnvinfer-plugin-dev  8.5.3-1+cuda11.8   amd64 TensorRT plugin libraries
ii  libnvinfer-plugin8     8.5.3-1+cuda11.8   amd64 TensorRT plugin libraries
ii  libnvinfer-samples     8.5.3-1+cuda11.8   all   TensorRT samples
ii  libnvinfer8            8.5.3-1+cuda11.8   amd64 TensorRT runtime libraries
ii  libnvonnxparsers-dev   8.5.3-1+cuda11.8   amd64 TensorRT ONNX libraries
ii  libnvonnxparsers8      8.5.3-1+cuda11.8   amd64 TensorRT ONNX libraries
ii  libnvparsers-dev       8.5.3-1+cuda11.8   amd64 TensorRT parsers libraries
ii  libnvparsers8          8.5.3-1+cuda11.8   amd64 TensorRT parsers libraries
ii  onnx-graphsurgeon      8.5.3-1+cuda11.8   amd64 ONNX GraphSurgeon for TensorRT package
ii  python3-libnvinfer     8.5.3-1+cuda11.8   amd64 Python 3 bindings for TensorRT
ii  python3-libnvinfer-dev 8.5.3-1+cuda11.8   amd64 Python 3 development package for TensorRT
ii  tensorrt               8.5.3.1-1+cuda11.8 amd64 Meta package for TensorRT
ii  tensorrt-dev           8.5.3.1-1+cuda11.8 amd64 Meta package for TensorRT development libraries
ii  tensorrt-libs          8.5.3.1-1+cuda11.8 amd64 Meta package for TensorRT runtime libraries
ii  uff-converter-tf       8.5.3-1+cuda11.8   amd64 UFF converter for TensorRT package

# build.sh was just cloned by the current user, so plain chmod is enough —
# sudo is unnecessary here (and undesirable: it can leave root-owned state
# behind in a user-owned tree).
chmod +x build.sh
# Pin a known-good CMake for the onnxruntime v1.16.0 build.
pip install cmake==3.26.4

# Extra cmake definitions are passed through --cmake_extra_defines; e.g.
# -DCMAKE_POSITION_INDEPENDENT_CODE=ON is spelled as:
#   --cmake_extra_defines CMAKE_POSITION_INDEPENDENT_CODE=ON

# CMAKE_CUDA_ARCHITECTURES=86 targets the RTX 3070 per the NVIDIA table:
# https://developer.nvidia.com/cuda-gpus

# Collect the build flags in an array so they stay readable and are
# passed to build.sh as properly separated words.
build_args=(
  --config Release
  --cmake_generator Ninja
  --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=86
  --cudnn_home /usr/lib/x86_64-linux-gnu/
  --cuda_home /usr/local/cuda
  --use_tensorrt
  --use_cuda
  --tensorrt_home /usr/src/tensorrt/
  --enable_pybind
  --build_wheel
  --parallel "$(nproc)"
  --compile_no_warning_as_error
  --skip_tests
)
./build.sh "${build_args[@]}"

# Locate the wheel produced by the build (shown in the transcript below).
find . -name '*.whl'
./build/Linux/Release/dist/onnxruntime_gpu-1.16.0-cp310-cp310-linux_x86_64.whl