onnxruntime-gpu v1.20.0 + CUDA 12.5 + TensorRT 10.2.0 のビルド (RTX3070)

So far the build has not succeeded because of an Eigen issue.

# Compute capability lookup for CMAKE_CUDA_ARCHITECTURES (used in the build command below)
https://developer.nvidia.com/cuda-gpus

pip install cmake==3.27.5 numpy==2.1.2
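
A quick sanity check, assuming the pip-installed cmake is the one on PATH:
cmake --version
python3 -c "import numpy; print(numpy.__version__)"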

### build gtest
# Pick the version to build from the list below
# The host PC is Ubuntu 22.04, so 1.11.0 was chosen to match the distro package
# https://packages.ubuntu.com/search?keywords=googletest
wget https://github.com/google/googletest/archive/release-1.11.0.tar.gz \
&& tar xzf release-1.11.0.tar.gz \
&& cd googletest-release-1.11.0

mkdir build \
&& cd build \
&& cmake .. -DCMAKE_CXX_FLAGS="-fPIC" \
&& make \
&& sudo make install \
&& cd ../..
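
To confirm the gtest libraries and headers were installed (make install defaults to /usr/local), something like this should do:
ls /usr/local/lib/libgtest* /usr/local/include/gtest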


### build onnxruntime
git clone -b v1.20.0 https://github.com/microsoft/onnxruntime.git \
&& cd onnxruntime

dpkg -l | grep TensorRT

ii  libnvinfer-bin                                     10.2.0.19-1+cuda12.5                                       amd64        TensorRT binaries
ii  libnvinfer-dev                                     10.2.0.19-1+cuda12.5                                       amd64        TensorRT development libraries
ii  libnvinfer-dispatch-dev                            10.2.0.19-1+cuda12.5                                       amd64        TensorRT development dispatch runtime libraries
ii  libnvinfer-dispatch10                              10.2.0.19-1+cuda12.5                                       amd64        TensorRT dispatch runtime library
ii  libnvinfer-headers-dev                             10.2.0.19-1+cuda12.5                                       amd64        TensorRT development headers
ii  libnvinfer-headers-plugin-dev                      10.2.0.19-1+cuda12.5                                       amd64        TensorRT plugin headers
ii  libnvinfer-lean-dev                                10.2.0.19-1+cuda12.5                                       amd64        TensorRT lean runtime libraries
ii  libnvinfer-lean10                                  10.2.0.19-1+cuda12.5                                       amd64        TensorRT lean runtime library
ii  libnvinfer-plugin-dev                              10.2.0.19-1+cuda12.5                                       amd64        TensorRT plugin libraries
ii  libnvinfer-plugin10                                10.2.0.19-1+cuda12.5                                       amd64        TensorRT plugin libraries
ii  libnvinfer-samples                                 10.2.0.19-1+cuda12.5                                       all          TensorRT samples
ii  libnvinfer-vc-plugin-dev                           10.2.0.19-1+cuda12.5                                       amd64        TensorRT vc-plugin library
ii  libnvinfer-vc-plugin10                             10.2.0.19-1+cuda12.5                                       amd64        TensorRT vc-plugin library
ii  libnvinfer10                                       10.2.0.19-1+cuda12.5                                       amd64        TensorRT runtime libraries
ii  libnvonnxparsers-dev                               10.2.0.19-1+cuda12.5                                       amd64        TensorRT ONNX libraries
ii  libnvonnxparsers10                                 10.2.0.19-1+cuda12.5                                       amd64        TensorRT ONNX libraries
ii  python3-libnvinfer                                 10.2.0.19-1+cuda12.5                                       amd64        Python 3 bindings for TensorRT standard runtime
ii  python3-libnvinfer-dev                             10.2.0.19-1+cuda12.5                                       amd64        Python 3 development package for TensorRT standard runtime
ii  python3-libnvinfer-dispatch                        10.2.0.19-1+cuda12.5                                       amd64        Python 3 bindings for TensorRT dispatch runtime
ii  python3-libnvinfer-lean                            10.2.0.19-1+cuda12.5                                       amd64        Python 3 bindings for TensorRT lean runtime
ii  tensorrt                                           10.2.0.19-1+cuda12.5                                       amd64        Meta package for TensorRT
ii  tensorrt-dev                                       10.2.0.19-1+cuda12.5                                       amd64        Meta package for TensorRT development libraries
ii  tensorrt-libs                                      10.2.0.19-1+cuda12.5                                       amd64        Meta package for TensorRT runtime libraries
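
Before running build.sh it is worth double-checking that the CUDA toolkit and cuDNN the flags below point at are actually present (assuming CUDA 12.5 under /usr/local/cuda-12.5 and cuDNN installed from the NVIDIA apt packages):
/usr/local/cuda-12.5/bin/nvcc --version
dpkg -l | grep -i cudnn
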
  • Patch to work around the Eigen3 reference bug
eigen_patch_22463.diff
diff --git a/cmake/external/onnxruntime_external_deps.cmake b/cmake/external/onnxruntime_external_deps.cmake
index 85746027d4..5ff06f77cd 100644
--- a/cmake/external/onnxruntime_external_deps.cmake
+++ b/cmake/external/onnxruntime_external_deps.cmake
@@ -543,12 +543,12 @@ if(TARGET ONNX::onnx_proto AND NOT TARGET onnx_proto)
   add_library(onnx_proto ALIAS ONNX::onnx_proto)
 endif()
 
-find_package(Eigen3 CONFIG)
-if(Eigen3_FOUND)
-  get_target_property(eigen_INCLUDE_DIRS Eigen3::Eigen INTERFACE_INCLUDE_DIRECTORIES)
-else()
-  include(eigen) # FetchContent
-endif()
+# find_package(Eigen3 CONFIG)
+# if(Eigen3_FOUND)
+#   get_target_property(eigen_INCLUDE_DIRS Eigen3::Eigen INTERFACE_INCLUDE_DIRECTORIES)
+# else()
+#   include(eigen) # FetchContent
+# endif()
 
 if(onnxruntime_USE_VCPKG)
   find_package(wil CONFIG REQUIRED)

git apply eigen_patch_22463.diff
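
Since the patch comments out onnxruntime's own Eigen3 lookup and the build below passes --use_preinstalled_eigen, the Eigen headers must exist under /usr/include/eigen3; on Ubuntu they come from libeigen3-dev, so install it if missing:
sudo apt install -y libeigen3-dev
ls /usr/include/eigen3/Eigen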

sudo chmod +x build.sh

# Use --cmake_extra_defines to pass additional cmake options
# e.g. -DCMAKE_POSITION_INDEPENDENT_CODE=ON is specified as follows:
# --cmake_extra_defines CMAKE_POSITION_INDEPENDENT_CODE=ON

# CMAKE_CUDA_ARCHITECTURES
# The RTX 3070 (Ampere) is compute capability 8.6, hence CMAKE_CUDA_ARCHITECTURES=86
# https://developer.nvidia.com/cuda-gpus

./build.sh \
--config Release \
--cmake_generator Ninja \
--cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=86 \
--cudnn_home /usr/lib/x86_64-linux-gnu/ \
--cuda_home /usr/local/cuda-12.5 \
--use_tensorrt \
--use_cuda \
--tensorrt_home /usr/src/tensorrt/ \
--enable_pybind \
--build_wheel \
--parallel $(nproc) \
--compile_no_warning_as_error \
--use_preinstalled_eigen \
--eigen_path /usr/include/eigen3/Eigen/ \
--skip_tests

find . -name "*.whl"
./build/Linux/Release/dist/onnxruntime_gpu-1.18.1-cp310-cp310-linux_x86_64.whl
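
As a final check (the wheel filename above is simply whatever find returned in this environment), installing the wheel and listing the execution providers is expected to show TensorrtExecutionProvider and CUDAExecutionProvider if the build succeeded:
pip install ./build/Linux/Release/dist/onnxruntime_gpu-*.whl
python3 -c "import onnxruntime as ort; print(ort.__version__, ort.get_available_providers())"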