
Attempting a CUDA + TensorRT 8.2.2 Provider-enabled build of onnxruntime-gpu 1.11.0 (as of February 26, 2022)

  • onnxruntime-gpu v1.11.0
docker run --gpus all -it --rm \
-v `pwd`:/home/user/workdir \
ghcr.io/pinto0309/openvino2tensorflow:latest

git clone https://github.com/microsoft/onnxruntime.git && cd onnxruntime
git checkout 5fbfca3d58b88d0da29c6b206c36980052a4392c

dpkg -l | grep TensorRT

ii  graphsurgeon-tf        8.2.2-1+cuda11.4   amd64 GraphSurgeon for TensorRT package
ii  libnvinfer-bin         8.2.2-1+cuda11.4   amd64 TensorRT binaries
ii  libnvinfer-dev         8.2.2-1+cuda11.4   amd64 TensorRT development libraries and headers
ii  libnvinfer-doc         8.2.2-1+cuda11.4   all   TensorRT documentation
ii  libnvinfer-plugin-dev  8.2.2-1+cuda11.4   amd64 TensorRT plugin libraries
ii  libnvinfer-plugin8     8.2.2-1+cuda11.4   amd64 TensorRT plugin libraries
ii  libnvinfer-samples     8.2.2-1+cuda11.4   all   TensorRT samples
ii  libnvinfer8            8.2.2-1+cuda11.4   amd64 TensorRT runtime libraries
ii  libnvonnxparsers-dev   8.2.2-1+cuda11.4   amd64 TensorRT ONNX libraries
ii  libnvonnxparsers8      8.2.2-1+cuda11.4   amd64 TensorRT ONNX libraries
ii  libnvparsers-dev       8.2.2-1+cuda11.4   amd64 TensorRT parsers libraries
ii  libnvparsers8          8.2.2-1+cuda11.4   amd64 TensorRT parsers libraries
ii  onnx-graphsurgeon      8.2.2-1+cuda11.4   amd64 ONNX GraphSurgeon for TensorRT package
ii  python3-libnvinfer     8.2.2-1+cuda11.4   amd64 Python 3 bindings for TensorRT
ii  python3-libnvinfer-dev 8.2.2-1+cuda11.4   amd64 Python 3 development package for TensorRT
ii  tensorrt               8.2.2.1-1+cuda11.4 amd64 Meta package of TensorRT
ii  uff-converter-tf       8.2.2-1+cuda11.4   amd64 UFF converter for TensorRT package

sudo chmod +x build.sh
sudo pip install cmake --upgrade

./build.sh --clean

./build.sh \
--config Release \
--cudnn_home /usr/lib/x86_64-linux-gnu/ \
--cuda_home /usr/local/cuda \
--use_tensorrt \
--use_cuda \
--tensorrt_home /usr/src/tensorrt/ \
--enable_pybind \
--build_wheel \
--parallel $(nproc) \
--skip_tests

find . -name "*.whl"
./build/Linux/Release/dist/onnxruntime_gpu-1.11.0-cp38-cp38-linux_x86_64.whl
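
Once the wheel is installed, a quick sanity check that the TensorRT Provider actually made it into the build (a minimal sketch; model.onnx is a placeholder for any ONNX model):

import onnxruntime as ort

# If the build succeeded, the TensorRT and CUDA providers should both be listed,
# e.g. ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']
print(ort.get_available_providers())

# model.onnx is a placeholder; substitute any ONNX model on hand
sess = ort.InferenceSession(
    'model.onnx',
    providers=['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'],
)
print(sess.get_providers())  # providers actually assigned to this session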

exit

OpenVINO Execution Provider build test

  • onnxruntime v1.11.0 with OpenVINO Execution Provider
docker run --gpus all -it --rm \
-v `pwd`:/home/user/workdir \
ghcr.io/pinto0309/openvino2tensorflow:latest

git clone https://github.com/microsoft/onnxruntime.git \
&& cd onnxruntime \
&& git checkout 5cbacec854f151549f13b4be0a19c65c01a5728d

sudo chmod +x build.sh
sudo pip install cmake --upgrade

./build.sh --clean

./build.sh \
--config Release \
--cudnn_home /usr/lib/x86_64-linux-gnu/ \
--cuda_home /usr/local/cuda \
--use_tensorrt \
--use_cuda \
--tensorrt_home /usr/src/tensorrt/ \
--use_openvino AUTO:GPU,CPU \
--enable_pybind \
--build_shared_lib \
--build_wheel \
--parallel $(nproc) \
--skip_tests

find . -name "*.whl"
./build/Linux/Release/dist/onnxruntime_gpu-1.11.0-cp38-cp38-linux_x86_64.whl

sudo pip3 uninstall -y onnxruntime onnxruntime-gpu
sudo pip3 install ./build/Linux/Release/dist/onnxruntime_gpu-1.11.0-cp38-cp38-linux_x86_64.whl
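
A minimal sketch to confirm the OpenVINO Execution Provider is usable after installing the wheel (model.onnx is a placeholder; the device_type value here is an assumption based on the OpenVINO EP's documented session options):

import onnxruntime as ort

# OpenVINOExecutionProvider should now appear alongside the TensorRT/CUDA/CPU providers
print(ort.get_available_providers())

# model.onnx is a placeholder; substitute any ONNX model on hand
sess = ort.InferenceSession(
    'model.onnx',
    providers=['OpenVINOExecutionProvider'],
    provider_options=[{'device_type': 'CPU_FP32'}],  # assumption: device_type per the OpenVINO EP options
)
print(sess.get_providers())  # providers actually assigned to this session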