Open1
onnxruntime-gpu v1.12.1 + CUDA11.7 + TensorRT 8.4.3 + OpenVINO2022.1 Execution Provider のビルド試行
# Launch the prebuilt CUDA 11.7 / cuDNN 8 / TensorRT 8.4.3 / OpenVINO 2022.1
# image, mounting the current directory into the container as the workdir.
# "$(pwd)" replaces the legacy backticks and is quoted so a working directory
# containing spaces does not break the bind mount (ShellCheck SC2006/SC2046).
docker run --rm -it --gpus all \
  -v "$(pwd)":/home/user/workdir \
  ghcr.io/pinto0309/openvino2tensorflow:base.11.7.1-cudnn8-tf2.10.0-trt8.4.3-openvino2022.1.0
# Enter the mounted workdir and fetch the exact onnxruntime release tag.
# Both the cd and the clone are guarded so that a failed mount or a failed
# clone does not let subsequent build steps run in the wrong directory.
cd /home/user/workdir || exit 1
git clone -b v1.12.1 https://github.com/microsoft/onnxruntime.git \
  && cd onnxruntime || exit 1
# Record the numpy version preinstalled in the image (output captured below).
pip show numpy
Name: numpy
Version: 1.23.2
# Confirm the TensorRT 8.4.3 packages present in the image (listing below).
dpkg -l | grep TensorRT
ii graphsurgeon-tf 8.4.3-1+cuda11.6 amd64 GraphSurgeon for TensorRT package
ii libnvinfer-bin 8.4.3-1+cuda11.6 amd64 TensorRT binaries
ii libnvinfer-dev 8.4.3-1+cuda11.6 amd64 TensorRT development libraries and headers
ii libnvinfer-plugin-dev 8.4.3-1+cuda11.6 amd64 TensorRT plugin libraries
ii libnvinfer-plugin8 8.4.3-1+cuda11.6 amd64 TensorRT plugin libraries
ii libnvinfer-samples 8.4.3-1+cuda11.6 all TensorRT samples
ii libnvinfer8 8.4.3-1+cuda11.6 amd64 TensorRT runtime libraries
ii libnvonnxparsers-dev 8.4.3-1+cuda11.6 amd64 TensorRT ONNX libraries
ii libnvonnxparsers8 8.4.3-1+cuda11.6 amd64 TensorRT ONNX libraries
ii libnvparsers-dev 8.4.3-1+cuda11.6 amd64 TensorRT parsers libraries
ii libnvparsers8 8.4.3-1+cuda11.6 amd64 TensorRT parsers libraries
ii onnx-graphsurgeon 8.4.3-1+cuda11.6 amd64 ONNX GraphSurgeon for TensorRT package
ii python3-libnvinfer 8.4.3-1+cuda11.6 amd64 Python 3 bindings for TensorRT
ii python3-libnvinfer-dev 8.4.3-1+cuda11.6 amd64 Python 3 development package for TensorRT
ii tensorrt 8.4.3.1-1+cuda11.6 amd64 Meta package for TensorRT
ii uff-converter-tf 8.4.3-1+cuda11.6 amd64 UFF converter for TensorRT package
# Make the onnxruntime build script executable, then pin CMake 3.24.1 via pip
# (presumably the image's bundled CMake is too old for this build — TODO confirm).
sudo chmod +x build.sh
sudo pip install cmake==3.24.1
# Build onnxruntime v1.12.1 with the CUDA, TensorRT and OpenVINO execution
# providers enabled, producing both the shared library and a Python wheel.
# "$(nproc)" is quoted to avoid accidental word-splitting of the command
# substitution (ShellCheck SC2046); nproc emits a single number, so behavior
# is unchanged.
./build.sh \
  --config Release \
  --cudnn_home /usr/lib/x86_64-linux-gnu/ \
  --cuda_home /usr/local/cuda \
  --use_tensorrt \
  --use_cuda \
  --tensorrt_home /usr/src/tensorrt/ \
  --use_openvino AUTO:GPU,CPU \
  --enable_pybind \
  --build_shared_lib \
  --build_wheel \
  --parallel "$(nproc)" \
  --skip_tests
# Locate the wheel produced by the build (path captured below).
find . -name "*.whl"
./build/Linux/Release/dist/onnxruntime_gpu-1.12.1-cp38-cp38-linux_x86_64.whl