# Open1
# Container with CUDA 11.6, cuDNN 8.6.0, TensorRT 8.5.3, onnxruntime 1.13.1
# Base image: CUDA 11.6.2 + cuDNN 8 devel toolchain on Ubuntu 20.04 (explicitly pinned tag)
FROM nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04
### CUDA version
# nvcc --version
# nvcc: NVIDIA (R) Cuda compiler driver
# Copyright (c) 2005-2022 NVIDIA Corporation
# Built on Tue_Mar__8_18:18:20_PST_2022
# Cuda compilation tools, release 11.6, V11.6.124
# Build cuda_11.6.r11.6/compiler.31057947_0
### cudnn version
# cat /usr/include/cudnn_version.h
# #define CUDNN_MAJOR 8
# #define CUDNN_MINOR 6
# #define CUDNN_PATCHLEVEL 0
### TensorRT version
# dpkg -l | grep TensorRT
# libnvinfer8 8.5.3-1+cuda11.8
### onnxruntime version
# git clone -b v1.13.1 https://github.com/microsoft/onnxruntime.git && cd onnxruntime
# chmod +x build.sh
#
# ./build.sh \
# --config Release \
# --cudnn_home /usr/lib/x86_64-linux-gnu/ \
# --cuda_home /usr/local/cuda \
# --use_tensorrt \
# --use_cuda \
# --tensorrt_home /usr/src/tensorrt/ \
# --enable_pybind \
# --build_wheel \
# --parallel $(nproc) \
# --skip_tests
# DEBIAN_FRONTEND is a build-time-only setting; declaring it as ARG (not ENV)
# keeps it out of the runtime environment of the final image while still
# suppressing interactive prompts in every apt-get step of this stage.
ARG DEBIAN_FRONTEND=noninteractive
# Component versions for the local installer packages copied in below.
ARG OSVER=ubuntu2004
ARG TENSORFLOWVER=2.12.0rc0
# CPython ABI tag of the onnxruntime wheel (Python 3.8 on Ubuntu 20.04).
ARG CPVER=cp38
ARG OPENVINOGEN=2022
ARG OPENVINOVER=${OPENVINOGEN}.3.0
ARG TENSORRTVER=8.5.3
ARG CUDNNVER=8.6.0.163
ARG ONNXRUNTIMEVER=1.13.1
# Home directory of the non-root user created near the end of the file.
ARG WKDIR=/home/user
# dash -> bash
# Ubuntu's /bin/sh defaults to dash; reconfigure so sh resolves to bash,
# allowing bash-specific syntax in subsequent RUN instructions and user shells.
RUN echo "dash dash/sh boolean false" | debconf-set-selections \
&& dpkg-reconfigure -p low dash
# Custom shell config for the container user (ownership is fixed up later, after the user exists).
COPY bashrc ${WKDIR}/.bashrc
WORKDIR ${WKDIR}
# Stage local installer packages (cuDNN/TensorRT .deb files, onnxruntime wheel) into the image.
COPY packages/* ${WKDIR}/
# Install dependencies (1): build toolchain, Python dev headers, X11/VA-API
# graphics stacks, and misc utilities used by the conversion tools below.
# update + install + cleanup in a single layer so no stale apt metadata persists.
RUN apt-get update \
    && apt-get install -y \
    automake autoconf libpng-dev nano python3-pip \
    curl zip unzip libtool swig zlib1g-dev pkg-config \
    python3-mock libpython3-dev libpython3-all-dev \
    g++ gcc make pciutils cpio gosu wget libmkldnn-dev \
    libgtk-3-dev libxtst-dev sudo apt-transport-https \
    build-essential gnupg git xz-utils vim libyaml-cpp-dev \
    libva-drm2 libva-x11-2 vainfo libva-wayland2 libva-glx2 \
    libva-dev libdrm-dev xorg xorg-dev protobuf-compiler \
    openbox libx11-dev libgl1-mesa-glx libgl1-mesa-dev \
    libtbb2 libtbb-dev libopenblas-dev libopenmpi-dev \
    python-is-python3 software-properties-common \
    libxcb-xinerama0 patchelf libusb-1.0-0-dev \
    # enable line numbers in nano for convenience
    && sed -i 's/# set linenumbers/set linenumbers/g' /etc/nanorc \
    # use apt-get (stable scripting CLI) rather than apt, and clean in the same layer
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
# Install dependencies (2): Python model-conversion/inference toolchain.
# NOTE: most packages are intentionally unpinned (tracking latest); numpy and
# pycuda are pinned where a specific version is required.
RUN pip3 install --upgrade pip \
    && pip install --upgrade numpy==1.24.2 \
    && pip install --upgrade tensorflowjs \
    && pip install --upgrade coremltools \
    && pip install --upgrade paddlepaddle \
    && pip install --upgrade lap \
    && pip install --upgrade pycocotools \
    && pip install --upgrade scipy \
    && pip install --upgrade paddle2onnx \
    && pip install --upgrade onnx \
    && pip install --upgrade onnxruntime-extensions \
    && pip install --upgrade onnxoptimizer \
    && pip install --upgrade onnxsim \
    && pip install --upgrade onnxmltools \
    && pip install --upgrade onnxconverter-common \
    && pip install --upgrade tf2onnx \
    && pip install --upgrade onnx2tf \
    && pip install --upgrade tensorflow-datasets \
    && pip install --upgrade openvino2tensorflow \
    && pip install --upgrade tflite2tensorflow \
    && pip install --upgrade gdown \
    && pip install --upgrade PyYAML \
    && pip install --upgrade matplotlib \
    && pip install --upgrade tf_slim \
    && pip install --upgrade pandas \
    && pip install --upgrade numexpr \
    && pip install --upgrade simple-onnx-processing-tools \
    && pip install --upgrade gluoncv \
    && pip install --upgrade dgl \
    && pip install --upgrade cmake \
    && pip install --upgrade ninja \
    && pip install --upgrade Cython \
    && pip install --upgrade setuptools \
    && pip install --upgrade wheel \
    && pip install --upgrade pafy \
    && pip install --upgrade youtube-dl \
    && pip install --upgrade blobconverter \
    && pip install pycuda==2022.2 \
    # replace any stock onnxruntime with the locally built TensorRT-enabled wheel
    && pip uninstall -y onnxruntime onnxruntime-gpu \
    && pip install ${WKDIR}/onnxruntime_gpu-${ONNXRUNTIMEVER}-${CPVER}-none-linux_x86_64.whl \
    && rm ${WKDIR}/onnxruntime_gpu-${ONNXRUNTIMEVER}-${CPVER}-none-linux_x86_64.whl \
    # onnx_graphsurgeon is published on NVIDIA's index, not PyPI
    && python -m pip install onnx_graphsurgeon \
    --index-url https://pypi.ngc.nvidia.com \
    # CUDA 11.6 builds of PyTorch from the official wheel index
    && pip3 install torch torchvision torchaudio \
    --extra-index-url https://download.pytorch.org/whl/cu116 \
    && pip install scikit-image \
    && pip install performance-monitor \
    && pip install graphviz \
    && pip install pydot \
    && ldconfig \
    && pip cache purge \
    # use apt-get (stable scripting CLI) rather than apt for cleanup
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
# Install cuDNN from the local repo package staged earlier.
# This RUN executes as root, so no sudo is needed (hadolint DL3004).
# NOTE(review): the +cuda11.8 suffix is the packaging NVIDIA ships for this
# cuDNN release (see the version comments at the top of this file).
RUN dpkg -i ${WKDIR}/cudnn-local-repo-${OSVER}-${CUDNNVER}_1.0-1_amd64.deb \
    && cp /var/cudnn-local-repo-*/cudnn-local-*-keyring.gpg /usr/share/keyrings/ \
    && apt-get update \
    && apt-get install -y --no-install-recommends --allow-change-held-packages \
    libcudnn8=${CUDNNVER}-1+cuda11.8 \
    libcudnn8-dev=${CUDNNVER}-1+cuda11.8 \
    # remove the installer and apt lists in the same layer (matches the TensorRT step)
    && rm ${WKDIR}/cudnn-local-repo-${OSVER}-${CUDNNVER}_1.0-1_amd64.deb \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
# Install TensorRT additional package from the local repo .deb.
# All packages are pinned to the exact 8.5.3 build; +cuda11.8 is the packaging
# suffix of this TensorRT release (see the `dpkg -l` note at the top of the file).
RUN dpkg -i ${WKDIR}/nv-tensorrt-local-repo-${OSVER}-${TENSORRTVER}-cuda-11.8_1.0-1_amd64.deb \
    && cp /var/nv-tensorrt-local-repo-${OSVER}-${TENSORRTVER}-cuda-11.8/*-keyring.gpg /usr/share/keyrings/ \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
    tensorrt=${TENSORRTVER}.1-1+cuda11.8 \
    tensorrt-dev=${TENSORRTVER}.1-1+cuda11.8 \
    tensorrt-libs=${TENSORRTVER}.1-1+cuda11.8 \
    uff-converter-tf=${TENSORRTVER}-1+cuda11.8 \
    python3-libnvinfer-dev=${TENSORRTVER}-1+cuda11.8 \
    python3-libnvinfer=${TENSORRTVER}-1+cuda11.8 \
    libnvparsers-dev=${TENSORRTVER}-1+cuda11.8 \
    libnvparsers8=${TENSORRTVER}-1+cuda11.8 \
    libnvonnxparsers-dev=${TENSORRTVER}-1+cuda11.8 \
    libnvonnxparsers8=${TENSORRTVER}-1+cuda11.8 \
    libnvinfer-samples=${TENSORRTVER}-1+cuda11.8 \
    libnvinfer-plugin-dev=${TENSORRTVER}-1+cuda11.8 \
    libnvinfer-plugin8=${TENSORRTVER}-1+cuda11.8 \
    libnvinfer-dev=${TENSORRTVER}-1+cuda11.8 \
    libnvinfer-bin=${TENSORRTVER}-1+cuda11.8 \
    libnvinfer8=${TENSORRTVER}-1+cuda11.8 \
    graphsurgeon-tf=${TENSORRTVER}-1+cuda11.8 \
    onnx-graphsurgeon=${TENSORRTVER}-1+cuda11.8 \
    # drop the installer in the same layer that consumed it
    && rm ${WKDIR}/nv-tensorrt-local-repo-${OSVER}-${TENSORRTVER}-cuda-11.8_1.0-1_amd64.deb \
    # build the trtexec benchmarking/conversion tool shipped with the samples
    && cd /usr/src/tensorrt/samples/trtexec \
    && make \
    # use apt-get (stable scripting CLI) rather than apt for cleanup
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
# Install onnx-tensorrt: clone the 8.5-GA release (matching TensorRT 8.5.x),
# configure an out-of-source build against the TensorRT tree, then build and
# install the parser libraries system-wide.
RUN git clone -b release/8.5-GA --recursive https://github.com/onnx/onnx-tensorrt \
    && mkdir -p onnx-tensorrt/build \
    && cd onnx-tensorrt/build \
    && cmake .. -DTENSORRT_ROOT=/usr/src/tensorrt \
    && make -j$(nproc) \
    && make install
# Clear caches (defensive; earlier layers already clean up after themselves).
# apt-get is used instead of apt, which warns about an unstable CLI in scripts.
RUN apt-get clean \
    && rm -rf /var/lib/apt/lists/*
# Create a user who can sudo in the Docker container
ENV USERNAME=user
# Set root and user passwords, create the user, and grant passwordless sudo
# via a dedicated sudoers.d fragment (0440 as required by sudo).
RUN echo "root:root" | chpasswd \
    && adduser --disabled-password --gecos "" "${USERNAME}" \
    && echo "%${USERNAME} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers.d/${USERNAME} \
    && echo "${USERNAME}:${USERNAME}" | chpasswd \
    && chmod 0440 /etc/sudoers.d/${USERNAME}
USER ${USERNAME}
# Hand the workdir and the root-copied .bashrc over to the user.
# Ownership (plus 644) is least-privilege; the previous chmod 777 made the
# user's .bashrc world-writable, a privilege-escalation foothold.
RUN sudo chown ${USERNAME}:${USERNAME} ${WKDIR} \
    && sudo chown ${USERNAME}:${USERNAME} ${WKDIR}/.bashrc \
    && sudo chmod 644 ${WKDIR}/.bashrc
# Final processing of onnx-tensorrt install.
# The python bindings need a visible GPU, which is unavailable at build time,
# so installation is deferred to the first interactive shell: the snippet
# checks torch.cuda at login and runs setup.py install only when a GPU exists.
# Single quotes defer expansion to login time; double quotes bake in the
# build-time values of PATH/HOME/WKDIR.
RUN echo 'GPU=$(python3 -c "import torch;print(torch.cuda.is_available())")' >> ${HOME}/.bashrc \
    && echo 'if [ $GPU = "True" ]; then' >> ${HOME}/.bashrc \
    # NOTE(review): onnx-tensorrt is cloned under ${HOME} (the WORKDIR), so the
    # build dir is ${HOME}/onnx-tensorrt/build — the previous /onnx-tensorrt/build
    # pointed at a path that does not exist in this image.
    && echo "export PATH=${PATH}:/usr/src/tensorrt/bin:${HOME}/onnx-tensorrt/build" >> ${HOME}/.bashrc \
    && echo "cd ${HOME}/onnx-tensorrt" >> ${HOME}/.bashrc \
    && echo "sudo python setup.py install" >> ${HOME}/.bashrc \
    && echo "fi" >> ${HOME}/.bashrc \
    # land in ${HOME}/workdir when it exists (a mounted volume, presumably —
    # TODO confirm); the preceding cd ${WKDIR} is the fallback location.
    && echo "cd ${WKDIR}" >> ${HOME}/.bashrc \
    && echo "cd ${HOME}/workdir" >> ${HOME}/.bashrc \
    && echo "export CUDA_MODULE_LOADING=LAZY" >> ${HOME}/.bashrc