Open3
Ubuntu18.04 + Python3.6 + CUDA11.4 + TensorRT 8.2.3 のDockerビルド試行 (Docker build trial for Ubuntu 18.04 + Python 3.6 + CUDA 11.4 + TensorRT 8.2.3)
# Base: NVIDIA CUDA 11.4 devel image on Ubuntu 18.04 (explicitly pinned tag).
FROM nvcr.io/nvidia/cuda:11.4.0-devel-ubuntu18.04
# Build-time only: suppress interactive apt prompts. Declared as ARG (not ENV)
# so the setting does not leak into the runtime environment of containers.
ARG DEBIAN_FRONTEND=noninteractive
ARG OSVER=ubuntu1804
ARG CPVER=cp36
ARG TENSORRTVER=cuda11.4-trt8.2.3.0-ga-20220113
ARG WKDIR=/home/user
WORKDIR ${WKDIR}
# The TensorRT local-repo installer .deb must sit next to this Dockerfile
# in the build context (download it from NVIDIA beforehand).
COPY nv-tensorrt-repo-${OSVER}-${TENSORRTVER}_1-1_amd64.deb .
# Install dependencies (1)
# OS-level build/runtime packages, installed in one layer with the apt cache
# cleaned in that same layer so it never persists in the image.
RUN apt-get update && apt-get install -y \
automake autoconf libpng-dev nano python3-pip \
curl zip unzip libtool swig zlib1g-dev pkg-config \
python3-mock libpython3-dev libpython3-all-dev \
g++ gcc make pciutils cpio gosu wget \
libgtk-3-dev libxtst-dev sudo apt-transport-https \
build-essential gnupg git xz-utils vim libyaml-cpp-dev \
libva-drm2 libva-x11-2 vainfo libva-wayland2 libva-glx2 \
libva-dev libdrm-dev xorg xorg-dev protobuf-compiler \
openbox libx11-dev libgl1-mesa-glx libgl1-mesa-dev \
libtbb2 libtbb-dev libopenblas-dev libopenmpi-dev \
software-properties-common \
# Turn on line numbers in nano for convenience inside the container.
&& sed -i 's/# set linenumbers/set linenumbers/g' /etc/nanorc \
# Use apt-get, not apt, in scripts: apt's CLI is not script-stable (DL3027).
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# python3 -> python
# Ubuntu 18.04 ships no /usr/bin/python; several converter tools expect one.
# -f makes the step idempotent if a link is already present.
RUN ln -sf /usr/bin/python3 /usr/bin/python
# Install dependencies (2)
# Python tooling for the model-conversion workflow (TF / ONNX / Paddle / Torch).
# NOTE(review): most packages are deliberately unpinned and installed one at a
# time, mirroring the original recipe; pinning (hadolint DL3013) would improve
# reproducibility but risks breaking this fast-moving converter toolchain.
# The original layer ended with `apt clean && rm -rf /var/lib/apt/lists/*`,
# which is a no-op here (no apt commands run in this layer) and was removed.
RUN pip3 install --upgrade pip \
&& pip install --upgrade numpy==1.19.5 \
&& pip install --upgrade tensorflowjs \
&& pip install --upgrade coremltools \
&& pip install --upgrade paddlepaddle \
&& pip install --upgrade lap \
&& pip install --upgrade pycocotools \
&& pip install --upgrade scipy \
&& pip install --upgrade paddle2onnx \
&& pip install --upgrade onnx \
&& pip install --upgrade onnxruntime-extensions \
&& pip install --upgrade onnx-simplifier \
&& pip install --upgrade onnxmltools \
&& pip install --upgrade onnxconverter-common \
&& pip install --upgrade tf2onnx \
&& pip install --upgrade onnx-tf \
&& pip install --upgrade tensorflow-datasets \
&& pip install --upgrade openvino2tensorflow \
&& pip install --upgrade tflite2tensorflow \
&& pip install --upgrade gdown \
&& pip install --upgrade PyYAML \
&& pip install --upgrade matplotlib \
&& pip install --upgrade tf_slim \
&& pip install --upgrade pandas \
&& pip install --upgrade numexpr \
&& pip install --upgrade onnx2json \
&& pip install --upgrade json2onnx \
&& pip install --upgrade gluoncv \
&& pip install --upgrade dgl \
&& pip install --upgrade cmake \
&& pip install --upgrade ninja \
# onnx_graphsurgeon is published on NVIDIA's own index, not PyPI.
&& python3 -m pip install onnx_graphsurgeon \
--index-url https://pypi.ngc.nvidia.com \
# CUDA 11.3 wheels are forward-compatible with the CUDA 11.4 runtime here.
&& pip install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio==0.10.1+cu113 \
-f https://download.pytorch.org/whl/cu113/torch_stable.html \
&& pip install pycuda==2021.1 \
&& pip install scikit-image \
&& ldconfig \
# Drop pip's download cache in the same layer so it is not baked into the image.
&& pip cache purge
# Install TensorRT additional package
# Registers the local TensorRT repo from the copied .deb, installs TensorRT
# plus the TF converter helpers, and builds the bundled trtexec tool.
RUN dpkg -i nv-tensorrt-repo-${OSVER}-${TENSORRTVER}_1-1_amd64.deb \
&& apt-key add /var/nv-tensorrt-repo-${OSVER}-${TENSORRTVER}/7fa2af80.pub \
&& apt-get update \
&& apt-get install -y \
tensorrt uff-converter-tf graphsurgeon-tf \
python3-libnvinfer-dev onnx-graphsurgeon \
# Delete the large installer .deb in the same layer so it never persists.
&& rm nv-tensorrt-repo-${OSVER}-${TENSORRTVER}_1-1_amd64.deb \
&& cd /usr/src/tensorrt/samples/trtexec \
# Parallel build; trtexec ends up under /usr/src/tensorrt/bin.
&& make -j$(nproc) \
# apt-get (not apt) for script stability (hadolint DL3027).
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Install onnx-tensorrt
# Build and install the ONNX -> TensorRT converter from a pinned commit,
# pointing CMake at the TensorRT sources installed above.
RUN git clone --recursive https://github.com/onnx/onnx-tensorrt \
&& git -C onnx-tensorrt checkout 1f041ce6d7b30e9bce0aacb2243309edffc8fb3c \
&& mkdir -p onnx-tensorrt/build \
&& cd onnx-tensorrt/build \
&& cmake -DTENSORRT_ROOT=/usr/src/tensorrt .. \
&& make -j$(nproc) \
&& make install
# Install torch2trt
# Pinned commit plus the upstream patch for
# https://github.com/NVIDIA-AI-IOT/torch2trt/issues/619 applied on top.
# (Comment moved out of the RUN continuation: comments embedded between
# backslash-continued lines are parser-fragile across Dockerfile tooling.)
RUN git clone https://github.com/NVIDIA-AI-IOT/torch2trt \
&& cd torch2trt \
&& git checkout 0400b38123d01cc845364870bdf0a0044ea2b3b2 \
&& wget https://github.com/NVIDIA-AI-IOT/torch2trt/commit/8b9fb46ddbe99c2ddf3f1ed148c97435cbeb8fd3.patch \
&& git apply 8b9fb46ddbe99c2ddf3f1ed148c97435cbeb8fd3.patch \
&& python3 setup.py install \
# Remove the downloaded patch so it does not linger in the image layer.
&& rm 8b9fb46ddbe99c2ddf3f1ed148c97435cbeb8fd3.patch
# Create a user who can sudo in the Docker container
# Sets root's password to "root" and the user's password to its username
# ("user" by default). The /etc/sudoers.d drop-in grants the user's group
# (%user) passwordless sudo; mode 0440 is what sudo requires for files in
# /etc/sudoers.d. NOTE(review): fixed, well-known passwords — acceptable for
# a local dev container, not for anything exposed.
ENV USERNAME=user
RUN echo "root:root" | chpasswd \
&& adduser --disabled-password --gecos "" "${USERNAME}" \
&& echo "${USERNAME}:${USERNAME}" | chpasswd \
&& echo "%${USERNAME} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers.d/${USERNAME} \
&& chmod 0440 /etc/sudoers.d/${USERNAME}
# All subsequent instructions (and the container default) run as this user.
USER ${USERNAME}
# Install the custom .bashrc and hand ownership of the home dir (and the file)
# to the unprivileged user. 644 instead of the original 777: a world-writable
# .bashrc would let any process in the container inject shell code into the
# user's login shell. The user must own the file because the next layer
# appends to it while running as ${USERNAME}.
COPY bashrc ${WKDIR}/.bashrc
RUN sudo chown ${USERNAME}:${USERNAME} ${WKDIR} ${WKDIR}/.bashrc \
&& sudo chmod 644 ${WKDIR}/.bashrc
# Final processing of onnx-tensorrt install
# Appends a snippet to the user's .bashrc. Quoting matters here:
#   - single-quoted echoes are written literally and evaluated at every
#     container login (at build time no GPU is visible, so the GPU-dependent
#     step is deferred to runtime);
#   - double-quoted echoes are expanded NOW, baking the build-time values of
#     PATH/HOME/WKDIR into the file.
# At login: if torch reports CUDA available, the onnx-tensorrt Python
# bindings are (re)installed via setup.py.
# NOTE(review): PATH gains /onnx-tensorrt/build, but the repo was cloned to
# ${HOME}/onnx-tensorrt above — confirm which path is actually intended.
# The final two `cd`s leave new shells in ${HOME}/workdir (the volume mount).
RUN echo 'GPU=$(python3 -c "import torch;print(torch.cuda.is_available())")' >> ${HOME}/.bashrc \
&& echo 'if [ $GPU = "True" ]; then' >> ${HOME}/.bashrc \
&& echo "export PATH=${PATH}:/usr/src/tensorrt/bin:/onnx-tensorrt/build" >> ${HOME}/.bashrc \
&& echo "cd ${HOME}/onnx-tensorrt" >> ${HOME}/.bashrc \
&& echo "sudo python setup.py install" >> ${HOME}/.bashrc \
&& echo "fi" >> ${HOME}/.bashrc \
&& echo "cd ${WKDIR}" >> ${HOME}/.bashrc \
&& echo "cd ${HOME}/workdir" >> ${HOME}/.bashrc
- GUI無効 最小構成 (minimal configuration, GUI disabled)
docker run --rm -it --gpus all \
-v `pwd`:/home/user/workdir \
pinto0309/ubuntu1804-cuda114-tensorrt8.2.3:latest
or
- GUI有効 フル構成 (full configuration, GUI enabled; shares the host X11 socket)
xhost +local: && \
docker run --gpus all -it --rm \
-v `pwd`:/home/user/workdir \
-v /tmp/.X11-unix/:/tmp/.X11-unix:rw \
--net=host \
-e XDG_RUNTIME_DIR=$XDG_RUNTIME_DIR \
-e DISPLAY=$DISPLAY \
--privileged \
pinto0309/ubuntu1804-cuda114-tensorrt8.2.3:latest
docker pull pinto0309/ubuntu1804-cuda114-tensorrt8.2.3:latest