add docker (open-mmlab#15)
Need tests...
AllentDan authored Jul 13, 2021
1 parent 7227de9 commit 0eca9eb
Showing 3 changed files with 156 additions and 0 deletions.
75 changes: 75 additions & 0 deletions docker/Dockerfile
@@ -0,0 +1,75 @@
# This is a Dockerfile that can be used directly for mmlab deployment,
# including the onnxruntime dependencies. It will be deprecated in the future
# and should not be exposed to customers.
ARG OS_VERSION=18.04
ARG PYTHON_VERSION=3.7.2
ARG PYTORCH_VERSION="1.8.1"
ARG CUDA_VERSION="11.1"
ARG CUDNN_VERSION="8"

FROM pytorch/pytorch:${PYTORCH_VERSION}-cuda${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel as torch
RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
ccache \
cmake \
wget \
git \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /workspace
COPY TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz /workspace/TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz

ENV TENSORRT_DIR=/workspace/TensorRT-7.2.3.4
ENV ONNXRUNTIME_DIR=/workspace/onnxruntime-linux-x64-1.5.1
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$ONNXRUNTIME_DIR/lib:$TENSORRT_DIR/lib
RUN conda install pytorch==1.6.0 torchvision==0.7.0 cudatoolkit=10.2 -c pytorch
RUN tar -xvzf TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz \
&& pip install $TENSORRT_DIR/python/tensorrt-7.2.3.4-cp38-none-linux_x86_64.whl \
&& pip install $TENSORRT_DIR/onnx_graphsurgeon/onnx_graphsurgeon-0.2.6-py2.py3-none-any.whl \
&& pip install $TENSORRT_DIR/graphsurgeon/graphsurgeon-0.4.5-py2.py3-none-any.whl \
&& wget https://github.com/microsoft/onnxruntime/releases/download/v1.5.1/onnxruntime-linux-x64-1.5.1.tgz \
&& tar -zxvf onnxruntime-linux-x64-1.5.1.tgz \
&& rm -rf TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz onnxruntime-linux-x64-1.5.1.tgz \
&& pip --no-cache-dir install onnxruntime==1.5.1 \
&& git clone https://github.com/open-mmlab/mmcv/ /workspace/mmcv \
&& cd mmcv \
&& MMCV_WITH_OPS=1 MMCV_WITH_TRT=1 MMCV_WITH_ORT=1 pip install -e . \
&& pip --no-cache-dir install onnx-simplifier==0.3.6 \
&& pip --no-cache-dir install onnxruntime==1.6.0 onnx==1.9.0 pycocotools terminaltables

#FROM ubuntu:${OS_VERSION} as official
ARG PYTORCH_VERSION
LABEL com.nvidia.volumes.needed="nvidia_driver"
RUN --mount=type=cache,id=apt-final,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
libjpeg-dev \
libpng-dev \
# MMDet Requirements
ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libxrender-dev \
&& rm -rf /var/lib/apt/lists/*


WORKDIR /workspace
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
ENV PYTORCH_VERSION ${PYTORCH_VERSION}
ENV TENSORRT_DIR=/workspace/TensorRT-7.2.3.4
ENV ONNXRUNTIME_DIR=/workspace/onnxruntime-linux-x64-1.5.1
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$ONNXRUNTIME_DIR/lib:$TENSORRT_DIR/lib
ENV PATH /opt/conda/bin:$PATH

ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
ENV FORCE_CUDA="1"

RUN git clone https://github.com/open-mmlab/mmdetection \
&& cd mmdetection \
&& export ONNX_BACKEND=MMCVTensorRT \
&& export PYTHONPATH=$PYTHONPATH:$(pwd) \
&& python tools/deployment/pytorch2onnx.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth --input-img demo/demo.jpg --test-img demo/demo.jpg --dynamic-export
# && python tools/deployment/onnx2tensorrt.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py tmp.onnx --trt-file tmp.trt --verify --min-shape 300 300 --max-shape 1600 1600
CMD /bin/bash
68 changes: 68 additions & 0 deletions docker/DockerfileOptimized
@@ -0,0 +1,68 @@
# This is an optimized Dockerfile, but it is not intended for direct
# development use when onnxruntime is involved.

ARG OS_VERSION=18.04
ARG PYTHON_VERSION=3.7.2
ARG PYTORCH_VERSION="1.8.1"
ARG CUDA_VERSION="11.1"
ARG CUDNN_VERSION="8"
# The TensorRT version is fixed by the downloaded .tar package

FROM pytorch/pytorch:${PYTORCH_VERSION}-cuda${CUDA_VERSION}-cudnn${CUDNN_VERSION}-devel as torch
ARG ONNXRUNTIME_VERSION=1.5.1
ARG ONNX_VERSION=1.9.0
ARG ONNX_SIMP_VERSION=0.3.6
RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
ccache \
cmake \
wget \
git \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /workspace
COPY TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz /workspace/TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz
ENV TENSORRT_DIR=/workspace/TensorRT-7.2.3.4
ENV ONNXRUNTIME_DIR=/workspace/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$ONNXRUNTIME_DIR/lib:$TENSORRT_DIR/lib
RUN tar -xvzf TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz \
&& pip install $TENSORRT_DIR/python/tensorrt-7.2.3.4-cp38-none-linux_x86_64.whl \
&& pip install $TENSORRT_DIR/onnx_graphsurgeon/onnx_graphsurgeon-0.2.6-py2.py3-none-any.whl \
&& pip install $TENSORRT_DIR/graphsurgeon/graphsurgeon-0.4.5-py2.py3-none-any.whl \
&& wget "https://github.com/microsoft/onnxruntime/releases/download/v${ONNXRUNTIME_VERSION}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz" \
&& tar -zxvf onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \
&& rm -rf TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}.tgz \
&& pip --no-cache-dir install onnxruntime==${ONNXRUNTIME_VERSION} \
&& git clone https://github.com/open-mmlab/mmcv/ /workspace/mmcv \
&& cd mmcv \
&& MMCV_WITH_OPS=1 MMCV_WITH_TRT=1 MMCV_WITH_ORT=1 pip install -e . \
&& pip --no-cache-dir install onnx-simplifier==${ONNX_SIMP_VERSION} onnx==${ONNX_VERSION} pycocotools terminaltables

FROM ubuntu:${OS_VERSION} as official
ARG PYTORCH_VERSION="1.8.1"
RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
libjpeg-dev \
libpng-dev \
ccache \
cmake \
gcc \
git \
# MMDet Requirements
ffmpeg libsm6 libxext6 ninja-build libglib2.0-0 libxrender-dev \
&& rm -rf /var/lib/apt/lists/*
ENV PATH /opt/conda/bin:$PATH
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
ENV PYTORCH_VERSION ${PYTORCH_VERSION}
WORKDIR /workspace

FROM official as final
COPY --from=torch /workspace /workspace
COPY --from=torch /opt/conda /opt/conda
RUN /opt/conda/bin/conda clean -ya
WORKDIR /workspace
CMD /bin/bash
13 changes: 13 additions & 0 deletions docker/README.md
@@ -0,0 +1,13 @@
## Use the container
Place the Dockerfile and the [TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz](https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.2.3/tars/TensorRT-7.2.3.4.Ubuntu-16.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz) package together in a folder named `docker`.
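With both files in place, the build context should look roughly like this (the tarball name must match the `COPY` line in the Dockerfile):
```
docker/
├── Dockerfile
├── DockerfileOptimized
└── TensorRT-7.2.3.4.Ubuntu-18.04.x86_64-gnu.cuda-11.1.cudnn8.1.tar.gz
```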
Run the following command to build the docker image:
```
sudo docker build docker/ -t mmdeploy
```
Then run the command below to start a container from the image:
```
sudo docker run --gpus all --shm-size=8g -it -p 8084:8084 mmdeploy
```

## Use the optimized container
An optimized multi-stage Dockerfile, `DockerfileOptimized`, is also provided and is intended for future use.
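A minimal build sketch, assuming the TensorRT tarball sits in the same `docker/` folder as above; the `RUN --mount=type=cache` instructions need BuildKit, and the `mmdeploy:optimized` tag is only an example name:
```
# Build from the optimized, multi-stage Dockerfile (BuildKit enables the RUN --mount cache)
sudo DOCKER_BUILDKIT=1 docker build docker/ -f docker/DockerfileOptimized -t mmdeploy:optimized
```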
