-FROM nvidia/cuda:11.4.2-cudnn8-devel-ubuntu20.04
+FROM ghcr.io/pinto0309/openvino2tensorflow:base.11.6.2-cudnn8-tf2.9.0rc0-trt8.4.0-openvino2021.4.582
 
 ENV DEBIAN_FRONTEND=noninteractive
-ARG OSVER=ubuntu2004
-ARG TENSORFLOWVER=2.8.0
-ARG CPVER=cp38
-ARG OPENVINOVER=2021.4.582
-ARG OPENVINOROOTDIR=/opt/intel/openvino_2021
-ARG TENSORRTVER=cuda11.4-trt8.2.3.0-ga-20220113
-ARG ONNXRUNTIMEVER=1.11.0
 ARG APPVER
 ARG WKDIR=/home/user
-
-# dash -> bash
-RUN echo "dash dash/sh boolean false" | debconf-set-selections \
-    && dpkg-reconfigure -p low dash
-COPY bashrc ${WKDIR}/.bashrc
 WORKDIR ${WKDIR}
 
-# Install dependencies (1)
-RUN apt-get update && apt-get install -y \
-        automake autoconf libpng-dev nano python3-pip \
-        curl zip unzip libtool swig zlib1g-dev pkg-config \
-        python3-mock libpython3-dev libpython3-all-dev \
-        g++ gcc make pciutils cpio gosu wget libmkldnn-dev \
-        libgtk-3-dev libxtst-dev sudo apt-transport-https \
-        build-essential gnupg git xz-utils vim libyaml-cpp-dev \
-        libva-drm2 libva-x11-2 vainfo libva-wayland2 libva-glx2 \
-        libva-dev libdrm-dev xorg xorg-dev protobuf-compiler \
-        openbox libx11-dev libgl1-mesa-glx libgl1-mesa-dev \
-        libtbb2 libtbb-dev libopenblas-dev libopenmpi-dev \
-        python-is-python3 software-properties-common \
-    && sed -i 's/# set linenumbers/set linenumbers/g' /etc/nanorc \
-    && apt clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# Install dependencies (2) - Ubuntu18.04: numpy==1.19.5, Ubuntu20.04: numpy>=1.20.x
-RUN pip3 install --upgrade pip \
-    && pip install --upgrade numpy==1.19.5 \
-    && pip install --upgrade tensorflowjs \
-    && pip install --upgrade coremltools \
-    && pip install --upgrade paddlepaddle \
-    && pip install --upgrade lap \
-    && pip install --upgrade pycocotools \
-    && pip install --upgrade scipy \
-    && pip install --upgrade paddle2onnx \
-    && pip install --upgrade onnx \
-    && pip install --upgrade onnxruntime \
-    && pip install --upgrade onnxruntime-extensions \
-    && pip install --upgrade onnx-simplifier \
-    && pip install --upgrade onnxconverter-common \
-    && pip install --upgrade onnxmltools \
-    && pip install --upgrade tf2onnx \
-    && pip install --upgrade onnx-tf \
-    && pip install --upgrade tensorflow-datasets \
-    && pip install --upgrade openvino2tensorflow \
+# Install dependencies
+RUN pip install --upgrade openvino2tensorflow \
     && pip install --upgrade tflite2tensorflow \
-    && pip install --upgrade gdown \
-    && pip install --upgrade PyYAML \
-    && pip install --upgrade matplotlib \
-    && pip install --upgrade tf_slim \
-    && pip install --upgrade pandas \
-    && pip install --upgrade numexpr \
-    && pip install --upgrade onnx2json \
-    && pip install --upgrade json2onnx \
-    && pip install --upgrade sne4onnx \
-    && pip install --upgrade snd4onnx \
-    && pip install --upgrade snc4onnx \
-    && pip install --upgrade scs4onnx \
-    && pip install --upgrade sog4onnx \
-    && pip install --upgrade sam4onnx \
-    && pip install --upgrade soc4onnx \
-    && pip install --upgrade scc4onnx \
-    && pip install --upgrade gluoncv \
-    && pip install --upgrade dgl \
-    && pip install --upgrade cmake \
-    && pip install --upgrade ninja \
-    && python3 -m pip install onnx_graphsurgeon \
-        --index-url https://pypi.ngc.nvidia.com \
-    && pip install torch==1.10.0+cu113 torchvision==0.11.1+cu113 torchaudio==0.10.0+cu113 \
-        -f https://download.pytorch.org/whl/cu113/torch_stable.html \
-    && pip install pycuda==2021.1 \
-    && pip install scikit-image \
-    && pip install performance-monitor \
-    && ldconfig \
-    && pip cache purge \
-    && apt clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# Install custom tflite_runtime, flatc, edgetpu-compiler
-RUN wget https://github.com/PINTO0309/tflite2tensorflow/releases/download/${APPVER}/tflite_runtime-${TENSORFLOWVER}-${CPVER}-none-linux_x86_64.whl \
-    && chmod +x tflite_runtime-${TENSORFLOWVER}-${CPVER}-none-linux_x86_64.whl \
-    && pip3 install --force-reinstall tflite_runtime-${TENSORFLOWVER}-${CPVER}-none-linux_x86_64.whl \
-    && rm tflite_runtime-${TENSORFLOWVER}-${CPVER}-none-linux_x86_64.whl \
-    && wget https://github.com/PINTO0309/tflite2tensorflow/releases/download/${APPVER}/flatc.tar.gz \
-    && tar -zxvf flatc.tar.gz \
-    && chmod +x flatc \
-    && rm flatc.tar.gz \
-    && wget https://github.com/PINTO0309/tflite2tensorflow/raw/main/schema/schema.fbs \
-    && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
-    && echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list \
-    && apt-get update \
-    && apt-get install edgetpu-compiler \
-    && pip cache purge \
-    && apt clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# Install OpenVINO
-RUN wget https://github.com/PINTO0309/tflite2tensorflow/releases/download/${APPVER}/l_openvino_toolkit_p_${OPENVINOVER}.tgz \
-    && tar xf l_openvino_toolkit_p_${OPENVINOVER}.tgz \
-    && rm l_openvino_toolkit_p_${OPENVINOVER}.tgz \
-    && l_openvino_toolkit_p_${OPENVINOVER}/install_openvino_dependencies.sh -y \
-    && sed -i 's/decline/accept/g' l_openvino_toolkit_p_${OPENVINOVER}/silent.cfg \
-    && l_openvino_toolkit_p_${OPENVINOVER}/install.sh --silent l_openvino_toolkit_p_${OPENVINOVER}/silent.cfg \
-    && source ${OPENVINOROOTDIR}/bin/setupvars.sh \
-    && ${INTEL_OPENVINO_DIR}/install_dependencies/install_openvino_dependencies.sh \
-    && sed -i 's/sudo -E //g' ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/install_prerequisites/install_prerequisites.sh \
-    && sed -i 's/tensorflow/#tensorflow/g' ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/requirements.txt \
-    && sed -i 's/numpy/#numpy/g' ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/requirements.txt \
-    && sed -i 's/onnx/#onnx/g' ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/requirements.txt \
-    && ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/install_prerequisites/install_prerequisites.sh \
-    && rm -rf l_openvino_toolkit_p_${OPENVINOVER} \
-    && echo "source ${OPENVINOROOTDIR}/bin/setupvars.sh" >> .bashrc \
-    && echo "${OPENVINOROOTDIR}/deployment_tools/ngraph/lib/" >> /etc/ld.so.conf \
-    && echo "${OPENVINOROOTDIR}/deployment_tools/inference_engine/lib/intel64/" >> /etc/ld.so.conf \
-    && pip cache purge \
-    && apt clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# Install TensorRT additional package
-RUN wget https://github.com/PINTO0309/tflite2tensorflow/releases/download/${APPVER}/nv-tensorrt-repo-${OSVER}-${TENSORRTVER}_1-1_amd64.deb \
-    && dpkg -i nv-tensorrt-repo-${OSVER}-${TENSORRTVER}_1-1_amd64.deb \
-    && apt-key add /var/nv-tensorrt-repo-${OSVER}-${TENSORRTVER}/7fa2af80.pub \
-    && apt-get update \
-    && apt-get install -y \
-        tensorrt uff-converter-tf graphsurgeon-tf \
-        python3-libnvinfer-dev onnx-graphsurgeon \
-    && rm nv-tensorrt-repo-${OSVER}-${TENSORRTVER}_1-1_amd64.deb \
-    && cd /usr/src/tensorrt/samples/trtexec \
-    && make \
-    && apt clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# onnxruntime-gpu (CUDA, TensorRT)
-# https://zenn.dev/pinto0309/scraps/1e9c0a00112cf9
-RUN pip uninstall -y onnxruntime onnxruntime-gpu \
-    && wget https://github.com/PINTO0309/tflite2tensorflow/releases/download/${APPVER}/onnxruntime_gpu-${ONNXRUNTIMEVER}-${CPVER}-none-linux_x86_64.whl \
-    && pip install onnxruntime_gpu-${ONNXRUNTIMEVER}-${CPVER}-none-linux_x86_64.whl \
-    && rm onnxruntime_gpu-${ONNXRUNTIMEVER}-${CPVER}-none-linux_x86_64.whl
-
-# Install Custom TensorFlow (MediaPipe Custom OP, FlexDelegate, XNNPACK enabled)
-# https://github.com/PINTO0309/Tensorflow-bin
-RUN wget https://github.com/PINTO0309/tflite2tensorflow/releases/download/${APPVER}/tensorflow-${TENSORFLOWVER}-${CPVER}-none-linux_x86_64.whl \
-    && pip3 install --force-reinstall tensorflow-${TENSORFLOWVER}-${CPVER}-none-linux_x86_64.whl \
-    && rm tensorflow-${TENSORFLOWVER}-${CPVER}-none-linux_x86_64.whl \
-    && pip cache purge \
-    && apt clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# Install onnx-tensorrt
-RUN git clone --recursive https://github.com/onnx/onnx-tensorrt \
-    && cd onnx-tensorrt \
-    && git checkout 1f041ce6d7b30e9bce0aacb2243309edffc8fb3c \
-    && mkdir build && cd build \
-    && cmake .. -DTENSORRT_ROOT=/usr/src/tensorrt \
-    && make -j$(nproc) && make install
-
-# Install torch2trt
-RUN git clone https://github.com/NVIDIA-AI-IOT/torch2trt \
-    && cd torch2trt \
-    && git checkout 0400b38123d01cc845364870bdf0a0044ea2b3b2 \
-    # https://github.com/NVIDIA-AI-IOT/torch2trt/issues/619
-    && wget https://github.com/NVIDIA-AI-IOT/torch2trt/commit/8b9fb46ddbe99c2ddf3f1ed148c97435cbeb8fd3.patch \
-    && git apply 8b9fb46ddbe99c2ddf3f1ed148c97435cbeb8fd3.patch \
-    && python3 setup.py install
-
-# Download the ultra-small sample data set for INT8 calibration
-RUN mkdir sample_npy \
-    && wget -O sample_npy/calibration_data_img_sample.npy https://github.com/PINTO0309/tflite2tensorflow/releases/download/${APPVER}/calibration_data_img_sample.npy
-
-# LLVM
-RUN wget https://apt.llvm.org/llvm.sh \
-    && chmod +x llvm.sh \
-    && ./llvm.sh 14 \
-    && apt clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# # NNPACK
-# RUN git clone --recursive https://github.com/Maratyszcza/NNPACK.git \
-#     && cd NNPACK \
-#     && git checkout c07e3a0400713d546e0dea2d5466dd22ea389c73 \
-#     && git submodule update --init --recursive \
-#     && sed -i "s|gnu99|gnu99 -fPIC|g" CMakeLists.txt \
-#     && sed -i "s|gnu++11|gnu++11 -fPIC|g" CMakeLists.txt \
-#     && mkdir build \
-#     && cd build \
-#     && cmake -G Ninja -D BUILD_SHARED_LIBS=ON .. \
-#     && ninja \
-#     && ninja install \
-#     && sh -c "echo '/usr/local/lib' > /etc/ld.so.conf.d/nnpack.conf" \
-#     && ldconfig
-
-# Clear caches
-RUN apt clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# Create a user who can sudo in the Docker container
-ENV USERNAME=user
-RUN echo "root:root" | chpasswd \
-    && adduser --disabled-password --gecos "" "${USERNAME}" \
-    && echo "${USERNAME}:${USERNAME}" | chpasswd \
-    && echo "%${USERNAME} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers.d/${USERNAME} \
-    && chmod 0440 /etc/sudoers.d/${USERNAME}
-USER ${USERNAME}
-RUN sudo chown ${USERNAME}:${USERNAME} ${WKDIR} \
-    && sudo chmod 777 ${WKDIR}/.bashrc
-
-# OpenCL settings - https://github.com/intel/compute-runtime/releases
-RUN cd ${OPENVINOROOTDIR}/install_dependencies/ \
-    && yes | sudo -E ./install_NEO_OCL_driver.sh \
-    && cd ${WKDIR} \
-    && wget https://github.com/intel/compute-runtime/releases/download/21.35.20826/intel-gmmlib_21.2.1_amd64.deb \
-    && wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.8517/intel-igc-core_1.0.8517_amd64.deb \
-    && wget https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.8517/intel-igc-opencl_1.0.8517_amd64.deb \
-    && wget https://github.com/intel/compute-runtime/releases/download/21.35.20826/intel-opencl_21.35.20826_amd64.deb \
-    && wget https://github.com/intel/compute-runtime/releases/download/21.35.20826/intel-ocloc_21.35.20826_amd64.deb \
-    && wget https://github.com/intel/compute-runtime/releases/download/21.35.20826/intel-level-zero-gpu_1.2.20826_amd64.deb \
-    && sudo dpkg -i *.deb \
-    && rm *.deb \
-    && sudo apt clean \
-    && sudo rm -rf /var/lib/apt/lists/*
-
-# TVM
-RUN git clone --recursive https://github.com/apache/tvm \
-    && cd tvm \
-    # 0.9.dev0
-    && git checkout faa2e6ad578f09b1248b5303b17c3fa24ebd0bd6 \
-    && git submodule init \
-    && git submodule update \
-    && mkdir build \
-    && cd build \
-    && cmake \
-        -DUSE_CUDA=ON \
-        -DUSE_MICRO=OFF \
-        -DUSE_MICRO_STANDALONE_RUNTIME=OFF \
-        -DUSE_CPP_RPC=ON \
-        -DUSE_GRAPH_EXECUTOR_CUDA_GRAPH=ON \
-        -DUSE_PIPELINE_EXECUTOR=ON \
-        -DUSE_RPC=ON \
-        -DUSE_LLVM=ON \
-        -DUSE_MKLDNN=ON \
-        -DUSE_OPENMP=ON \
-        -DUSE_NNPACK=OFF \
-        # -DUSE_TFLITE=/usr/local/lib/libtensorflow-lite.a \
-        # -DUSE_EDGETPU=OFF \
-        -DUSE_CUDNN=ON \
-        -DUSE_TENSORRT_CODEGEN=ON \
-        -DUSE_TENSORRT_RUNTIME=ON \
-        -DUSE_TF_TVMDSOOP=ON \
-        -DUSE_PT_TVMDSOOP=ON \
-        -DUSE_TARGET_ONNX=ON \
-        -DBUILD_STATIC_RUNTIME=ON \
-        -DSUMMARIZE=ON \
-        .. \
-        -G Ninja \
-    && ninja \
-    && sudo ninja install
-
-# TVM Python binding
-RUN cd ${HOME}/tvm/python \
-    && python setup.py install --user
-
-# Final processing of onnx-tensorrt install
-RUN echo 'GPU=$(python3 -c "import torch;print(torch.cuda.is_available())")' >> ${HOME}/.bashrc \
-    && echo 'if [ $GPU = "True" ]; then' >> ${HOME}/.bashrc \
-    && echo "export PATH=${PATH}:/usr/src/tensorrt/bin:/onnx-tensorrt/build" >> ${HOME}/.bashrc \
-    && echo "cd ${HOME}/onnx-tensorrt" >> ${HOME}/.bashrc \
-    && echo "sudo python setup.py install" >> ${HOME}/.bashrc \
-    && echo "fi" >> ${HOME}/.bashrc \
-    && echo "cd ${WKDIR}" >> ${HOME}/.bashrc \
-    && echo "cd ${HOME}/workdir" >> ${HOME}/.bashrc
+    && sudo ldconfig \
+    && sudo pip cache purge
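
This commit replaces the from-scratch toolchain build (apt packages, custom TensorFlow/tflite_runtime wheels, OpenVINO, TensorRT extras, onnxruntime-gpu, onnx-tensorrt, torch2trt, TVM, user setup) with the prebuilt ghcr.io/pinto0309/openvino2tensorflow base image, so the Dockerfile now only layers the two converter packages on top. A minimal local-build sketch under that reading; the image tag openvino2tensorflow:local is a placeholder, and the APPVER value is illustrative only (the Dockerfile still declares the build arg, but this diff does not show it being used):

# Hypothetical local build from the repository root; substitute a real release tag for v1.x.x if needed
$ docker build -t openvino2tensorflow:local --build-arg APPVER=v1.x.x .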
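
Since the base image is CUDA/cuDNN/TensorRT-enabled and WORKDIR is /home/user, a typical invocation would pass the NVIDIA GPUs through and mount a host directory with the models to convert. This is a hedged usage sketch, not something specified by the commit; the tag and the workdir mount path are assumptions:

# Hypothetical run: GPU passthrough plus a host model directory mounted into the container
$ docker run --gpus all -it --rm -v $(pwd):/home/user/workdir openvino2tensorflow:local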