From 67f6d76786d4f61df2fac2489f2e55f8c22a0419 Mon Sep 17 00:00:00 2001 From: Miguel Pineda <110496466+ma-pineda@users.noreply.github.com> Date: Mon, 12 Aug 2024 09:43:28 -0600 Subject: [PATCH] Adding classical-ml and data-analytics presets (#255) Signed-off-by: Tyler Titsworth Signed-off-by: tylertitsworth Co-authored-by: Tyler Titsworth Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: jafraustro --- .github/workflows/container-ci.yaml | 9 +- .github/workflows/integration-test.yaml | 2 +- preset/classical-ml/Dockerfile | 128 ++--- preset/classical-ml/docker-compose.yaml | 82 ++- preset/classical-ml/requirements.txt | 2 +- preset/classical-ml/tests/scikit/kmeans.py | 1 + .../classical-ml/tests/scikit/test_scikit.sh | 4 +- preset/data-analytics/Dockerfile | 115 ++-- preset/data-analytics/docker-compose.yaml | 66 +-- preset/data-analytics/requirements.txt | 2 +- preset/deep-learning/Dockerfile | 527 ++++++++---------- preset/deep-learning/docker-compose.yaml | 201 +++---- preset/deep-learning/requirements.txt | 15 +- preset/deep-learning/tests.yaml | 33 +- preset/inference-optimization/Dockerfile | 39 +- .../docker-compose.yaml | 200 +++---- .../inference-optimization/requirements.txt | 19 +- preset/inference-optimization/tests.yaml | 45 +- 18 files changed, 709 insertions(+), 781 deletions(-) mode change 100644 => 100755 preset/deep-learning/tests.yaml diff --git a/.github/workflows/container-ci.yaml b/.github/workflows/container-ci.yaml index 08669ffa..6dbf8532 100644 --- a/.github/workflows/container-ci.yaml +++ b/.github/workflows/container-ci.yaml @@ -63,7 +63,7 @@ jobs: setup-build: outputs: matrix: ${{ steps.build-matrix.outputs.matrix }} - runs-on: ubuntu-latest # ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }} steps: - name: Harden Runner uses: step-security/harden-runner@0d381219ddf674d61a7572ddd19d7941e271515c # v2.9.0 @@ -79,13 +79,14 @@ jobs: build-containers: needs: [setup-build] env: ${{ matrix }} - runs-on: ubuntu-latest # ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }} strategy: matrix: ${{ fromJson(needs.setup-build.outputs.matrix) }} fail-fast: false outputs: group: ${{ steps.build-group.outputs.container-group }} steps: + - uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1 - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 if: ${{ !inputs.no_build }} - uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 @@ -111,7 +112,7 @@ jobs: setup-scan: needs: [build-containers] if: ${{ github.event_name == 'pull_request' }} - runs-on: ubuntu-latest # ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }} outputs: matrix: ${{ steps.scan-matrix.outputs.matrix }} steps: @@ -164,7 +165,7 @@ jobs: #################################################################################################### setup-test: needs: [build-containers] - runs-on: ubuntu-latest # ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }} + runs-on: ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }} outputs: matrix: ${{ 
steps.test-matrix.outputs.matrix }} steps: diff --git a/.github/workflows/integration-test.yaml b/.github/workflows/integration-test.yaml index af6f4cc2..2a102efd 100644 --- a/.github/workflows/integration-test.yaml +++ b/.github/workflows/integration-test.yaml @@ -113,7 +113,7 @@ jobs: path: output.txt recreate: true status-check: - needs: [group-diff, pipeline-ci] + needs: [group-diff, pipeline-ci, merge-logs] runs-on: ${{ github.repository_owner == 'intel' && 'intel-ubuntu-latest' || 'ubuntu-latest' }} if: always() steps: diff --git a/preset/classical-ml/Dockerfile b/preset/classical-ml/Dockerfile index a9666e3a..bd6cebde 100644 --- a/preset/classical-ml/Dockerfile +++ b/preset/classical-ml/Dockerfile @@ -12,40 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. + ARG BASE_IMAGE="ubuntu" ARG BASE_TAG="22.04" -FROM ${BASE_IMAGE}:${BASE_TAG} as classical-ml-base +FROM ${BASE_IMAGE}:${BASE_TAG} as classical-ml ENV DEBIAN_FRONTEND=noninteractive -# See http://bugs.python.org/issue19846 - ENV LANG=C.UTF-8 SHELL ["/bin/bash", "-c"] RUN apt-get update -y && \ apt-get install -y --no-install-recommends --fix-missing \ - bzip2 \ - ca-certificates \ - diffutils \ - gcc \ - git \ - gzip \ - make \ - patch \ - rsync \ - unzip \ - wget \ - xz-utils && \ + bzip2 \ + ca-certificates \ + diffutils \ + gcc \ + git \ + gzip \ + make \ + patch \ + rsync \ + unzip \ + wget \ + xz-utils && \ rm -rf /var/lib/apt/lists/* -FROM classical-ml-base as classical-ml-python - -# Setting up non-root directories RUN useradd --uid 1000 -d /home/dev -s /bin/bash -m dev -# Set a password for the user (Optional) RUN echo 'dev:password' | chpasswd USER dev WORKDIR /home/dev @@ -56,68 +51,69 @@ ARG PYTHON_VERSION ARG IDP_VERSION ARG INTEL_CHANNEL -RUN wget --progress=dot:giga --no-check-certificate https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-${MINIFORGE_VERSION}.sh -O miniforge.sh && \ +RUN wget --progress=dot:giga --no-check-certificate https://github.com/conda-forge/miniforge/releases/latest/download/${MINIFORGE_VERSION}.sh -O miniforge.sh && \ chmod +x miniforge.sh && \ ./miniforge.sh -b -p "${CONDA_ROOT}" && \ rm ./miniforge.sh && \ - ln -s "${CONDA_ROOT}" "${CONDA_ROOT}/../miniforge3" && \ + ln -s "${CONDA_ROOT}" "${CONDA_ROOT}/../miniforge" && \ export PATH="${CONDA_ROOT}/bin/:${PATH}" && \ - conda update -y conda && \ - conda config --add channels conda-forge && \ - conda config --add channels https://software.repos.intel.com/python/conda/ && \ conda init --all && \ conda install -y \ - 'jupyterlab>=4.1.8' \ - 'notebook>=7.1.3' \ - 'jupyterhub>=4.1.5' \ - 'jupyter-server-proxy>=4.1.2' \ - 'mako>=1.2.2' \ - 'pyjwt>=2.4.0' \ - 'cryptography>=42.0.5' \ - 'nodejs>=20.12.2' \ - 'aiohttp>=3.9.4' \ + 'colorama==0.4.6' \ + 'conda==24.5.0' \ + 'jupyterhub==5.1.0' \ + 'jupyter-server-proxy==4.3.0' \ + 'mamba==1.5.8' \ + 'networkx==3.3' \ + 'notebook==7.2.1' \ + 'pip==24.0' \ + 'python==3.10.14' \ 'idna>=3.7' \ - 'oauthlib>=3.2.2' \ - && \ - jupyter labextension disable "@jupyterlab/apputils-extension:announcements" && \ - conda clean -y --all + 'requests>=2.32.0' \ + 'setuptools>=70.0.0' \ + 'tqdm>=4.66.3' \ + 'urllib3>=2.2.2' \ + 'nodejs==22.5.1' \ + && \ + jupyter labextension disable "@jupyterlab/apputils-extension:announcements" \ + && \ + conda clean -y --all \ + && \ + conda config --add channels ${INTEL_CHANNEL} ENV PATH ${CONDA_ROOT}/condabin:${CONDA_ROOT}/bin/:${PATH} +RUN conda config --set 
pip_interop_enabled True ARG IDP_VERSION +ARG DAAL4PY_VERSION ARG DPNP_VERSION ARG XGBOOST_VERSION ARG MODIN_VERSION ARG NUMPY_VERSION ARG SKLEARNEX_VERSION -# Conda packages -RUN conda create -yn classical-ml -c ${INTEL_CHANNEL} -c conda-forge \ - dpnp=${DPNP_VERSION} \ - numpy=${NUMPY_VERSION} \ - python=${PYTHON_VERSION} \ - scikit-learn-intelex==${SKLEARNEX_VERSION} \ - xgboost=${XGBOOST_VERSION} \ - modin-ray=${MODIN_VERSION} \ - 'python-dotenv>=1.0.1' \ - 'tqdm>=4.66.2' \ - 'matplotlib-base>=3.4.3' \ - 'threadpoolctl>=3.3.0' \ - 'ipython>=8.18.1' \ - 'ipykernel>=6.29.3' \ - 'kernda>=0.3.0' \ - 'protobuf>=4.24' \ - 'pillow>=10.2.0' \ - 'tornado>=6.3.3' && \ +RUN conda create -yn classical-ml \ + "python=${PYTHON_VERSION}" \ + "daal4py=${DAAL4PY_VERSION}" \ + "dpnp=${DPNP_VERSION}" \ + 'ipykernel==6.29.5' \ + 'kernda==0.3.0' \ + 'matplotlib-base==3.8.4' \ + "modin-ray=${MODIN_VERSION}" \ + 'python-dotenv==1.0.1' \ + "scikit-learn-intelex=${SKLEARNEX_VERSION}" \ + 'tqdm==4.66.4' \ + "xgboost=${XGBOOST_VERSION}" \ + 'idna>=3.7' \ + 'requests>=2.32.0' \ + 'setuptools>=70.0.0' \ + 'tqdm>=4.66.3' \ + 'urllib3>=2.2.2' \ + && \ conda clean -y --all - - -# PyPI packages RUN conda run -n classical-ml python -m pip install --no-deps --no-cache-dir \ - 'dataset-librarian==1.0.4' \ - 'cloud-data-connector==1.0.3' - + 'dataset-librarian==1.0.4' ENV PYTHONSTARTUP=~/.patch_sklearn.py COPY base/.patch_sklearn.py ~/.patch_sklearn.py @@ -125,8 +121,6 @@ COPY base/.patch_sklearn.py ~/.patch_sklearn.py ENV PYTHONSTARTUP=/home/dev/.patch_sklearn.py COPY base/.patch_sklearn.py /home/dev/.patch_sklearn.py -FROM classical-ml-python as classical-ml-jupyter - EXPOSE 8888 RUN mkdir -p ~/jupyter/ && chmod -R a+rwx ~/jupyter/ && \ @@ -136,10 +130,10 @@ WORKDIR /home/dev COPY --chown=dev notebooks /home/dev/jupyter COPY --chown=dev tests /home/dev/sample-tests -RUN "${CONDA_ROOT}/envs/classical-ml/bin/python" -m ipykernel install --user --name classical-ml --display-name "Classical ML" && \ - "${CONDA_ROOT}/envs/classical-ml/bin/kernda" -o -y "$HOME/.local/share/jupyter/kernels/$(echo classical-ml | sed -e 's/\(.*\)/\L\1/')/kernel.json" && \ - "${CONDA_ROOT}/envs/classical-ml/bin/python" -m ipykernel.kernelspec --user && \ - conda clean -y --all +RUN KERNEL_DIR="${CONDA_ROOT}/share/jupyter/kernels/classical-ml" && \ + conda run -n classical-ml python -m ipykernel install --prefix "$CONDA_ROOT" --name classical-ml --display-name "Classical ML" && \ + conda run -n classical-ml kernda -o -y "$KERNEL_DIR/kernel.json" && \ + conda run -n base jupyter kernelspec list CMD ["bash", "-c", "source activate classical-ml && jupyter lab --notebook-dir=~/jupyter --port 8888 --ip 0.0.0.0 --no-browser --allow-root"] diff --git a/preset/classical-ml/docker-compose.yaml b/preset/classical-ml/docker-compose.yaml index a6e06fbd..c2dc9c1a 100644 --- a/preset/classical-ml/docker-compose.yaml +++ b/preset/classical-ml/docker-compose.yaml @@ -15,6 +15,7 @@ # -*- coding: utf-8 -*- # + version: '3' services: classical-ml: @@ -22,28 +23,30 @@ services: args: BASE_IMAGE: ${BASE_IMAGE:-ubuntu} BASE_TAG: ${BASE_TAG:-22.04} - DPNP_VERSION: ${NUMBA_DPEX_VERSION:-0.14.0} - IDP_VERSION: ${IDP_VERSION:-2024.1.0} + DAAL4PY_VERSION: ${DAAL4PY_VERSION:-2024.5.0} + DPNP_VERSION: ${DPNP_VERSION:-0.15.0} + IDP_VERSION: ${IDP_VERSION:-2024.2} INTEL_CHANNEL: ${INTEL_CHANNEL:-https://software.repos.intel.com/python/conda/} - MINIFORGE_VERSION: ${MINIFORGE_VERSION:-Linux-x86_64} - MODIN_VERSION: ${MODIN_VERSION:-0.26.1} - MPI_VERSION: 
${MPI_VERSION:-2021.12.0} - NUMBA_DPEX_VERSION: ${NUMBA_DPEX_VERSION:-0.22.1} + MINIFORGE_VERSION: ${MINIFORGE_VERSION:-Miniforge3-Linux-x86_64} + MODIN_VERSION: ${MODIN_VERSION:-0.30.0} + MPI_VERSION: ${MPI_VERSION:-2021.13} + NUMBA_DPEX_VERSION: ${NUMBA_DPEX_VERSION:-0.23.0} NUMPY_VERSION: ${NUMPY_VERSION:-1.26.4} - PYTHON_VERSION: ${PYTHON_VERSION:-3.10} - SKLEARNEX_VERSION: ${SKLEARNEX_VERSION:-2024.2.0} + PYTHON_VERSION: ${PYTHON_VERSION:-3.9} + SKLEARNEX_VERSION: ${SKLEARNEX_VERSION:-2024.5.0} XGBOOST_VERSION: ${XGBOOST_VERSION:-2.0.3} http_proxy: ${http_proxy} https_proxy: ${https_proxy} no_proxy: '' context: . + target: classical-ml labels: docs: classical_ml org.opencontainers.image.title: "Intel® AI Tools Selector Preset Containers - Classical ML" org.opencontainers.base.name: "ubuntu:22.04" org.opencontainers.image.name: "intel/classical-ml" - org.opencontainers.image.version: 2024.1.0-py${PYTHON_VERSION:-3.10} - dependency.python: ${PYTHON_VERSION:-3.10} + org.opencontainers.image.version: 2024.2.0-py${PYTHON_VERSION:-3.9} + dependency.python: ${PYTHON_VERSION:-3.9} dependency.python.pip: requirements.txt dependency.apt.bzip2: true dependency.apt.ca-certificates: true @@ -57,39 +60,26 @@ services: dependency.apt.unzip: true dependency.apt.wget: true dependency.apt.xz-utils: true - dependency.conda.jupyterlab: '>=4.1.8' - dependency.conda.notebook: '>=7.1.3' - dependency.conda.jupyterhub: '>=4.1.5' - dependency.conda.jupyter-server-proxy: '>=4.1.2' - dependency.conda.mako: '>=1.2.2' - dependency.conda.pyjwt: '>=2.4.0' - dependency.conda.cryptography: '>=42.0.5' - dependency.conda.nodejs: '>=20.12.2' - dependency.conda.aiohttp: '>=3.9.4' - dependency.conda.idna: '>=3.7' - dependency.conda.oauthlib: '>=3.2.2' - dependency.conda.dpnp: '>=0.14.0' - dependency.conda.numpy: '>=1.26.4' - dependency.conda.python: "=${PYTHON_VERSION:-3.10}" - dependency.conda.scikit-learn-intelex: '>=2024.2.0' - dependency.conda.xgboost: '>=2.0.3' - dependency.conda.modin-ray: '>=0.26.1' - dependency.conda.python-dotenv: '>=1.0.1' - dependency.conda.tqdm: '>=4.66.2' - dependency.conda.matplotlib-base: '>=3.4.3' - dependency.conda.dataset_librarian: '>=1.0.4' - dependency.conda.threadpoolctl: '>=3.3.0' - dependency.conda.ipython: '>=8.18.1' - dependency.conda.ipykernel: '>=6.29.3' - dependency.conda.kernda: '>=0.3.0' - dependency.conda.protobuf: '>=4.24' - dependency.conda.pillow: '>=10.2.0' - dependency.conda.tornado: '>=6.3.3' - target: classical-ml-jupyter - command: | - bash -c "conda run -n classical-ml python -c 'import sklearn; import xgboost; print(\"SciKit:\", sklearn.__version__, \" XGBoost:\",xgboost.__version__)' && \ - conda run -n classical-ml python -c 'import modin.pandas as pd, modin.config as cfg; cfg.Engine.put(\"Ray\"); df = pd.DataFrame([1]);print(df+1)'" - image: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-classical-ml-2024.1.0-py${PYTHON_VERSION:-3.10} + dependency.conda.colorama: '==0.4.6' + dependency.conda.conda: '==24.5.0' + dependency.conda.daal4py: '=2024.5.0' + dependency.conda.dpnp: '=0.15.0' + dependency.conda.ipykernel: '==6.29.5' + dependency.conda.jupyterhub: '==5.1.0' + dependency.conda.jupyter-server-proxy: '==4.3.0' + dependency.conda.kernda: '==0.3.0' + dependency.conda.mamba: '==1.5.8' + dependency.conda.matplotlib-base: '==3.8.4' + dependency.conda.modin-ray: '=0.30.0' + dependency.conda.networkx: '==3.3' + dependency.conda.notebook: '==7.2.1' + dependency.conda.pip: '==24.0' + dependency.conda.python: '==3.10.14' + dependency.conda.python-dotenv: '==1.0.1' + 
dependency.conda.scikit-learn-intelex: '=2024.5.0' + dependency.conda.tqdm: '==4.66.4' + dependency.conda.xgboost: '=2.0.3' + image: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-classical-ml-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} environment: http_proxy: ${http_proxy} https_proxy: ${https_proxy} @@ -97,3 +87,9 @@ services: shm_size: 12GB volumes: - /dev/dri/by-path:/dev/dri/by-path + command: > + bash -c " conda run -n classical-ml python -c 'import sklearn;import xgboost;print(\"SciKit:\", + sklearn.__version__, \" XGBoost:\", xgboost.__version__)' && + + conda run -n classical-ml python -c 'import modin.pandas as pd;import modin.config + as cfg;cfg.Engine.put(\"Ray\");df = pd.DataFrame([1]);print(df+1)' " diff --git a/preset/classical-ml/requirements.txt b/preset/classical-ml/requirements.txt index d231202d..8fe3dfff 100644 --- a/preset/classical-ml/requirements.txt +++ b/preset/classical-ml/requirements.txt @@ -1 +1 @@ -cloud-data-connector==1.0.3 +dataset-librarian==1.0.4 diff --git a/preset/classical-ml/tests/scikit/kmeans.py b/preset/classical-ml/tests/scikit/kmeans.py index 9120b7d0..c78acba7 100644 --- a/preset/classical-ml/tests/scikit/kmeans.py +++ b/preset/classical-ml/tests/scikit/kmeans.py @@ -62,6 +62,7 @@ data, labels = load_digits(return_X_y=True) (n_samples, n_features), n_digits = data.shape, np.unique(labels).size +data = np.array(data, dtype=np.float64) print(f"# digits: {n_digits}; # samples: {n_samples}; # features {n_features}") diff --git a/preset/classical-ml/tests/scikit/test_scikit.sh b/preset/classical-ml/tests/scikit/test_scikit.sh index a6b2f24e..9d16e938 100755 --- a/preset/classical-ml/tests/scikit/test_scikit.sh +++ b/preset/classical-ml/tests/scikit/test_scikit.sh @@ -14,8 +14,8 @@ # limitations under the License. set -xe + SCRIPT_DIR=$(dirname "$0") python "${SCRIPT_DIR}/kmeans.py" - -python "${SCRIPT_DIR}/kmeans.py" true +python "${SCRIPT_DIR}/kmeans.py" true # Enable intel opt diff --git a/preset/data-analytics/Dockerfile b/preset/data-analytics/Dockerfile index 37954c83..ffb56ceb 100644 --- a/preset/data-analytics/Dockerfile +++ b/preset/data-analytics/Dockerfile @@ -12,107 +12,100 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ ARG BASE_IMAGE="ubuntu" ARG BASE_TAG="22.04" -FROM ${BASE_IMAGE}:${BASE_TAG} as data-analytics-base +FROM ${BASE_IMAGE}:${BASE_TAG} as data-analytics ENV DEBIAN_FRONTEND=noninteractive -# See http://bugs.python.org/issue19846 - ENV LANG=C.UTF-8 SHELL ["/bin/bash", "-c"] RUN apt-get update -y && \ apt-get install -y --no-install-recommends --fix-missing \ - bzip2 \ - ca-certificates \ - diffutils \ - gcc \ - git \ - gzip \ - make \ - patch \ - rsync \ - unzip \ - wget \ - xz-utils && \ + bzip2 \ + ca-certificates \ + diffutils \ + gcc \ + git \ + gzip \ + make \ + patch \ + rsync \ + unzip \ + wget \ + xz-utils && \ rm -rf /var/lib/apt/lists/* -FROM data-analytics-base as data-analytics-python - -# Setting up non-root directories RUN useradd --uid 1000 -d /home/dev -s /bin/bash -m dev -# Set a password for the user (Optional) RUN echo 'dev:password' | chpasswd USER dev WORKDIR /home/dev ENV CONDA_ROOT=/home/dev/conda - ARG MINIFORGE_VERSION ARG PYTHON_VERSION ARG IDP_VERSION ARG INTEL_CHANNEL -RUN wget --progress=dot:giga --no-check-certificate "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-${MINIFORGE_VERSION}.sh" -O miniforge.sh && \ +RUN wget --progress=dot:giga --no-check-certificate "https://github.com/conda-forge/miniforge/releases/latest/download/${MINIFORGE_VERSION}.sh" -O miniforge.sh && \ chmod +x miniforge.sh && \ ./miniforge.sh -b -p "${CONDA_ROOT}" && \ rm ./miniforge.sh && \ - ln -s "${CONDA_ROOT}" "${CONDA_ROOT}/../miniforge3" && \ + ln -s "${CONDA_ROOT}" "${CONDA_ROOT}/../miniforge" && \ export PATH="${CONDA_ROOT}/bin/:${PATH}" && \ - conda update -y conda && \ - conda config --add channels conda-forge && \ - conda config --add channels https://software.repos.intel.com/python/conda/ && \ conda init --all && \ conda install -y \ - 'jupyterlab>=4.1.8' \ - 'notebook>=7.1.3' \ - 'jupyterhub>=4.1.5' \ - 'jupyter-server-proxy>=4.1.2' \ - 'mako>=1.2.2' \ - 'pyjwt>=2.4.0' \ - 'cryptography>=42.0.5' \ - 'nodejs>=20.12.2' \ + 'colorama==0.4.6' \ + 'conda==24.5.0' \ + 'jupyterhub==5.1.0' \ + 'jupyter-server-proxy==4.3.0' \ + 'mamba==1.5.8' \ + 'networkx==3.3' \ + 'notebook==7.2.1' \ + 'python==3.10.14' \ 'idna>=3.7' \ - 'tqdm>=4.66.2' \ - && \ - jupyter labextension disable "@jupyterlab/apputils-extension:announcements" && \ - conda clean -y --all + 'requests>=2.32.0' \ + 'setuptools>=70.0.0' \ + 'tqdm>=4.66.3' \ + 'urllib3>=2.2.2' \ + 'nodejs==22.5.1' \ + && \ + jupyter labextension disable "@jupyterlab/apputils-extension:announcements" \ + && \ + conda clean -y --all \ + && \ + conda config --add channels ${INTEL_CHANNEL} ENV PATH ${CONDA_ROOT}/condabin:${CONDA_ROOT}/bin/:${PATH} +RUN conda config --set pip_interop_enabled True ARG IDP_VERSION ARG DPNP_VERSION ARG MODIN_VERSION ARG NUMPY_VERSION -# data-analytics Env - conda packages -RUN conda create -yn data-analytics -c "${INTEL_CHANNEL}" -c conda-forge \ - dpnp="${DPNP_VERSION}" \ - numpy="${NUMPY_VERSION}" \ - python="${PYTHON_VERSION}" \ - modin-ray="${MODIN_VERSION}" \ - 'python-dotenv>=1.0.1' \ - 'tqdm>=4.66.2' \ - 'matplotlib-base>=3.4.3' \ - 'threadpoolctl>=3.3.0' \ - 'ipython>=8.18.1' \ - 'ipykernel>=6.29.3' \ - 'kernda>=0.3.0' \ - 'protobuf>=4.24.4' \ - 'pillow>=10.2.0' \ +RUN conda create -yn data-analytics \ + "python=${PYTHON_VERSION}" \ + "dpnp=${DPNP_VERSION}" \ + 'ipykernel==6.29.5' \ + 'kernda==0.3.0' \ + 'matplotlib-base==3.8.4' \ + "modin-ray=${MODIN_VERSION}" \ + 'python-dotenv==1.0.1' \ 'idna>=3.7' \ - 'tornado>=6.3.3' && \ + 'requests>=2.32.0' \ + 
'setuptools>=70.0.0' \ + 'tqdm>=4.66.3' \ + 'urllib3>=2.2.2' \ + && \ conda clean -y --all RUN conda run -n data-analytics python -m pip install --no-deps --no-cache-dir \ - 'dataset-librarian==1.0.4' \ - 'cloud-data-connector==1.0.3' - -FROM data-analytics-python as data-analytics-jupyter + 'dataset-librarian==1.0.4' EXPOSE 8888 @@ -122,10 +115,10 @@ RUN mkdir -p ~/jupyter/ && chmod -R a+rwx ~/jupyter/ && \ COPY --chown=dev notebooks /home/dev/jupyter COPY --chown=dev tests /home/dev/sample-tests -RUN "${CONDA_ROOT}/envs/data-analytics/bin/python" -m ipykernel install --user --name data-analytics --display-name "Data Analytics" && \ - "${CONDA_ROOT}/envs/data-analytics/bin/kernda" -o -y "$HOME/.local/share/jupyter/kernels/$(echo data-analytics | sed -e 's/\(.*\)/\L\1/')/kernel.json" && \ - "${CONDA_ROOT}/envs/data-analytics/bin/python" -m ipykernel.kernelspec --user && \ - conda clean -y --all +RUN KERNEL_DIR="${CONDA_ROOT}/share/jupyter/kernels/data-analytics" && \ + conda run -n data-analytics python -m ipykernel install --prefix "$CONDA_ROOT" --name data-analytics --display-name "Data Analytics" && \ + conda run -n data-analytics kernda -o -y "$KERNEL_DIR/kernel.json" && \ + conda run -n base jupyter kernelspec list CMD ["bash", "-c", "source activate data-analytics && jupyter lab --notebook-dir=~/jupyter --port 8888 --ip 0.0.0.0 --no-browser --allow-root"] diff --git a/preset/data-analytics/docker-compose.yaml b/preset/data-analytics/docker-compose.yaml index 99b37f6d..9c00331e 100644 --- a/preset/data-analytics/docker-compose.yaml +++ b/preset/data-analytics/docker-compose.yaml @@ -15,6 +15,7 @@ # -*- coding: utf-8 -*- # + version: '3' services: data-analytics: @@ -22,26 +23,26 @@ services: args: BASE_IMAGE: ${BASE_IMAGE:-ubuntu} BASE_TAG: ${BASE_TAG:-22.04} - DPNP_VERSION: ${NUMBA_DPEX_VERSION:-0.14.0} - IDP_VERSION: ${IDP_VERSION:-2024.1.0} + DPNP_VERSION: ${DPNP_VERSION:-0.15.0} + IDP_VERSION: ${IDP_VERSION:-2024.2} INTEL_CHANNEL: ${INTEL_CHANNEL:-https://software.repos.intel.com/python/conda/} - MINIFORGE_VERSION: ${MINIFORGE_VERSION:-Linux-x86_64} - MODIN_VERSION: ${MODIN_VERSION:-0.26.1} - MPI_VERSION: ${MPI_VERSION:-2021.12.0} - NUMBA_DPEX_VERSION: ${NUMBA_DPEX_VERSION:-0.22.1} + MINIFORGE_VERSION: ${MINIFORGE_VERSION:-Miniforge3-Linux-x86_64} + MODIN_VERSION: ${MODIN_VERSION:-0.30.0} NUMPY_VERSION: ${NUMPY_VERSION:-1.26.4} - PYTHON_VERSION: ${PYTHON_VERSION:-3.10} + PYTHON_VERSION: ${PYTHON_VERSION:-3.9} + XGBOOST_VERSION: ${XGBOOST_VERSION:-2.0.3} http_proxy: ${http_proxy} https_proxy: ${https_proxy} no_proxy: '' context: . 
+ target: data-analytics labels: docs: data_analytics org.opencontainers.image.title: "Intel® AI Tools Selector Preset Containers - Data Analytics" org.opencontainers.base.name: "ubuntu:22.04" org.opencontainers.image.name: "intel/data-analytics" - org.opencontainers.image.version: 2024.1.0-py${PYTHON_VERSION:-3.10} - dependency.python: ${PYTHON_VERSION:-3.10} + org.opencontainers.image.version: 2024.2.0-py${PYTHON_VERSION:-3.9} + dependency.python: ${PYTHON_VERSION:-3.9} dependency.python.pip: requirements.txt dependency.apt.bzip2: true dependency.apt.ca-certificates: true @@ -55,34 +56,21 @@ services: dependency.apt.unzip: true dependency.apt.wget: true dependency.apt.xz-utils: true - dependency.conda.jupyterlab: '>=4.1.8' - dependency.conda.notebook: '>=7.1.3' - dependency.conda.jupyterhub: '>=4.1.5' - dependency.conda.jupyter-server-proxy: '>=4.1.2' - dependency.conda.mako: '>=1.2.2' - dependency.conda.pyjwt: '>=2.4.0' - dependency.conda.cryptography: '>=42.0.5' - dependency.conda.nodejs: '>=20.12.2' - dependency.conda.idna: '>=3.7' - dependency.conda.tqdm: '>=4.66.2' - dependency.conda.dpnp: '>=0.14.0' - dependency.conda.numpy: '>=1.26.4' - dependency.conda.python: "=${PYTHON_VERSION:-3.10}" - dependency.conda.modin-ray: '>=0.26.1' - dependency.conda.python-dotenv: '>=1.0.1' - dependency.conda.matplotlib-base: '>=3.4.3' - dependency.conda.dataset_librarian: '>=1.0.4' - dependency.conda.threadpoolctl: '>=3.3.0' - dependency.conda.ipython: '>=8.18.1' - dependency.conda.ipykernel: '>=6.29.3' - dependency.conda.kernda: '>=0.3.0' - dependency.conda.protobuf: '>=4.24.4' - dependency.conda.pillow: '>=10.2.0' - dependency.conda.tornado: '>=6.3.3' - target: data-analytics-jupyter - command: > - bash -c "conda run -n data-analytics python -c 'import modin.pandas as pd, modin.config as cfg; cfg.Engine.put(\"Ray\"); df = pd.DataFrame([1]);print(df+1)'" - image: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-data-analytics-2024.1.0-py${PYTHON_VERSION:-3.10} + dependency.conda.colorama: '==0.4.6' + dependency.conda.conda: '==24.5.0' + dependency.conda.dpnp: '=0.15.0' + dependency.conda.ipykernel: '==6.29.5' + dependency.conda.jupyterhub: '==5.1.0' + dependency.conda.jupyter-server-proxy: '==4.3.0' + dependency.conda.kernda: '==0.3.0' + dependency.conda.mamba: '==1.5.8' + dependency.conda.matplotlib-base: '==3.8.4' + dependency.conda.modin-ray: '=0.30.0' + dependency.conda.networkx: '==3.3' + dependency.conda.notebook: '==7.2.1' + dependency.conda.python: '==3.10.14' + dependency.conda.python-dotenv: '==1.0.1' + image: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-data-analytics-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} environment: http_proxy: ${http_proxy} https_proxy: ${https_proxy} @@ -90,3 +78,7 @@ services: shm_size: 12GB volumes: - /dev/dri/by-path:/dev/dri/by-path + command: > + bash -c " conda run -n data-analytics python -c 'import modin.pandas as pd;import + modin.config as cfg;cfg.Engine.put(\"Ray\");df = pd.DataFrame([1]);print(df+1)' + " diff --git a/preset/data-analytics/requirements.txt b/preset/data-analytics/requirements.txt index d231202d..8fe3dfff 100644 --- a/preset/data-analytics/requirements.txt +++ b/preset/data-analytics/requirements.txt @@ -1 +1 @@ -cloud-data-connector==1.0.3 +dataset-librarian==1.0.4 diff --git a/preset/deep-learning/Dockerfile b/preset/deep-learning/Dockerfile index 05721e11..213606b8 100644 --- a/preset/deep-learning/Dockerfile +++ b/preset/deep-learning/Dockerfile @@ -12,158 +12,148 @@ # See the License for the specific language governing 
permissions and # limitations under the License. + ARG BASE_IMAGE=ubuntu ARG BASE_TAG=22.04 -FROM ${BASE_IMAGE}:${BASE_TAG} AS dgpu-base +FROM ${BASE_IMAGE}:${BASE_TAG} AS deep-learning-base -ENV DEBIAN_FRONTEND=noninteractive +SHELL ["/bin/bash", "-c"] -# See http://bugs.python.org/issue19846 +ENV DEBIAN_FRONTEND=noninteractive ENV LANG C.UTF-8 ARG PYTHON_VERSION EXPOSE 8080 -ENV LANG=C.UTF-8 - -SHELL ["/bin/bash", "-c"] - RUN apt-get update -y && \ apt-get install -y --no-install-recommends --fix-missing \ - apt-utils \ - build-essential \ - bzip2 \ - ca-certificates \ - clinfo \ - cmake \ - diffutils \ - g++ \ - gcc \ - git \ - gnupg2 \ - gpg-agent \ - gzip \ - make \ - numactl \ - patch \ - rsync \ - unzip \ - wget \ - sudo \ - xz-utils && \ + apt-utils \ + build-essential \ + bzip2 \ + ca-certificates \ + clinfo \ + cmake \ + diffutils \ + g++ \ + gcc \ + git \ + gnupg2 \ + gpg-agent \ + gzip \ + make \ + numactl \ + patch \ + rsync \ + unzip \ + wget \ + sudo \ + xz-utils \ + && \ rm -rf /var/lib/apt/lists/* -# GPU Drivers setup ARG DEVICE ARG ICD_VER ARG LEVEL_ZERO_GPU_VER ARG LEVEL_ZERO_VER ARG LEVEL_ZERO_DEV_VER - -# Public Drivers link RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \ - gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg -RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" | \ + gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg && \ + echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" | \ tee /etc/apt/sources.list.d/intel-gpu-jammy.list RUN apt-get update && \ apt-get install -y --no-install-recommends --fix-missing \ - intel-opencl-icd="${ICD_VER}" \ - intel-level-zero-gpu="${LEVEL_ZERO_GPU_VER}" \ - level-zero="${LEVEL_ZERO_VER}" + intel-level-zero-gpu="${LEVEL_ZERO_GPU_VER}" \ + intel-opencl-icd="${ICD_VER}" \ + level-zero="${LEVEL_ZERO_VER}" RUN apt-get update && \ apt-get install -y --no-install-recommends --fix-missing \ - intel-media-va-driver-non-free \ - libmfx1 \ - libmfxgen1 \ - libvpl2 \ - libegl-mesa0 \ - libegl1-mesa \ - libegl1-mesa-dev \ - libgbm1 \ - libgl1-mesa-dev \ - libgl1-mesa-dri \ - libglapi-mesa \ - libgles2-mesa-dev \ - libglx-mesa0 \ - libigdgmm12 \ - libxatracker2 \ - mesa-va-drivers \ - mesa-vdpau-drivers \ - mesa-vulkan-drivers \ - va-driver-all \ - vainfo \ - hwinfo \ - clinfo + clinfo \ + hwinfo \ + intel-media-va-driver-non-free \ + libegl-mesa0 \ + libegl1-mesa \ + libegl1-mesa-dev \ + libgbm1 \ + libgl1-mesa-dev \ + libgl1-mesa-dri \ + libglapi-mesa \ + libgles2-mesa-dev \ + libglx-mesa0 \ + libigdgmm12 \ + libmfx1 \ + libmfxgen1 \ + libvpl2 \ + mesa-va-drivers \ + mesa-vdpau-drivers \ + mesa-vulkan-drivers \ + va-driver-all \ + vainfo RUN apt-get install -y --no-install-recommends --fix-missing \ - libigc-dev \ - intel-igc-cm \ - libigdfcl-dev \ - libigfxcmrt-dev \ - level-zero-dev="${LEVEL_ZERO_DEV_VER}" && \ - rm -rf /var/lib/apt/lists/* - -RUN rm /etc/apt/sources.list.d/*list + intel-igc-cm \ + libigc-dev \ + libigdfcl-dev \ + libigfxcmrt-dev \ + level-zero-dev="${LEVEL_ZERO_DEV_VER}" \ + && \ + rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/*list -FROM dgpu-base as deep-learning-python - -# Setting up non-root directories RUN useradd --uid 1000 -d /home/dev -s /bin/bash dev RUN groupadd -g 109 render -## Add the user to the required groups RUN usermod -aG root,sudo,video,render dev -# Set a password 
for the user (Optional) RUN echo 'dev:password' | chpasswd USER dev WORKDIR /home/dev ENV CONDA_ROOT=/home/dev/conda - -# Miniforge Python Installation ARG MINIFORGE_VERSION ARG PYTHON_VERSION ARG IDP_VERSION ARG INTEL_CHANNEL -RUN wget --progress=dot:giga --no-check-certificate "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-${MINIFORGE_VERSION}.sh" -O miniforge.sh && \ +RUN wget --progress=dot:giga --no-check-certificate "https://github.com/conda-forge/miniforge/releases/latest/download/${MINIFORGE_VERSION}.sh" -O miniforge.sh && \ chmod +x miniforge.sh && \ ./miniforge.sh -b -p "${CONDA_ROOT}" && \ rm ./miniforge.sh && \ - ln -s "${CONDA_ROOT}" "${CONDA_ROOT}/../miniforge3" && \ + ln -s "${CONDA_ROOT}" "${CONDA_ROOT}/../miniforge" && \ export PATH="${CONDA_ROOT}/bin/:${PATH}" && \ - conda update -y conda && \ - conda config --add channels conda-forge && \ - conda config --add channels https://software.repos.intel.com/python/conda/ && \ conda init --all && \ - conda install -c conda-forge \ - 'jupyterlab>=4.1.8' \ - 'notebook>=7.1.3' \ - 'jupyterhub>=4.1.5' \ - 'jupyter-server-proxy>=4.1.2' \ - 'mako>=1.2.2' \ - 'pyjwt>=2.4.0' \ - 'cryptography>=42.0.5' \ - 'nodejs>=20.12.2' \ + conda install -y \ + 'colorama==0.4.6' \ + 'conda==24.5.0' \ + 'jupyter-server-proxy==4.3.0' \ + 'jupyterhub==5.1.0' \ + 'ld_impl_linux-64==2.40' \ + 'mamba==1.5.8' \ + 'networkx==3.3' \ + 'notebook==7.2.1' \ + 'python==3.10.14' \ + 'aiohttp>=3.9.4' \ + 'certifi>=2024.07.04' \ 'idna>=3.7' \ - 'tqdm>=4.66.2' \ - && \ - jupyter labextension disable "@jupyterlab/apputils-extension:announcements" && \ - conda clean -y --all + 'jinja2>=3.1.4' \ + 'requests>=2.32.0' \ + 'setuptools>=70.0.0' \ + 'tqdm>=4.66.3' \ + 'urllib3>=2.2.2' \ + 'zipp>=3.19.1' \ + 'nodejs==22.5.1' \ + && \ + jupyter labextension disable "@jupyterlab/apputils-extension:announcements" \ + && \ + conda clean -y --all \ + && \ + conda config --add channels ${INTEL_CHANNEL} ENV PATH ${CONDA_ROOT}/condabin:${CONDA_ROOT}/bin/:${PATH} +RUN conda config --set pip_interop_enabled True -RUN conda config --set pip_interop_enabled True # Improve interoperabilty among conda an pypi packages - - -# PyTorch Installation -ARG IDP_VERSION ARG DPNP_VERSION ARG NUMPY_VERSION - ARG TORCH_CPU_VERSION ARG ONECCL_CPU_VERSION ARG IPEX_CPU_VERSION @@ -171,120 +161,94 @@ ARG TORCHVISION_CPU_VERSION ARG TORCHAUDIO_CPU_VERSION ARG DEEPSPEED_VERSION -# PyTorch CPU Env - conda packages -RUN conda create -yn pytorch-cpu -c "${INTEL_CHANNEL}" -c conda-forge \ - dpnp="${DPNP_VERSION}" \ - numpy="${NUMPY_VERSION}" \ - python="${PYTHON_VERSION}" \ - intel-openmp="${IDP_VERSION}" \ - pytorch="${TORCH_CPU_VERSION}" \ - oneccl_bind_pt="${ONECCL_CPU_VERSION}" \ - intel-extension-for-pytorch="${IPEX_CPU_VERSION}" \ - torchvision="${TORCHVISION_CPU_VERSION}" \ - torchaudio="${TORCHAUDIO_CPU_VERSION}" \ - 'matplotlib-base>=3.4.3' \ - 'ipykernel>=6.29.3' \ - 'kernda>=0.3.0' \ - 'pillow>=10.2.0' \ - 'aiohttp>=3.9.0' \ - 'tornado>=6.3.3' \ - 'jinja2>=3.1.3' \ +RUN conda create -yn 'pytorch-cpu' \ + -c huggingface \ + "python=${PYTHON_VERSION}" \ + 'accelerate==0.32.1' \ + "dpnp=${DPNP_VERSION}" \ + "intel-extension-for-pytorch=${IPEX_CPU_VERSION}" \ + 'ipykernel==6.29.5' \ + 'kernda==0.3.0' \ + 'matplotlib-base>=3.8.4' \ + "oneccl_bind_pt=${ONECCL_CPU_VERSION}" \ + "pytorch=${TORCH_CPU_VERSION}" \ + 'tensorboardx==2.6.2.2' \ + "torchaudio=${TORCHAUDIO_CPU_VERSION}" \ + "torchvision=${TORCHVISION_CPU_VERSION}" \ + 'python-dotenv==1.0.1' \ + 
'aiohttp>=3.9.4' \ + 'certifi>=2024.07.04' \ 'idna>=3.7' \ - 'onnx>=1.15.0' \ + 'jinja2>=3.1.4' \ + 'onnx>=1.16.0' \ + 'requests>=2.32.0' \ + 'tqdm>=4.66.3' \ + 'urllib3>=2.2.2' \ + 'zipp>=3.19.1' \ && \ conda clean -y --all -# PyPI packages -RUN conda run -n pytorch-cpu pip install --no-deps --no-cache-dir --ignore-installed \ - 'ninja>=1.11.1.1' \ - 'python-dotenv>=1.0.1' \ - 'tqdm>=4.66.2' \ - 'cloud-data-connector==1.0.3' \ - 'dataset-librarian==1.0.4' && \ - conda run -n pytorch-cpu pip install --no-cache-dir --ignore-installed \ - 'transformers>=4.40.2' \ - 'datasets>=2.19.1' \ - 'evaluate>=0.4.2' && \ - conda run -n pytorch-cpu pip install --no-cache-dir -U 'accelerate>=0.30.0' && \ - conda run -n pytorch-cpu pip install --no-cache-dir "git+https://github.com/huggingface/optimum-intel.git" && \ +RUN conda run -n 'pytorch-cpu' pip install --no-deps --no-cache-dir \ + 'dataset-librarian==1.0.4' \ + && \ + conda run -n 'pytorch-cpu' pip install --no-cache-dir \ + 'evaluate==0.4.2' \ + "git+https://github.com/huggingface/optimum-intel.git" \ + && \ conda clean -y --all - - -RUN conda run -n pytorch-cpu conda install 'protobuf=4.24' -c conda-forge --override --force-reinstall -y - -# PyTorch Installation ARG IDP_VERSION ARG DPNP_VERSION ARG NUMPY_VERSION - -ARG TORCH_GPU_VERSION -ARG ONECCL_GPU_VERSION -ARG IPEX_GPU_VERSION -ARG TORCHVISION_GPU_VERSION -ARG TORCHAUDIO_GPU_VERSION +ARG TORCH_XPU_VERSION +ARG ONECCL_XPU_VERSION +ARG IPEX_XPU_VERSION +ARG TORCHVISION_XPU_VERSION +ARG TORCHAUDIO_XPU_VERSION ARG IDEX_VERSION -ARG DEEPSPEED_VERSION -# PyTorch GPU Env - conda packages -RUN conda create -yn pytorch-gpu -c "${INTEL_CHANNEL}" -c conda-forge \ - dpnp="${DPNP_VERSION}" \ - dpcpp-cpp-rt="${IDP_VERSION}" \ - mkl-dpcpp="${IDP_VERSION}" \ - dpcpp_impl_linux-64="${IDP_VERSION}" \ - numpy="${NUMPY_VERSION}" \ - python="${PYTHON_VERSION}" \ - intel-openmp="${IDP_VERSION}" \ - python="${PYTHON_VERSION}" \ - pytorch="${TORCH_GPU_VERSION}" \ - oneccl_bind_pt="${ONECCL_GPU_VERSION}" \ - intel-extension-for-pytorch="${IPEX_GPU_VERSION}" \ - torchvision="${TORCHVISION_GPU_VERSION}" \ - torchaudio="${TORCHAUDIO_GPU_VERSION}" \ - 'tensorboardx>=2.6.2.2' \ - 'matplotlib-base>=3.4.3' \ - 'pandas>=2.2.2' \ - 'ipython>=8.18.1' \ - 'ipykernel>=6.29.3' \ - 'kernda>=0.3.0' \ - 'pillow>=10.2.0' \ - 'aiohttp>=3.9.0' \ - 'tornado>=6.3.3' \ - 'jinja2>=3.1.3' \ +RUN conda create -yn 'pytorch-gpu' \ + -c huggingface \ + "python=${PYTHON_VERSION}" \ + 'accelerate==0.32.1' \ + "dpnp=${DPNP_VERSION}" \ + "intel-extension-for-pytorch=${IPEX_XPU_VERSION}" \ + 'ipykernel==6.29.5' \ + 'kernda==0.3.0' \ + 'matplotlib-base>=3.8.4' \ + "oneccl_bind_pt=${ONECCL_XPU_VERSION}" \ + "pytorch=${TORCH_XPU_VERSION}" \ + 'tensorboardx==2.6.2.2' \ + "torchaudio=${TORCHAUDIO_XPU_VERSION}" \ + "torchvision=${TORCHVISION_XPU_VERSION}" \ + 'python-dotenv==1.0.1' \ + 'aiohttp>=3.9.4' \ + 'certifi>=2024.07.04' \ 'idna>=3.7' \ - 'onnx>=1.15.0' \ - 'packaging=23.2' \ - 'setuptools=69.1.0' \ + 'jinja2>=3.1.4' \ + 'onnx>=1.16.0' \ + 'requests>=2.32.0' \ + 'tqdm>=4.66.3' \ + 'urllib3>=2.2.2' \ + 'zipp>=3.19.1' \ && \ conda clean -y --all -# PyPI packages -RUN conda run -n pytorch-gpu pip install --no-deps --no-cache-dir --ignore-installed \ - 'ninja>=1.11.1.1' \ - 'python-dotenv>=1.0.1' \ - 'tqdm>=4.66.2' \ - 'cloud-data-connector==1.0.3' \ - 'dataset-librarian==1.0.4' && \ - conda run -n pytorch-gpu pip install --no-cache-dir --ignore-installed \ - 'transformers>=4.40.2' \ - 'datasets>=2.19.1' \ - 'evaluate>=0.4.2' && \ - 
conda run -n pytorch-gpu pip install --no-cache-dir -U 'accelerate>=0.30.0' && \ - conda run -n pytorch-gpu pip install --no-cache-dir "git+https://github.com/huggingface/optimum-intel.git" && \ +RUN conda run -n 'pytorch-gpu' pip install --no-deps --no-cache-dir \ + 'dataset-librarian==1.0.4' \ + && \ + conda run -n 'pytorch-gpu' pip install --no-cache-dir \ + 'evaluate==0.4.2' \ + "git+https://github.com/huggingface/optimum-intel.git" \ + && \ conda clean -y --all - - -RUN conda run -n pytorch-gpu conda install 'protobuf=4.24' -c conda-forge --override --force-reinstall -y - - -# TensorFlow Installation ARG IDP_VERSION ARG DPNP_VERSION ARG NUMPY_VERSION - ARG TF_VERSION -ARG ITEX_VERSION +ARG ITEX_CPU_VERSION +ARG ITEX_XPU_VERSION ARG HOROVOD_VERSION ARG IMPI_VERSION @@ -293,149 +257,122 @@ ARG HOROVOD_WITHOUT_MXNET=1 ARG HOROVOD_WITHOUT_GLOO=1 ARG HOROVOD_WITH_MPI=1 - -# Tensorflow Env - conda packages -RUN conda create -yn tensorflow-cpu -c "${INTEL_CHANNEL}" -c conda-forge \ - dpnp="${DPNP_VERSION}" \ - dpcpp-cpp-rt="${IDP_VERSION}" \ - mkl-dpcpp="${IDP_VERSION}" \ - numpy="${NUMPY_VERSION}" \ - python="${PYTHON_VERSION}" \ - intel-extension-for-tensorflow="${ITEX_VERSION}=*cpu*" \ - intel-optimization-for-horovod="${INTEL_HOROVOD}" \ - tensorflow="${TF_VERSION}" \ - impi-devel="${IMPI_VERSION}" \ - 'matplotlib-base>=3.4.3' \ - 'ipython>=8.18.1' \ - 'ipykernel>=6.29.3' \ - 'kernda>=0.3.0' \ - 'pillow>=10.2.0' \ - 'cryptography>=42.0.4' \ - 'werkzeug>=2.2.3' \ - 'aiohttp>=3.9.0' \ - 'tornado>=6.3.3' \ - 'pyjwt>=2.8.0' \ - 'oauthlib>=3.2.2' \ - 'idna>=3.7' \ +RUN conda create -yn 'tensorflow-cpu' \ + "python=${PYTHON_VERSION}" \ + "dpnp=${DPNP_VERSION}" \ + "intel-extension-for-tensorflow=${ITEX_CPU_VERSION}=*cpu*" \ + "intel-optimization-for-horovod=${HOROVOD_VERSION}" \ + 'ipykernel==6.29.5' \ + 'kernda==0.3.0' \ + 'matplotlib-base>=3.8.4' \ 'onnx>=1.14.1' \ + 'py-cpuinfo==9.0.0' \ + "tensorflow=${TF_VERSION}" \ + 'tensorflow-hub==0.16.1' \ + 'tqdm==4.66.4' \ + 'python-dotenv==1.0.1' \ + 'aiohttp>=3.9.4' \ + 'certifi>=2024.07.04' \ + 'idna>=3.7' \ + 'requests>=2.32.0' \ + 'urllib3>=2.2.2' \ + 'werkzeug>=3.0.3' \ + 'zipp>=3.19.1' \ && \ conda clean -y --all -# PyPI packages -RUN conda run -n tensorflow-cpu pip install --no-cache-dir --ignore-installed \ - 'py-cpuinfo>=9.0.0' \ - 'requests>=2.31.0' \ - 'cryptography>=42.0.7' -RUN conda run -n tensorflow-cpu pip install --no-deps --no-cache-dir --ignore-installed \ - 'tensorflow-hub>=0.16.1' \ - 'tqdm>=4.66.2' \ +RUN conda run -n 'tensorflow-cpu' pip install --no-deps --no-cache-dir \ 'dataset-librarian==1.0.4' \ - 'cloud-data-connector>=1.0.3' && \ + && \ conda clean -y --all -# Tensorflow Env - conda packages -RUN conda create -yn tensorflow-gpu -c "${INTEL_CHANNEL}" -c conda-forge \ - dpnp="${DPNP_VERSION}" \ - dpcpp-cpp-rt="${IDP_VERSION}" \ - mkl-dpcpp="${IDP_VERSION}" \ - numpy="${NUMPY_VERSION}" \ - python="${PYTHON_VERSION}" \ - intel-extension-for-tensorflow="${ITEX_VERSION}=*xpu*" \ - intel-optimization-for-horovod="${INTEL_HOROVOD}" \ - tensorflow="${TF_VERSION}" \ - impi-devel="${IMPI_VERSION}" \ - 'matplotlib-base>=3.4.3' \ - 'ipython>=8.18.1' \ - 'ipykernel>=6.29.3' \ - 'kernda>=0.3.0' \ - 'pillow>=10.2.0' \ - 'cryptography>=42.0.4' \ - 'werkzeug>=2.2.3' \ - 'aiohttp>=3.9.0' \ - 'tornado>=6.3.3' \ - 'pyjwt>=2.8.0' \ - 'oauthlib>=3.2.2' \ - 'idna>=3.7' \ +RUN conda create -yn 'tensorflow-gpu' \ + "python=${PYTHON_VERSION}" \ + "dpnp=${DPNP_VERSION}" \ + 
"intel-extension-for-tensorflow=${ITEX_XPU_VERSION}=*xpu*" \ + "intel-optimization-for-horovod=${HOROVOD_VERSION}" \ + 'ipykernel==6.29.5' \ + 'kernda==0.3.0' \ + 'matplotlib-base>=3.8.4' \ 'onnx>=1.14.1' \ - 'packaging=23.2' \ - 'setuptools=69.1.0' \ + 'py-cpuinfo==9.0.0' \ + "tensorflow=${TF_VERSION}" \ + 'tensorflow-hub==0.16.1' \ + 'tqdm==4.66.4' \ + 'python-dotenv==1.0.1' \ + 'aiohttp>=3.9.4' \ + 'certifi>=2024.07.04' \ + 'idna>=3.7' \ + 'requests>=2.32.0' \ + 'urllib3>=2.2.2' \ + 'zipp>=3.19.1' \ && \ conda clean -y --all -# PyPI packages -RUN conda run -n tensorflow-gpu pip install --no-cache-dir --ignore-installed \ - 'py-cpuinfo>=9.0.0' \ - 'requests>=2.31.0' \ - 'cryptography>=42.0.7' -RUN conda run -n tensorflow-gpu pip install --no-deps --no-cache-dir --ignore-installed \ - 'tensorflow-hub>=0.16.1' \ - 'tqdm>=4.66.2' \ +RUN conda run -n 'tensorflow-gpu' pip install --no-deps --no-cache-dir \ 'dataset-librarian==1.0.4' \ - 'cloud-data-connector==1.0.3' && \ + && \ conda clean -y --all -FROM deep-learning-python as deep-learning-jupyter - -ARG KERNEL_NAME_TF_CPU="Intel TensorFlow cpu" -ARG KERNEL_NAME_TF_GPU="Intel TensorFlow gpu" -ARG KERNEL_NAME_PT_CPU="Intel PyTorch cpu" -ARG KERNEL_NAME_PT_GPU="Intel PyTorch gpu" - EXPOSE 8888 RUN mkdir -p ~/jupyter/ && chmod -R a+rwx ~/jupyter/ && \ mkdir ~/.local && chmod a+rwx ~/.local RUN \ - "${CONDA_ROOT}/envs/pytorch-cpu/bin/python" -m ipykernel install --user --name pytorch-cpu --display-name "${KERNEL_NAME_PT_CPU}" && \ - "${CONDA_ROOT}/envs/pytorch-cpu/bin/kernda" -o -y "$HOME/.local/share/jupyter/kernels/$(echo pytorch-cpu | sed -e 's/\(.*\)/\L\1/')/kernel.json" && \ - "${CONDA_ROOT}/envs/pytorch-gpu/bin/python" -m ipykernel install --user --name pytorch-gpu --display-name "${KERNEL_NAME_PT_GPU}" && \ - "${CONDA_ROOT}/envs/pytorch-gpu/bin/kernda" -o -y "$HOME/.local/share/jupyter/kernels/$(echo pytorch-gpu | sed -e 's/\(.*\)/\L\1/')/kernel.json" && \ - "${CONDA_ROOT}/envs/tensorflow-cpu/bin/python" -m ipykernel install --user --name tensorflow-cpu --display-name "${KERNEL_NAME_TF_CPU}" && \ - "${CONDA_ROOT}/envs/tensorflow-cpu/bin/kernda" -o -y "$HOME/.local/share/jupyter/kernels/$(echo tensorflow-cpu | sed -e 's/\(.*\)/\L\1/')/kernel.json" && \ - "${CONDA_ROOT}/envs/tensorflow-gpu/bin/python" -m ipykernel install --user --name tensorflow-gpu --display-name "${KERNEL_NAME_TF_GPU}" && \ - "${CONDA_ROOT}/envs/tensorflow-gpu/bin/kernda" -o -y "$HOME/.local/share/jupyter/kernels/$(echo tensorflow-gpu | sed -e 's/\(.*\)/\L\1/')/kernel.json" && \ - python -m ipykernel.kernelspec --user + ENVS_LIST=('pytorch-cpu' 'pytorch-gpu' 'tensorflow-cpu' 'tensorflow-gpu') && \ + KERNEL_NAMES=('Intel PyTorch CPU' 'Intel PyTorch GPU' 'Intel TensorFlow CPU' 'Intel TensorFlow GPU') && \ + for i in "${!ENVS_LIST[@]}"; do \ + CONDA_ENV="${ENVS_LIST[i]}" && \ + KERNEL_NAME="${KERNEL_NAMES[i]}" && \ + KERNEL_DIR="${CONDA_ROOT}/share/jupyter/kernels/$CONDA_ENV" && \ + conda run -n "$CONDA_ENV" python -m ipykernel install --prefix "$CONDA_ROOT" --name "$CONDA_ENV" --display-name "$KERNEL_NAME" && \ + conda run -n "$CONDA_ENV" kernda -o -y "$KERNEL_DIR/kernel.json" && \ + conda run -n base jupyter kernelspec list \ + ; done CMD ["bash", "-c", "jupyter lab --notebook-dir=~/jupyter --port 8888 --ip 0.0.0.0 --no-browser --allow-root"] -FROM deep-learning-jupyter as distributed-deep-learning +FROM deep-learning-base as deep-learning +SHELL ["/bin/bash", "-c"] USER root -# Install OpenMPI -RUN apt-get update -y && apt-get install -y --no-install-recommends 
--fix-missing \ - libopenmpi-dev \ - openmpi-bin \ - openmpi-common +RUN apt-get update -y && \ + apt-get install -y --no-install-recommends --fix-missing \ + libopenmpi-dev \ + openmpi-bin \ + openmpi-common ENV OMPI_ALLOW_RUN_AS_ROOT=1 ENV OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 ENV OMPI_MCA_tl_tcp_if_exclude="lo,docker0" -# Install OpenSSH RUN apt-get install -y --no-install-recommends --fix-missing \ - openssh-client \ - openssh-server && \ - rm /etc/ssh/ssh_host_*_key \ - /etc/ssh/ssh_host_*_key.pub && \ - rm -rf /var/lib/apt/lists/* - -RUN mkdir -p /var/run/sshd && \ + openssh-client \ + openssh-server \ + && \ + rm -rf \ + /etc/ssh/ssh_host_*_key \ + /etc/ssh/ssh_host_*_key.pub \ + /var/lib/apt/lists/* \ + && \ + mkdir -p /var/run/sshd \ + && \ echo 'LoginGraceTime 0' >> /etc/ssh/sshd_config -# https://github.com/openucx/ucx/issues/4742#issuecomment-584059909 ENV UCX_TLS=ud,sm,self USER dev -RUN conda install -n pytorch-cpu -c "${INTEL_CHANNEL}" -c conda-forge \ - deepspeed="${DEEPSPEED_VERSION}" \ - 'tensorboardx>=2.6.2.2' - -RUN conda install -n pytorch-gpu -c "${INTEL_CHANNEL}" -c conda-forge \ - deepspeed="${DEEPSPEED_VERSION}" \ - 'tensorboardx>=2.6.2.2' +RUN ENVS_LIST=('pytorch-cpu' 'pytorch-gpu') && \ + for i in "${!ENVS_LIST[@]}"; do \ + CONDA_ENV="${ENVS_LIST[i]}" && \ + conda install -yn "$CONDA_ENV" \ + "deepspeed=${DEEPSPEED_VERSION}" \ + 'tensorboardx==2.6.2.2' \ + ; done && \ + conda clean -y --all COPY --chown=dev notebooks /home/dev/jupyter COPY --chown=dev tests /home/dev/sample-tests diff --git a/preset/deep-learning/docker-compose.yaml b/preset/deep-learning/docker-compose.yaml index 663e064c..023b6f82 100644 --- a/preset/deep-learning/docker-compose.yaml +++ b/preset/deep-learning/docker-compose.yaml @@ -15,6 +15,7 @@ # -*- coding: utf-8 -*- # + version: '3' services: dl-base: @@ -22,44 +23,42 @@ services: args: BASE_IMAGE: ${BASE_IMAGE:-ubuntu} BASE_TAG: ${BASE_TAG:-22.04} - DEEPSPEED_VERSION: ${DEEPSPEED_VERSION:-0.14.0} + DEEPSPEED_VERSION: ${DEEPSPEED_VERSION:-0.14.2} DEVICE: ${DEVICE:-flex} - DPNP_VERSION: ${NUMBA_DPEX_VERSION:-0.14.0} - HOROVOD_VERSION: ${HOROVOD_VERSION:-0.28.1.4} - ICD_VER: 23.43.27642.40-803~22.04 - IDP_VERSION: ${IDP_VERSION:-2024.1.0} - IMPI_VERSION: ${IMPI_VERSION:-2021.12} + DPNP_VERSION: ${DPNP_VERSION:-0.15.0} + HOROVOD_VERSION: ${HOROVOD_VERSION:-0.28.1.5} + ICD_VER: 23.43.27642.52-803~22.04 + IDP_VERSION: ${IDP_VERSION:-2024.2} + IMPI_VERSION: ${IMPI_VERSION:-2021.13} INTEL_CHANNEL: ${INTEL_CHANNEL:-https://software.repos.intel.com/python/conda/} - IPEX_CPU_VERSION: ${IPEX_CPU_VERSION:-2.2.0=*cpu*} - IPEX_GPU_VERSION: ${IPEX_GPU_VERSION:-2.1.20=*xpu*} - ITEX_VERSION: ${ITEX_VERSION:-2.15} + IPEX_CPU_VERSION: ${IPEX_CPU_VERSION:-2.3.100} + IPEX_XPU_VERSION: ${IPEX_XPU_VERSION:-2.1.40} + ITEX_CPU_VERSION: ${ITEX_CPU_VERSION:-2.15.0} + ITEX_XPU_VERSION: ${ITEX_XPU_VERSION:-2.15.0.1} LEVEL_ZERO_DEV_VER: 1.14.0-744~22.04 - LEVEL_ZERO_GPU_VER: 1.3.27642.40-803~22.04 + LEVEL_ZERO_GPU_VER: 1.3.27642.52-803~22.04 LEVEL_ZERO_VER: 1.14.0-744~22.04 - MINIFORGE_VERSION: ${MINIFORGE_VERSION:-Linux-x86_64} - MPI_VERSION: ${MPI_VERSION:-2021.12.0} - NUMBA_DPEX_VERSION: ${NUMBA_DPEX_VERSION:-0.22.1} + MINIFORGE_VERSION: ${MINIFORGE_VERSION:-Miniforge3-Linux-x86_64} + MPI_VERSION: ${MPI_VERSION:-2021.13} + NUMBA_DPEX_VERSION: ${NUMBA_DPEX_VERSION:-0.23.0} NUMPY_VERSION: ${NUMPY_VERSION:-1.26.4} - ONECCL_CPU_VERSION: ${ONECCL_CPU_VERSION:-2.2.0=*cpu*} - ONECCL_GPU_VERSION: ${ONECCL_GPU_VERSION:-2.1.200=*xpu*} - PYTHON_VERSION: 
${PYTHON_VERSION:-3.10} - TF_VERSION: ${TF_VERSION:-2.15} - TORCHAUDIO_CPU_VERSION: ${TORCHAUDIO_CPU_VERSION:-2.2.0=*cpu*} - TORCHAUDIO_GPU_VERSION: ${TORCHAUDIO_GPU_VERSION:-2.1.0=*xpu*} - TORCHVISION_CPU_VERSION: ${TORCHVISION_CPU_VERSION:-0.17=*cpu*} - TORCHVISION_GPU_VERSION: ${TORCHVISION_GPU_VERSION:-0.16.0=*xpu*} - TORCH_CPU_VERSION: ${TORCH_CPU_VERSION:-2.2.0=*cpu*} - TORCH_GPU_VERSION: ${TORCH_GPU_VERSION:-2.1.0=*xpu*} + ONECCL_CPU_VERSION: ${ONECCL_CPU_VERSION:-2.3.0} + ONECCL_XPU_VERSION: ${ONECCL_XPU_VERSION:-2.1.400} + PYTHON_VERSION: ${PYTHON_VERSION:-3.9} + TF_VERSION: ${TF_VERSION:-2.15.1} + TORCHAUDIO_CPU_VERSION: ${TORCHAUDIO_CPU_VERSION:-2.3.1} + TORCHAUDIO_XPU_VERSION: ${TORCHAUDIO_XPU_VERSION:-2.1.0} + TORCHVISION_CPU_VERSION: ${TORCHVISION_CPU_VERSION:-0.18.1} + TORCHVISION_XPU_VERSION: ${TORCHVISION_XPU_VERSION:-0.16.0} + TORCH_CPU_VERSION: ${TORCH_CPU_VERSION:-2.3.1} + TORCH_XPU_VERSION: ${TORCH_XPU_VERSION:-2.1.0} http_proxy: ${http_proxy} https_proxy: ${https_proxy} no_proxy: '' context: . labels: docs: false - target: deep-learning-jupyter - command: | - bash -c "conda run -n pytorch-cpu python -c 'import torch;print(torch.__version__);import intel_extension_for_pytorch as ipex;print(ipex.__version__);' && \ - conda run -n tensorflow-cpu python -c 'import tensorflow as tf; print(tf.__version__)'" + target: deep-learning-base environment: http_proxy: ${http_proxy} https_proxy: ${https_proxy} @@ -67,15 +66,24 @@ services: shm_size: 12GB volumes: - /dev/dri/by-path:/dev/dri/by-path + command: > + bash -c " conda run -n pytorch-cpu python -c 'import torch;print(torch.__version__);import + intel_extension_for_pytorch as ipex;print(ipex.__version__);' && + + conda run -n tensorflow-cpu python -c 'import tensorflow as tf;print(tf.__version__)' + " + + deep-learning: build: + target: deep-learning labels: docs: deep_learning org.opencontainers.image.title: "Intel® AI Tools Selector Preset Containers - Deep Learning" org.opencontainers.base.name: "ubuntu:22.04" org.opencontainers.image.name: "intel/deep-learning" - org.opencontainers.image.version: 2024.1.0-py${PYTHON_VERSION:-3.10} - dependency.python: ${PYTHON_VERSION:-3.10} + org.opencontainers.image.version: 2024.2.0-py${PYTHON_VERSION:-3.9} + dependency.python: ${PYTHON_VERSION:-3.9} dependency.python.pip: requirements.txt dependency.apt.apt-utils: true dependency.apt.build-essential: true @@ -92,11 +100,11 @@ services: dependency.apt.gzip: true dependency.apt.hwinfo: true dependency.apt.intel-igc-cm: true - dependency.apt.intel-level-zero-gpu: '=1.3.27642.40-803~22.04' + dependency.apt.intel-level-zero-gpu: true dependency.apt.intel-media-va-driver-non-free: true - dependency.apt.intel-opencl-icd: '=23.43.27642.40-803~22.04' - dependency.apt.level-zero: '=1.14.0-744~22.04' - dependency.apt.level-zero-dev: '=1.14.0-744~22.04' + dependency.apt.intel-opencl-icd: true + dependency.apt.level-zero: true + dependency.apt.level-zero-dev: true dependency.apt.libegl1-mesa: true dependency.apt.libegl1-mesa-dev: true dependency.apt.libegl-mesa0: true @@ -114,7 +122,6 @@ services: dependency.apt.libmfxgen1: true dependency.apt.libopenmpi-dev: true dependency.apt.libvpl2: true - dependency.apt.libxatracker2: true dependency.apt.make: true dependency.apt.mesa-va-drivers: true dependency.apt.mesa-vdpau-drivers: true @@ -132,69 +139,71 @@ services: dependency.apt.vainfo: true dependency.apt.wget: true dependency.apt.xz-utils: true - dependency.conda.jupyterlab: '>=4.1.8' - dependency.conda.aiohttp: '>=3.9.0' - 
dependency.conda.cryptography: '>=42.0.4' - dependency.conda.dataset_librarian: '>=1.0.4' - dependency.conda.deepspeed: '=0.14.0' - dependency.conda.dpcpp_impl_linux-64: '=2024.1.0' - dependency.conda.dpcpp-cpp-rt: '=2024.1.0' - dependency.conda.dpnp: '=0.14.0' - dependency.conda.idna: '>=3.7' - dependency.conda.impi-devel: '=2021.12' - dependency.conda.intel-extension-for-pytorch_cpu: '=2.2.0=*cpu*' - dependency.conda.intel-extension-for-pytorch_gpu: '=2.1.20=*xpu*' - dependency.conda.intel-extension-for-tensorflow_cpu: '=2.15=*cpu*' - dependency.conda.intel-extension-for-tensorflow_gpu: '=2.15=*xpu*' - dependency.conda.intel-openmp: '=2024.1.0' - dependency.conda.intel-optimization-for-horovod: '=0.28.1.4' - dependency.conda.ipykernel: '>=6.29.3' - dependency.conda.ipython: '>=8.18.1' - dependency.conda.jinja2: '>=3.1.3' - dependency.conda.jupyterhub: '>=4.1.5' - dependency.conda.jupyter-server-proxy: '>=4.1.2' - dependency.conda.kernda: '>=0.3.0' - dependency.conda.mako: '>=1.2.2' - dependency.conda.matplotlib-base: '>=3.4.3' - dependency.conda.mkl-dpcpp: '2024.1.0' - dependency.conda.nodejs: '>=20.12.2' - dependency.conda.notebook: '>=7.1.3' - dependency.conda.numpy: '=1.26.4' - dependency.conda.oauthlib: '>=3.2.2' - dependency.conda.oneccl_bind_pt_cpu: '=2.2.0=*cpu*' - dependency.conda.oneccl_bind_pt_gpu: '=2.1.200=*xpu*' + dependency.conda.accelerate: '==0.32.1' + dependency.conda.colorama: '==0.4.6' + dependency.conda.conda: '==24.5.0' + dependency.conda.dpnp: '=0.15.0' + dependency.conda.intel-extension-for-pytorch_cpu: '=2.3.100' + dependency.conda.intel-extension-for-pytorch_xpu: '=2.1.40' + dependency.conda.intel-extension-for-tensorflow_cpu: '=2.15.0=*cpu*' + dependency.conda.intel-extension-for-tensorflow_xpu: '=2.15.0.1=*xpu*' + dependency.conda.intel-optimization-for-horovod: '=0.28.1.5' + dependency.conda.ipykernel: '==6.29.5' + dependency.conda.jupyterhub: '==5.1.0' + dependency.conda.jupyter-server-proxy: '==4.3.0' + dependency.conda.kernda: '==0.3.0' + dependency.conda.ld_impl_linux-64: '==2.40' + dependency.conda.mamba: '==1.5.8' + dependency.conda.matplotlib-base: '>=3.8.4' + dependency.conda.mpi: '==1.0' + dependency.conda.mpich: '==4.2.2' + dependency.conda.networkx: '==3.3' + dependency.conda.notebook: '==7.2.1' + dependency.conda.oneccl_bind_pt_cpu: '=2.3.0' + dependency.conda.oneccl_bind_pt_xpu: '=2.1.400' dependency.conda.onnx: '>=1.14.1' - dependency.conda.packaging: '=23.2' - dependency.conda.pandas: '>=2.2.2' - dependency.conda.pillow: '>=10.2.0' - dependency.conda.protobuf: '=4.24' - dependency.conda.pyjwt: '>=2.4.0' - dependency.conda.python: "=${PYTHON_VERSION:-3.10}" - dependency.conda.pytorch_cpu: '=2.2.0=*cpu*' - dependency.conda.pytorch_gpu: '=2.1.0=*xpu*' - dependency.conda.setuptools: '=69.1.0' - dependency.conda.tensorboardx: '>=2.6.2.2' - dependency.conda.tensorflow: '=2.15' - dependency.conda.torchaudio_cpu: '=2.2.0=*cpu*' - dependency.conda.torchaudio_gpu: '=2.1.0=*xpu*' - dependency.conda.torchvision_cpu: '=0.17=*cpu*' - dependency.conda.torchvision_gpu: '=0.16.0=*xpu*' - dependency.conda.tornado: '>=6.3.3' - dependency.conda.tqdm: '>=4.66.2' - dependency.conda.werkzeug: '>=2.2.3' - target: distributed-deep-learning + dependency.conda.py-cpuinfo: '==9.0.0' + dependency.conda.python: '==3.10.14' + dependency.conda.pytorch_cpu: '=2.3.1' + dependency.conda.pytorch_xpu: '=2.1.0' + dependency.conda.tensorboardx: '==2.6.2.2' + dependency.conda.tensorflow: '=2.15.1' + dependency.conda.tensorflow-hub: '==0.16.1' + dependency.conda.torchaudio_cpu: 
'=2.3.1' + dependency.conda.torchaudio_xpu: '=2.1.0' + dependency.conda.torchvision_cpu: '=0.18.1' + dependency.conda.torchvision_xpu: '=0.16.0' + dependency.conda.tqdm: '==4.66.4' depends_on: - dl-base extends: dl-base - image: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-2024.1.0-py${PYTHON_VERSION:-3.10} - command: | - bash -c "conda run -n pytorch-cpu python -c 'import torch;print(torch.__version__);import intel_extension_for_pytorch as ipex;print(ipex.__version__);' && \ - conda run -n pytorch-cpu bash -c 'mpirun --version' && \ - conda run -n pytorch-cpu python -c 'import oneccl_bindings_for_pytorch as oneccl;print(\"\\nOneCCL:\", oneccl.__version__)' && \ - conda run -n pytorch-gpu python -c 'import torch;print(torch.device(\"xpu\"));import intel_extension_for_pytorch as ipex;print(ipex.xpu.is_available());print(ipex.xpu.has_onemkl())' && \ - conda run -n pytorch-gpu bash -c 'mpirun --version' && \ - conda run -n pytorch-gpu python -c 'import oneccl_bindings_for_pytorch as oneccl;print(\"\\nOneCCL:\", oneccl.__version__)' && \ - conda run -n tensorflow-cpu python -c 'import tensorflow;print(tensorflow.__version__);import intel_extension_for_tensorflow as itex;print(itex.__version__)' && \ - conda run -n tensorflow-gpu python -c 'from tensorflow.python.client import device_lib; print(device_lib.list_local_devices())' && \ - conda run -n tensorflow-gpu bash -c 'horovodrun --check-build && mpirun --version' && \ - conda run -n tensorflow-gpu python -c 'import horovod.tensorflow as hvd;hvd.init();import horovod.tensorflow'" + image: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + command: > + bash -c " conda run -n pytorch-cpu python -c 'import torch;print(torch.__version__);import + intel_extension_for_pytorch as ipex;print(ipex.__version__);' && + + conda run -n pytorch-cpu bash -c 'mpirun --version' && + + conda run -n pytorch-cpu python -c 'import oneccl_bindings_for_pytorch as oneccl;print(\"\\nOneCCL:\", + oneccl.__version__)' && + + conda run -n pytorch-gpu python -c 'import torch;print(torch.device(\"xpu\"));import + intel_extension_for_pytorch as ipex;print(ipex.xpu.is_available());print(ipex.xpu.has_onemkl())' + && + + conda run -n pytorch-gpu bash -c 'mpirun --version' && + + conda run -n pytorch-gpu python -c 'import oneccl_bindings_for_pytorch as oneccl;print(\"\\nOneCCL:\", + oneccl.__version__)' && + + conda run -n tensorflow-cpu python -c 'import tensorflow;print(tensorflow.__version__);import + intel_extension_for_tensorflow as itex;print(itex.__version__)' && + + conda run -n tensorflow-gpu python -c 'from tensorflow.python.client import + device_lib;print(device_lib.list_local_devices())' && + + conda run -n tensorflow-gpu bash -c 'horovodrun --check-build && mpirun --version' + && + + conda run -n tensorflow-gpu python -c 'import horovod.tensorflow as hvd;hvd.init();import + horovod.tensorflow' " diff --git a/preset/deep-learning/requirements.txt b/preset/deep-learning/requirements.txt index 4122126b..db93ef0d 100644 --- a/preset/deep-learning/requirements.txt +++ b/preset/deep-learning/requirements.txt @@ -1,14 +1,3 @@ -accelerate>=0.30.0 -cloud-data-connector>=1.0.3 -cryptography>=42.0.7 -dataset-librarian>=1.0.4 -datasets>=2.19.1 -evaluate>=0.4.2 +dataset-librarian==1.0.4 +evaluate==0.4.2 git+https://github.com/huggingface/optimum-intel.git -ninja>=1.11.1.1 -py-cpuinfo>=9.0.0 -python-dotenv>=1.0.1 -requests>=2.31.0 -tensorflow-hub>=0.16.1 -tqdm>=4.66.2 -transformers>=4.40.2 diff 
--git a/preset/deep-learning/tests.yaml b/preset/deep-learning/tests.yaml old mode 100644 new mode 100755 index 0b0cdcae..4d22975b --- a/preset/deep-learning/tests.yaml +++ b/preset/deep-learning/tests.yaml @@ -12,39 +12,50 @@ # See the License for the specific language governing permissions and # limitations under the License. +--- deep-learning-ipex-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n pytorch-cpu python -W ignore sample-tests/intel_extension_for_pytorch/test_ipex.py --device cpu --ipex - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} deep-learning-ipex-${PYTHON_VERSION:-3.9}-gpu: cmd: conda run -n pytorch-gpu python -W ignore sample-tests/intel_extension_for_pytorch/test_ipex.py --device xpu --ipex - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + device: ["/dev/dri"] + deep-learning-ipex-notebook-${PYTHON_VERSION:-3.9}-cpu: cmd: papermill --log-output jupyter/ipex/ResNet50_Inference.ipynb -k pytorch-cpu - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} notebook: True deep-learning-ipex-notebook-${PYTHON_VERSION:-3.9}-gpu: cmd: papermill --log-output jupyter/ipex/ResNet50_Inference.ipynb -k pytorch-gpu - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} notebook: True + device: ["/dev/dri"] + deep-learning-ipex-quantization-notebook-${PYTHON_VERSION:-3.9}-cpu: cmd: papermill --log-output jupyter/ipex-quantization/IntelPytorch_Quantization.ipynb -k pytorch-cpu - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} notebook: True + deep-learning-itex-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n tensorflow-cpu python -W ignore sample-tests/intel_extension_for_tensorflow/test_itex.py - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} deep-learning-itex-${PYTHON_VERSION:-3.9}-gpu: cmd: conda run -n tensorflow-gpu python -W ignore sample-tests/intel_extension_for_tensorflow/test_itex.py - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + device: ["/dev/dri"] + deep-learning-tensorflow-dataset-librarian-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n tensorflow-cpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco' - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} 
deep-learning-tensorflow-dataset-librarian-${PYTHON_VERSION:-3.9}-gpu: cmd: conda run -n tensorflow-gpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco' - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + device: ["/dev/dri"] + deep-learning-torch-dataset-librarian-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n pytorch-cpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco' - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} deep-learning-torch-dataset-librarian-${PYTHON_VERSION:-3.9}-gpu: cmd: conda run -n pytorch-gpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco' - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-deep-learning-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + device: ["/dev/dri"] diff --git a/preset/inference-optimization/Dockerfile b/preset/inference-optimization/Dockerfile index 6689b437..a38e8266 100644 --- a/preset/inference-optimization/Dockerfile +++ b/preset/inference-optimization/Dockerfile @@ -12,35 +12,34 @@ # See the License for the specific language governing permissions and # limitations under the License. + ARG COMPOSE_PROJECT_NAME FROM ${COMPOSE_PROJECT_NAME}-dl-base as inference-optimization -ENV SIGOPT_PROJECT=. +SHELL ["/bin/bash", "-c"] +ENV SIGOPT_PROJECT=. 
ARG NEURAL_COMPRESSOR_VERSION ARG INTEL_CHANNEL - -RUN conda install -yn pytorch-cpu -c "${INTEL_CHANNEL}" -c conda-forge \ - neural-compressor="${NEURAL_COMPRESSOR_VERSION}" - -RUN conda install -yn pytorch-gpu -c "${INTEL_CHANNEL}" -c conda-forge \ - neural-compressor="${NEURAL_COMPRESSOR_VERSION}" - -RUN conda install -yn tensorflow-cpu -c "${INTEL_CHANNEL}" -c conda-forge \ - neural-compressor="${NEURAL_COMPRESSOR_VERSION}" - -RUN conda install -yn tensorflow-gpu -c "${INTEL_CHANNEL}" -c conda-forge \ - neural-compressor="${NEURAL_COMPRESSOR_VERSION}" - -RUN conda run -n tensorflow-cpu python -m pip install --no-deps --no-cache-dir \ - 'tf2onnx>=1.16.1' \ - 'onnxruntime>=1.17.3' && \ +RUN ENVS_LIST=('pytorch-cpu' 'pytorch-gpu' 'tensorflow-cpu' 'tensorflow-gpu') && \ + for i in "${!ENVS_LIST[@]}"; do \ + CONDA_ENV="${ENVS_LIST[i]}" && \ + conda install -yn "$CONDA_ENV" \ + "neural-compressor=${NEURAL_COMPRESSOR_VERSION}" \ + 'scikit-learn>=1.5.0' \ + ; \ + done && \ conda clean -y --all -RUN conda run -n tensorflow-gpu python -m pip install --no-deps --no-cache-dir \ - 'tf2onnx>=1.16.1' \ - 'onnxruntime>=1.17.3' && \ +RUN ENVS_LIST=('tensorflow-cpu' 'tensorflow-gpu') && \ + for i in "${!ENVS_LIST[@]}"; do \ + CONDA_ENV="${ENVS_LIST[i]}" && \ + conda run -n "$CONDA_ENV" python -m pip install --no-deps --no-cache-dir \ + 'tf2onnx==1.16.1' \ + 'onnxruntime==1.18.1' \ + ; \ + done && \ conda clean -y --all COPY --chown=dev notebooks /home/dev/jupyter diff --git a/preset/inference-optimization/docker-compose.yaml b/preset/inference-optimization/docker-compose.yaml index ac8ebc07..cf543bff 100644 --- a/preset/inference-optimization/docker-compose.yaml +++ b/preset/inference-optimization/docker-compose.yaml @@ -15,6 +15,7 @@ # -*- coding: utf-8 -*- # + version: '3' services: dl-base: @@ -22,42 +23,42 @@ services: args: BASE_IMAGE: ${BASE_IMAGE:-ubuntu} BASE_TAG: ${BASE_TAG:-22.04} - DEEPSPEED_VERSION: ${DEEPSPEED_VERSION:-0.14.0} DEVICE: ${DEVICE:-flex} - DPNP_VERSION: ${NUMBA_DPEX_VERSION:-0.14.0} - HOROVOD_VERSION: ${HOROVOD_VERSION:-0.28.1.4} - ICD_VER: 23.43.27642.40-803~22.04 - IDP_VERSION: ${IDP_VERSION:-2024.1.0} - IMPI_VERSION: ${IMPI_VERSION:-2021.12} + DPNP_VERSION: ${DPNP_VERSION:-0.15.0} + HOROVOD_VERSION: ${HOROVOD_VERSION:-0.28.1.5} + ICD_VER: 23.43.27642.52-803~22.04 + IDP_VERSION: ${IDP_VERSION:-2024.2} + IMPI_VERSION: ${IMPI_VERSION:-2021.13} INTEL_CHANNEL: ${INTEL_CHANNEL:-https://software.repos.intel.com/python/conda/} - IPEX_CPU_VERSION: ${IPEX_CPU_VERSION:-2.2.0=*cpu*} - IPEX_GPU_VERSION: ${IPEX_GPU_VERSION:-2.1.20=*xpu*} - ITEX_VERSION: ${ITEX_VERSION:-2.15} + IPEX_CPU_VERSION: ${IPEX_CPU_VERSION:-2.3.100} + IPEX_XPU_VERSION: ${IPEX_XPU_VERSION:-2.1.40} + ITEX_CPU_VERSION: ${ITEX_CPU_VERSION:-2.15.0} + ITEX_XPU_VERSION: ${ITEX_XPU_VERSION:-2.15.0.1} LEVEL_ZERO_DEV_VER: 1.14.0-744~22.04 - LEVEL_ZERO_GPU_VER: 1.3.27642.40-803~22.04 + LEVEL_ZERO_GPU_VER: 1.3.27642.52-803~22.04 LEVEL_ZERO_VER: 1.14.0-744~22.04 - MINIFORGE_VERSION: ${MINIFORGE_VERSION:-Linux-x86_64} - MPI_VERSION: ${MPI_VERSION:-2021.12.0} - NEURAL_COMPRESSOR_VERSION: ${NEURAL_COMPRESSOR_VERSION:-2.4.1} - NUMBA_DPEX_VERSION: ${NUMBA_DPEX_VERSION:-0.22.1} + MINIFORGE_VERSION: ${MINIFORGE_VERSION:-Miniforge3-Linux-x86_64} + MPI_VERSION: ${MPI_VERSION:-2021.13} + NEURAL_COMPRESSOR_VERSION: ${NEURAL_COMPRESSOR_VERSION:-2.5.1} + NUMBA_DPEX_VERSION: ${NUMBA_DPEX_VERSION:-0.23.0} NUMPY_VERSION: ${NUMPY_VERSION:-1.26.4} - ONECCL_CPU_VERSION: ${ONECCL_CPU_VERSION:-2.2.0=*cpu*} - ONECCL_GPU_VERSION: 
${ONECCL_GPU_VERSION:-2.1.200=*xpu*} - PYTHON_VERSION: ${PYTHON_VERSION:-3.10} - TF_VERSION: ${TF_VERSION:-2.15} - TORCHAUDIO_CPU_VERSION: ${TORCHAUDIO_CPU_VERSION:-2.2.0=*cpu*} - TORCHAUDIO_GPU_VERSION: ${TORCHAUDIO_GPU_VERSION:-2.1.0=*xpu*} - TORCHVISION_CPU_VERSION: ${TORCHVISION_CPU_VERSION:-0.17=*cpu*} - TORCHVISION_GPU_VERSION: ${TORCHVISION_GPU_VERSION:-0.16.0=*xpu*} - TORCH_CPU_VERSION: ${TORCH_CPU_VERSION:-2.2.0=*cpu*} - TORCH_GPU_VERSION: ${TORCH_GPU_VERSION:-2.1.0=*xpu*} + ONECCL_CPU_VERSION: ${ONECCL_CPU_VERSION:-2.3.0} + ONECCL_XPU_VERSION: ${ONECCL_XPU_VERSION:-2.1.400} + PYTHON_VERSION: ${PYTHON_VERSION:-3.9} + TF_VERSION: ${TF_VERSION:-2.15.1} + TORCHAUDIO_CPU_VERSION: ${TORCHAUDIO_CPU_VERSION:-2.3.1} + TORCHAUDIO_XPU_VERSION: ${TORCHAUDIO_XPU_VERSION:-2.1.0} + TORCHVISION_CPU_VERSION: ${TORCHVISION_CPU_VERSION:-0.18.1} + TORCHVISION_XPU_VERSION: ${TORCHVISION_XPU_VERSION:-0.16.0} + TORCH_CPU_VERSION: ${TORCH_CPU_VERSION:-2.3.1} + TORCH_XPU_VERSION: ${TORCH_XPU_VERSION:-2.1.0} http_proxy: ${http_proxy} https_proxy: ${https_proxy} no_proxy: '' context: ../deep-learning labels: docs: false - target: deep-learning-jupyter + target: deep-learning-base environment: http_proxy: ${http_proxy} https_proxy: ${https_proxy} @@ -65,9 +66,12 @@ services: shm_size: 12GB volumes: - /dev/dri/by-path:/dev/dri/by-path - command: | - bash -c "conda run -n pytorch-cpu python -c 'import torch;print(torch.__version__);import intel_extension_for_pytorch as ipex;print(ipex.__version__);' && \ - conda run -n tensorflow-cpu python -c 'import tensorflow as tf; print(tf.__version__)'" + command: > + bash -c " conda run -n pytorch-cpu python -c 'import torch;print(torch.__version__);import + intel_extension_for_pytorch as ipex;print(ipex.__version__)' && + + conda run -n tensorflow-cpu python -c 'import tensorflow as tf;print(tf.__version__)' + " inference-optimization: @@ -75,13 +79,14 @@ services: args: COMPOSE_PROJECT_NAME: ${COMPOSE_PROJECT_NAME:-preset} context: . 
+ target: inference-optimization labels: docs: inference_optimization org.opencontainers.image.title: "Intel® AI Tools Selector Preset Containers - Inference Optimization" org.opencontainers.base.name: "intel/deep-learning" org.opencontainers.image.name: "intel/inference-optimization" - org.opencontainers.image.version: 2024.1.0-py${PYTHON_VERSION:-3.10} - dependency.python: ${PYTHON_VERSION:-3.10} + org.opencontainers.image.version: 2024.2.0-py${PYTHON_VERSION:-3.9} + dependency.python: ${PYTHON_VERSION:-3.9} dependency.python.pip: requirements.txt dependency.apt.apt-utils: true dependency.apt.build-essential: true @@ -98,11 +103,11 @@ services: dependency.apt.gzip: true dependency.apt.hwinfo: true dependency.apt.intel-igc-cm: true - dependency.apt.intel-level-zero-gpu: '1.3.27642.40-803~22.04' + dependency.apt.intel-level-zero-gpu: true dependency.apt.intel-media-va-driver-non-free: true - dependency.apt.intel-opencl-icd: '23.43.27642.40-803~22.04' - dependency.apt.level-zero: '1.14.0-744~22.04' - dependency.apt.level-zero-dev: '1.14.0-744~22.04' + dependency.apt.intel-opencl-icd: true + dependency.apt.level-zero: true + dependency.apt.level-zero-dev: true dependency.apt.libegl1-mesa: true dependency.apt.libegl1-mesa-dev: true dependency.apt.libegl-mesa0: true @@ -120,7 +125,6 @@ services: dependency.apt.libmfxgen1: true dependency.apt.libopenmpi-dev: true dependency.apt.libvpl2: true - dependency.apt.libxatracker2: true dependency.apt.make: true dependency.apt.mesa-va-drivers: true dependency.apt.mesa-vdpau-drivers: true @@ -138,68 +142,72 @@ services: dependency.apt.vainfo: true dependency.apt.wget: true dependency.apt.xz-utils: true - dependency.conda.jupyterlab: '>=4.1.8' - dependency.conda.aiohttp: '>=3.9.0' - dependency.conda.cryptography: '>=42.0.4' - dependency.conda.dataset_librarian: '>=1.0.4' - dependency.conda.deepspeed: '>=0.14.0' - dependency.conda.dpcpp_impl_linux-64: '>=2024.1.' - dependency.conda.dpcpp-cpp-rt: '>=2024.1.' 
- dependency.conda.dpnp: '>=0.14.0' - dependency.conda.idna: '>=3.7' - dependency.conda.impi-devel: '>=2021.12' - dependency.conda.intel-extension-for-pytorch_cpu: '>=2.2.0=*cpu*' - dependency.conda.intel-extension-for-pytorch_gpu: '>=2.1.20=*xpu*' - dependency.conda.intel-extension-for-tensorflow_cpu: '>=2.15=*cpu*' - dependency.conda.intel-extension-for-tensorflow_gpu: '>=2.15=*xpu*' - dependency.conda.intel-openmp: '>=2024.1.0' - dependency.conda.intel-optimization-for-horovod: '>=0.28.1.4' - dependency.conda.ipykernel: '>=6.29.3' - dependency.conda.ipython: '>=8.18.1' - dependency.conda.jinja2: '>=3.1.3' - dependency.conda.jupyterhub: '>=4.1.5' - dependency.conda.jupyter-server-proxy: '>=4.1.2' - dependency.conda.kernda: '>=0.3.0' - dependency.conda.mako: '>=1.2.2' - dependency.conda.matplotlib-base: '>=3.4.3' - dependency.conda.mkl-dpcpp: '>=2024.1.0' - dependency.conda.neural-compressor: '>=2.4.1' - dependency.conda.nodejs: '>=20.12.2' - dependency.conda.notebook: '>=7.1.3' - dependency.conda.numpy: '>=1.26.4' - dependency.conda.oauthlib: '>=3.2.2' - dependency.conda.oneccl_bind_pt_cpu: '>=2.2.0=*cpu*' - dependency.conda.oneccl_bind_pt_gpu: '>=2.1.200=*xpu*' + dependency.conda.accelerate: '==0.32.1' + dependency.conda.colorama: '==0.4.6' + dependency.conda.conda: '==24.5.0' + dependency.conda.dpnp: '=0.15.0' + dependency.conda.intel-extension-for-pytorch_cpu: '=2.3.100' + dependency.conda.intel-extension-for-pytorch_xpu: '=2.1.40' + dependency.conda.intel-extension-for-tensorflow_cpu: '=2.15.0=*cpu*' + dependency.conda.intel-extension-for-tensorflow_xpu: '=2.15.0.1=*xpu*' + dependency.conda.intel-optimization-for-horovod: '=0.28.1.5' + dependency.conda.ipykernel: '==6.29.5' + dependency.conda.jupyterhub: '==5.1.0' + dependency.conda.jupyter-server-proxy: '==4.3.0' + dependency.conda.kernda: '==0.3.0' + dependency.conda.ld_impl_linux-64: '==2.40' + dependency.conda.mamba: '==1.5.8' + dependency.conda.matplotlib-base: '>=3.8.4' + dependency.conda.mpi: '==1.0' + dependency.conda.mpich: '==4.2.2' + dependency.conda.networkx: '==3.3' + dependency.conda.neural-compressor: '=2.5.1' + dependency.conda.notebook: '==7.2.1' + dependency.conda.oneccl_bind_pt_cpu: '=2.3.0' + dependency.conda.oneccl_bind_pt_xpu: '=2.1.400' dependency.conda.onnx: '>=1.14.1' - dependency.conda.packaging: '>=23.2' - dependency.conda.pandas: '>=2.2.2' - dependency.conda.pillow: '>=10.2.0' - dependency.conda.protobuf: '>=4.24' - dependency.conda.pyjwt: '>=2.4.0' - dependency.conda.python: "=${PYTHON_VERSION:-3.10}" - dependency.conda.pytorch_cpu: '>=2.2.0=*cpu*' - dependency.conda.pytorch_gpu: '>=2.1.0=*xpu*' - dependency.conda.setuptools: '>=69.1.0' - dependency.conda.tensorboardx: '>=2.6.2.2' - dependency.conda.tensorflow: '>=2.15' - dependency.conda.torchaudio_cpu: '>=2.2.0=*cpu*' - dependency.conda.torchaudio_gpu: '>=2.1.0=*xpu*' - dependency.conda.torchvision_cpu: '>=0.17=*cpu*' - dependency.conda.torchvision_gpu: '>=0.16.0=*xpu*' - dependency.conda.tornado: '>=6.3.3' - dependency.conda.tqdm: '>=4.66.2' - dependency.conda.werkzeug: '>=2.2.3' - target: inference-optimization + dependency.conda.onnxruntime: '==1.18.1' + dependency.conda.py-cpuinfo: '==9.0.0' + dependency.conda.python: '==3.10.14' + dependency.conda.pytorch_cpu: '=2.3.1' + dependency.conda.pytorch_xpu: '=2.1.0' + dependency.conda.scikit-learn: '>=1.5.0' + dependency.conda.tensorboardx: '==2.6.2.2' + dependency.conda.tensorflow: '=2.15.1' + dependency.conda.tensorflow-hub: '==0.16.1' + dependency.conda.tf2onnx: '==1.16.1' + 
dependency.conda.torchaudio_cpu: '=2.3.1' + dependency.conda.torchaudio_xpu: '=2.1.0' + dependency.conda.torchvision_cpu: '=0.18.1' + dependency.conda.torchvision_xpu: '=0.16.0' + dependency.conda.tqdm: '==4.66.4' depends_on: - dl-base extends: dl-base - image: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-2024.1.0-py${PYTHON_VERSION:-3.10} - command: | - bash -c "conda run -n pytorch-cpu python -c 'import intel_extension_for_pytorch as ipex;print(ipex.__version__);' && \ - conda run -n pytorch-cpu python -c 'import neural_compressor;print(\"Neural Compressor Version:\", neural_compressor.__version__)' && \ - conda run -n pytorch-gpu python -c 'import torch;print(torch.device(\"xpu\"));import intel_extension_for_pytorch as ipex;print(ipex.xpu.is_available());' && \ - conda run -n pytorch-gpu python -c 'import neural_compressor;print(\"Neural Compressor Version:\", neural_compressor.__version__)' && \ - conda run -n tensorflow-cpu python -c 'import intel_extension_for_tensorflow as itex;print(itex.__version__);' && \ - conda run -n tensorflow-cpu python -c 'import neural_compressor, tf2onnx; print(\"\\nNeural Compressor Version:\", neural_compressor.__version__, \"\\\nTensorFlow2ONNX Version:\", tf2onnx.__version__)' && \ - conda run -n tensorflow-gpu python -c 'from tensorflow.python.client import device_lib; print(device_lib.list_local_devices())' && \ - conda run -n tensorflow-gpu python -c 'import neural_compressor, tf2onnx; print(\"\\nNeural Compressor Version:\", neural_compressor.__version__, \"\\\nTensorFlow2ONNX Version:\", tf2onnx.__version__)'" + image: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + command: > + bash -c "conda run -n pytorch-cpu python -c 'import intel_extension_for_pytorch + as ipex;print(ipex.__version__)' && + + conda run -n pytorch-cpu python -c 'import neural_compressor;print(\"Neural + Compressor Version:\", neural_compressor.__version__)' && + + conda run -n pytorch-gpu python -c 'import torch;print(torch.device(\"xpu\"));import + intel_extension_for_pytorch as ipex;print(ipex.xpu.is_available())' && + + conda run -n pytorch-gpu python -c 'import neural_compressor;print(\"Neural + Compressor Version:\", neural_compressor.__version__)' && + + conda run -n tensorflow-cpu python -c 'import intel_extension_for_tensorflow + as itex;print(itex.__version__)' && + + conda run -n tensorflow-cpu python -c 'import neural_compressor, tf2onnx;print(\"\\nNeural + Compressor Version:\", neural_compressor.__version__, \"\\nTensorFlow2ONNX + Version:\", tf2onnx.__version__)' && + + conda run -n tensorflow-gpu python -c 'from tensorflow.python.client import + device_lib;print(device_lib.list_local_devices())' && + + conda run -n tensorflow-gpu python -c 'import neural_compressor, tf2onnx;print(\"\\nNeural + Compressor Version:\", neural_compressor.__version__, \"\\nTensorFlow2ONNX + Version:\", tf2onnx.__version__)' " diff --git a/preset/inference-optimization/requirements.txt b/preset/inference-optimization/requirements.txt index 15dad774..8f0091ac 100644 --- a/preset/inference-optimization/requirements.txt +++ b/preset/inference-optimization/requirements.txt @@ -1,16 +1,5 @@ -accelerate>=0.30.0 -cloud-data-connector>=1.0.3 -cryptography>=42.0.7 -dataset-librarian>=1.0.4 -datasets>=2.19.1 -evaluate>=0.4.2 +dataset-librarian==1.0.4 +evaluate==0.4.2 git+https://github.com/huggingface/optimum-intel.git -ninja>=1.11.1.1 -onnxruntime>=1.17.3 -py-cpuinfo>=9.0.0
-python-dotenv>=1.0.1 -requests>=2.31.0 -tensorflow-hub>=0.16.1 -tf2onnx>==1.16.1 -tqdm>=4.66.2 -transformers>=4.40.2 +tf2onnx==1.16.1 +onnxruntime==1.18.1 diff --git a/preset/inference-optimization/tests.yaml b/preset/inference-optimization/tests.yaml index 98731067..5d608342 100644 --- a/preset/inference-optimization/tests.yaml +++ b/preset/inference-optimization/tests.yaml @@ -12,78 +12,87 @@ # See the License for the specific language governing permissions and # limitations under the License. +--- inference-optimization-inc-ipex-quantization-notebook-${PYTHON_VERSION:-3.9}-cpu: cmd: papermill --log-output jupyter/inc-ipex-quantization/quantize_with_inc.ipynb result.ipynb -k pytorch-cpu --cwd jupyter/inc-ipex-quantization - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} notebook: True inference-optimization-inc-ipex-quantization-notebook-${PYTHON_VERSION:-3.9}-gpu: cmd: papermill --log-output jupyter/inc-ipex-quantization/quantize_with_inc.ipynb result.ipynb -k pytorch-gpu --cwd jupyter/inc-ipex-quantization - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} notebook: True + device: ["/dev/dri"] inference-optimization-inc-itex-notebook-${PYTHON_VERSION:-3.9}-cpu: cmd: papermill --log-output jupyter/inc-itex/inc_sample_tensorflow.ipynb result.ipynb -k tensorflow-cpu --cwd jupyter/inc-itex - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} notebook: True # Status: Commented due to out of resources error # inference-optimization-inc-itex-notebook-${PYTHON_VERSION:-3.9}-gpu: # cmd: papermill --log-output jupyter/inc-itex/inc_sample_tensorflow.ipynb result.ipynb -k tensorflow-gpu --cwd jupyter/inc-itex -# img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} +# img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} # notebook: True inference-optimization-inc-tensorflow-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n tensorflow-cpu sample-tests/neural_compressor/tensorflow/run.sh cpu - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} inference-optimization-inc-tensorflow-${PYTHON_VERSION:-3.9}-gpu: cmd: conda run -n tensorflow-gpu sample-tests/neural_compressor/tensorflow/run.sh gpu - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + device: ["/dev/dri"] inference-optimization-inc-torch-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n pytorch-cpu sample-tests/neural_compressor/torch/run.sh cpu - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: 
${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} inference-optimization-ipex-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n pytorch-cpu python -W ignore sample-tests/intel_extension_for_pytorch/test_ipex.py --device cpu --ipex - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} inference-optimization-ipex-${PYTHON_VERSION:-3.9}-gpu: cmd: conda run -n pytorch-gpu python -W ignore sample-tests/intel_extension_for_pytorch/test_ipex.py --device xpu --ipex - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + device: ["/dev/dri"] inference-optimization-itex-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n tensorflow-cpu python -W ignore sample-tests/intel_extension_for_tensorflow/test_itex.py - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} inference-optimization-itex-${PYTHON_VERSION:-3.9}-gpu: cmd: conda run -n tensorflow-gpu python -W ignore sample-tests/intel_extension_for_tensorflow/test_itex.py - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + device: ["/dev/dri"] inference-optimization-itex-inference-notebook-${PYTHON_VERSION:-3.9}-cpu: cmd: papermill --log-output jupyter/itex-inference/tutorial_optimize_TensorFlow_pretrained_model.ipynb result.ipynb -k tensorflow-cpu --cwd jupyter/itex-inference - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} notebook: True # Need update from TensorFlow v1 to V2 # inference-optimization-itex-inference-notebook-${PYTHON_VERSION:-3.9}-gpu: # cmd: papermill --log-output jupyter/itex-inference/tutorial_optimize_TensorFlow_pretrained_model.ipynb result.ipynb -k tensorflow-gpu --cwd jupyter/itex-inference -# img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} +# img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} # notebook: True inference-optimization-onnx-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n tensorflow-cpu sample-tests/onnx/run.sh - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} inference-optimization-onnx-${PYTHON_VERSION:-3.9}-gpu: cmd: conda run -n tensorflow-gpu sample-tests/onnx/run.sh - img: 
${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + device: ["/dev/dri"] inference-optimization-tensorflow-dataset-librarian-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n tensorflow-cpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco' - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} inference-optimization-tensorflow-dataset-librarian-${PYTHON_VERSION:-3.9}-gpu: cmd: conda run -n tensorflow-gpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco' + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + device: ["/dev/dri"] inference-optimization-torch-dataset-librarian-${PYTHON_VERSION:-3.9}-cpu: cmd: conda run -n pytorch-cpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco' - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} inference-optimization-torch-dataset-librarian-${PYTHON_VERSION:-3.9}-gpu: cmd: conda run -n pytorch-gpu bash -c 'yes | python -m dataset_librarian.dataset -n msmarco --download -d ~/msmarco' - img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.1.0}-py${PYTHON_VERSION:-3.9} + img: ${REGISTRY}/${REPO}:b-${GITHUB_RUN_NUMBER:-0}-inference-optimization-${RELEASE:-2024.2.0}-py${PYTHON_VERSION:-3.9} + device: ["/dev/dri"]
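Note: the device: ["/dev/dri"] fields added in these tests are consumed by the repository's test runner, which maps them onto the container's device list. As a rough local sketch only, one of the GPU entries could be approximated with plain Docker as shown below. This assumes the inference-optimization image was already built by the compose files above with the default tag values (GITHUB_RUN_NUMBER=0, RELEASE=2024.2.0, Python 3.9), that REGISTRY and REPO point at your own registry, and that sample-tests/ is mounted or present in the container working directory, as the test runner normally arranges.

# Hypothetical local equivalent of the inference-optimization-itex-gpu test entry;
# --device exposes the Intel GPU render nodes, mirroring device: ["/dev/dri"],
# and the by-path bind mount mirrors the volumes: entry in the compose files.
docker run --rm \
  --device /dev/dri \
  -v /dev/dri/by-path:/dev/dri/by-path \
  "${REGISTRY}/${REPO}:b-0-inference-optimization-2024.2.0-py3.9" \
  conda run -n tensorflow-gpu python -W ignore \
    sample-tests/intel_extension_for_tensorflow/test_itex.py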