Update llama.cpp submodule to latest release b3078 #14

name: CI Quality Gate Submodule
on:
  pull_request:
    types: [opened, synchronize, reopened]
  workflow_dispatch:
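# Model artifacts downloaded by the end-to-end submodule test below.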
env:
  LLM_MODEL_URL: https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf
  EMBEDDING_MODEL_URL: https://catalog.jan.ai/dist/models/embeds/nomic-embed-text-v1.5.f16.gguf
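# The quality gate only runs for PRs whose head branch starts with update-submodule.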
jobs:
  build-and-test:
    if: startsWith(github.head_ref, 'update-submodule')
    runs-on: ${{ matrix.runs-on }}
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        include:
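          # One entry per build variant: CPU feature level (AVX, AVX2, AVX-512)
          # and GPU backend (CUDA 11.7 / 12.0, Vulkan, Metal) per OS. run-e2e
          # marks the variants that also execute the end-to-end test.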
- os: "linux"
name: "amd64-avx2"
runs-on: "ubuntu-18-04"
cmake-flags: "-DLLAMA_NATIVE=OFF"
run-e2e: true
vulkan: false
- os: "linux"
name: "amd64-avx"
runs-on: "ubuntu-18-04"
cmake-flags: "-DLLAMA_AVX2=OFF -DLLAMA_NATIVE=OFF"
run-e2e: false
vulkan: false
- os: "linux"
name: "amd64-avx512"
runs-on: "ubuntu-18-04"
cmake-flags: "-DLLAMA_AVX512=ON -DLLAMA_NATIVE=OFF"
run-e2e: false
vulkan: false
- os: "linux"
name: "amd64-vulkan"
runs-on: "ubuntu-18-04-cuda-11-7"
cmake-flags: "-DLLAMA_VULKAN=ON -DLLAMA_NATIVE=OFF"
run-e2e: false
vulkan: true
- os: "linux"
name: "amd64-cuda-11-7"
runs-on: "ubuntu-18-04-cuda-11-7"
cmake-flags: "-DLLAMA_NATIVE=OFF -DLLAMA_CUDA=ON"
run-e2e: false
vulkan: false
- os: "linux"
name: "amd64-cuda-12-0"
runs-on: "ubuntu-18-04-cuda-12-0"
cmake-flags: "-DLLAMA_NATIVE=OFF -DLLAMA_CUDA=ON"
run-e2e: false
vulkan: false
- os: "mac"
name: "amd64"
runs-on: "macos-13"
cmake-flags: "-DLLAMA_METAL=OFF"
run-e2e: true
vulkan: false
- os: "mac"
name: "arm64"
runs-on: "mac-silicon"
cmake-flags: "-DLLAMA_METAL_EMBED_LIBRARY=ON"
run-e2e: true
vulkan: false
- os: "windows"
name: "amd64-avx2"
runs-on: "windows-latest"
cmake-flags: "-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=RELEASE"
run-e2e: true
vulkan: false
- os: "windows"
name: "amd64-avx"
runs-on: "windows-latest"
cmake-flags: "-DLLAMA_AVX2=OFF -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=RELEASE"
run-e2e: true
vulkan: false
- os: "windows"
name: "amd64-avx512"
runs-on: "windows-cuda-11-7"
cmake-flags: "-DLLAMA_AVX512=ON -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=RELEASE"
run-e2e: true
vulkan: false
- os: "windows"
name: "amd64-vulkan"
runs-on: "windows-latest"
cmake-flags: "-DLLAMA_VULKAN=ON -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=RELEASE"
run-e2e: false
vulkan: true
- os: "windows"
name: "amd64-avx2-cuda-12-0"
runs-on: "windows-cuda-12-0"
cmake-flags: "-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUDA=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=RELEASE"
run-e2e: false
vulkan: false
- os: "windows"
name: "amd64-avx-cuda-12-0"
runs-on: "windows-cuda-12-0"
cmake-flags: "-DLLAMA_AVX2=OFF -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUDA=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=RELEASE"
run-e2e: false
vulkan: false
- os: "windows"
name: "amd64-avx512-cuda-12-0"
runs-on: "windows-cuda-12-0"
cmake-flags: "-DLLAMA_AVX512=ON -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUDA=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=RELEASE"
run-e2e: false
vulkan: false
- os: "windows"
name: "amd64-avx2-cuda-11-7"
runs-on: "windows-cuda-11-7"
cmake-flags: "-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUDA=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=RELEASE"
run-e2e: false
vulkan: false
- os: "windows"
name: "amd64-avx-cuda-11-7"
runs-on: "windows-cuda-11-7"
cmake-flags: "-DLLAMA_AVX2=OFF -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUDA=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=RELEASE"
run-e2e: false
vulkan: false
- os: "windows"
name: "amd64-avx512-cuda-11-7"
runs-on: "windows-cuda-11-7"
cmake-flags: "-DLLAMA_AVX512=ON -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUDA=ON -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=RELEASE"
run-e2e: false
vulkan: false
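    # Shared pipeline: checkout with submodules, optional Vulkan SDK, build,
    # package, then (for variants flagged run-e2e) an end-to-end test.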
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v3
        with:
          submodules: recursive
      - name: Prepare Vulkan SDK
        if: ${{ matrix.vulkan }}
        uses: humbletim/[email protected]
        with:
          vulkan-query-version: 1.3.275.0
          vulkan-components: Vulkan-Headers, Vulkan-Loader
          vulkan-use-cache: true
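      # The build is driven by make, which the Windows runners lack by default.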
      - name: Install make on Windows
        if: runner.os == 'Windows'
        run: |
          choco install make -y
      - name: Build
        run: |
          make build-example-server CMAKE_EXTRA_FLAGS="${{ matrix.cmake-flags }}"
      - name: Pre Package
        run: |
          make pre-package
      - name: Package
        run: |
          make package
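      # Python and the e2e test run only for variants flagged run-e2e in the matrix.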
      - name: Install Python
        if: ${{ matrix.run-e2e }}
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Run e2e submodule testing
        if: ${{ matrix.run-e2e }}
        run: |
          make run-e2e-submodule-test LLM_MODEL_URL=${{ env.LLM_MODEL_URL }} EMBEDDING_MODEL_URL=${{ env.EMBEDDING_MODEL_URL }}
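      # Upload the packaged build; the artifact name encodes OS and build variant.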
      - name: Upload Artifact
        uses: actions/upload-artifact@v2
        with:
          name: cortex.llamacpp-${{ matrix.os }}-${{ matrix.name }}
          path: ./cortex.llamacpp