[SPIRV] Switch to new pass generation tablegen definitions. (#18214) #2800

Workflow file for this run

# Copyright 2024 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
name: Benchmark
on:
  workflow_dispatch:
  pull_request:
  push:
    branches:
      - main
concurrency:
  # The PR number if this is a pull request, otherwise the commit hash. This
  # cancels queued and in-progress runs for the same PR (presubmit) or commit
  # (postsubmit). The workflow name is prepended to avoid conflicts between
  # different workflows.
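  # For example (hypothetical values), this resolves to "Benchmark-12345" for
  # PR #12345 and to "Benchmark-<commit sha>" for a push to main.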
  group: ${{ github.workflow }}-${{ github.event.number || github.sha }}
  cancel-in-progress: true
env:
  # This needs to be in env instead of the outputs of setup because it contains
  # the run attempt and we want that to be the current attempt, not whatever
  # attempt the setup step last ran in.
  GCS_DIR: gs://iree-github-actions-${{ github.event_name == 'pull_request' && 'presubmit' || 'postsubmit' }}-artifacts/${{ github.run_id }}/${{ github.run_attempt }}
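  # For example (hypothetical IDs), a presubmit run would resolve to something
  # like gs://iree-github-actions-presubmit-artifacts/9876543210/1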
jobs:
  setup:
    uses: ./.github/workflows/setup.yml
  build_for_benchmarks:
    needs: setup
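    # enabled-jobs is consumed as a JSON list of job names (hence the
    # fromJson/contains check below) and benchmark-presets as a plain string;
    # both come from setup.yml and gate every benchmark job in this workflow.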
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'build_for_benchmarks') && needs.setup.outputs.benchmark-presets != ''
    uses: ./.github/workflows/build_all.yml
    with:
      runner-group: ${{ needs.setup.outputs.runner-group }}
      runner-env: ${{ needs.setup.outputs.runner-env }}
      write-caches: ${{ needs.setup.outputs.write-caches }}
      run-tests: false
  build_benchmark_tools:
    needs: [setup, build_for_benchmarks]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'build_benchmark_tools') && needs.setup.outputs.benchmark-presets != ''
    uses: ./.github/workflows/build_benchmark_tools.yml
    with:
      runner-group: ${{ needs.setup.outputs.runner-group }}
      runner-env: ${{ needs.setup.outputs.runner-env }}
      install-dir: ${{ needs.build_for_benchmarks.outputs.install-dir }}
      install-dir-archive: ${{ needs.build_for_benchmarks.outputs.install-dir-archive }}
      install-dir-gcs-artifact: ${{ needs.build_for_benchmarks.outputs.install-dir-gcs-artifact }}
  build_e2e_test_artifacts:
    needs: [setup, build_for_benchmarks]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'build_e2e_test_artifacts') && needs.setup.outputs.benchmark-presets != ''
    uses: ./.github/workflows/build_e2e_test_artifacts.yml
    with:
      runner-group: ${{ needs.setup.outputs.runner-group }}
      runner-env: ${{ needs.setup.outputs.runner-env }}
      install-dir: ${{ needs.build_for_benchmarks.outputs.install-dir }}
      install-dir-archive: ${{ needs.build_for_benchmarks.outputs.install-dir-archive }}
      install-dir-gcs-artifact: ${{ needs.build_for_benchmarks.outputs.install-dir-gcs-artifact }}
      benchmark-presets: ${{ needs.setup.outputs.benchmark-presets }}
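      # Shards the artifact build per runner machine type; read as "2 shards on
      # c2-standard-60 runners, 1 shard everywhere else" (assumed semantics of
      # the format below).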
      shard-count: "c2-standard-60=2,default=1"
  test_benchmark_suites:
    needs: [setup, build_for_benchmarks, build_e2e_test_artifacts]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'test_benchmark_suites') && needs.setup.outputs.benchmark-presets != ''
    strategy:
      matrix:
        target:
          - platform: linux
            arch: riscv_64
            docker_image: "gcr.io/iree-oss/riscv@sha256:62e87bad3405d691ddba6f9be0ef44eeb60461a467c8d86f0842c81a1f97da79"
            run_scripts: "./build_tools/cmake/build_riscv.sh && ./build_tools/cmake/test_riscv.sh"
          - platform: linux
            arch: riscv_32
            docker_image: "gcr.io/iree-oss/riscv@sha256:62e87bad3405d691ddba6f9be0ef44eeb60461a467c8d86f0842c81a1f97da79"
            run_scripts: "./build_tools/cmake/build_riscv.sh && ./build_tools/cmake/test_riscv.sh"
          - platform: linux
            arch: x86_64
            docker_image: "gcr.io/iree-oss/base@sha256:dc314b4fe30fc1315742512891357bffed4d1b62ffcb46258b1e0761c737b446"
            run_scripts: "./build_tools/cmake/test_benchmark_suites_on_linux.sh"
            # Requires an Intel Cascade Lake CPU.
            host_machine: c2s601t
    runs-on:
      - self-hosted # must come first
      - runner-group=${{ needs.setup.outputs.runner-group }}
      - environment=${{ needs.setup.outputs.runner-env }}
      - ${{ matrix.target.host_machine || 'cpu' }} # Default to a generic x86_64 VM.
    env:
      PLATFORM: ${{ matrix.target.platform }}
      ARCH: ${{ matrix.target.arch }}
      DOCKER_IMAGE: ${{ matrix.target.docker_image }}
      RUN_SCRIPTS: ${{ matrix.target.run_scripts }}
      INSTALL_DIR: ${{ needs.build_for_benchmarks.outputs.install-dir }}
      INSTALL_DIR_ARCHIVE: ${{ needs.build_for_benchmarks.outputs.install-dir-archive }}
      INSTALL_DIR_GCS_ARTIFACT: ${{ needs.build_for_benchmarks.outputs.install-dir-gcs-artifact }}
      TARGET_BUILD_DIR: build-${{ matrix.target.platform }}-${{ matrix.target.arch }}
      E2E_TEST_ARTIFACTS_DIR: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
      E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
    steps:
      - name: "Checking out repository"
        uses: actions/[email protected]
      - name: "Checking out runtime submodules"
        run: ./build_tools/scripts/git/update_runtime_submodules.sh
      - name: "Downloading install dir archive"
        run: gcloud storage cp "${INSTALL_DIR_GCS_ARTIFACT}" "${INSTALL_DIR_ARCHIVE}"
      - name: "Extracting install directory"
        run: tar -xf "${INSTALL_DIR_ARCHIVE}"
      # TODO(#11136): Only download the needed artifacts instead of everything.
      - name: "Downloading e2e test artifacts"
        run: |
          mkdir -p "${E2E_TEST_ARTIFACTS_DIR}"
          gcloud storage cp -r "${E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR}/*" "${E2E_TEST_ARTIFACTS_DIR}"
- name: "Build iree-run-module and test benchmark suite modules"
run: |
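          # docker_run.sh is a repo helper that (as assumed here) runs the given
          # command inside DOCKER_IMAGE with each --env variable forwarded into
          # the container; BUILD_PRESET selects the benchmark-suite-test build.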
          ./build_tools/github_actions/docker_run.sh \
            --env "IREE_TARGET_PLATFORM=${PLATFORM}" \
            --env "IREE_TARGET_ARCH=${ARCH}" \
            --env "IREE_TARGET_BUILD_DIR=${TARGET_BUILD_DIR}" \
            --env "BUILD_PRESET=benchmark-suite-test" \
            --env "IREE_HOST_BIN_DIR=${INSTALL_DIR}/bin" \
            --env "E2E_TEST_ARTIFACTS_DIR=${E2E_TEST_ARTIFACTS_DIR}" \
            "${DOCKER_IMAGE}" \
            bash -euo pipefail -c \
              "${RUN_SCRIPTS}"
  compilation_benchmarks:
    needs: [setup, build_e2e_test_artifacts]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'compilation_benchmarks') && needs.setup.outputs.benchmark-presets != ''
    uses: ./.github/workflows/benchmark_compilation.yml
    with:
      runner-group: ${{ needs.setup.outputs.runner-group }}
      runner-env: ${{ needs.setup.outputs.runner-env }}
      e2e-test-artifacts-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
      e2e-test-artifacts-gcs-artifact-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
      e2e-test-artifacts-build-log: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log }}
      e2e-test-artifacts-build-log-gcs-artifact: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-build-log-gcs-artifact }}
  execution_benchmarks:
    needs: [setup, build_benchmark_tools, build_e2e_test_artifacts]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'execution_benchmarks') && needs.setup.outputs.benchmark-presets != ''
    uses: ./.github/workflows/benchmark_execution.yml
    with:
      runner-group: ${{ needs.setup.outputs.runner-group }}
      runner-env: ${{ needs.setup.outputs.runner-env }}
      e2e-test-artifacts-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-dir }}
      e2e-test-artifacts-gcs-artifact-dir: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir }}
      benchmark-tools-gcs-artifact-dir: ${{ needs.build_benchmark_tools.outputs.benchmark-tools-gcs-artifact-dir }}
  process_benchmark_results:
    needs: [setup, compilation_benchmarks, execution_benchmarks]
    if: contains(fromJson(needs.setup.outputs.enabled-jobs), 'process_benchmark_results') && needs.setup.outputs.benchmark-presets != ''
    runs-on:
      - self-hosted # must come first
      - runner-group=${{ needs.setup.outputs.runner-group }}
      - environment=${{ needs.setup.outputs.runner-env }}
      - cpu
      - os-family=Linux
    env:
      COMPILE_STATS_RESULTS: ${{ needs.compilation_benchmarks.outputs.compile-stats-results }}
      COMPILE_STATS_RESULTS_GCS_ARTIFACT: ${{ needs.compilation_benchmarks.outputs.compile-stats-results-gcs-artifact }}
      # Empty if there are no execution benchmark runs.
      EXECUTION_BENCHMARK_RESULTS_DIR: ${{ needs.execution_benchmarks.outputs.benchmark-results-dir }}
      # Empty if there are no execution benchmark runs.
      EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR: ${{ needs.execution_benchmarks.outputs.benchmark-results-gcs-artifact-dir }}
    steps:
      - name: "Checking out repository"
        uses: actions/[email protected]
        with:
          # We need the full history (and main branch) to generate the report.
          fetch-depth: 0
      - name: Downloading compilation benchmark results
        run: |
          gcloud storage cp \
            "${COMPILE_STATS_RESULTS_GCS_ARTIFACT}" \
            "${COMPILE_STATS_RESULTS}"
      - name: Downloading execution benchmark results
        id: download-execution-results
        # Skip the download if there are no execution benchmark results (e.g. no
        # benchmark matches the preset/filter). In that case, no benchmark job is
        # run in benchmark_execution.yml and the output variables are empty.
        if: env.EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR != ''
        run: |
          gcloud storage cp -r \
            "${EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR}/benchmark-results-*.json" \
            "${EXECUTION_BENCHMARK_RESULTS_DIR}"
          echo "execution-benchmark-results-pattern=${EXECUTION_BENCHMARK_RESULTS_DIR}/benchmark-results-*.json" >> "${GITHUB_OUTPUT}"
      - name: Generating comment
        if: fromJson(needs.setup.outputs.is-pr)
        id: generate-comment
        env:
          # Wildcard pattern to match all execution benchmark results. Empty if
          # execution_benchmarks is skipped, which results in no match.
          EXECUTION_BENCHMARK_RESULTS_PATTERN: ${{ steps.download-execution-results.outputs.execution-benchmark-results-pattern }}
          IREE_BUILD_URL: https://github.com/iree-org/iree/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          BENCHMARK_COMMENT_ARTIFACT: benchmark-comment.json
        run: |
          build_tools/github_actions/docker_run.sh \
            gcr.io/iree-oss/benchmark-report@sha256:7498c6f32f63f13faf085463cc38656d4297519c824e63e1c99c8c258147f6ff \
            ./build_tools/benchmarks/generate_benchmark_comment.py \
              --verbose \
              --pr_number="${PR_NUMBER}" \
              --pr_committish="${GITHUB_SHA}" \
              --pr_base_branch="origin/${GITHUB_BASE_REF}" \
              --comment_type="benchmark-summary" \
              --build_url="${IREE_BUILD_URL}" \
              --benchmark_files="${EXECUTION_BENCHMARK_RESULTS_PATTERN}" \
              --compile_stats_files="${COMPILE_STATS_RESULTS}" \
              --output="${BENCHMARK_COMMENT_ARTIFACT}"
          echo "benchmark-comment-artifact=${BENCHMARK_COMMENT_ARTIFACT}" >> "${GITHUB_OUTPUT}"
      - name: Uploading comment artifact
        # For security reasons, instead of posting the comment to the PR directly,
        # we only upload the comment data in the presubmit workflow and trigger
        # the posting workflow on the main branch. See post_benchmark_comment.yaml.
        if: fromJson(needs.setup.outputs.is-pr)
        env:
          BENCHMARK_COMMENT_ARTIFACT: ${{ steps.generate-comment.outputs.benchmark-comment-artifact }}
          BENCHMARK_COMMENT_GCS_ARTIFACT: ${{ env.GCS_DIR }}/${{ steps.generate-comment.outputs.benchmark-comment-artifact }}
        run: |
          gcloud storage cp \
            "${BENCHMARK_COMMENT_ARTIFACT}" \
            "${BENCHMARK_COMMENT_GCS_ARTIFACT}"
      - name: Uploading results to dashboard
        if: github.ref_name == 'main'
        env:
          EXECUTION_BENCHMARK_RESULTS_PATTERN: ${{ steps.download-execution-results.outputs.execution-benchmark-results-pattern }}
          IREE_DASHBOARD_API_TOKEN: ${{ secrets.IREE_DASHBOARD_API_TOKEN }}
        run: |
          build_tools/github_actions/docker_run.sh \
            --env "IREE_DASHBOARD_API_TOKEN=${IREE_DASHBOARD_API_TOKEN}" \
            gcr.io/iree-oss/benchmark-report@sha256:7498c6f32f63f13faf085463cc38656d4297519c824e63e1c99c8c258147f6ff \
            ./build_tools/benchmarks/upload_benchmarks_to_dashboard.py \
              --verbose \
              --benchmark_files="${EXECUTION_BENCHMARK_RESULTS_PATTERN}" \
              --compile_stats_files="${COMPILE_STATS_RESULTS}"
  ##############################################################################
  # Depends on all the other jobs to provide a single anchor that indicates the
  # final status. Status reporting will become more sophisticated in the future
  # and we can hopefully avoid the need to explicitly list every single job...
  benchmark_summary:
    # Even if you have an explicit if condition, you still need to override
    # GitHub's default behavior of not running if any dependencies failed.
    if: always()
    runs-on: ubuntu-20.04
    needs:
      - setup
      - build_for_benchmarks
      # Benchmark pipeline
      - build_benchmark_tools
      - build_e2e_test_artifacts
      - test_benchmark_suites
      - compilation_benchmarks
      - execution_benchmarks
      - process_benchmark_results
    steps:
      - name: "Checking out repository"
        uses: actions/[email protected]
      - name: Getting failed jobs
        id: failed_jobs
        run: |
          echo '${{ toJson(needs) }}'
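          # The 'needs' context is a JSON object keyed by job id, each entry
          # carrying a 'result' of success, failure, cancelled, or skipped. Keep
          # every job whose result is neither success nor skipped and join the
          # ids into a comma-separated list.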
FAILED_JOBS="$(echo '${{ toJson(needs) }}' \
| jq --raw-output \
'map_values(select(.result!="success" and .result!="skipped")) | keys | join(",")' \
)"
echo "failed-jobs=${FAILED_JOBS}" >> $GITHUB_OUTPUT
if [[ "${FAILED_JOBS}" != "" ]]; then
echo "The following jobs failed: ${FAILED_JOBS}"
exit 1
fi
      - name: Show useful artifact links
        if: always()
        env:
          # If the job that produces an artifact was skipped or failed, we show
          # "NOT_PRESENT" instead of a link.
          INSTALL_DIR_GCS_ARTIFACT: ${{ needs.build_for_benchmarks.outputs.install-dir-gcs-artifact || 'NOT_PRESENT' }}
          E2E_TEST_ARTIFACTS_GCS_ARTIFACT_DIR: ${{ needs.build_e2e_test_artifacts.outputs.e2e-test-artifacts-gcs-artifact-dir || 'NOT_PRESENT' }}
          BENCHMARK_TOOLS_GCS_ARTIFACT_DIR: ${{ needs.build_benchmark_tools.outputs.benchmark-tools-gcs-artifact-dir || 'NOT_PRESENT' }}
          EXECUTION_BENCHMARK_RESULTS_GCS_ARTIFACT_DIR: ${{ needs.execution_benchmarks.outputs.benchmark-results-gcs-artifact-dir || 'NOT_PRESENT' }}
          COMPILATION_BENCHMARK_RESULTS_GCS_ARTIFACT: ${{ needs.compilation_benchmarks.outputs.compile-stats-results-gcs-artifact || 'NOT_PRESENT' }}
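        # envsubst expands the variables defined above inside the template and
        # appends the rendered markdown to the job's step summary.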
        run: |
          envsubst < ./.github/workflows/ARTIFACT_SUMMARY_TEMPLATE.md >> "${GITHUB_STEP_SUMMARY}"
      - name: Posting to Discord
        uses: sarisia/[email protected]
        if: failure() && github.ref_name == 'main'
        with:
          webhook: ${{ secrets.DISCORD_WEBHOOK }}
          description: "The following jobs failed: ${{ steps.failed_jobs.outputs.failed-jobs }}"
          url: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/attempts/${{ github.run_attempt }}"