Skip to content

Commit

Permalink
[ci] Generate Jenkinsfile from a template (#10740)
Browse files Browse the repository at this point in the history
* [ci] Generate Jenkinsfile from a template

This uses `jinja2` to generate the Jenkinsfile. This is useful since it lets us both keep common functionality easy to define (i.e. iterate over all images and do something) while keeping the output easy to debug (you can look at the `Jenkinsfile` directly instead of trying to imagine what the Groovy interpreter will do). This will become more useful as we start to make CI more configurable, such as adding dynamic test sharding.

This mostly introduces the infrastructure and makes some token changes to demonstrate the generation process, but its usefulness is already apparent, since the parameters list was missing an entry for the `ci_hexagon` image.

* Address comments, fix CI with temporary workaround

Co-authored-by: driazati <[email protected]>
  • Loading branch information
driazati and driazati authored Mar 25, 2022
1 parent 079eb4e commit 1b654e9
Show file tree
Hide file tree
Showing 7 changed files with 1,113 additions and 29 deletions.
60 changes: 31 additions & 29 deletions Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -39,11 +39,12 @@
// - Periodically cleanup the old versions on local workers
//

// Hashtag in the source to build current CI docker builds
//
//
import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
// ============================= IMPORTANT NOTE =============================
// This file is generated by 'jenkins/generate.py'. Do not edit this file directly!
// Make edits to 'jenkins/Jenkinsfile.j2' and regenerate this with
// 'python3 jenkins/generate.py'

import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
// NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. -->
ci_lint = 'tlcpack/ci-lint:v0.69'
ci_gpu = 'tlcpack/ci-gpu:v0.82'
Expand All @@ -60,13 +61,14 @@ ci_hexagon = 'tlcpack/ci-hexagon:v0.02'
// over default values above.
properties([
parameters([
string(name: 'ci_lint_param', defaultValue: ''),
string(name: 'ci_cpu_param', defaultValue: ''),
string(name: 'ci_gpu_param', defaultValue: ''),
string(name: 'ci_wasm_param', defaultValue: ''),
string(name: 'ci_arm_param', defaultValue: ''),
string(name: 'ci_cpu_param', defaultValue: ''),
string(name: 'ci_gpu_param', defaultValue: ''),
string(name: 'ci_hexagon_param', defaultValue: ''),
string(name: 'ci_i386_param', defaultValue: ''),
string(name: 'ci_lint_param', defaultValue: ''),
string(name: 'ci_qemu_param', defaultValue: ''),
string(name: 'ci_arm_param', defaultValue: '')
string(name: 'ci_wasm_param', defaultValue: ''),
])
])

Expand Down Expand Up @@ -198,7 +200,7 @@ stage('Prepare') {
stage('Sanity Check') {
timeout(time: max_time, unit: 'MINUTES') {
node('CPU') {
ws(per_exec_ws('tvm/sanity')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/sanity") {
init_git()
is_docs_only_build = sh (
returnStatus: true,
Expand Down Expand Up @@ -350,7 +352,7 @@ if (rebuild_docker_images) {
// stage('Sanity Check (re-run)') {
// timeout(time: max_time, unit: 'MINUTES') {
// node('CPU') {
// ws(per_exec_ws('tvm/sanity')) {
// ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/sanity") {
// init_git()
// sh (
// script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh",
Expand Down Expand Up @@ -445,7 +447,7 @@ stage('Build') {
parallel 'BUILD: GPU': {
if (!skip_ci) {
node('CPU') {
ws(per_exec_ws('tvm/build-gpu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-gpu") {
init_git()
sh "${docker_run} --no-gpu ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build"
make("${ci_gpu} --no-gpu", 'build', '-j2')
Expand All @@ -461,7 +463,7 @@ stage('Build') {
'BUILD: CPU': {
if (!skip_ci && is_docs_only_build != 1) {
node('CPU') {
ws(per_exec_ws('tvm/build-cpu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-cpu") {
init_git()
sh (
script: "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh build",
Expand All @@ -484,7 +486,7 @@ stage('Build') {
'BUILD: WASM': {
if (!skip_ci && is_docs_only_build != 1) {
node('CPU') {
ws(per_exec_ws('tvm/build-wasm')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-wasm") {
init_git()
sh (
script: "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh build",
Expand All @@ -508,7 +510,7 @@ stage('Build') {
'BUILD: i386': {
if (!skip_ci && is_docs_only_build != 1) {
node('CPU') {
ws(per_exec_ws('tvm/build-i386')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-i386") {
init_git()
sh (
script: "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh build",
Expand All @@ -525,7 +527,7 @@ stage('Build') {
'BUILD: arm': {
if (!skip_ci && is_docs_only_build != 1) {
node('ARM') {
ws(per_exec_ws('tvm/build-arm')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-arm") {
init_git()
sh (
script: "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh build",
Expand All @@ -542,7 +544,7 @@ stage('Build') {
'BUILD: QEMU': {
if (!skip_ci && is_docs_only_build != 1) {
node('CPU') {
ws(per_exec_ws('tvm/build-qemu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-qemu") {
init_git()
sh (
script: "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh build",
Expand Down Expand Up @@ -574,7 +576,7 @@ stage('Build') {
'BUILD: Hexagon': {
if (!skip_ci && is_docs_only_build != 1) {
node('CPU') {
ws(per_exec_ws('tvm/build-hexagon')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/build-hexagon") {
init_git()
sh (
script: "${docker_run} ${ci_hexagon} ./tests/scripts/task_config_build_hexagon.sh build",
Expand Down Expand Up @@ -613,7 +615,7 @@ stage('Test') {
parallel 'unittest: GPU': {
if (!skip_ci && is_docs_only_build != 1) {
node('TensorCore') {
ws(per_exec_ws('tvm/ut-python-gpu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-gpu") {
try {
init_git()
unpack_lib('gpu2', tvm_multilib)
Expand Down Expand Up @@ -648,7 +650,7 @@ stage('Test') {
'integration: CPU': {
if (!skip_ci && is_docs_only_build != 1) {
node('CPU') {
ws(per_exec_ws('tvm/ut-python-cpu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-cpu") {
try {
init_git()
unpack_lib('cpu', tvm_multilib_tsim)
Expand All @@ -671,7 +673,7 @@ stage('Test') {
'unittest: CPU': {
if (!skip_ci && is_docs_only_build != 1) {
node('CPU') {
ws(per_exec_ws('tvm/ut-python-cpu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-cpu") {
try {
init_git()
unpack_lib('cpu', tvm_multilib_tsim)
Expand All @@ -697,7 +699,7 @@ stage('Test') {
'python3: i386': {
if (!skip_ci && is_docs_only_build != 1) {
node('CPU') {
ws(per_exec_ws('tvm/ut-python-i386')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-i386") {
try {
init_git()
unpack_lib('i386', tvm_multilib)
Expand All @@ -723,7 +725,7 @@ stage('Test') {
'python3: aarch64': {
if (!skip_ci && is_docs_only_build != 1) {
node('ARM') {
ws(per_exec_ws('tvm/ut-python-arm')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/ut-python-arm") {
try {
init_git()
unpack_lib('arm', tvm_multilib)
Expand Down Expand Up @@ -756,7 +758,7 @@ stage('Test') {
'topi: GPU': {
if (!skip_ci && is_docs_only_build != 1) {
node('GPU') {
ws(per_exec_ws('tvm/topi-python-gpu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/topi-python-gpu") {
try {
init_git()
unpack_lib('gpu', tvm_multilib)
Expand All @@ -779,7 +781,7 @@ stage('Test') {
'frontend: GPU 1': {
if (!skip_ci && is_docs_only_build != 1) {
node('GPU') {
ws(per_exec_ws('tvm/frontend-python-gpu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
try {
init_git()
unpack_lib('gpu', tvm_multilib)
Expand All @@ -802,7 +804,7 @@ stage('Test') {
'frontend: GPU 2': {
if (!skip_ci && is_docs_only_build != 1) {
node('GPU') {
ws(per_exec_ws('tvm/frontend-python-gpu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-gpu") {
try {
init_git()
unpack_lib('gpu', tvm_multilib)
Expand All @@ -825,7 +827,7 @@ stage('Test') {
'frontend: CPU': {
if (!skip_ci && is_docs_only_build != 1) {
node('CPU') {
ws(per_exec_ws('tvm/frontend-python-cpu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/frontend-python-cpu") {
try {
init_git()
unpack_lib('cpu', tvm_multilib)
Expand All @@ -848,7 +850,7 @@ stage('Test') {
'docs: GPU': {
if (!skip_ci) {
node('TensorCore') {
ws(per_exec_ws('tvm/docs-python-gpu')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/docs-python-gpu") {
init_git()
unpack_lib('gpu', tvm_multilib)
timeout(time: max_time, unit: 'MINUTES') {
Expand Down Expand Up @@ -927,7 +929,7 @@ def deploy_docs() {
stage('Deploy') {
if (env.BRANCH_NAME == 'main' && env.DOCS_DEPLOY_ENABLED == 'yes') {
node('CPU') {
ws(per_exec_ws('tvm/deploy-docs')) {
ws("workspace/exec_${env.EXECUTOR_NUMBER}/tvm/deploy-docs") {
unpack_lib('docs', 'docs.tgz')
deploy_docs()
}
Expand Down
Loading

0 comments on commit 1b654e9

Please sign in to comment.