diff --git a/Jenkinsfile b/Jenkinsfile
index b98c79a839..c7442c1109 100755
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -83,10 +83,8 @@ tvm_multilib_tsim = 'build/libvta_tsim.so, ' +
 // command to start a docker container
 docker_run = 'docker/bash.sh'
-docker_build = 'docker/build.sh'
 // timeout in minutes
 max_time = 240
-rebuild_docker_images = false
 
 def per_exec_ws(folder) {
   return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder
 }
@@ -134,18 +132,6 @@ def cancel_previous_build() {
 }
 
 def should_skip_ci(pr_number) {
-  if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
-    // never skip CI on build sourced from a branch
-    return false
-  }
-  glob_skip_ci_code = sh (
-    returnStatus: true,
-    script: "./tests/scripts/git_skip_ci_globs.py",
-    label: 'Check if CI should be skipped due to changed files',
-  )
-  if (glob_skip_ci_code == 0) {
-    return true
-  }
   withCredentials([string(
     credentialsId: 'tvm-bot-jenkins-reader',
     variable: 'TOKEN',
@@ -157,18 +143,10 @@ def should_skip_ci(pr_number) {
       script: "./tests/scripts/git_skip_ci.py --pr '${pr_number}'",
       label: 'Check if CI should be skipped',
     )
-    }
+  }
   return git_skip_ci_code == 0
 }
 
-// skips builds from branch indexing; sourced from https://www.jvt.me/posts/2020/02/23/jenkins-multibranch-skip-branch-index/
-// execute this before anything else, including requesting any time on an agent
-if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) {
-  print "INFO: Build skipped due to trigger being Branch Indexing"
-  currentBuild.result = 'ABORTED' // optional, gives a better hint to the user that it's been skipped, rather than the default which shows it's successful
-  return
-}
-
 cancel_previous_build()
 
 stage('Prepare') {
@@ -207,16 +185,6 @@ stage('Sanity Check') {
     )
     skip_ci = should_skip_ci(env.CHANGE_ID)
     skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID)
-    rebuild_docker_images = sh (
-      returnStatus: true,
-      script: './tests/scripts/git_change_docker.sh',
-      label: 'Check for any docker changes',
-    )
-    if (rebuild_docker_images) {
-      // Exit before linting so we can use the newly created Docker images
-      // to run the lint
-      return
-    }
     sh (
       script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh",
       label: 'Run lint',
@@ -226,105 +194,6 @@ stage('Sanity Check') {
     )
   }
 }
-def build_image(image_name) {
-  hash = sh(
-    returnStdout: true,
-    script: 'git log -1 --format=\'%h\''
-  ).trim()
-  def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}"
-  sh(
-    script: "${docker_build} ${image_name} --spec ${full_name}",
-    label: 'Building docker image'
-  )
-  sh(
-    script: "docker rmi ${full_name}",
-    label: 'Removing docker image'
-  )
-  sh "echo NYI: Uploading docker image to registry..."
-}
-
-if (rebuild_docker_images) {
-  stage('Docker Image Build') {
-    // TODO in a follow up PR: Upload to ECR, find tag and use in
-    // subsequent builds
-    parallel 'ci-lint': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_lint')
-        }
-      }
-    }, 'ci-cpu': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_cpu')
-        }
-      }
-    }, 'ci-gpu': {
-      node('GPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_gpu')
-        }
-      }
-    }, 'ci-qemu': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_qemu')
-        }
-      }
-    }, 'ci-i386': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_i386')
-        }
-      }
-    }, 'ci-arm': {
-      node('ARM') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_arm')
-        }
-      }
-    }, 'ci-wasm': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_wasm')
-        }
-      }
-    }, 'ci-hexagon': {
-      node('CPU') {
-        timeout(time: max_time, unit: 'MINUTES') {
-          init_git()
-          build_image('ci_hexagon')
-        }
-      }
-    }
-  }
-  // // TODO: Once we are able to use the built images, enable this step
-  // // If the docker images changed, we need to run the image build before the lint
-  // // can run since it requires a base docker image. Most of the time the images
-  // // aren't build though so it's faster to use the same node that checks for
-  // // docker changes to run the lint in the usual case.
-  // stage('Sanity Check (re-run)') {
-  //   timeout(time: max_time, unit: 'MINUTES') {
-  //     node('CPU') {
-  //       ws(per_exec_ws('tvm/sanity')) {
-  //         init_git()
-  //         sh (
-  //           script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh",
-  //           label: 'Run lint',
-  //         )
-  //       }
-  //     }
-  //   }
-  // }
-}
-
 // Run make. First try to do an incremental make from a previous workspace in hope to
 // accelerate the compilation. If something is wrong, clean the workspace and then
 // build from scratch.
@@ -345,6 +214,7 @@ def make(docker_type, path, make_flag) {
         label: 'Clear old cmake workspace',
       )
       cmake_build(docker_type, path, make_flag)
+      cpp_unittest(docker_type)
     }
   }
 }
@@ -397,7 +267,7 @@ def cmake_build(image, path, make_flag) {
 
 def cpp_unittest(image) {
   sh (
-    script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh",
+    script: "${docker_run} ${image} ./tests/scripts/task_cpp_unittest.sh",
     label: 'Build and run C++ tests',
   )
 }
@@ -407,7 +277,7 @@ stage('Build and Test') {
     node('CPU') {
       ws(per_exec_ws('tvm/build-cpu')) {
         init_git()
-        sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh"
+        sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh build"
         make(ci_cpu, 'build', '-j2')
         sh "${docker_run} ${ci_cpu} ./tests/scripts/task_python_integration.sh"
       }
@@ -422,11 +292,11 @@ stage('Build and Test') {
 //   node('GPUBUILD') {
 //     ws(per_exec_ws('tvm/build-gpu')) {
 //       init_git()
-//       sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh"
+//       sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu.sh build"
 //       make(ci_gpu, 'build', '-j2')
 //       pack_lib('gpu', tvm_multilib)
 //       // compiler test
-//       sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_other.sh"
+//       sh "${docker_run} ${ci_gpu} ./tests/scripts/task_config_build_gpu_other.sh build2"
 //       make(ci_gpu, 'build2', '-j2')
 //     }
 //   }
@@ -436,7 +306,7 @@ stage('Build and Test') {
 //   node('CPU') {
 //     ws(per_exec_ws('tvm/build-cpu')) {
 //       init_git()
-//       sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh"
+//       sh "${docker_run} ${ci_cpu} ./tests/scripts/task_config_build_cpu.sh build"
 //       make(ci_cpu, 'build', '-j2')
 //       pack_lib('cpu', tvm_multilib_tsim)
 //       timeout(time: max_time, unit: 'MINUTES') {
@@ -460,7 +330,7 @@ stage('Build and Test') {
 //   node('CPU') {
 //     ws(per_exec_ws('tvm/build-wasm')) {
 //       init_git()
-//       sh "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh"
+//       sh "${docker_run} ${ci_wasm} ./tests/scripts/task_config_build_wasm.sh build"
 //       make(ci_wasm, 'build', '-j2')
 //       timeout(time: max_time, unit: 'MINUTES') {
 //         sh "${docker_run} ${ci_wasm} ./tests/scripts/task_ci_setup.sh"
@@ -477,7 +347,7 @@ stage('Build and Test') {
 //   node('CPU') {
 //     ws(per_exec_ws('tvm/build-i386')) {
 //       init_git()
-//       sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh"
+//       sh "${docker_run} ${ci_i386} ./tests/scripts/task_config_build_i386.sh build"
 //       make(ci_i386, 'build', '-j2')
 //       pack_lib('i386', tvm_multilib_tsim)
 //     }
@@ -491,7 +361,7 @@ stage('Build and Test') {
 //   node('ARM') {
 //     ws(per_exec_ws('tvm/build-arm')) {
 //       init_git()
-//       sh "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh"
+//       sh "${docker_run} ${ci_arm} ./tests/scripts/task_config_build_arm.sh build"
 //       make(ci_arm, 'build', '-j4')
 //       pack_lib('arm', tvm_multilib)
 //     }
@@ -505,7 +375,7 @@ stage('Build and Test') {
 //   node('CPU') {
 //     ws(per_exec_ws('tvm/build-qemu')) {
 //       init_git()
-//       sh "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh"
+//       sh "${docker_run} ${ci_qemu} ./tests/scripts/task_config_build_qemu.sh build"
 //       make(ci_qemu, 'build', '-j2')
 //       timeout(time: max_time, unit: 'MINUTES') {
 //         sh "${docker_run} ${ci_qemu} ./tests/scripts/task_ci_setup.sh"
@@ -708,4 +578,4 @@ stage('Build and Test') {
 // }
 // }
 // }
-// }
+// }
\ No newline at end of file
diff --git a/python/tvm/meta_schedule/integration.py b/python/tvm/meta_schedule/integration.py
index 8e0167457e..3b29eefc95 100644
--- a/python/tvm/meta_schedule/integration.py
+++ b/python/tvm/meta_schedule/integration.py
@@ -26,7 +26,6 @@ from tvm.relay import Function as RelayFunc
 from tvm.runtime import NDArray, Object
 from tvm.target import Target
-from tvm.tir import PrimFunc
 from tvm.relax.expr import Function as RelaxFunc
 from tvm.relax.utils import tir_partitioner
 
diff --git a/src/relax/backend/vm/vm_shape_lower.cc b/src/relax/backend/vm/vm_shape_lower.cc
index 20ad17cad5..13b65f657e 100644
--- a/src/relax/backend/vm/vm_shape_lower.cc
+++ b/src/relax/backend/vm/vm_shape_lower.cc
@@ -135,7 +135,7 @@ class VMShapeLowerMutator : public ExprMutator {
       Map<tir::Var, PrimExpr> var_mapping = BuildVarMapping(e, buffer);
       PrimExpr value = tir::Substitute(e, var_mapping);
       int idx = expr2slot_.at(e);
-      seq.push_back(tir::Store(buffer->data, value, idx, tir::const_true()));
+      seq.push_back(tir::BufferStore(buffer, value, {idx}));
     }
     tir::Stmt body = tir::SeqStmt(seq);
     Array<tir::Var> params{heap};
@@ -149,7 +149,7 @@ class VMShapeLowerMutator : public ExprMutator {
     auto func = [&](const ObjectRef& e) {
       if (e->IsInstance<PrimExprNode>()) {
         PrimExpr prim_e = Downcast<PrimExpr>(e);
-        tir::Load load(ShapeDType(), buffer->data, expr2slot_.at(prim_e), tir::const_true());
+        tir::BufferLoad load(buffer, {expr2slot_.at(prim_e)});
         ret.Set(Downcast<PrimExpr>(e), load);
       }
     };
diff --git a/src/relay/backend/task_extraction.cc b/src/relay/backend/task_extraction.cc
index 898e76b81b..ae2615eec1 100644
--- a/src/relay/backend/task_extraction.cc
+++ b/src/relay/backend/task_extraction.cc
@@ -60,7 +60,7 @@ Array<ExtractedTask> ExtractTask(IRModule mod, Target target,
     std::string fused_name;
     std::tie(inputs_outputs, fused_name) =
         tec::LowerTECompute(relay_func, target, /*return_inputs=*/true);
-    auto prim_func = tir::CreatePrimFunc(inputs_outputs);
+    auto prim_func = tir::CreatePrimFunc(inputs_outputs, {});
     GlobalVar prim_fn_var(fused_name);
     IRModule relay_mod({{prim_fn_var, relay_func}});
     IRModule tir_mod({{prim_fn_var, prim_func}});
diff --git a/src/relay/backend/te_compiler_cache.cc b/src/relay/backend/te_compiler_cache.cc
index 3534697bec..1736c193bc 100644
--- a/src/relay/backend/te_compiler_cache.cc
+++ b/src/relay/backend/te_compiler_cache.cc
@@ -338,7 +338,7 @@ class ScheduleBuilder : public ExprVisitor {
     }
     if (backend::IsMetaScheduleEnabled()) {
       IRModule relay_mod({{prim_fn_var, relay_func}});
-      IRModule tir_mod({{prim_fn_var, tir::CreatePrimFunc(Concat(fn_inputs, tensor_outs))}});
+      IRModule tir_mod({{prim_fn_var, tir::CreatePrimFunc(Concat(fn_inputs, tensor_outs), {})}});
       Optional<IRModule> scheduled_mod = meta_schedule::MetaScheduleContext::QueryInsideWithScope(
           prim_fn_var->name_hint, relay_mod, target_, Array<IRModule>{tir_mod});
       if (scheduled_mod) {
diff --git a/src/te/operation/create_primfunc.h b/src/te/operation/create_primfunc.h
index d911e5ebcd..f9434f8229 100644
--- a/src/te/operation/create_primfunc.h
+++ b/src/te/operation/create_primfunc.h
@@ -28,7 +28,8 @@ namespace tvm {
 namespace tir {
 
 /*! \brief Use Tensor Expression to create a schedulable TensorIR func. */
-PrimFunc CreatePrimFunc(const Array<te::Tensor>& arg_list);
+PrimFunc CreatePrimFunc(const Array<te::Tensor>& arg_list,
+                        const Optional<Array<tir::Var>> tir_var_list);
 
 /*! \brief Create a schedulable TensorIR func from TE compute outputs. */
 PrimFunc CreatePrimFuncFromOutputs(const Array<te::Tensor>& outputs);
diff --git a/tests/python/relax/test_autotir_integration.py b/tests/python/relax/test_autotir_integration.py
index c75f126d66..a18df68a8d 100644
--- a/tests/python/relax/test_autotir_integration.py
+++ b/tests/python/relax/test_autotir_integration.py
@@ -27,6 +27,7 @@ from tvm.meta_schedule import ReplayTraceConfig, tune_tir
 from tvm.meta_schedule.database import PyDatabase, Workload, TuningRecord
 from tvm.meta_schedule.integration import extract_task_from_relax
+from tvm.meta_schedule.utils import derived_object
 from tvm import transform
 import time
 import pytest
 
@@ -78,6 +79,7 @@ def main(x:Tensor[(m,n), "float32"], w:Tensor[(n,k), "float32"]) -> Tensor:
     """
 
 
+@derived_object
 class DummyDatabase(PyDatabase):
     def __init__(self):
         super().__init__()
diff --git a/tests/python/relax/test_vm.py b/tests/python/relax/test_vm.py
index 4b688bd2a7..f4ea70c074 100644
--- a/tests/python/relax/test_vm.py
+++ b/tests/python/relax/test_vm.py
@@ -377,8 +377,8 @@ def shape_func0(heap: T.handle) -> None:
             offset_factor=1,
         )
         # body
-        T.store(H.data, T.int64(2), (T.load("int64", H.data, T.int64(0)) * T.int64(2)), True)
-        T.store(H.data, T.int64(3), (T.load("int64", H.data, T.int64(1)) * T.int64(3)), True)
+        H[2] = H[0] * T.int64(2)
+        H[3] = H[1] * T.int64(3)
 
     @R.function
     def foo(x: Tensor[_, "float32"]) -> Shape: