This repository has been archived by the owner on May 22, 2023. It is now read-only.

Commit 4a05295: Fix after rebase
yongwww committed Mar 23, 2022 (1 parent: 826225c)
Showing 8 changed files with 16 additions and 144 deletions.
142 changes: 6 additions & 136 deletions Jenkinsfile
@@ -83,10 +83,8 @@ tvm_multilib_tsim = 'build/libvta_tsim.so, ' +

// command to start a docker container
docker_run = 'docker/bash.sh'
docker_build = 'docker/build.sh'
// timeout in minutes
max_time = 240
rebuild_docker_images = false

def per_exec_ws(folder) {
return "workspace/exec_${env.EXECUTOR_NUMBER}/" + folder
@@ -134,18 +132,6 @@ def cancel_previous_build() {
}

def should_skip_ci(pr_number) {
if (env.BRANCH_NAME == null || !env.BRANCH_NAME.startsWith('PR-')) {
// never skip CI on build sourced from a branch
return false
}
glob_skip_ci_code = sh (
returnStatus: true,
script: "./tests/scripts/git_skip_ci_globs.py",
label: 'Check if CI should be skipped due to changed files',
)
if (glob_skip_ci_code == 0) {
return true
}
withCredentials([string(
credentialsId: 'tvm-bot-jenkins-reader',
variable: 'TOKEN',
@@ -157,18 +143,10 @@ def should_skip_ci(pr_number) {
script: "./tests/scripts/git_skip_ci.py --pr '${pr_number}'",
label: 'Check if CI should be skipped',
)
}
}
return git_skip_ci_code == 0
}

// skips builds from branch indexing; sourced from https://www.jvt.me/posts/2020/02/23/jenkins-multibranch-skip-branch-index/
// execute this before anything else, including requesting any time on an agent
if (currentBuild.getBuildCauses().toString().contains('BranchIndexingCause')) {
print "INFO: Build skipped due to trigger being Branch Indexing"
currentBuild.result = 'ABORTED' // optional, gives a better hint to the user that it's been skipped, rather than the default which shows it's successful
return
}

cancel_previous_build()

stage('Prepare') {
@@ -207,16 +185,6 @@ stage('Sanity Check') {
)
skip_ci = should_skip_ci(env.CHANGE_ID)
skip_slow_tests = should_skip_slow_tests(env.CHANGE_ID)
rebuild_docker_images = sh (
returnStatus: true,
script: './tests/scripts/git_change_docker.sh',
label: 'Check for any docker changes',
)
if (rebuild_docker_images) {
// Exit before linting so we can use the newly created Docker images
// to run the lint
return
}
sh (
script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh",
label: 'Run lint',
@@ -226,105 +194,6 @@
}
}

def build_image(image_name) {
hash = sh(
returnStdout: true,
script: 'git log -1 --format=\'%h\''
).trim()
def full_name = "${image_name}:${env.BRANCH_NAME}-${hash}"
sh(
script: "${docker_build} ${image_name} --spec ${full_name}",
label: 'Building docker image'
)
sh(
script: "docker rmi ${full_name}",
label: 'Removing docker image'
)
sh "echo NYI: Uploading docker image to registry..."
}

if (rebuild_docker_images) {
stage('Docker Image Build') {
// TODO in a follow up PR: Upload to ECR, find tag and use in
// subsequent builds
parallel 'ci-lint': {
node('CPU') {
timeout(time: max_time, unit: 'MINUTES') {
init_git()
build_image('ci_lint')
}
}
}, 'ci-cpu': {
node('CPU') {
timeout(time: max_time, unit: 'MINUTES') {
init_git()
build_image('ci_cpu')
}
}
}, 'ci-gpu': {
node('GPU') {
timeout(time: max_time, unit: 'MINUTES') {
init_git()
build_image('ci_gpu')
}
}
}, 'ci-qemu': {
node('CPU') {
timeout(time: max_time, unit: 'MINUTES') {
init_git()
build_image('ci_qemu')
}
}
}, 'ci-i386': {
node('CPU') {
timeout(time: max_time, unit: 'MINUTES') {
init_git()
build_image('ci_i386')
}
}
}, 'ci-arm': {
node('ARM') {
timeout(time: max_time, unit: 'MINUTES') {
init_git()
build_image('ci_arm')
}
}
}, 'ci-wasm': {
node('CPU') {
timeout(time: max_time, unit: 'MINUTES') {
init_git()
build_image('ci_wasm')
}
}
}, 'ci-hexagon': {
node('CPU') {
timeout(time: max_time, unit: 'MINUTES') {
init_git()
build_image('ci_hexagon')
}
}
}
}
// // TODO: Once we are able to use the built images, enable this step
// // If the docker images changed, we need to run the image build before the lint
// // can run since it requires a base docker image. Most of the time the images
// // aren't build though so it's faster to use the same node that checks for
// // docker changes to run the lint in the usual case.
// stage('Sanity Check (re-run)') {
// timeout(time: max_time, unit: 'MINUTES') {
// node('CPU') {
// ws(per_exec_ws('tvm/sanity')) {
// init_git()
// sh (
// script: "${docker_run} ${ci_lint} ./tests/scripts/task_lint.sh",
// label: 'Run lint',
// )
// }
// }
// }
// }
}

// Run make. First try to do an incremental make from a previous workspace in hope to
// accelerate the compilation. If something is wrong, clean the workspace and then
// build from scratch.
@@ -345,6 +214,7 @@ def make(docker_type, path, make_flag) {
label: 'Clear old cmake workspace',
)
cmake_build(docker_type, path, make_flag)
cpp_unittest(docker_type)
}
}
}
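The comment above this hunk explains the build strategy: try an incremental make in the previous workspace to speed up compilation, and fall back to clearing the workspace and rebuilding from scratch if that fails (this hunk also adds a cpp_unittest run directly after cmake_build). A minimal Python sketch of that fallback pattern, assuming a generic CMake build directory; the function name and commands are illustrative, not the actual Jenkins step:

    import subprocess

    def build_with_fallback(build_dir: str, jobs: int = 4) -> None:
        # Try an incremental build first to reuse results from a previous workspace.
        cmd = ["cmake", "--build", build_dir, "--", f"-j{jobs}"]
        try:
            subprocess.run(cmd, check=True)
        except subprocess.CalledProcessError:
            # Something is wrong: clean the workspace and build from scratch.
            subprocess.run(["cmake", "--build", build_dir, "--target", "clean"], check=True)
            subprocess.run(cmd, check=True)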
@@ -384,20 +254,20 @@ def python_unittest(image) {
def fsim_test(image) {
sh (
script: "${docker_run} ${image} ./tests/scripts/task_python_vta_fsim.sh",
label: 'Run VTA tests in FSIM',
label: 'Run VTA tests in FSIM ',
)
}

def cmake_build(image, path, make_flag) {
sh (
script: "${docker_run} ${image} ./tests/scripts/task_build.py --sccache-bucket tvm-sccache-prod",
script: "${docker_run} ${image} ./tests/scripts/task_build.sh ${path} ${make_flag}",
label: 'Run cmake build',
)
}

def cpp_unittest(image) {
sh (
script: "${docker_run} --env CI_NUM_EXECUTORS ${image} ./tests/scripts/task_cpp_unittest.sh",
script: "${docker_run} ${image} ./tests/scripts/task_cpp_unittest.sh",
label: 'Build and run C++ tests',
)
}
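Each of these helpers follows the same convention: docker_run (docker/bash.sh) starts the named CI image and executes a single script from tests/scripts inside it. A rough Python equivalent of that pattern, assuming it is invoked from the repository root; the wrapper function and the example arguments are illustrative only:

    import subprocess

    def docker_run(image: str, script: str, *args: str) -> None:
        # docker/bash.sh mounts the workspace and runs the command inside `image`.
        subprocess.run(["docker/bash.sh", image, script, *args], check=True)

    # The reverted cmake_build step corresponds roughly to:
    #   docker_run("ci_cpu", "./tests/scripts/task_build.sh", "build", "-j2")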
@@ -708,4 +578,4 @@ stage('Build and Test') {
// }
// }
// }
// }
// }
1 change: 0 additions & 1 deletion python/tvm/meta_schedule/integration.py
@@ -26,7 +26,6 @@
from tvm.relay import Function as RelayFunc
from tvm.runtime import NDArray, Object
from tvm.target import Target
from tvm.tir import PrimFunc
from tvm.relax.expr import Function as RelaxFunc
from tvm.relax.utils import tir_partitioner

4 changes: 2 additions & 2 deletions src/relax/backend/vm/vm_shape_lower.cc
@@ -135,7 +135,7 @@ class VMShapeLowerMutator : public ExprMutator {
Map<tir::Var, PrimExpr> var_mapping = BuildVarMapping(e, buffer);
PrimExpr value = tir::Substitute(e, var_mapping);
int idx = expr2slot_.at(e);
seq.push_back(tir::Store(buffer->data, value, idx, tir::const_true()));
seq.push_back(tir::BufferStore(buffer, value, {idx}));
}
tir::Stmt body = tir::SeqStmt(seq);
Array<tir::Var> params{heap};
@@ -149,7 +149,7 @@
auto func = [&](const ObjectRef& e) {
if (e->IsInstance<tir::VarNode>()) {
PrimExpr prim_e = Downcast<PrimExpr>(e);
tir::Load load(ShapeDType(), buffer->data, expr2slot_.at(prim_e), tir::const_true());
tir::BufferLoad load(buffer, {expr2slot_.at(prim_e)});
ret.Set(Downcast<tir::Var>(e), load);
}
};
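The two hunks above are part of the TIR refactor that replaces the flat tir::Store/tir::Load nodes with the buffer-aware tir::BufferStore/tir::BufferLoad. A small Python-side sketch of the same node constructors, using an illustrative int64 heap buffer rather than the actual lowering pass:

    from tvm import tir

    heap = tir.decl_buffer((8,), "int64", name="H")  # stand-in for the shape heap
    value = tir.BufferLoad(heap, [0]) * 2            # read slot 0 (was tir.Load)
    store = tir.BufferStore(heap, value, [2])        # write slot 2 (was tir.Store)
    print(store)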
2 changes: 1 addition & 1 deletion src/relay/backend/task_extraction.cc
@@ -60,7 +60,7 @@ Array<ExtractedTask> ExtractTask(IRModule mod, Target target,
std::string fused_name;
std::tie(inputs_outputs, fused_name) =
tec::LowerTECompute(relay_func, target, /*return_inputs=*/true);
auto prim_func = tir::CreatePrimFunc(inputs_outputs);
auto prim_func = tir::CreatePrimFunc(inputs_outputs, {});
GlobalVar prim_fn_var(fused_name);
IRModule relay_mod({{prim_fn_var, relay_func}});
IRModule tir_mod({{prim_fn_var, prim_func}});
2 changes: 1 addition & 1 deletion src/relay/backend/te_compiler_cache.cc
@@ -338,7 +338,7 @@ class ScheduleBuilder : public ExprVisitor {
}
if (backend::IsMetaScheduleEnabled()) {
IRModule relay_mod({{prim_fn_var, relay_func}});
IRModule tir_mod({{prim_fn_var, tir::CreatePrimFunc(Concat(fn_inputs, tensor_outs))}});
IRModule tir_mod({{prim_fn_var, tir::CreatePrimFunc(Concat(fn_inputs, tensor_outs), {})}});
Optional<IRModule> scheduled_mod = meta_schedule::MetaScheduleContext::QueryInsideWithScope(
prim_fn_var->name_hint, relay_mod, target_, Array<IRModule>{tir_mod});
if (scheduled_mod) {
3 changes: 2 additions & 1 deletion src/te/operation/create_primfunc.h
@@ -28,7 +28,8 @@ namespace tvm {
namespace tir {

/*! \brief Use Tensor Expression to create a schedulable TensorIR func. */
PrimFunc CreatePrimFunc(const Array<te::Tensor>& arg_list);
PrimFunc CreatePrimFunc(const Array<te::Tensor>& arg_list,
const Optional<Array<tir::Var>> tir_var_list);

/*! \brief Create a schedulable TensorIR func from TE compute outputs. */
PrimFunc CreatePrimFuncFromOutputs(const Array<te::Tensor>& outputs);
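In this branch CreatePrimFunc takes an additional optional list of TIR variables, passed as {} at the two call sites above. For reference, the standard Python entry point with plain TE tensors looks like the sketch below; how (or whether) the extra tir_var_list is exposed in Python is not shown in this diff:

    from tvm import te

    n = 128
    A = te.placeholder((n,), name="A")
    B = te.compute((n,), lambda i: A[i] * 2.0, name="B")
    prim_func = te.create_prim_func([A, B])  # C++ counterpart: tir::CreatePrimFunc(arg_list, ...)
    print(prim_func.script())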
2 changes: 2 additions & 0 deletions tests/python/relax/test_autotir_integration.py
@@ -27,6 +27,7 @@
from tvm.meta_schedule import ReplayTraceConfig, tune_tir
from tvm.meta_schedule.database import PyDatabase, Workload, TuningRecord
from tvm.meta_schedule.integration import extract_task_from_relax
from tvm.meta_schedule.utils import derived_object
from tvm import transform
import time
import pytest
@@ -78,6 +79,7 @@ def main(x:Tensor[(m,n), "float32"], w:Tensor[(n,k), "float32"]) -> Tensor:
"""


@derived_object
class DummyDatabase(PyDatabase):
def __init__(self):
super().__init__()
4 changes: 2 additions & 2 deletions tests/python/relax/test_vm.py
@@ -377,8 +377,8 @@ def shape_func0(heap: T.handle) -> None:
offset_factor=1,
)
# body
T.store(H.data, T.int64(2), (T.load("int64", H.data, T.int64(0)) * T.int64(2)), True)
T.store(H.data, T.int64(3), (T.load("int64", H.data, T.int64(1)) * T.int64(3)), True)
H[2] = H[0] * T.int64(2)
H[3] = H[1] * T.int64(3)

@R.function
def foo(x: Tensor[_, "float32"]) -> Shape:
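The rewritten shape function above uses the newer TVMScript buffer-subscript form in place of T.store/T.load. A standalone toy version of that style, with an illustrative four-slot int64 heap rather than the generated shape_func0:

    from tvm.script import tir as T

    @T.prim_func
    def toy_shape_func(heap: T.handle) -> None:
        # Bind the opaque handle to a small int64 buffer, then write with subscripts.
        H = T.match_buffer(heap, (4,), dtype="int64")
        H[2] = H[0] * T.int64(2)
        H[3] = H[1] * T.int64(3)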
