Commit e3eb6be

[llvm] [refactor] Remove the use of vector with size=1 (#6002)
Related issue = #5511 

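In short: several code paths used to pass around a `std::vector<LLVMCompiledData>` that was guaranteed to hold exactly one element (guarded by `TI_ASSERT(res.size() == 1)`). This PR passes a single `LLVMCompiledData` instead, and the bool-plus-output-parameter cache lookup becomes a `std::optional<LLVMCompiledData>` return. A minimal standalone sketch of that pattern (`CompiledData` and `read_cache` below are simplified stand-ins, not the real Taichi API):

// Sketch only: CompiledData and read_cache are hypothetical stand-ins for
// LLVMCompiledData and maybe_read_compilation_from_cache.
#include <optional>
#include <string>
#include <utility>

struct CompiledData {
  std::string module_ir;  // stands in for the move-only llvm::Module handle
};

// Before: `bool f(const Key &, std::vector<CompiledData> &out)` filling a
// one-element vector. After: presence is encoded in the return type.
std::optional<CompiledData> read_cache(const std::string &key) {
  if (key.empty()) {
    return std::nullopt;  // cache miss
  }
  return CompiledData{"ir-for-" + key};  // cache hit
}

int main() {
  if (auto data = read_cache("kernel_0")) {
    CompiledData d = std::move(*data);  // take the payload, no copy needed
  }
  return 0;
}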

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
lin-hitonami and pre-commit-ci[bot] authored Sep 8, 2022
1 parent 23d9b62 commit e3eb6be
Showing 23 changed files with 128 additions and 177 deletions.
38 changes: 16 additions & 22 deletions taichi/codegen/codegen.cpp
@@ -57,51 +57,48 @@ std::unique_ptr<KernelCodeGen> KernelCodeGen::create(Arch arch,
 }
 #ifdef TI_WITH_LLVM

-bool KernelCodeGen::maybe_read_compilation_from_cache(
-    const std::string &kernel_key,
-    std::vector<LLVMCompiledData> &data) {
+std::optional<LLVMCompiledData>
+KernelCodeGen::maybe_read_compilation_from_cache(
+    const std::string &kernel_key) {
   TI_AUTO_PROF;
   const auto &config = prog->config;
   auto *llvm_prog = get_llvm_program(prog);
   const auto &reader = llvm_prog->get_cache_reader();
   if (!reader) {
-    return false;
+    return std::nullopt;
   }

   LlvmOfflineCache::KernelCacheData cache_data;
   auto *tlctx = llvm_prog->get_llvm_context(config.arch);
   auto &llvm_ctx = *tlctx->get_this_thread_context();

   if (!reader->get_kernel_cache(cache_data, kernel_key, llvm_ctx)) {
-    return false;
+    return std::nullopt;
   }
-  data.swap(cache_data.compiled_data_list);
   kernel->mark_as_from_cache();
-  return true;
+  return {std::move(cache_data.compiled_data)};
 }

 void KernelCodeGen::cache_module(const std::string &kernel_key,
-                                 const std::vector<LLVMCompiledData> &data) {
+                                 const LLVMCompiledData &data) {
   get_llvm_program(prog)->cache_kernel(kernel_key, data,
                                        infer_launch_args(kernel));
 }

-std::vector<LLVMCompiledData> KernelCodeGen::compile_kernel_to_module() {
+LLVMCompiledData KernelCodeGen::compile_kernel_to_module() {
   auto *llvm_prog = get_llvm_program(prog);
   auto *tlctx = llvm_prog->get_llvm_context(kernel->arch);
   auto &config = prog->config;
   std::string kernel_key = get_hashed_offline_cache_key(&config, kernel);
   kernel->set_kernel_key_for_cache(kernel_key);
   if (config.offline_cache && this->supports_offline_cache() &&
       !kernel->is_evaluator) {
-    std::vector<LLVMCompiledData> res;
-    const bool ok = maybe_read_compilation_from_cache(kernel_key, res);
-    if (ok) {
+    auto res = maybe_read_compilation_from_cache(kernel_key);
+    if (res) {
       TI_DEBUG("Create kernel '{}' from cache (key='{}')", kernel->get_name(),
                kernel_key);
-      cache_module(kernel_key, res);
-      TI_ASSERT(res.size() == 1);
-      return res;
+      cache_module(kernel_key, *res);
+      return std::move(*res);
     }
   }
   if (!kernel->lowered()) {
@@ -135,14 +132,12 @@ std::vector<LLVMCompiledData> KernelCodeGen::compile_kernel_to_module() {
     worker.flush();
   }
   auto linked = tlctx->link_compiled_tasks(std::move(data));
-  std::vector<LLVMCompiledData> linked_data;
-  linked_data.push_back(std::move(*linked));

   if (!kernel->is_evaluator) {
     TI_DEBUG("Cache kernel '{}' (key='{}')", kernel->get_name(), kernel_key);
-    cache_module(kernel_key, linked_data);
+    cache_module(kernel_key, linked);
   }
-  return linked_data;
+  return linked;
 }

 ModuleToFunctionConverter::ModuleToFunctionConverter(
@@ -151,9 +146,8 @@ ModuleToFunctionConverter::ModuleToFunctionConverter(
     : tlctx_(tlctx), executor_(executor) {
 }

-FunctionType ModuleToFunctionConverter::convert(
-    const Kernel *kernel,
-    std::vector<LLVMCompiledData> &&data) const {
+FunctionType ModuleToFunctionConverter::convert(const Kernel *kernel,
+                                                LLVMCompiledData data) const {
   return convert(kernel->name, infer_launch_args(kernel), std::move(data));
 }
16 changes: 7 additions & 9 deletions taichi/codegen/codegen.h
@@ -33,18 +33,16 @@ class KernelCodeGen {
   }

 #ifdef TI_WITH_LLVM
-  virtual std::vector<LLVMCompiledData> compile_kernel_to_module();
+  virtual LLVMCompiledData compile_kernel_to_module();

   virtual LLVMCompiledData compile_task(
       std::unique_ptr<llvm::Module> &&module = nullptr,
-      OffloadedStmt *stmt = nullptr) {
-    TI_NOT_IMPLEMENTED
-  }
+      OffloadedStmt *stmt = nullptr){TI_NOT_IMPLEMENTED}

-  bool maybe_read_compilation_from_cache(const std::string &kernel_key,
-                                         std::vector<LLVMCompiledData> &data);
+  std::optional<LLVMCompiledData> maybe_read_compilation_from_cache(
+      const std::string &kernel_key);
   void cache_module(const std::string &kernel_key,
-                    const std::vector<LLVMCompiledData> &data);
+                    const LLVMCompiledData &data);
 #endif
 };

@@ -59,10 +57,10 @@ class ModuleToFunctionConverter {

   virtual FunctionType convert(const std::string &kernel_name,
                                const std::vector<LlvmLaunchArgInfo> &args,
-                               std::vector<LLVMCompiledData> &&data) const = 0;
+                               LLVMCompiledData data) const = 0;

   virtual FunctionType convert(const Kernel *kernel,
-                               std::vector<LLVMCompiledData> &&data) const;
+                               LLVMCompiledData data) const;

  protected:
   TaichiLLVMContext *tlctx_{nullptr};
20 changes: 9 additions & 11 deletions taichi/codegen/cpu/codegen_cpu.cpp
@@ -234,19 +234,17 @@ std::unique_ptr<TaskCodeGenLLVM> KernelCodeGenCPU::make_codegen_llvm(
 FunctionType CPUModuleToFunctionConverter::convert(
     const std::string &kernel_name,
     const std::vector<LlvmLaunchArgInfo> &args,
-    std::vector<LLVMCompiledData> &&data) const {
+    LLVMCompiledData data) const {
   TI_AUTO_PROF;
-  auto jit_module = tlctx_->create_jit_module(std::move(data.back().module));
+  auto jit_module = tlctx_->create_jit_module(std::move(data.module));
   using TaskFunc = int32 (*)(void *);
   std::vector<TaskFunc> task_funcs;
-  task_funcs.reserve(data.size());
-  for (auto &datum : data) {
-    for (auto &task : datum.tasks) {
-      auto *func_ptr = jit_module->lookup_function(task.name);
-      TI_ASSERT_INFO(func_ptr, "Offloaded datum function {} not found",
-                     task.name);
-      task_funcs.push_back((TaskFunc)(func_ptr));
-    }
+  task_funcs.reserve(data.tasks.size());
+  for (auto &task : data.tasks) {
+    auto *func_ptr = jit_module->lookup_function(task.name);
+    TI_ASSERT_INFO(func_ptr, "Offloaded datum function {} not found",
+                   task.name);
+    task_funcs.push_back((TaskFunc)(func_ptr));
   }
   // Do NOT capture `this`...
   return [executor = this->executor_, args, kernel_name,
@@ -286,7 +284,7 @@ FunctionType KernelCodeGenCPU::compile_to_function() {
   auto *llvm_prog = get_llvm_program(prog);
   auto *tlctx = llvm_prog->get_llvm_context(kernel->arch);

-  std::vector<LLVMCompiledData> data = compile_kernel_to_module();
+  LLVMCompiledData data = compile_kernel_to_module();

   CPUModuleToFunctionConverter converter(
       tlctx, get_llvm_program(prog)->get_runtime_executor());
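Note that `convert()` now takes `LLVMCompiledData` by value instead of `std::vector<LLVMCompiledData> &&`, so the callee owns the data outright and call sites hand it over with `std::move`. A minimal sketch of this sink-parameter style (`Data` and `consume` are illustrative names, not Taichi code):

#include <memory>
#include <utility>

struct Data {
  std::unique_ptr<int> module;  // move-only, so Data cannot be copied
};

// By-value "sink" parameter: ownership transfers into the function,
// which is clearer at call sites than an rvalue-reference parameter.
void consume(Data data) {
  auto local_module = std::move(data.module);  // use the owned resource
}

int main() {
  Data d{std::make_unique<int>(7)};
  consume(std::move(d));  // must move; copying would not compile
  return 0;
}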
2 changes: 1 addition & 1 deletion taichi/codegen/cpu/codegen_cpu.h
@@ -45,7 +45,7 @@ class CPUModuleToFunctionConverter : public ModuleToFunctionConverter {

   FunctionType convert(const std::string &kernel_name,
                        const std::vector<LlvmLaunchArgInfo> &args,
-                       std::vector<LLVMCompiledData> &&data) const override;
+                       LLVMCompiledData data) const override;
 };

 #endif
8 changes: 4 additions & 4 deletions taichi/codegen/cuda/codegen_cuda.cpp
@@ -704,7 +704,7 @@ FunctionType KernelCodeGenCUDA::compile_to_function() {
   auto *llvm_prog = get_llvm_program(prog);
   auto *tlctx = llvm_prog->get_llvm_context(kernel->arch);

-  std::vector<LLVMCompiledData> data = compile_kernel_to_module();
+  LLVMCompiledData data = compile_kernel_to_module();
   CUDAModuleToFunctionConverter converter{tlctx,
                                           llvm_prog->get_runtime_executor()};

@@ -714,9 +714,9 @@ FunctionType KernelCodeGenCUDA::compile_to_function() {
 FunctionType CUDAModuleToFunctionConverter::convert(
     const std::string &kernel_name,
     const std::vector<LlvmLaunchArgInfo> &args,
-    std::vector<LLVMCompiledData> &&data) const {
-  auto &mod = data[0].module;
-  auto &tasks = data[0].tasks;
+    LLVMCompiledData data) const {
+  auto &mod = data.module;
+  auto &tasks = data.tasks;
 #ifdef TI_WITH_CUDA
   for (const auto &task : tasks) {
     llvm::Function *func = mod->getFunction(task.name);
2 changes: 1 addition & 1 deletion taichi/codegen/cuda/codegen_cuda.h
@@ -39,7 +39,7 @@ class CUDAModuleToFunctionConverter : public ModuleToFunctionConverter {

   FunctionType convert(const std::string &kernel_name,
                        const std::vector<LlvmLaunchArgInfo> &args,
-                       std::vector<LLVMCompiledData> &&data) const override;
+                       LLVMCompiledData data) const override;
 };

 TLANG_NAMESPACE_END
1 change: 1 addition & 0 deletions taichi/codegen/llvm/llvm_compiled_data.h
@@ -28,6 +28,7 @@ struct LLVMCompiledData {
   std::unordered_set<int> struct_for_tls_sizes;
   LLVMCompiledData() = default;
   LLVMCompiledData(LLVMCompiledData &&) = default;
+  LLVMCompiledData &operator=(LLVMCompiledData &&) = default;
   LLVMCompiledData(std::vector<OffloadedTask> tasks,
                    std::unique_ptr<llvm::Module> module,
                    std::unordered_set<int> used_tree_ids,
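The added `operator=` matters because `LLVMCompiledData` owns a `std::unique_ptr<llvm::Module>`, and its user-declared move constructor suppresses the implicitly-declared move assignment operator; this PR now move-assigns the struct (see `kcache.compiled_data = std::move(compiled);` in llvm_aot_module_builder.cpp below), so the operator has to be brought back explicitly. A toy illustration (`Payload` is a hypothetical stand-in):

#include <memory>
#include <utility>

struct Payload {
  std::unique_ptr<int> p;  // move-only member
  Payload() = default;
  Payload(Payload &&) = default;  // user-declared move ctor suppresses the
                                  // implicit move assignment operator
  Payload &operator=(Payload &&) = default;  // so default it explicitly
};

int main() {
  Payload a;
  Payload b;
  b.p = std::make_unique<int>(42);
  a = std::move(b);  // fails to compile without the defaulted operator=
  return 0;
}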
9 changes: 3 additions & 6 deletions taichi/codegen/wasm/codegen_wasm.cpp
@@ -244,7 +244,7 @@ class TaskCodeGenWASM : public TaskCodeGenLLVM {

 FunctionType KernelCodeGenWASM::compile_to_function() {
   TI_AUTO_PROF
-  auto linked = std::move(compile_kernel_to_module()[0]);
+  auto linked = compile_kernel_to_module();
   auto *tlctx = get_llvm_program(prog)->get_llvm_context(kernel->arch);
   tlctx->create_jit_module(std::move(linked.module));
   auto kernel_symbol = tlctx->lookup_function_pointer(linked.tasks[0].name);
@@ -281,18 +281,15 @@ LLVMCompiledData KernelCodeGenWASM::compile_task(
   return {name_list, std::move(gen->module), {}, {}};
 }

-std::vector<LLVMCompiledData> KernelCodeGenWASM::compile_kernel_to_module() {
+LLVMCompiledData KernelCodeGenWASM::compile_kernel_to_module() {
   auto *tlctx = get_llvm_program(prog)->get_llvm_context(kernel->arch);
   if (!kernel->lowered()) {
     kernel->lower(/*to_executable=*/false);
   }
   auto res = compile_task();
   std::vector<std::unique_ptr<LLVMCompiledData>> data;
   data.push_back(std::make_unique<LLVMCompiledData>(std::move(res)));
-  auto linked = tlctx->link_compiled_tasks(std::move(data));
-  std::vector<LLVMCompiledData> ret;
-  ret.push_back(std::move(*linked));
-  return ret;
+  return tlctx->link_compiled_tasks(std::move(data));
 }

 }  // namespace lang
2 changes: 1 addition & 1 deletion taichi/codegen/wasm/codegen_wasm.h
@@ -24,7 +24,7 @@ class KernelCodeGenWASM : public KernelCodeGen {
       std::unique_ptr<llvm::Module> &&module = nullptr,
       OffloadedStmt *stmt = nullptr) override;  // AOT Module Gen

-  std::vector<LLVMCompiledData> compile_kernel_to_module() override;
+  LLVMCompiledData compile_kernel_to_module() override;
 #endif
 };
2 changes: 1 addition & 1 deletion taichi/runtime/cpu/aot_module_builder_impl.cpp
@@ -11,7 +11,7 @@ namespace cpu {

 LLVMCompiledData AotModuleBuilderImpl::compile_kernel(Kernel *kernel) {
   auto cgen = KernelCodeGenCPU(kernel);
-  return std::move(cgen.compile_kernel_to_module()[0]);
+  return cgen.compile_kernel_to_module();
 }

 }  // namespace cpu
2 changes: 1 addition & 1 deletion taichi/runtime/cpu/aot_module_loader_impl.cpp
@@ -25,7 +25,7 @@ class AotModuleImpl : public LlvmAotModule {

     CPUModuleToFunctionConverter converter{tlctx, executor_};
     return converter.convert(name, loaded.args,
-                             std::move(loaded.compiled_data_list));
+                             std::move(loaded.compiled_data));
   }

   std::unique_ptr<aot::KernelTemplate> make_new_kernel_template(
2 changes: 1 addition & 1 deletion taichi/runtime/cuda/aot_module_builder_impl.cpp
@@ -11,7 +11,7 @@ namespace cuda {

 LLVMCompiledData AotModuleBuilderImpl::compile_kernel(Kernel *kernel) {
   auto cgen = KernelCodeGenCUDA(kernel);
-  return std::move(cgen.compile_kernel_to_module()[0]);
+  return cgen.compile_kernel_to_module();
 }

 }  // namespace cuda
2 changes: 1 addition & 1 deletion taichi/runtime/cuda/aot_module_loader_impl.cpp
@@ -25,7 +25,7 @@ class AotModuleImpl : public LlvmAotModule {

     CUDAModuleToFunctionConverter converter{tlctx, executor_};
     return converter.convert(name, loaded.args,
-                             std::move(loaded.compiled_data_list));
+                             std::move(loaded.compiled_data));
   }

   std::unique_ptr<aot::KernelTemplate> make_new_kernel_template(
2 changes: 1 addition & 1 deletion taichi/runtime/llvm/llvm_aot_module_builder.cpp
@@ -22,7 +22,7 @@ void LlvmAotModuleBuilder::add_per_backend(const std::string &identifier,
   auto compiled = compile_kernel(kernel);
   LlvmOfflineCache::KernelCacheData kcache;
   kcache.kernel_key = identifier;
-  kcache.compiled_data_list.push_back(std::move(compiled));
+  kcache.compiled_data = std::move(compiled);
   kcache.args = infer_launch_args(kernel);
   kcache.last_used_at = std::time(nullptr);
   kcache.created_at = std::time(nullptr);
8 changes: 4 additions & 4 deletions taichi/runtime/llvm/llvm_context.cpp
@@ -889,9 +889,9 @@ TaichiLLVMContext::ThreadLocalData::~ThreadLocalData() {
   thread_safe_llvm_context.reset();
 }

-std::unique_ptr<LLVMCompiledData> TaichiLLVMContext::link_compiled_tasks(
+LLVMCompiledData TaichiLLVMContext::link_compiled_tasks(
     std::vector<std::unique_ptr<LLVMCompiledData>> data_list) {
-  auto linked = std::make_unique<LLVMCompiledData>();
+  LLVMCompiledData linked;
   std::unordered_set<int> used_tree_ids;
   std::unordered_set<int> tls_sizes;
   std::unordered_set<std::string> offloaded_names;
@@ -906,7 +906,7 @@ std::unique_ptr<LLVMCompiledData> TaichiLLVMContext::link_compiled_tasks(
     }
     for (auto &task : datum->tasks) {
       offloaded_names.insert(task.name);
-      linked->tasks.push_back(std::move(task));
+      linked.tasks.push_back(std::move(task));
     }
     linker.linkInModule(clone_module_to_context(
         datum->module.get(), linking_context_data->llvm_context));
@@ -927,7 +927,7 @@ std::unique_ptr<LLVMCompiledData> TaichiLLVMContext::link_compiled_tasks(
   eliminate_unused_functions(mod.get(), [&](std::string func_name) -> bool {
     return offloaded_names.count(func_name);
   });
-  linked->module = std::move(mod);
+  linked.module = std::move(mod);
   return linked;
 }
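`link_compiled_tasks` now returns `LLVMCompiledData` by value rather than `std::unique_ptr<LLVMCompiledData>`: returning the named local either applies NRVO or falls back to an implicit move, so the heap-allocated wrapper becomes unnecessary. A condensed sketch of returning a move-only aggregate by value (`Linked` and `link` are illustrative stand-ins):

#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Linked {
  std::vector<std::string> tasks;
  std::unique_ptr<int> module;  // move-only member makes Linked move-only
};

// Returning the move-only aggregate by value: `linked` is a named local,
// so the return applies NRVO or falls back to a move.
Linked link(std::vector<std::unique_ptr<int>> parts) {
  Linked linked;
  for (std::size_t i = 0; i < parts.size(); ++i) {
    linked.tasks.push_back("task_" + std::to_string(i));
  }
  linked.module = std::move(parts.front());  // keep one module as a stand-in
  return linked;  // no std::make_unique wrapper needed
}

int main() {
  std::vector<std::unique_ptr<int>> parts;
  parts.push_back(std::make_unique<int>(1));
  Linked result = link(std::move(parts));
  return 0;
}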
2 changes: 1 addition & 1 deletion taichi/runtime/llvm/llvm_context.h
@@ -142,7 +142,7 @@ class TaichiLLVMContext {

   static std::string get_struct_for_func_name(int tls_size);

-  std::unique_ptr<LLVMCompiledData> link_compiled_tasks(
+  LLVMCompiledData link_compiled_tasks(
       std::vector<std::unique_ptr<LLVMCompiledData>> data_list);

  private:
(Diffs for the remaining 7 of the 23 changed files are not shown.)