From e6518a96c9b6babfbcbd8316110df30001b5ea7a Mon Sep 17 00:00:00 2001
From: sjwsl
Date: Sat, 6 Nov 2021 03:02:53 +0800
Subject: [PATCH 1/4] [Refactor] Use wrapped create_call

---
 taichi/backends/cuda/codegen_cuda.cpp | 26 +++++++++-----------
 taichi/codegen/codegen_llvm.cpp       | 34 +++++++++++++--------------
 taichi/codegen/codegen_llvm.h         |  4 ++--
 taichi/llvm/llvm_codegen_utils.h      |  5 ----
 taichi/transforms/type_check.cpp      | 15 ++++--------
 5 files changed, 34 insertions(+), 50 deletions(-)

diff --git a/taichi/backends/cuda/codegen_cuda.cpp b/taichi/backends/cuda/codegen_cuda.cpp
index 6cc5e017dd15f..f25dbf6803c41 100644
--- a/taichi/backends/cuda/codegen_cuda.cpp
+++ b/taichi/backends/cuda/codegen_cuda.cpp
@@ -205,12 +205,12 @@ class CodeGenLLVMCUDA : public CodeGenLLVM {
   else if (op == UnaryOpType::x) {                                           \
     if (input_taichi_type->is_primitive(PrimitiveTypeID::f32)) {             \
       llvm_val[stmt] =                                                       \
-          builder->CreateCall(get_runtime_function("__nv_" #x "f"), input);  \
+          create_call("__nv_" #x "f", input);                                \
     } else if (input_taichi_type->is_primitive(PrimitiveTypeID::f64)) {      \
       llvm_val[stmt] =                                                       \
-          builder->CreateCall(get_runtime_function("__nv_" #x), input);      \
+          create_call("__nv_" #x, input);                                    \
     } else if (input_taichi_type->is_primitive(PrimitiveTypeID::i32)) {      \
-      llvm_val[stmt] = builder->CreateCall(get_runtime_function(#x), input); \
+      llvm_val[stmt] = create_call(#x, input);                               \
    } else {                                                                  \
       TI_NOT_IMPLEMENTED                                                     \
     }                                                                        \
@@ -218,30 +218,30 @@ class CodeGenLLVMCUDA : public CodeGenLLVM {
     if (op == UnaryOpType::abs) {
       if (input_taichi_type->is_primitive(PrimitiveTypeID::f32)) {
         llvm_val[stmt] =
-            builder->CreateCall(get_runtime_function("__nv_fabsf"), input);
+            create_call("__nv_fabsf", input);
       } else if (input_taichi_type->is_primitive(PrimitiveTypeID::f64)) {
         llvm_val[stmt] =
-            builder->CreateCall(get_runtime_function("__nv_fabs"), input);
+            create_call("__nv_fabs", input);
       } else if (input_taichi_type->is_primitive(PrimitiveTypeID::i32)) {
         llvm_val[stmt] =
-            builder->CreateCall(get_runtime_function("__nv_abs"), input);
+            create_call("__nv_abs", input);
       } else {
         TI_NOT_IMPLEMENTED
       }
     } else if (op == UnaryOpType::sqrt) {
       if (input_taichi_type->is_primitive(PrimitiveTypeID::f32)) {
         llvm_val[stmt] =
-            builder->CreateCall(get_runtime_function("__nv_sqrtf"), input);
+            create_call("__nv_sqrtf", input);
       } else if (input_taichi_type->is_primitive(PrimitiveTypeID::f64)) {
         llvm_val[stmt] =
-            builder->CreateCall(get_runtime_function("__nv_sqrt"), input);
+            create_call("__nv_sqrt", input);
       } else {
         TI_NOT_IMPLEMENTED
       }
     } else if (op == UnaryOpType::logic_not) {
       if (input_taichi_type->is_primitive(PrimitiveTypeID::i32)) {
         llvm_val[stmt] =
-            builder->CreateCall(get_runtime_function("logic_not_i32"), input);
+            create_call("logic_not_i32", input);
       } else {
         TI_NOT_IMPLEMENTED
       }
@@ -366,11 +366,7 @@ class CodeGenLLVMCUDA : public CodeGenLLVM {
     }
     TI_ASSERT(atomics.at(prim_type).find(op) != atomics.at(prim_type).end());
 
-    return builder->CreateCall(
-        get_runtime_function(atomics.at(prim_type).at(op)),
-        {llvm_val[stmt->dest], llvm_val[stmt->val]});
-
-    return nullptr;
+    return create_call(atomics.at(prim_type).at(op), {llvm_val[stmt->dest], llvm_val[stmt->val]});
   }
 
   void visit(AtomicOpStmt *stmt) override {
@@ -591,7 +587,7 @@ class CodeGenLLVMCUDA : public CodeGenLLVM {
     const auto arg_id = stmt->arg_id;
     const auto axis = stmt->axis;
     llvm_val[stmt] =
-        builder->CreateCall(get_runtime_function("Context_get_extra_args"),
+        create_call("Context_get_extra_args",
                             {get_context(), tlctx->get_constant(arg_id),
                             tlctx->get_constant(axis)});
   }
 
diff --git a/taichi/codegen/codegen_llvm.cpp b/taichi/codegen/codegen_llvm.cpp
index e7b42f30159dd..548b421bc050f 100644
--- a/taichi/codegen/codegen_llvm.cpp
+++ b/taichi/codegen/codegen_llvm.cpp
@@ -191,13 +191,13 @@ void CodeGenLLVM::emit_extra_unary(UnaryOpStmt *stmt) {
   else if (op == UnaryOpType::x) {                                       \
     if (input_taichi_type->is_primitive(PrimitiveTypeID::f32)) {         \
       llvm_val[stmt] =                                                   \
-          builder->CreateCall(get_runtime_function(#x "_f32"), input);   \
+          create_call("_f32", input);                                    \
     } else if (input_taichi_type->is_primitive(PrimitiveTypeID::f64)) {  \
       llvm_val[stmt] =                                                   \
-          builder->CreateCall(get_runtime_function(#x "_f64"), input);   \
+          create_call("_f64", input);                                    \
     } else if (input_taichi_type->is_primitive(PrimitiveTypeID::i32)) {  \
       llvm_val[stmt] =                                                   \
-          builder->CreateCall(get_runtime_function(#x "_i32"), input);   \
+          create_call("_i32", input);                                    \
     } else {                                                             \
       TI_NOT_IMPLEMENTED                                                 \
     }                                                                    \
@@ -762,7 +762,7 @@ llvm::Value *CodeGenLLVM::create_print(std::string tag,
     value =
         builder->CreateFPExt(value, tlctx->get_data_type(PrimitiveType::f64));
   args.push_back(value);
-  return builder->CreateCall(runtime_printf, args);
+  return create_call(runtime_printf, args);
 }
 
 llvm::Value *CodeGenLLVM::create_print(std::string tag, llvm::Value *value) {
@@ -822,7 +822,7 @@ void CodeGenLLVM::visit(PrintStmt *stmt) {
   args.insert(args.begin(),
               builder->CreateGlobalStringPtr(formats.c_str(), "format_string"));
 
-  llvm_val[stmt] = builder->CreateCall(runtime_printf, args);
+  llvm_val[stmt] = create_call(runtime_printf, args);
 }
 
 void CodeGenLLVM::visit(ConstStmt *stmt) {
@@ -944,12 +944,12 @@ void CodeGenLLVM::emit_gc(OffloadedStmt *stmt) {
 }
 
 llvm::Value *CodeGenLLVM::create_call(llvm::Value *func,
-                                      std::vector<llvm::Value *> args) {
+                                      llvm::ArrayRef<llvm::Value *> args) {
   check_func_call_signature(func, args);
   return builder->CreateCall(func, args);
 }
 
 llvm::Value *CodeGenLLVM::create_call(std::string func_name,
-                                      std::vector<llvm::Value *> args) {
+                                      llvm::ArrayRef<llvm::Value *> args) {
   auto func = get_runtime_function(func_name);
   return create_call(func, args);
 }
@@ -1090,7 +1090,7 @@ void CodeGenLLVM::visit(ReturnStmt *stmt) {
       auto extended = builder->CreateZExt(
           builder->CreateBitCast(llvm_val[stmt->value], intermediate_type),
           dest_ty);
-      builder->CreateCall(get_runtime_function("LLVMRuntime_store_result"),
+      create_call("LLVMRuntime_store_result",
                           {get_runtime(), extended});
     }
   }
@@ -1211,11 +1211,11 @@ void CodeGenLLVM::visit(AtomicOpStmt *stmt) {
             llvm_val[stmt->val], llvm::AtomicOrdering::SequentiallyConsistent);
       } else if (stmt->val->ret_type->is_primitive(PrimitiveTypeID::f32)) {
         old_value =
-            builder->CreateCall(get_runtime_function("atomic_min_f32"),
+            create_call("atomic_min_f32",
                                 {llvm_val[stmt->dest], llvm_val[stmt->val]});
       } else if (stmt->val->ret_type->is_primitive(PrimitiveTypeID::f64)) {
         old_value =
-            builder->CreateCall(get_runtime_function("atomic_min_f64"),
+            create_call("atomic_min_f64",
                                 {llvm_val[stmt->dest], llvm_val[stmt->val]});
       } else {
         TI_NOT_IMPLEMENTED
@@ -1227,11 +1227,11 @@ void CodeGenLLVM::visit(AtomicOpStmt *stmt) {
             llvm_val[stmt->val], llvm::AtomicOrdering::SequentiallyConsistent);
       } else if (stmt->val->ret_type->is_primitive(PrimitiveTypeID::f32)) {
         old_value =
-            builder->CreateCall(get_runtime_function("atomic_max_f32"),
+            create_call("atomic_max_f32",
                                 {llvm_val[stmt->dest], llvm_val[stmt->val]});
       } else if (stmt->val->ret_type->is_primitive(PrimitiveTypeID::f64)) {
         old_value =
-            builder->CreateCall(get_runtime_function("atomic_max_f64"),
+            create_call("atomic_max_f64",
                                 {llvm_val[stmt->dest], llvm_val[stmt->val]});
       } else {
         TI_NOT_IMPLEMENTED
@@ -1527,8 +1527,7 @@ void CodeGenLLVM::visit(ExternalPtrStmt *stmt) {
 
     std::vector<llvm::Value *> sizes(num_indices);
     for (int i = 0; i < num_indices; i++) {
-      auto raw_arg = builder->CreateCall(
-          get_runtime_function("Context_get_extra_args"),
+      auto raw_arg = create_call("Context_get_extra_args",
          {get_context(), tlctx->get_constant(arg_id), tlctx->get_constant(i)});
       sizes[i] = raw_arg;
     }
@@ -1550,8 +1549,7 @@ void CodeGenLLVM::visit(ExternalPtrStmt *stmt) {
 void CodeGenLLVM::visit(ExternalTensorShapeAlongAxisStmt *stmt) {
   const auto arg_id = stmt->arg_id;
   const auto axis = stmt->axis;
-  llvm_val[stmt] = builder->CreateCall(
-      get_runtime_function("Context_get_extra_args"),
+  llvm_val[stmt] = create_call("Context_get_extra_args",
      {get_context(), tlctx->get_constant(arg_id), tlctx->get_constant(axis)});
 }
 
@@ -2131,7 +2129,7 @@ void CodeGenLLVM::visit_call_bitcode(ExternalFuncCallStmt *stmt) {
     arg_values[i] =
         builder->CreatePointerCast(tmp_value, func_ptr->getArg(i)->getType());
   }
-  builder->CreateCall(func_ptr, arg_values);
+  create_call(func_ptr, arg_values);
 }
 
 void CodeGenLLVM::visit_call_shared_object(ExternalFuncCallStmt *stmt) {
@@ -2159,7 +2157,7 @@ void CodeGenLLVM::visit_call_shared_object(ExternalFuncCallStmt *stmt) {
 
   auto addr = tlctx->get_constant((std::size_t)stmt->so_func);
   auto func = builder->CreateIntToPtr(addr, func_ptr_type);
-  builder->CreateCall(func, arg_values);
+  create_call(func, arg_values);
 }
 
 void CodeGenLLVM::visit(ExternalFuncCallStmt *stmt) {
diff --git a/taichi/codegen/codegen_llvm.h b/taichi/codegen/codegen_llvm.h
index 9770f4794c849..a0e06a6c86b60 100644
--- a/taichi/codegen/codegen_llvm.h
+++ b/taichi/codegen/codegen_llvm.h
@@ -137,10 +137,10 @@ class CodeGenLLVM : public IRVisitor, public LLVMModuleBuilder {
   void emit_gc(OffloadedStmt *stmt);
 
   llvm::Value *create_call(llvm::Value *func,
-                           std::vector<llvm::Value *> args = {});
+                           llvm::ArrayRef<llvm::Value *> args = {});
 
   llvm::Value *create_call(std::string func_name,
-                           std::vector<llvm::Value *> args = {});
+                           llvm::ArrayRef<llvm::Value *> args = {});
 
   llvm::Value *call(SNode *snode,
                     llvm::Value *node_ptr,
                     const std::string &method,
diff --git a/taichi/llvm/llvm_codegen_utils.h b/taichi/llvm/llvm_codegen_utils.h
index 601cff5bb158a..fcb941abde4f7 100644
--- a/taichi/llvm/llvm_codegen_utils.h
+++ b/taichi/llvm/llvm_codegen_utils.h
@@ -46,11 +46,6 @@ std::string type_name(llvm::Type *type);
 void check_func_call_signature(llvm::Value *func,
                                std::vector<llvm::Value *> arglist);
 
-template <typename... Args>
-inline bool check_func_call_signature(llvm::Value *func, Args &&... args) {
-  return check_func_call_signature(func, {args...});
-}
-
 class LLVMModuleBuilder {
  public:
   std::unique_ptr<llvm::Module> module{nullptr};
diff --git a/taichi/transforms/type_check.cpp b/taichi/transforms/type_check.cpp
index a745a6ed1aeca..d15b0b35ee8ba 100644
--- a/taichi/transforms/type_check.cpp
+++ b/taichi/transforms/type_check.cpp
@@ -66,10 +66,8 @@ class TypeCheck : public IRVisitor {
       dst_type = cft->get_compute_type();
     }
     if (stmt->val->ret_type != dst_type) {
-      TI_WARN("[{}] Atomic {} ({} to {}) may lose precision, at\n{}",
-              stmt->name(), atomic_op_type_name(stmt->op_type),
-              data_type_name(stmt->val->ret_type), data_type_name(dst_type),
-              stmt->tb);
+      TI_WARN("[{}] Atomic {} ({} to {}) may lose precision, at\n{}", stmt->name(), atomic_op_type_name(stmt->op_type),
+              data_type_name(stmt->val->ret_type), data_type_name(dst_type), stmt->tb);
       stmt->val = insert_type_cast_before(stmt, stmt->val, dst_type);
     }
     stmt->ret_type = dst_type;
@@ -120,8 +118,7 @@ class TypeCheck : public IRVisitor {
     }
     if (dst_value_type != promoted && dst_value_type != stmt->val->ret_type) {
       TI_WARN("[{}] Local store may lose precision: {} <- {}, at\n{}",
-              stmt->name(), dst_value_type->to_string(), input_type,
-              stmt->tb);
+              stmt->name(), dst_value_type->to_string(), input_type, stmt->tb);
     }
     stmt->ret_type = dst_value_type;
     return;
@@ -141,8 +138,7 @@ class TypeCheck : public IRVisitor {
     }
     if (stmt->dest->ret_type != common_container_type) {
       TI_WARN(
-          "[{}] Local store may lose precision (target = {}, value = {}), "
-          "at\n{}",
+          "[{}] Local store may lose precision (target = {}, value = {}), at\n{}",
           stmt->name(), stmt->dest->ret_data_type_name(),
           old_data->ret_data_type_name(), stmt->id, stmt->tb);
     }
@@ -300,8 +296,7 @@ class TypeCheck : public IRVisitor {
     auto error = [&](std::string comment = "") {
       if (comment == "") {
         TI_WARN(
-            "[{}] Error: type mismatch (left = {}, right = {}, stmt_id = {}), "
-            "at\n{}",
+            "[{}] Error: type mismatch (left = {}, right = {}, stmt_id = {}), at\n{}",
             stmt->name(), stmt->lhs->ret_data_type_name(),
             stmt->rhs->ret_data_type_name(), stmt->id, stmt->tb);
       } else {
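[Note, not part of the patch] The point of patch 1 is that every runtime call now funnels through the create_call wrapper, so check_func_call_signature runs at every call site instead of only where callers remembered to invoke it. Restating the wrapper pair from the codegen_llvm.cpp:944 hunk above, with comments added here:

    llvm::Value *CodeGenLLVM::create_call(llvm::Value *func,
                                          llvm::ArrayRef<llvm::Value *> args) {
      check_func_call_signature(func, args);   // verify arity/types up front
      return builder->CreateCall(func, args);  // then emit the call as before
    }

    llvm::Value *CodeGenLLVM::create_call(std::string func_name,
                                          llvm::ArrayRef<llvm::Value *> args) {
      auto func = get_runtime_function(func_name);  // look up the runtime symbol
      return create_call(func, args);               // reuse the checked overload
    }

Call sites accordingly shrink from builder->CreateCall(get_runtime_function("atomic_min_f32"), {llvm_val[stmt->dest], llvm_val[stmt->val]}) to create_call("atomic_min_f32", {llvm_val[stmt->dest], llvm_val[stmt->val]}).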
llvm_val[stmt] = create_call("__nv_" #x, input); \ + } else if (input_taichi_type->is_primitive(PrimitiveTypeID::i32)) { \ + llvm_val[stmt] = create_call(#x, input); \ + } else { \ + TI_NOT_IMPLEMENTED \ + } \ } if (op == UnaryOpType::abs) { if (input_taichi_type->is_primitive(PrimitiveTypeID::f32)) { - llvm_val[stmt] = - create_call("__nv_fabsf", input); + llvm_val[stmt] = create_call("__nv_fabsf", input); } else if (input_taichi_type->is_primitive(PrimitiveTypeID::f64)) { - llvm_val[stmt] = - create_call("__nv_fabs", input); + llvm_val[stmt] = create_call("__nv_fabs", input); } else if (input_taichi_type->is_primitive(PrimitiveTypeID::i32)) { - llvm_val[stmt] = - create_call("__nv_abs", input); + llvm_val[stmt] = create_call("__nv_abs", input); } else { TI_NOT_IMPLEMENTED } } else if (op == UnaryOpType::sqrt) { if (input_taichi_type->is_primitive(PrimitiveTypeID::f32)) { - llvm_val[stmt] = - create_call("__nv_sqrtf", input); + llvm_val[stmt] = create_call("__nv_sqrtf", input); } else if (input_taichi_type->is_primitive(PrimitiveTypeID::f64)) { - llvm_val[stmt] = - create_call("__nv_sqrt", input); + llvm_val[stmt] = create_call("__nv_sqrt", input); } else { TI_NOT_IMPLEMENTED } } else if (op == UnaryOpType::logic_not) { if (input_taichi_type->is_primitive(PrimitiveTypeID::i32)) { - llvm_val[stmt] = - create_call("logic_not_i32", input); + llvm_val[stmt] = create_call("logic_not_i32", input); } else { TI_NOT_IMPLEMENTED } @@ -366,7 +358,8 @@ class CodeGenLLVMCUDA : public CodeGenLLVM { } TI_ASSERT(atomics.at(prim_type).find(op) != atomics.at(prim_type).end()); - return create_call(atomics.at(prim_type).at(op), {llvm_val[stmt->dest], llvm_val[stmt->val]}); + return create_call(atomics.at(prim_type).at(op), + {llvm_val[stmt->dest], llvm_val[stmt->val]}); } void visit(AtomicOpStmt *stmt) override { @@ -586,10 +579,9 @@ class CodeGenLLVMCUDA : public CodeGenLLVM { void visit(ExternalTensorShapeAlongAxisStmt *stmt) override { const auto arg_id = stmt->arg_id; const auto axis = stmt->axis; - llvm_val[stmt] = - create_call("Context_get_extra_args", - {get_context(), tlctx->get_constant(arg_id), - tlctx->get_constant(axis)}); + llvm_val[stmt] = create_call("Context_get_extra_args", + {get_context(), tlctx->get_constant(arg_id), + tlctx->get_constant(axis)}); } void visit(BinaryOpStmt *stmt) override { diff --git a/taichi/codegen/codegen_llvm.cpp b/taichi/codegen/codegen_llvm.cpp index 548b421bc050f..dadd1ef98ed4b 100644 --- a/taichi/codegen/codegen_llvm.cpp +++ b/taichi/codegen/codegen_llvm.cpp @@ -190,14 +190,11 @@ void CodeGenLLVM::emit_extra_unary(UnaryOpStmt *stmt) { #define UNARY_STD(x) \ else if (op == UnaryOpType::x) { \ if (input_taichi_type->is_primitive(PrimitiveTypeID::f32)) { \ - llvm_val[stmt] = \ - create_call("_f32", input); \ + llvm_val[stmt] = create_call("_f32", input); \ } else if (input_taichi_type->is_primitive(PrimitiveTypeID::f64)) { \ - llvm_val[stmt] = \ - create_call("_f64", input); \ + llvm_val[stmt] = create_call("_f64", input); \ } else if (input_taichi_type->is_primitive(PrimitiveTypeID::i32)) { \ - llvm_val[stmt] = \ - create_call("_i32", input); \ + llvm_val[stmt] = create_call("_i32", input); \ } else { \ TI_NOT_IMPLEMENTED \ } \ @@ -1090,8 +1087,7 @@ void CodeGenLLVM::visit(ReturnStmt *stmt) { auto extended = builder->CreateZExt( builder->CreateBitCast(llvm_val[stmt->value], intermediate_type), dest_ty); - create_call("LLVMRuntime_store_result", - {get_runtime(), extended}); + create_call("LLVMRuntime_store_result", {get_runtime(), extended}); } } @@ -1210,12 
@@ -1210,12 +1206,10 @@ void CodeGenLLVM::visit(AtomicOpStmt *stmt) {
             llvm::AtomicRMWInst::BinOp::Min, llvm_val[stmt->dest],
             llvm_val[stmt->val], llvm::AtomicOrdering::SequentiallyConsistent);
       } else if (stmt->val->ret_type->is_primitive(PrimitiveTypeID::f32)) {
-        old_value =
-            create_call("atomic_min_f32",
+        old_value = create_call("atomic_min_f32",
                                 {llvm_val[stmt->dest], llvm_val[stmt->val]});
       } else if (stmt->val->ret_type->is_primitive(PrimitiveTypeID::f64)) {
-        old_value =
-            create_call("atomic_min_f64",
+        old_value = create_call("atomic_min_f64",
                                 {llvm_val[stmt->dest], llvm_val[stmt->val]});
       } else {
         TI_NOT_IMPLEMENTED
@@ -1226,12 +1220,10 @@ void CodeGenLLVM::visit(AtomicOpStmt *stmt) {
             llvm::AtomicRMWInst::BinOp::Max, llvm_val[stmt->dest],
             llvm_val[stmt->val], llvm::AtomicOrdering::SequentiallyConsistent);
       } else if (stmt->val->ret_type->is_primitive(PrimitiveTypeID::f32)) {
-        old_value =
-            create_call("atomic_max_f32",
+        old_value = create_call("atomic_max_f32",
                                 {llvm_val[stmt->dest], llvm_val[stmt->val]});
       } else if (stmt->val->ret_type->is_primitive(PrimitiveTypeID::f64)) {
-        old_value =
-            create_call("atomic_max_f64",
+        old_value = create_call("atomic_max_f64",
                                 {llvm_val[stmt->dest], llvm_val[stmt->val]});
       } else {
         TI_NOT_IMPLEMENTED
@@ -1527,7 +1519,8 @@ void CodeGenLLVM::visit(ExternalPtrStmt *stmt) {
 
     std::vector<llvm::Value *> sizes(num_indices);
     for (int i = 0; i < num_indices; i++) {
-      auto raw_arg = create_call("Context_get_extra_args",
+      auto raw_arg = create_call(
+          "Context_get_extra_args",
          {get_context(), tlctx->get_constant(arg_id), tlctx->get_constant(i)});
       sizes[i] = raw_arg;
     }
@@ -1549,7 +1542,8 @@ void CodeGenLLVM::visit(ExternalPtrStmt *stmt) {
 void CodeGenLLVM::visit(ExternalTensorShapeAlongAxisStmt *stmt) {
   const auto arg_id = stmt->arg_id;
   const auto axis = stmt->axis;
-  llvm_val[stmt] = create_call("Context_get_extra_args",
+  llvm_val[stmt] = create_call(
+      "Context_get_extra_args",
      {get_context(), tlctx->get_constant(arg_id), tlctx->get_constant(axis)});
 }
 
diff --git a/taichi/transforms/type_check.cpp b/taichi/transforms/type_check.cpp
index d15b0b35ee8ba..a745a6ed1aeca 100644
--- a/taichi/transforms/type_check.cpp
+++ b/taichi/transforms/type_check.cpp
@@ -66,8 +66,10 @@ class TypeCheck : public IRVisitor {
       dst_type = cft->get_compute_type();
     }
     if (stmt->val->ret_type != dst_type) {
-      TI_WARN("[{}] Atomic {} ({} to {}) may lose precision, at\n{}", stmt->name(), atomic_op_type_name(stmt->op_type),
-              data_type_name(stmt->val->ret_type), data_type_name(dst_type), stmt->tb);
+      TI_WARN("[{}] Atomic {} ({} to {}) may lose precision, at\n{}",
+              stmt->name(), atomic_op_type_name(stmt->op_type),
+              data_type_name(stmt->val->ret_type), data_type_name(dst_type),
+              stmt->tb);
       stmt->val = insert_type_cast_before(stmt, stmt->val, dst_type);
     }
     stmt->ret_type = dst_type;
@@ -118,7 +120,8 @@ class TypeCheck : public IRVisitor {
     }
     if (dst_value_type != promoted && dst_value_type != stmt->val->ret_type) {
       TI_WARN("[{}] Local store may lose precision: {} <- {}, at\n{}",
-              stmt->name(), dst_value_type->to_string(), input_type, stmt->tb);
+              stmt->name(), dst_value_type->to_string(), input_type,
+              stmt->tb);
     }
     stmt->ret_type = dst_value_type;
     return;
@@ -138,7 +141,8 @@ class TypeCheck : public IRVisitor {
     }
     if (stmt->dest->ret_type != common_container_type) {
       TI_WARN(
-          "[{}] Local store may lose precision (target = {}, value = {}), at\n{}",
+          "[{}] Local store may lose precision (target = {}, value = {}), "
+          "at\n{}",
           stmt->name(), stmt->dest->ret_data_type_name(),
           old_data->ret_data_type_name(), stmt->id, stmt->tb);
     }
@@ -296,7 +300,8 @@ class TypeCheck : public IRVisitor {
     auto error = [&](std::string comment = "") {
       if (comment == "") {
         TI_WARN(
-            "[{}] Error: type mismatch (left = {}, right = {}, stmt_id = {}), at\n{}",
+            "[{}] Error: type mismatch (left = {}, right = {}, stmt_id = {}), "
+            "at\n{}",
             stmt->name(), stmt->lhs->ret_data_type_name(),
             stmt->rhs->ret_data_type_name(), stmt->id, stmt->tb);
       } else {
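[Note, not part of the patch] Patch 1 accidentally dropped the stringized op name in codegen_llvm.cpp's UNARY_STD, leaving create_call("_f32", input): every unary op would have looked up the same bogus "_f32" runtime symbol. Patch 3 below restores the #x prefix. A self-contained illustration of the preprocessor mechanics involved (hypothetical NAME macro, not code from this series):

    #include <cstdio>

    // #x stringizes the macro argument; adjacent string literals concatenate,
    // so NAME(sin) expands to "sin" "_f32", i.e. "sin_f32".
    #define NAME(x) #x "_f32"

    int main() {
      std::puts(NAME(sin));  // prints sin_f32
      std::puts("_f32");     // what the buggy macro amounted to for every op
      return 0;
    }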
From 2bf12f21cf835ee3eb071233cc77e923ff80082d Mon Sep 17 00:00:00 2001
From: sjwsl
Date: Mon, 8 Nov 2021 13:08:42 +0800
Subject: [PATCH 3/4] fix

---
 taichi/codegen/codegen_llvm.cpp | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/taichi/codegen/codegen_llvm.cpp b/taichi/codegen/codegen_llvm.cpp
index dadd1ef98ed4b..60d3b2160a633 100644
--- a/taichi/codegen/codegen_llvm.cpp
+++ b/taichi/codegen/codegen_llvm.cpp
@@ -190,11 +190,14 @@ void CodeGenLLVM::emit_extra_unary(UnaryOpStmt *stmt) {
 #define UNARY_STD(x)                                                     \
   else if (op == UnaryOpType::x) {                                       \
     if (input_taichi_type->is_primitive(PrimitiveTypeID::f32)) {         \
-      llvm_val[stmt] = create_call("_f32", input);                       \
+      llvm_val[stmt] =                                                   \
+          create_call(#x "_f32", input);                                 \
     } else if (input_taichi_type->is_primitive(PrimitiveTypeID::f64)) {  \
-      llvm_val[stmt] = create_call("_f64", input);                       \
+      llvm_val[stmt] =                                                   \
+          create_call(#x "_f64", input);                                 \
     } else if (input_taichi_type->is_primitive(PrimitiveTypeID::i32)) {  \
-      llvm_val[stmt] = create_call("_i32", input);                       \
+      llvm_val[stmt] =                                                   \
+          create_call(#x "_i32", input);                                 \
     } else {                                                             \
       TI_NOT_IMPLEMENTED                                                 \
     }                                                                    \

From 677b7c7d96fde9e69e6328aaaf9852dd1133e34d Mon Sep 17 00:00:00 2001
From: Taichi Gardener
Date: Mon, 8 Nov 2021 05:12:36 +0000
Subject: [PATCH 4/4] Auto Format

---
 taichi/codegen/codegen_llvm.cpp | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/taichi/codegen/codegen_llvm.cpp b/taichi/codegen/codegen_llvm.cpp
index 60d3b2160a633..43bcac6214931 100644
--- a/taichi/codegen/codegen_llvm.cpp
+++ b/taichi/codegen/codegen_llvm.cpp
@@ -190,14 +190,11 @@ void CodeGenLLVM::emit_extra_unary(UnaryOpStmt *stmt) {
 #define UNARY_STD(x)                                                     \
   else if (op == UnaryOpType::x) {                                       \
     if (input_taichi_type->is_primitive(PrimitiveTypeID::f32)) {         \
-      llvm_val[stmt] =                                                   \
-          create_call(#x "_f32", input);                                 \
+      llvm_val[stmt] = create_call(#x "_f32", input);                    \
     } else if (input_taichi_type->is_primitive(PrimitiveTypeID::f64)) {  \
-      llvm_val[stmt] =                                                   \
-          create_call(#x "_f64", input);                                 \
+      llvm_val[stmt] = create_call(#x "_f64", input);                    \
     } else if (input_taichi_type->is_primitive(PrimitiveTypeID::i32)) {  \
-      llvm_val[stmt] =                                                   \
-          create_call(#x "_i32", input);                                 \
+      llvm_val[stmt] = create_call(#x "_i32", input);                    \
     } else {                                                             \
       TI_NOT_IMPLEMENTED                                                 \
     }                                                                    \
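[Note, not part of the patch] Besides routing calls through one checked choke point, the series changes create_call's argument parameter from std::vector<llvm::Value *> to llvm::ArrayRef<llvm::Value *>. ArrayRef is a non-owning view, so braced initializer lists, existing vectors, and the default empty argument all bind without allocating a temporary vector. A sketch of what callers can now pass (emit_demo and "my_runtime_fn" are made up for illustration; only create_call itself comes from the patch):

    #include <vector>
    #include <llvm/ADT/ArrayRef.h>
    #include <llvm/IR/Value.h>

    void CodeGenLLVM::emit_demo(llvm::Value *a, llvm::Value *b) {
      create_call("my_runtime_fn", {a, b});  // initializer list binds directly
      std::vector<llvm::Value *> args{a, b};
      create_call("my_runtime_fn", args);    // vector is viewed in place, no copy
      create_call("my_runtime_fn");          // default argument: empty ArrayRef
    }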