[Lang] Matrix/Vector refactor: support basic matrix ops #6077

Merged: 21 commits, Sep 24, 2022
Changes from 18 commits
18 changes: 17 additions & 1 deletion python/taichi/lang/expr.py
```diff
@@ -1,8 +1,10 @@
+from typing import Iterable
+
 import numpy as np
 from taichi._lib import core as _ti_core
 from taichi.lang import impl
 from taichi.lang.common_ops import TaichiOperations
-from taichi.lang.exception import TaichiTypeError
+from taichi.lang.exception import TaichiCompilationError, TaichiTypeError
 from taichi.lang.util import is_taichi_class, to_numpy_type
 from taichi.types import primitive_types
 from taichi.types.primitive_types import integer_types, real_types
@@ -39,6 +41,20 @@ def __init__(self, *args, tb=None, dtype=None):
         self.ptr.set_tb(self.tb)
         self.ptr.type_check(impl.get_runtime().prog.config())
 
+    def __getitem__(self, indices):
+        if not isinstance(indices, Iterable):
+            indices = (indices, )
+        return impl.subscript(self, *indices)
+
+    def is_tensor(self):
+        return self.ptr.is_tensor()
+
+    def get_shape(self):
+        if not self.is_tensor():
+            raise TaichiCompilationError(
+                f"Getting shape of non-tensor type: {self.ptr.get_ret_type()}")
+        return self.ptr.get_shape()
+
     def __hash__(self):
         return self.ptr.get_raw_address()
 
```
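For orientation: the new `__getitem__`, `is_tensor`, and `get_shape` methods let a tensor-typed frontend `Expr` be subscripted and shape-queried directly. A minimal sketch of the element-wise semantics this refactor implements in the compiler (an illustrative kernel, not a test from this PR):

```python
import taichi as ti

ti.init(arch=ti.cpu)

@ti.kernel
def elementwise() -> ti.f32:
    a = ti.Vector([1.0, 2.0, 3.0])
    b = a * 2.0 + 1.0          # scalar-tensor ops broadcast element-wise
    return b[0] + b[1] + b[2]  # subscripting routes through Expr.__getitem__

print(elementwise())  # 3.0 + 5.0 + 7.0 = 15.0
```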
252 changes: 204 additions & 48 deletions taichi/codegen/llvm/codegen_llvm.cpp

Large diffs are not rendered by default.

10 changes: 10 additions & 0 deletions taichi/codegen/llvm/codegen_llvm.h
```diff
@@ -102,6 +102,16 @@ class TaskCodeGenLLVM : public IRVisitor, public LLVMModuleBuilder {
                                   llvm::Value *node_meta,
                                   SNode *snode);
 
+  void create_elementwise_binary(
+      BinaryOpStmt *stmt,
+      std::function<llvm::Value *(llvm::Value *lhs, llvm::Value *rhs)> f);
+
+  void create_elementwise_cast(
+      UnaryOpStmt *stmt,
+      llvm::Type *to_ty,
+      std::function<llvm::Value *(llvm::Value *, llvm::Type *)> f,
+      bool on_self = false);
+
   std::unique_ptr<RuntimeObject> emit_struct_meta_object(SNode *snode);
 
   llvm::Value *emit_struct_meta(SNode *snode);
```
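These declarations factor out the scalarization pattern that the (unrendered) 252-line `codegen_llvm.cpp` diff applies to tensor values: run a scalar code generator over each element pair and collect the results. A rough Python model of the element-wise-binary idea (illustrative only; the real helpers emit LLVM IR):

```python
def create_elementwise_binary(lhs_elems, rhs_elems, f):
    # f plays the role of the scalar op emitter (e.g. an integer add); here
    # it is just a callable applied to each pair of corresponding elements.
    assert len(lhs_elems) == len(rhs_elems), "element-wise op needs equal shapes"
    return [f(a, b) for a, b in zip(lhs_elems, rhs_elems)]

print(create_elementwise_binary([1, 2, 3], [10, 20, 30], lambda a, b: a + b))
# [11, 22, 33]
```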
82 changes: 72 additions & 10 deletions taichi/ir/frontend_ir.cpp
```diff
@@ -198,6 +198,32 @@ void UnaryOpExpression::flatten(FlattenContext *ctx) {
   ctx->push_back(std::move(unary));
 }
 
+Expr to_broadcast_tensor(const Expr &elt, const DataType &dt) {
+  TI_ASSERT(dt->is<TensorType>());
+  if (elt->ret_type == dt) {
+    return elt;
+  }
+  auto tensor_type = dt->as<TensorType>();
+  auto elt_type = tensor_type->get_element_type();
+  TI_ASSERT_INFO(elt_type->is<PrimitiveType>(),
+                 "Only primitive types are supported in Tensors, got {}",
+                 elt_type->to_string());
+  std::vector<Expr> broadcast_values(tensor_type->get_num_elements(), elt);
+  return Expr::make<MatrixExpression>(broadcast_values,
+                                      tensor_type->get_shape(), elt->ret_type);
+}
+
+std::tuple<Expr, Expr> unify_binop_operands(const Expr &e1, const Expr &e2) {
+  if (e1->ret_type->is<PrimitiveType>() && e2->ret_type->is<TensorType>()) {
+    return std::tuple(to_broadcast_tensor(e1, e2->ret_type), e2);
+  } else if (e1->ret_type->is<TensorType>() &&
+             e2->ret_type->is<PrimitiveType>()) {
+    return std::tuple(e1, to_broadcast_tensor(e2, e1->ret_type));
+  } else {
+    return std::tuple(e1, e2);
+  }
+}
+
 void BinaryOpExpression::type_check(CompileConfig *config) {
   TI_ASSERT_TYPE_CHECKED(lhs);
   TI_ASSERT_TYPE_CHECKED(rhs);
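```

The rule implemented here: when a binary op mixes a scalar with a tensor, the scalar is replicated into a matrix of the tensor's shape before type checking proceeds. A small Python model of `unify_binop_operands` (a sketch; tensors are modeled as flat lists):

```python
def unify_binop_operands(a, b):
    # If exactly one operand is a scalar, broadcast it to the other's length.
    a_is_tensor, b_is_tensor = isinstance(a, list), isinstance(b, list)
    if not a_is_tensor and b_is_tensor:
        return [a] * len(b), b
    if a_is_tensor and not b_is_tensor:
        return a, [b] * len(a)
    return a, b

print(unify_binop_operands(2.0, [1.0, 2.0, 3.0]))
# ([2.0, 2.0, 2.0], [1.0, 2.0, 3.0]): the scalar became a same-shape tensor
```

```diff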
@@ -217,17 +243,53 @@ void BinaryOpExpression::type_check(CompileConfig *config) {

   if ((lhs_type->is<PrimitiveType>() && rhs_type->is<TensorType>()) ||
       (lhs_type->is<TensorType>() && rhs_type->is<PrimitiveType>())) {
-    TI_NOT_IMPLEMENTED;
+    // convert Tensor/Scalar | Scalar/Tensor operations to broadcasting
+    auto [unified_l, unified_r] = unify_binop_operands(lhs, rhs);
+    lhs = unified_l;
+    rhs = unified_r;
+    if (lhs->ret_type == PrimitiveType::unknown)
+      lhs.type_check(config);
+    if (rhs->ret_type == PrimitiveType::unknown)
+      rhs.type_check(config);
+    TI_ASSERT(lhs->ret_type->is<TensorType>());
+    TI_ASSERT(rhs->ret_type->is<TensorType>());
+    lhs_type = lhs->ret_type;
+    rhs_type = rhs->ret_type;
   }
 
+  bool is_tensor_op = false;
+
+  if (lhs_type->is<TensorType>()) {
+    is_tensor_op = true;
+    auto rhs_tensor_type = rhs_type->cast<TensorType>();
+    if (rhs_tensor_type->get_shape() !=
+        lhs_type->cast<TensorType>()->get_shape())
+      // currently assumes element-wise binary ops
+      error();
+  }
+
+  auto make_dt = [&is_tensor_op, this](DataType dt) {
+    if (is_tensor_op) {
+      return TypeFactory::create_tensor_type(
+          this->lhs->ret_type->cast<TensorType>()->get_shape(), dt);
+    } else {
+      return dt;
+    }
+  };
+
   if (binary_is_bitwise(type) &&
-      (!is_integral(lhs_type) || !is_integral(rhs_type)))
+      (!is_integral(lhs_type) || !is_integral(rhs_type)) &&
+      (!is_integral_tensor(lhs_type) || !is_integral_tensor(rhs_type)))
     error();
   if (binary_is_logical(type) &&
-      (lhs_type != PrimitiveType::i32 || rhs_type != PrimitiveType::i32))
+      (lhs_type != PrimitiveType::i32 || rhs_type != PrimitiveType::i32) &&
+      (!is_tensor_op || (lhs_type->cast<TensorType>()->get_element_type() !=
+                             PrimitiveType::i32 ||
+                         rhs_type->cast<TensorType>()->get_element_type() !=
+                             PrimitiveType::i32)))
     error();
   if (is_comparison(type) || binary_is_logical(type)) {
-    ret_type = PrimitiveType::i32;
+    ret_type = make_dt(PrimitiveType::i32);
     return;
   }
   if (is_shift_op(type) ||
@@ -240,20 +302,20 @@ void BinaryOpExpression::type_check(CompileConfig *config) {
   // Try not promoting to fp64 unless necessary
   if (type == BinaryOpType::atan2) {
     if (lhs_type == PrimitiveType::f64 || rhs_type == PrimitiveType::f64) {
-      ret_type = PrimitiveType::f64;
+      ret_type = make_dt(PrimitiveType::f64);
     } else {
-      ret_type = PrimitiveType::f32;
+      ret_type = make_dt(PrimitiveType::f32);
     }
     return;
   }
 
   if (type == BinaryOpType::truediv) {
     auto default_fp = config->default_fp;
-    if (!is_real(lhs_type)) {
-      lhs_type = default_fp;
+    if (!is_real(lhs_type) || !is_real_tensor(lhs_type)) {
+      lhs_type = make_dt(default_fp);
     }
-    if (!is_real(rhs_type)) {
-      rhs_type = default_fp;
+    if (!is_real(rhs_type) || !is_real_tensor(rhs_type)) {
+      rhs_type = make_dt(default_fp);
     }
   }
   ret_type = promoted_type(lhs_type, rhs_type);
```
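The `make_dt` lambda is the crux of the type-check changes: every scalar result-type rule is reused verbatim, and the result is re-wrapped into the operand's tensor shape whenever the operation is a tensor op. A tiny Python model (types are modeled as strings and tuples; names are hypothetical):

```python
def make_dt(scalar_dt, is_tensor_op, shape):
    # For tensor ops, the scalar rule's result becomes the element type of a
    # tensor with the lhs shape; for scalar ops it is returned unchanged.
    return ("tensor", shape, scalar_dt) if is_tensor_op else scalar_dt

print(make_dt("i32", True, (3,)))   # ('tensor', (3,), 'i32'), e.g. a < b on 3-vectors
print(make_dt("i32", False, None))  # 'i32', a plain scalar comparison
```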
16 changes: 15 additions & 1 deletion taichi/ir/type_factory.cpp
```diff
@@ -181,11 +181,25 @@ static DataType to_primitive_type(DataType d) {
 };
 }  // namespace
 
-DataType promoted_type(DataType x, DataType y) {
+DataType promoted_primitive_type(DataType x, DataType y) {
   if (compare_types(to_primitive_type(x), to_primitive_type(y)))
     return x;
   else
     return y;
 }
+
+DataType promoted_type(DataType a, DataType b) {
+  if (a->is<TensorType>() || b->is<TensorType>()) {
+    TI_ASSERT(a->is<TensorType>() && b->is<TensorType>());
+    auto tensor_ty_a = a->cast<TensorType>();
+    auto tensor_ty_b = b->cast<TensorType>();
+    auto promoted_dt = promoted_type(tensor_ty_a->get_element_type(),
+                                     tensor_ty_b->get_element_type());
+    return TypeFactory::create_tensor_type(tensor_ty_a->get_shape(),
+                                           promoted_dt);
+  } else {
+    return promoted_primitive_type(a, b);
+  }
+};
 
 TLANG_NAMESPACE_END
```
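`promoted_type` is now recursive: for two tensor types it promotes the element types and rebuilds a tensor with the lhs shape. A compact Python model (types as strings or `('tensor', shape, elem)` tuples; the scalar rule is deliberately simplified):

```python
def promoted_type(a, b):
    if isinstance(a, tuple) or isinstance(b, tuple):
        # Mixed scalar/tensor operands were already unified by broadcasting.
        assert isinstance(a, tuple) and isinstance(b, tuple)
        return ("tensor", a[1], promoted_type(a[2], b[2]))
    # Toy scalar lattice: a float operand wins; the real rule compares ranks.
    return "f32" if "f32" in (a, b) else a

print(promoted_type(("tensor", (3,), "i32"), ("tensor", (3,), "f32")))
# ('tensor', (3,), 'f32')
```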
13 changes: 13 additions & 0 deletions taichi/ir/type_utils.h
```diff
@@ -185,6 +185,19 @@ inline TypedConstant get_min_value(DataType dt) {
   }
 }
 
+#define DEFINE_TENSOR_CHECK(func_name)                   \
+  inline bool func_name##_tensor(DataType dt) {          \
+    if (auto tensor_type = dt->cast<TensorType>()) {     \
+      return func_name(tensor_type->get_element_type()); \
+    }                                                    \
+    return false;                                        \
+  }
+
+DEFINE_TENSOR_CHECK(is_quant);
+DEFINE_TENSOR_CHECK(is_real);
+DEFINE_TENSOR_CHECK(is_integral);
+DEFINE_TENSOR_CHECK(is_signed);
+
 class BitStructTypeBuilder {
  public:
   explicit BitStructTypeBuilder(int max_num_bits) {
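Each `DEFINE_TENSOR_CHECK(is_real)` instantiation expands to an `is_real_tensor(dt)` predicate that is true only for tensor types whose element type passes the underlying scalar check, and false for everything else, including plain scalars. The same lifting, modeled in Python (hypothetical type encoding):

```python
def define_tensor_check(scalar_check):
    # Mirrors DEFINE_TENSOR_CHECK: lift a scalar predicate to tensor types.
    def tensor_check(dt):
        if isinstance(dt, tuple) and dt[0] == "tensor":
            return scalar_check(dt[2])  # test the element type
        return False  # non-tensor types never pass the *_tensor variant
    return tensor_check

is_real_tensor = define_tensor_check(lambda dt: dt in ("f16", "f32", "f64"))
print(is_real_tensor(("tensor", (3,), "f32")))  # True
print(is_real_tensor("f32"))                    # False: scalars are excluded
```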
10 changes: 10 additions & 0 deletions taichi/python/export_lang.cpp
```diff
@@ -778,6 +778,16 @@ void export_lang(py::module &m) {
           },
           py::return_value_policy::reference)
       .def("get_ret_type", &Expr::get_ret_type)
+      .def("is_tensor",
+           [](Expr *expr) { return expr->expr->ret_type->is<TensorType>(); })
+      .def("get_shape",
+           [](Expr *expr) -> std::optional<std::vector<int>> {
+             if (expr->expr->ret_type->is<TensorType>()) {
+               return std::optional<std::vector<int>>(
+                   expr->expr->ret_type->cast<TensorType>()->get_shape());
+             }
+             return std::nullopt;
+           })
       .def("type_check", &Expr::type_check)
       .def("get_expr_name",
            [](Expr *expr) { return expr->cast<FieldExpression>()->name; })
```
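These bindings are what the Python-side `Expr.is_tensor()` and `Expr.get_shape()` from `expr.py` call into; pybind11 converts the `std::optional` to `None` when the expression is not tensor-typed. A sketch of the resulting Python contract (the helper below is hypothetical):

```python
def shape_of(expr_ptr):
    # expr_ptr models the bound C++ Expr: get_shape() yields a list of ints
    # for tensor-typed expressions and None (std::nullopt) otherwise.
    shape = expr_ptr.get_shape()
    if shape is None:
        raise TypeError("expression is not tensor-typed")
    return shape
```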
122 changes: 89 additions & 33 deletions taichi/transforms/demote_operations.cpp
```diff
@@ -16,6 +16,51 @@ class DemoteOperations : public BasicStmtVisitor {
   DemoteOperations() : BasicStmtVisitor() {
   }
 
+  std::unique_ptr<Stmt> demote_ifloordiv(BinaryOpStmt *stmt,
+                                         Stmt *lhs,
+                                         Stmt *rhs) {
+    auto ret = Stmt::make<BinaryOpStmt>(BinaryOpType::div, lhs, rhs);
+    auto zero = Stmt::make<ConstStmt>(TypedConstant(0));
+    auto lhs_ltz =
+        Stmt::make<BinaryOpStmt>(BinaryOpType::cmp_lt, lhs, zero.get());
+    auto rhs_ltz =
+        Stmt::make<BinaryOpStmt>(BinaryOpType::cmp_lt, rhs, zero.get());
+    auto rhs_mul_ret =
+        Stmt::make<BinaryOpStmt>(BinaryOpType::mul, rhs, ret.get());
+    auto cond1 = Stmt::make<BinaryOpStmt>(BinaryOpType::cmp_ne, lhs_ltz.get(),
+                                          rhs_ltz.get());
+    auto cond2 =
+        Stmt::make<BinaryOpStmt>(BinaryOpType::cmp_ne, lhs, zero.get());
+    auto cond3 =
+        Stmt::make<BinaryOpStmt>(BinaryOpType::cmp_ne, rhs_mul_ret.get(), lhs);
+    auto cond12 = Stmt::make<BinaryOpStmt>(BinaryOpType::bit_and, cond1.get(),
+                                           cond2.get());
+    auto cond = Stmt::make<BinaryOpStmt>(BinaryOpType::bit_and, cond12.get(),
+                                         cond3.get());
+    auto real_ret =
+        Stmt::make<BinaryOpStmt>(BinaryOpType::add, ret.get(), cond.get());
+    modifier.insert_before(stmt, std::move(ret));
+    modifier.insert_before(stmt, std::move(zero));
+    modifier.insert_before(stmt, std::move(lhs_ltz));
+    modifier.insert_before(stmt, std::move(rhs_ltz));
+    modifier.insert_before(stmt, std::move(rhs_mul_ret));
+    modifier.insert_before(stmt, std::move(cond1));
+    modifier.insert_before(stmt, std::move(cond2));
+    modifier.insert_before(stmt, std::move(cond3));
+    modifier.insert_before(stmt, std::move(cond12));
+    modifier.insert_before(stmt, std::move(cond));
+    return real_ret;
+  }
+
+  std::unique_ptr<Stmt> demote_ffloor(BinaryOpStmt *stmt,
+                                      Stmt *lhs,
+                                      Stmt *rhs) {
+    auto div = Stmt::make<BinaryOpStmt>(BinaryOpType::div, lhs, rhs);
+    auto floor = Stmt::make<UnaryOpStmt>(UnaryOpType::floor, div.get());
+    modifier.insert_before(stmt, std::move(div));
+    return floor;
+  }
+
   void visit(BitExtractStmt *stmt) override {
     // @ti.func
     // def bit_extract(input, begin, end):
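```

The two helpers are extracted verbatim from `visit(BinaryOpStmt *)` (the deletions in the next two hunks) so the new tensor branch can reuse them per element. `demote_ifloordiv` lowers floored integer division to truncated division plus a sign correction; at this IR level a true comparison is an all-ones mask (-1), so `add(ret, cond)` subtracts 1 when the condition holds. A Python model of the lowering (illustrative):

```python
def ifloordiv(a: int, b: int) -> int:
    # Truncated (C-style) division.
    ret = a // b if (a < 0) == (b < 0) else -((-a) // b)
    # Correct toward negative infinity when the signs differ, a != 0,
    # and the division was inexact (b * ret != a).
    if (a < 0) != (b < 0) and a != 0 and b * ret != a:
        ret -= 1
    return ret

assert ifloordiv(-7, 2) == -7 // 2 == -4
assert ifloordiv(7, 2) == 3 and ifloordiv(9, 3) == 3
```

```diff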
@@ -64,38 +109,9 @@ class DemoteOperations : public BasicStmtVisitor {
         // + + f = f (f&t)
         //
         // the situation of `b = 0` is ignored since we get FPE anyway.
-        auto ret = Stmt::make<BinaryOpStmt>(BinaryOpType::div, lhs, rhs);
-        auto zero = Stmt::make<ConstStmt>(TypedConstant(0));
-        auto lhs_ltz =
-            Stmt::make<BinaryOpStmt>(BinaryOpType::cmp_lt, lhs, zero.get());
-        auto rhs_ltz =
-            Stmt::make<BinaryOpStmt>(BinaryOpType::cmp_lt, rhs, zero.get());
-        auto rhs_mul_ret =
-            Stmt::make<BinaryOpStmt>(BinaryOpType::mul, rhs, ret.get());
-        auto cond1 = Stmt::make<BinaryOpStmt>(BinaryOpType::cmp_ne,
-                                              lhs_ltz.get(), rhs_ltz.get());
-        auto cond2 =
-            Stmt::make<BinaryOpStmt>(BinaryOpType::cmp_ne, lhs, zero.get());
-        auto cond3 = Stmt::make<BinaryOpStmt>(BinaryOpType::cmp_ne,
-                                              rhs_mul_ret.get(), lhs);
-        auto cond12 = Stmt::make<BinaryOpStmt>(BinaryOpType::bit_and,
-                                               cond1.get(), cond2.get());
-        auto cond = Stmt::make<BinaryOpStmt>(BinaryOpType::bit_and,
-                                             cond12.get(), cond3.get());
-        auto real_ret =
-            Stmt::make<BinaryOpStmt>(BinaryOpType::add, ret.get(), cond.get());
+        auto real_ret = demote_ifloordiv(stmt, lhs, rhs);
         real_ret->ret_type = stmt->ret_type;
         stmt->replace_usages_with(real_ret.get());
-        modifier.insert_before(stmt, std::move(ret));
-        modifier.insert_before(stmt, std::move(zero));
-        modifier.insert_before(stmt, std::move(lhs_ltz));
-        modifier.insert_before(stmt, std::move(rhs_ltz));
-        modifier.insert_before(stmt, std::move(rhs_mul_ret));
-        modifier.insert_before(stmt, std::move(cond1));
-        modifier.insert_before(stmt, std::move(cond2));
-        modifier.insert_before(stmt, std::move(cond3));
-        modifier.insert_before(stmt, std::move(cond12));
-        modifier.insert_before(stmt, std::move(cond));
         modifier.insert_before(stmt, std::move(real_ret));
         modifier.erase(stmt);
 
@@ -104,13 +120,53 @@ class DemoteOperations : public BasicStmtVisitor {
// def ffloordiv(a, b):
// r = ti.raw_div(a, b)
// return ti.floor(r)
auto div = Stmt::make<BinaryOpStmt>(BinaryOpType::div, lhs, rhs);
auto floor = Stmt::make<UnaryOpStmt>(UnaryOpType::floor, div.get());
auto floor = demote_ffloor(stmt, lhs, rhs);
floor->ret_type = stmt->ret_type;
stmt->replace_usages_with(floor.get());
modifier.insert_before(stmt, std::move(div));
modifier.insert_before(stmt, std::move(floor));
modifier.erase(stmt);
} else if (lhs->ret_type->is<TensorType>() &&
rhs->ret_type->is<TensorType>()) {
bool use_integral = is_integral_tensor(lhs->ret_type) &&
is_integral_tensor(rhs->ret_type);
std::vector<Stmt *> ret_stmts;
auto lhs_tensor_ty = lhs->ret_type->cast<TensorType>();
auto rhs_tensor_ty = rhs->ret_type->cast<TensorType>();
auto lhs_alloca = Stmt::make<AllocaStmt>(lhs_tensor_ty);
auto rhs_alloca = Stmt::make<AllocaStmt>(rhs_tensor_ty);
auto lhs_store =
Stmt::make<LocalStoreStmt>(lhs_alloca.get(), stmt->lhs);
auto rhs_store =
Stmt::make<LocalStoreStmt>(rhs_alloca.get(), stmt->rhs);
auto lhs_ptr = lhs_alloca.get();
auto rhs_ptr = rhs_alloca.get();
modifier.insert_before(stmt, std::move(lhs_alloca));
modifier.insert_before(stmt, std::move(rhs_alloca));
modifier.insert_before(stmt, std::move(lhs_store));
modifier.insert_before(stmt, std::move(rhs_store));
for (int i = 0; i < lhs_tensor_ty->get_num_elements(); i++) {
auto idx = Stmt::make<ConstStmt>(TypedConstant(i));
auto lhs_i = Stmt::make<PtrOffsetStmt>(lhs_ptr, idx.get());
auto rhs_i = Stmt::make<PtrOffsetStmt>(rhs_ptr, idx.get());
auto lhs_load = Stmt::make<LocalLoadStmt>(lhs_i.get());
auto rhs_load = Stmt::make<LocalLoadStmt>(rhs_i.get());
auto cur_lhs = lhs_load.get();
auto cur_rhs = rhs_load.get();
modifier.insert_before(stmt, std::move(idx));
modifier.insert_before(stmt, std::move(lhs_i));
modifier.insert_before(stmt, std::move(rhs_i));
modifier.insert_before(stmt, std::move(lhs_load));
modifier.insert_before(stmt, std::move(rhs_load));
auto ret_i = use_integral ? demote_ifloordiv(stmt, cur_lhs, cur_rhs)
: demote_ffloor(stmt, cur_lhs, cur_rhs);
ret_stmts.push_back(ret_i.get());
modifier.insert_before(stmt, std::move(ret_i));
}
auto new_matrix = Stmt::make<MatrixInitStmt>(ret_stmts);
new_matrix->ret_type = stmt->ret_type;
stmt->replace_usages_with(new_matrix.get());
modifier.insert_before(stmt, std::move(new_matrix));
modifier.erase(stmt);
}
} else if (stmt->op_type == BinaryOpType::bit_shr &&
is_integral(lhs->element_type()) &&
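For tensor operands the pass scalarizes: both operands are spilled to stack allocations, each element is loaded by pointer offset, the scalar floordiv demotion is applied per element, and the results are repacked with `MatrixInitStmt`. The loop's effect, modeled in Python (illustrative; tensors as flat lists, floating-point path shown; the integral path would use the `ifloordiv` model above):

```python
import math

def demote_tensor_floordiv(lhs_elems, rhs_elems):
    # Per-element demotion, mirroring the alloca/load/compute/MatrixInit loop:
    # each element pair goes through the scalar lowering, and the results are
    # gathered into a new matrix value.
    return [math.floor(a / b) for a, b in zip(lhs_elems, rhs_elems)]

print(demote_tensor_floordiv([7.0, -7.0, 9.0], [2.0, 2.0, 3.0]))
# [3, -4, 3]
```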