[lang] Replaced usage of taichi.lang.core with taichi.core.util.ti_core. (#2245)

* [misc] Deleted 3 debug messages in codegen_cc.cpp

* [ir] [transforms] [docs] Added assertion that indices won't cause overflow under debug mode. Fixed one typo in write_test docs.

* Update tests/python/test_indices_assert.py to test cpu only

Co-authored-by: Ye Kuang <[email protected]>

* [skip ci] enforce code format

* added comment

* [python] [lang] Removed taichi.lang.core.

* [python] [lang] Replaced usage of taichi.lang.core with taichi.core.util.ti_core.

* formatted python code

Co-authored-by: Leon Zhang <[email protected]>
Co-authored-by: Ye Kuang <[email protected]>
Co-authored-by: Taichi Gardener <[email protected]>
4 people authored Apr 4, 2021
1 parent b99122a commit 6a2c017
Showing 12 changed files with 147 additions and 154 deletions.
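
Every hunk below applies the same mechanical substitution: import the compiled core module once via taichi.core.util instead of going through the taichi.lang.core shim. A minimal before/after sketch of the pattern (call site chosen only for illustration; assumes a Taichi install of this era):

# Before: call sites went through the taichi.lang.core shim.
# from taichi.lang.core import taichi_lang_core
# n = taichi_lang_core.get_max_num_indices()

# After: import the compiled core once, under one private alias.
from taichi.core.util import ti_core as _ti_core

n = _ti_core.get_max_num_indices()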
2 changes: 1 addition & 1 deletion python/taichi/lang/__init__.py
@@ -22,7 +22,7 @@
 import taichi as ti
 
 # TODO(#2223): Remove
-core = taichi_lang_core
+core = _ti_core
 
 runtime = impl.get_runtime()
 
3 changes: 0 additions & 3 deletions python/taichi/lang/core.py

This file was deleted.

30 changes: 14 additions & 16 deletions python/taichi/lang/expr.py
@@ -1,6 +1,6 @@
+from taichi.core.util import ti_core as _ti_core
 from taichi.lang import impl
 from taichi.lang.common_ops import TaichiOperations
-from taichi.lang.core import taichi_lang_core
 from taichi.lang.util import (is_taichi_class, python_scope, to_numpy_type,
                               to_pytorch_type)
 from taichi.misc.util import deprecated
@@ -16,7 +16,7 @@ def __init__(self, *args, tb=None):
         self.setter = None
         self.tb = tb
         if len(args) == 1:
-            if isinstance(args[0], taichi_lang_core.Expr):
+            if isinstance(args[0], _ti_core.Expr):
                 self.ptr = args[0]
             elif isinstance(args[0], Expr):
                 self.ptr = args[0].ptr
@@ -50,8 +50,7 @@ def __setitem__(self, key, value):
         if not isinstance(key, (tuple, list)):
             key = (key, )
         assert len(key) == len(self.shape)
-        key = key + ((0, ) *
-                     (taichi_lang_core.get_max_num_indices() - len(key)))
+        key = key + ((0, ) * (_ti_core.get_max_num_indices() - len(key)))
         self.setter(value, *key)
 
     @python_scope
@@ -62,8 +61,7 @@ def __getitem__(self, key):
             key = ()
         if not isinstance(key, (tuple, list)):
             key = (key, )
-        key = key + ((0, ) *
-                     (taichi_lang_core.get_max_num_indices() - len(key)))
+        key = key + ((0, ) * (_ti_core.get_max_num_indices() - len(key)))
         return self.getter(*key)
 
     def loop_range(self):
@@ -82,29 +80,29 @@ def initialize_accessor(self):
             return
         snode = self.ptr.snode()
 
-        if taichi_lang_core.is_real(self.dtype):
+        if _ti_core.is_real(self.dtype):
 
             def getter(*key):
-                assert len(key) == taichi_lang_core.get_max_num_indices()
+                assert len(key) == _ti_core.get_max_num_indices()
                 return snode.read_float(key)
 
             def setter(value, *key):
-                assert len(key) == taichi_lang_core.get_max_num_indices()
+                assert len(key) == _ti_core.get_max_num_indices()
                 snode.write_float(key, value)
         else:
-            if taichi_lang_core.is_signed(self.dtype):
+            if _ti_core.is_signed(self.dtype):
 
                 def getter(*key):
-                    assert len(key) == taichi_lang_core.get_max_num_indices()
+                    assert len(key) == _ti_core.get_max_num_indices()
                     return snode.read_int(key)
             else:
 
                 def getter(*key):
-                    assert len(key) == taichi_lang_core.get_max_num_indices()
+                    assert len(key) == _ti_core.get_max_num_indices()
                     return snode.read_uint(key)
 
             def setter(value, *key):
-                assert len(key) == taichi_lang_core.get_max_num_indices()
+                assert len(key) == _ti_core.get_max_num_indices()
                 snode.write_int(key, value)
 
         self.getter = getter
@@ -130,7 +128,7 @@ def fill(self, val):
 
     def parent(self, n=1):
         p = self.snode.parent(n)
-        return Expr(taichi_lang_core.global_var_expr_from_snode(p.ptr))
+        return Expr(_ti_core.global_var_expr_from_snode(p.ptr))
 
     def is_global(self):
         return self.ptr.is_global_var() or self.ptr.is_external_var()
@@ -230,7 +228,7 @@ def __repr__(self):
 def make_var_vector(size):
     exprs = []
     for _ in range(size):
-        exprs.append(taichi_lang_core.make_id_expr(''))
+        exprs.append(_ti_core.make_id_expr(''))
     return ti.Vector(exprs)
 
 
@@ -242,7 +240,7 @@ def make_expr_group(*exprs):
         mat = exprs[0]
         assert mat.m == 1
         exprs = mat.entries
-    expr_group = taichi_lang_core.ExprGroup()
+    expr_group = _ti_core.ExprGroup()
     for i in exprs:
         expr_group.push_back(Expr(i).ptr)
     return expr_group
58 changes: 29 additions & 29 deletions python/taichi/lang/impl.py
@@ -3,7 +3,7 @@
 import warnings
 
 import numpy as np
-from taichi.core import util as cutil
+from taichi.core.util import ti_core as _ti_core
 from taichi.lang.exception import TaichiSyntaxError
 from taichi.lang.expr import Expr, make_expr_group
 from taichi.lang.snode import SNode
@@ -18,7 +18,7 @@
 @taichi_scope
 def expr_init(rhs):
     if rhs is None:
-        return Expr(cutil.ti_core.expr_alloca())
+        return Expr(_ti_core.expr_alloca())
     if is_taichi_class(rhs):
         return rhs.variable()
     else:
@@ -28,14 +28,14 @@ def expr_init(rhs):
             return tuple(expr_init(e) for e in rhs)
         elif isinstance(rhs, dict):
             return dict((key, expr_init(val)) for key, val in rhs.items())
-        elif isinstance(rhs, cutil.ti_core.DataType):
+        elif isinstance(rhs, _ti_core.DataType):
             return rhs
         elif isinstance(rhs, ti.ndrange):
             return rhs
         elif hasattr(rhs, '_data_oriented'):
             return rhs
         else:
-            return Expr(cutil.ti_core.expr_var(Expr(rhs).ptr))
+            return Expr(_ti_core.expr_var(Expr(rhs).ptr))
 
 
 @taichi_scope
Expand Down Expand Up @@ -77,7 +77,7 @@ def begin_frontend_struct_for(group, loop_range):
f'({group.size()} != {len(loop_range.shape)}). Maybe you wanted to '
'use "for I in ti.grouped(x)" to group all indices into a single vector I?'
)
cutil.ti_core.begin_frontend_struct_for(group, loop_range.ptr)
_ti_core.begin_frontend_struct_for(group, loop_range.ptr)


def begin_frontend_if(cond):
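
The error message in this hunk steers users toward ti.grouped; a short sketch of the two struct-for spellings it distinguishes (assumes a working Taichi install of this era):

import taichi as ti

ti.init(arch=ti.cpu)
x = ti.field(ti.f32, shape=(4, 4))

@ti.kernel
def fill():
    for i, j in x:  # one loop variable per field dimension
        x[i, j] = i + j

@ti.kernel
def fill_grouped():
    for I in ti.grouped(x):  # all indices grouped into one vector I
        x[I] = I[0] + I[1]

fill()
fill_grouped()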
@@ -88,7 +88,7 @@ def begin_frontend_if(cond):
             '    if all(x == y):\n'
             'or\n'
             '    if any(x != y):\n')
-    cutil.ti_core.begin_frontend_if(Expr(cond).ptr)
+    _ti_core.begin_frontend_if(Expr(cond).ptr)
 
 
 def wrap_scalar(x):
@@ -138,7 +138,7 @@ def subscript(value, *indices):
             raise IndexError(
                 f'Field with dim {field_dim} accessed with indices of dim {index_dim}'
             )
-        return Expr(cutil.ti_core.subscript(value.ptr, indices_expr_group))
+        return Expr(_ti_core.subscript(value.ptr, indices_expr_group))
     else:
         return value[indices]
 
@@ -222,7 +222,7 @@ def set_default_ip(self, ip):
 
     def create_program(self):
         if self.prog is None:
-            self.prog = cutil.ti_core.Program()
+            self.prog = _ti_core.Program()
 
     def materialize(self):
         if self.materialized:
@@ -236,7 +236,7 @@ def layout():
                 func()
 
         ti.trace('Materializing layout...')
-        cutil.ti_core.layout(layout)
+        _ti_core.layout(layout)
         self.materialized = True
         not_placed = []
         for var in self.global_vars:
@@ -311,19 +311,19 @@ def make_constant_expr(val):
             # the LHS, but at least this makes assigning constant to unsigned
             # int work. See https://github.com/taichi-dev/taichi/issues/2060
             return Expr(
-                cutil.ti_core.make_const_expr_i32(
+                _ti_core.make_const_expr_i32(
                     _clamp_unsigned_to_range(np.int32, val)))
         elif pytaichi.default_ip in {ti.i64, ti.u64}:
             return Expr(
-                cutil.ti_core.make_const_expr_i64(
+                _ti_core.make_const_expr_i64(
                     _clamp_unsigned_to_range(np.int64, val)))
         else:
             assert False
     elif isinstance(val, (float, np.floating, np.ndarray)):
         if pytaichi.default_fp == ti.f32:
-            return Expr(cutil.ti_core.make_const_expr_f32(val))
+            return Expr(_ti_core.make_const_expr_f32(val))
         elif pytaichi.default_fp == ti.f64:
-            return Expr(cutil.ti_core.make_const_expr_f64(val))
+            return Expr(_ti_core.make_const_expr_f64(val))
         else:
             assert False
     else:
@@ -337,7 +337,7 @@ def reset():
     pytaichi = PyTaichi(old_kernels)
     for k in old_kernels:
         k.reset()
-    cutil.ti_core.reset_default_compile_config()
+    _ti_core.reset_default_compile_config()
 
 
 @taichi_scope
@@ -415,16 +415,16 @@ def field(dtype, shape=None, offset=None, needs_grad=False):
     del _taichi_skip_traceback
 
     # primal
-    x = Expr(cutil.ti_core.make_id_expr(""))
+    x = Expr(_ti_core.make_id_expr(""))
     x.declaration_tb = get_traceback(stacklevel=2)
-    x.ptr = cutil.ti_core.global_new(x.ptr, dtype)
+    x.ptr = _ti_core.global_new(x.ptr, dtype)
     x.ptr.set_is_primal(True)
     pytaichi.global_vars.append(x)
 
-    if cutil.ti_core.needs_grad(dtype):
+    if _ti_core.needs_grad(dtype):
         # adjoint
-        x_grad = Expr(cutil.ti_core.make_id_expr(""))
-        x_grad.ptr = cutil.ti_core.global_new(x_grad.ptr, dtype)
+        x_grad = Expr(_ti_core.make_id_expr(""))
+        x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype)
         x_grad.ptr.set_is_primal(False)
         x.set_grad(x_grad)
 
@@ -508,14 +508,14 @@ def fused_string(entries):
     entries = vars2entries(vars)
     entries = fused_string(entries)
     contentries = [entry2content(entry) for entry in entries]
-    cutil.ti_core.create_print(contentries)
+    _ti_core.create_print(contentries)
 
 
 @taichi_scope
 def ti_assert(cond, msg, extra_args):
     # Mostly a wrapper to help us convert from Expr (defined in Python) to
-    # cutil.ti_core.Expr (defined in C++)
-    cutil.ti_core.create_assert_stmt(
+    # _ti_core.Expr (defined in C++)
+    _ti_core.create_assert_stmt(
         Expr(cond).ptr, msg, [Expr(x).ptr for x in extra_args])
 
 
@@ -550,16 +550,16 @@ def one(x):
 
 @taichi_scope
 def get_external_tensor_dim(var):
-    return cutil.ti_core.get_external_tensor_dim(var)
+    return _ti_core.get_external_tensor_dim(var)
 
 
 @taichi_scope
 def get_external_tensor_shape_along_axis(var, i):
-    return cutil.ti_core.get_external_tensor_shape_along_axis(var, i)
+    return _ti_core.get_external_tensor_shape_along_axis(var, i)
 
 
 def indices(*x):
-    return [cutil.ti_core.Index(i) for i in x]
+    return [_ti_core.Index(i) for i in x]
 
 
 index = indices
@@ -593,16 +593,16 @@ def grouped(x):
 
 
 def stop_grad(x):
-    cutil.ti_core.stop_grad(x.snode.ptr)
+    _ti_core.stop_grad(x.snode.ptr)
 
 
 def current_cfg():
-    return cutil.ti_core.current_compile_config()
+    return _ti_core.current_compile_config()
 
 
 def default_cfg():
-    return cutil.ti_core.default_compile_config()
+    return _ti_core.default_compile_config()
 
 
 def call_internal(name):
-    cutil.ti_core.create_internal_func_stmt(name)
+    _ti_core.create_internal_func_stmt(name)
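
make_constant_expr above funnels oversized unsigned literals through _clamp_unsigned_to_range before calling the signed make_const_expr_i32/i64 bindings. That helper is not part of this diff; the following is only a plausible sketch of the wrap-around the surrounding comment describes, not the actual implementation:

import numpy as np

def clamp_unsigned_to_range(npty, val):
    # Hypothetical stand-in for impl._clamp_unsigned_to_range: reinterpret
    # an unsigned literal as the signed value with the same bit pattern,
    # so e.g. 2**32 - 1 becomes -1 for np.int32.
    iinfo = np.iinfo(npty)
    if iinfo.min <= val <= iinfo.max:
        return val
    cap = 1 << iinfo.bits
    assert 0 <= val < cap, f'{val} does not fit in {iinfo.bits} bits'
    return val - cap

print(clamp_unsigned_to_range(np.int32, 2**32 - 1))  # -1
print(clamp_unsigned_to_range(np.int64, 2**64 - 1))  # -1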
14 changes: 7 additions & 7 deletions python/taichi/lang/kernel_arguments.py
@@ -1,4 +1,4 @@
-from taichi.lang.core import taichi_lang_core
+from taichi.core.util import ti_core as _ti_core
 from taichi.lang.expr import Expr
 from taichi.lang.snode import SNode
 from taichi.lang.util import cook_dtype, to_taichi_type
@@ -25,7 +25,7 @@ def extract(self, x):
             return x.ptr
         if isinstance(x, Expr):
             return x.ptr.get_underlying_ptr_address()
-        if isinstance(x, taichi_lang_core.Expr):
+        if isinstance(x, _ti_core.Expr):
            return x.get_underlying_ptr_address()
         if isinstance(x, tuple):
             return tuple(self.extract(item) for item in x)
@@ -37,16 +37,16 @@ def extract(self, x):
 
 def decl_scalar_arg(dtype):
     dtype = cook_dtype(dtype)
-    arg_id = taichi_lang_core.decl_arg(dtype, False)
-    return Expr(taichi_lang_core.make_arg_load_expr(arg_id, dtype))
+    arg_id = _ti_core.decl_arg(dtype, False)
+    return Expr(_ti_core.make_arg_load_expr(arg_id, dtype))
 
 
 def decl_ext_arr_arg(dtype, dim):
     dtype = cook_dtype(dtype)
-    arg_id = taichi_lang_core.decl_arg(dtype, True)
-    return Expr(taichi_lang_core.make_external_tensor_expr(dtype, dim, arg_id))
+    arg_id = _ti_core.decl_arg(dtype, True)
+    return Expr(_ti_core.make_external_tensor_expr(dtype, dim, arg_id))
 
 
 def decl_scalar_ret(dtype):
     dtype = cook_dtype(dtype)
-    return taichi_lang_core.decl_ret(dtype)
+    return _ti_core.decl_ret(dtype)
11 changes: 5 additions & 6 deletions python/taichi/lang/kernel_impl.py
@@ -8,7 +8,7 @@
 from taichi.core import primitive_types
 from taichi.lang import impl, util
 from taichi.lang.ast_checker import KernelSimplicityASTChecker
-from taichi.lang.core import taichi_lang_core
+from taichi.core.util import ti_core as _ti_core
 from taichi.lang.exception import TaichiSyntaxError
 from taichi.lang.kernel_arguments import ext_arr, template
 from taichi.lang.shell import _shell_pop_print, oinspect
@@ -352,8 +352,7 @@ def materialize(self, key=None, args=None, arg_features=None):
                 mode='exec'), global_vars, local_vars)
         compiled = local_vars[self.func.__name__]
 
-        taichi_kernel = taichi_lang_core.create_kernel(kernel_name,
-                                                       self.is_grad)
+        taichi_kernel = _ti_core.create_kernel(kernel_name, self.is_grad)
 
         # Do not change the name of 'taichi_ast_generator'
         # The warning system needs this identifier to remove unnecessary messages
@@ -426,22 +425,22 @@ def call_back():
 
                 if str(v.device).startswith('cuda'):
                     # External tensor on cuda
-                    if taichi_arch != taichi_lang_core.Arch.cuda:
+                    if taichi_arch != _ti_core.Arch.cuda:
                         # copy data back to cpu
                         host_v = v.to(device='cpu', copy=True)
                         tmp = host_v
                         callbacks.append(get_call_back(v, host_v))
                 else:
                     # External tensor on cpu
-                    if taichi_arch == taichi_lang_core.Arch.cuda:
+                    if taichi_arch == _ti_core.Arch.cuda:
                         gpu_v = v.cuda()
                         tmp = gpu_v
                         callbacks.append(get_call_back(v, gpu_v))
                 launch_ctx.set_arg_nparray(
                     actual_argument_slot, int(tmp.data_ptr()),
                     tmp.element_size() * tmp.nelement())
                 shape = v.shape
-                max_num_indices = taichi_lang_core.get_max_num_indices()
+                max_num_indices = _ti_core.get_max_num_indices()
                 assert len(
                     shape
                 ) <= max_num_indices, "External array cannot have > {} indices".format(
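
The last hunk encodes a device-matching rule for external torch tensors: if the tensor's device disagrees with the arch the kernel runs on, copy the data across and register a callback that copies results back after launch. A condensed sketch of just that rule (assumes torch is installed; kernel_on_cuda stands in for the taichi_arch comparison):

import torch

def match_device(v, kernel_on_cuda):
    # Return the tensor the kernel should actually see, plus callbacks
    # that copy results back into the caller's tensor after the launch.
    callbacks = []
    if v.is_cuda and not kernel_on_cuda:
        tmp = v.to(device='cpu', copy=True)  # cuda tensor, cpu kernel
        callbacks.append(lambda: v.copy_(tmp))
    elif (not v.is_cuda) and kernel_on_cuda:
        tmp = v.cuda()                       # cpu tensor, cuda kernel
        callbacks.append(lambda: v.copy_(tmp))
    else:
        tmp = v                              # devices already agree
    return tmp, callbacks

x = torch.zeros(4)
tmp, cbs = match_device(x, kernel_on_cuda=False)  # no copy needed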
(Diffs for the remaining 6 changed files were not loaded.)
