From a3170caff1c673a8424adaab2103c4059cf3c3f3 Mon Sep 17 00:00:00 2001 From: Yi Xu Date: Fri, 30 Jul 2021 11:29:50 +0800 Subject: [PATCH 01/10] Add Field class --- python/taichi/lang/expr.py | 5 +--- python/taichi/lang/field.py | 55 +++++++++++++++++++++++++++++++++++++ python/taichi/lang/impl.py | 6 ++-- python/taichi/lang/snode.py | 8 +++--- 4 files changed, 64 insertions(+), 10 deletions(-) create mode 100644 python/taichi/lang/field.py diff --git a/python/taichi/lang/expr.py b/python/taichi/lang/expr.py index 2714be7f912e3..e215186ee942e 100644 --- a/python/taichi/lang/expr.py +++ b/python/taichi/lang/expr.py @@ -176,10 +176,7 @@ def is_global(self): """ return self.ptr.is_global_var() or self.ptr.is_external_var() - @property - def snode(self): - from taichi.lang.snode import SNode - return SNode(self.ptr.snode()) + def __hash__(self): return self.ptr.get_raw_address() diff --git a/python/taichi/lang/field.py b/python/taichi/lang/field.py new file mode 100644 index 0000000000000..e8c2d76017755 --- /dev/null +++ b/python/taichi/lang/field.py @@ -0,0 +1,55 @@ +from functools import reduce +from operator import mul + +class Field: + """Taichi field abstract class.""" + + @property + def shape(self): + raise Exception("Abstract Field class should not be directly used") + + @property + def dtype(self): + raise Exception("Abstract Field class should not be directly used") + + @property + def tensor_shape(self): + raise Exception("Abstract Field class should not be directly used") + + @property + def is_tensor(self): + return len(self.tensor_shape) > 0 + + +class SNodeField(Field): + """Taichi field with SNode implementation. + + Args: + vars (List[Expr]): Field members wrapping corresponding C++ GlobalVariableExpressions. + tensor_shape (tuple): Tensor shape, () if scalar. 
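+
+    Example (illustrative; in this commit only the scalar case is wired up to ``ti.field``)::
+
+        >>> x = ti.field(ti.f32, shape=(8, 8))
+        >>> # x wraps SNodeField([expr], ()) -- one field member, empty tensor shape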
+ """ + def __init__(self, vars, tensor_shape): + assert len(vars) == reduce(mul, tensor_shape, 1), "Tensor shape doesn't match number of vars" + assert len(tensor_shape) <= 2, "Only scalars, vectors and matrices are supported" + self.vars = vars + self.tshape = tensor_shape + + @property + def shape(self): + raise self.snode.shape + + @property + def dtype(self): + return self.snode.dtype + + @property + def tensor_shape(self): + return self.tshape + + @property + def snode(self): + from taichi.lang.snode import SNode + return SNode(self.vars[0].ptr.snode()) + + def get_field_members(self): + return self.vars diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py index d59ebc11f0bb7..5781f347de6db 100644 --- a/python/taichi/lang/impl.py +++ b/python/taichi/lang/impl.py @@ -6,6 +6,7 @@ from taichi.core.util import ti_core as _ti_core from taichi.lang.exception import InvalidOperationError, TaichiSyntaxError from taichi.lang.expr import Expr, make_expr_group +from taichi.lang.field import SNodeField from taichi.lang.snode import SNode from taichi.lang.tape import TapeImpl from taichi.lang.util import (cook_dtype, is_taichi_class, python_scope, @@ -439,7 +440,7 @@ def var(dt, shape=None, offset=None, needs_grad=False): @python_scope -def field(dtype, shape=None, name="", offset=None, needs_grad=False): +def field(dtype, shape=None, name="", offset=None, needs_grad=False, use_snode=True): _taichi_skip_traceback = 1 dtype = cook_dtype(dtype) @@ -460,6 +461,7 @@ def field(dtype, shape=None, name="", offset=None, needs_grad=False): del _taichi_skip_traceback + assert use_snode, "Only SNode Field is supported now" # primal x = Expr(_ti_core.make_id_expr("")) x.declaration_tb = get_traceback(stacklevel=2) @@ -481,7 +483,7 @@ def field(dtype, shape=None, name="", offset=None, needs_grad=False): root.dense(index_nd(dim), shape).place(x, offset=offset) if needs_grad: root.dense(index_nd(dim), shape).place(x.grad) - return x + return SNodeField([x], ()) class Layout: diff --git a/python/taichi/lang/snode.py b/python/taichi/lang/snode.py index cba2523379f81..3994621d6025e 100644 --- a/python/taichi/lang/snode.py +++ b/python/taichi/lang/snode.py @@ -7,6 +7,7 @@ from taichi.core.util import ti_core as _ti_core from taichi.lang import impl from taichi.lang.expr import Expr +from taichi.lang.field import SNodeField from taichi.lang.util import is_taichi_class from taichi.misc.util import deprecated @@ -134,13 +135,12 @@ def place(self, *args, offset=None, shared_exponent=False): self.ptr.begin_shared_exp_placement() for arg in args: - if isinstance(arg, Expr): - self.ptr.place(Expr(arg).ptr, offset) + if isinstance(arg, SNodeField): + for var in arg.get_field_members(): + self.ptr.place(var.ptr, offset) elif isinstance(arg, list): for x in arg: self.place(x, offset=offset) - elif is_taichi_class(arg): - self.place(arg.get_field_members(), offset=offset) else: raise ValueError(f'{arg} cannot be placed') if shared_exponent: From e7a41c7561ec5fc3dccf59f92fb594c0586fe696 Mon Sep 17 00:00:00 2001 From: Yi Xu Date: Mon, 2 Aug 2021 10:41:49 +0800 Subject: [PATCH 02/10] Finish scalar field --- python/taichi/lang/expr.py | 127 ------------------------ python/taichi/lang/field.py | 127 +++++++++++++++++++++++- python/taichi/lang/impl.py | 39 +++++--- python/taichi/lang/snode.py | 28 ++---- python/taichi/lang/stmt_builder.py | 6 +- tests/python/test_struct_for_non_pot.py | 4 +- 6 files changed, 160 insertions(+), 171 deletions(-) diff --git a/python/taichi/lang/expr.py b/python/taichi/lang/expr.py 
index e215186ee942e..e09f82612b49c 100644 --- a/python/taichi/lang/expr.py +++ b/python/taichi/lang/expr.py @@ -3,7 +3,6 @@ from taichi.lang.common_ops import TaichiOperations from taichi.lang.util import (is_taichi_class, python_scope, to_numpy_type, to_pytorch_type) -from taichi.misc.util import deprecated import taichi as ti @@ -13,8 +12,6 @@ class Expr(TaichiOperations): """A Python-side Expr wrapper, whose member variable `ptr` is an instance of C++ Expr class. A C++ Expr object contains member variable `expr` which holds an instance of C++ Expression class.""" def __init__(self, *args, tb=None): _taichi_skip_traceback = 1 - self.getter = None - self.setter = None self.tb = tb if len(args) == 1: if isinstance(args[0], _ti_core.Expr): @@ -42,99 +39,6 @@ def __init__(self, *args, tb=None): self.grad = None self.val = self - @python_scope - def __setitem__(self, key, value): - """Set value with specified key when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - This will not be directly called from python for vector/matrix fields. - Python Matrix class will decompose operations into scalar-level first. - - Args: - key (Union[List[int], int, None]): indices to set - value (Union[int, float]): value to set - """ - impl.get_runtime().materialize() - self.initialize_accessor() - if key is None: - key = () - if not isinstance(key, (tuple, list)): - key = (key, ) - assert len(key) == len(self.shape) - key = key + ((0, ) * (_ti_core.get_max_num_indices() - len(key))) - self.setter(value, *key) - - @python_scope - def __getitem__(self, key): - """Get value with specified key when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - This will not be directly called from python for vector/matrix fields. - Python Matrix class will decompose operations into scalar-level first. - - Args: - key (Union[List[int], int, None]): indices to get. - - Returns: - Value retrieved with specified key. - """ - impl.get_runtime().materialize() - self.initialize_accessor() - if key is None: - key = () - if not isinstance(key, (tuple, list)): - key = (key, ) - key = key + ((0, ) * (_ti_core.get_max_num_indices() - len(key))) - return self.getter(*key) - - def loop_range(self): - return self - - def get_field_members(self): - """Get a list of involving fields when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - This is an unified interface to match :func:`taichi.lang.Matrix.get_field_members`. - - Returns: - A list containing itself. 
- """ - return [self] - - @deprecated('x.get_tensor_members()', 'x.get_field_members()') - def get_tensor_members(self): - return self.get_field_members() - - @python_scope - def initialize_accessor(self): - if self.getter: - return - snode = self.ptr.snode() - - if _ti_core.is_real(self.dtype): - - def getter(*key): - assert len(key) == _ti_core.get_max_num_indices() - return snode.read_float(key) - - def setter(value, *key): - assert len(key) == _ti_core.get_max_num_indices() - snode.write_float(key, value) - else: - if _ti_core.is_signed(self.dtype): - - def getter(*key): - assert len(key) == _ti_core.get_max_num_indices() - return snode.read_int(key) - else: - - def getter(*key): - assert len(key) == _ti_core.get_max_num_indices() - return snode.read_uint(key) - - def setter(value, *key): - assert len(key) == _ti_core.get_max_num_indices() - snode.write_int(key, value) - - self.getter = getter - self.setter = setter @python_scope def set_grad(self, grad): @@ -154,19 +58,7 @@ def fill(self, val): from taichi.lang.meta import fill_tensor fill_tensor(self, val) - def parent(self, n=1): - '''Create another Expr instance which represents one of the ancestors in SNode tree. - - The class it self must represent GlobalVariableExpression (field) internally. - - Args: - n (int): levels of the target ancestor higher than the current field's snode - Returns: - An Expr instance which represents the target SNode ancestor internally. - ''' - p = self.snode.parent(n) - return Expr(_ti_core.global_var_expr_from_snode(p.ptr)) def is_global(self): """Check whether the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. @@ -176,8 +68,6 @@ def is_global(self): """ return self.ptr.is_global_var() or self.ptr.is_external_var() - - def __hash__(self): return self.ptr.get_raw_address() @@ -201,23 +91,6 @@ def shape(self): return ret return self.snode.shape - @deprecated('x.dim()', 'len(x.shape)') - def dim(self): - return len(self.shape) - - @property - def dtype(self): - """The type of inside elements when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - Returns: - The type of inside elements when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - """ - return self.snode.dtype - - @deprecated('x.data_type()', 'x.dtype') - def data_type(self): - return self.snode.dtype - @python_scope def to_numpy(self): """Create a numpy array containing the same elements when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. diff --git a/python/taichi/lang/field.py b/python/taichi/lang/field.py index e8c2d76017755..c18872107e6a6 100644 --- a/python/taichi/lang/field.py +++ b/python/taichi/lang/field.py @@ -1,9 +1,12 @@ from functools import reduce from operator import mul +from taichi.core.util import ti_core as _ti_core +from taichi.lang import impl +from taichi.lang.util import python_scope + class Field: """Taichi field abstract class.""" - @property def shape(self): raise Exception("Abstract Field class should not be directly used") @@ -24,32 +27,148 @@ def is_tensor(self): class SNodeField(Field): """Taichi field with SNode implementation. + Each field element is a scalar, a vector, or a matrix. + A scalar field has 1 field member. A 3x3 matrix field has 9 field members. + A field member is a Python Expr wrapping a C++ GlobalVariableExpression. + A C++ GlobalVariableExpression wraps the corresponding SNode. 
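+
+    For example, a scalar field created by ``ti.field(ti.f32, shape=(16, 16))`` is
+    backed by a single field member, while a 3x3 matrix field of the same shape is
+    backed by 9 members, each placed under its own SNode.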
+ Args: - vars (List[Expr]): Field members wrapping corresponding C++ GlobalVariableExpressions. - tensor_shape (tuple): Tensor shape, () if scalar. + vars (List[Expr]): Field members. + tensor_shape (Tuple[Int]): Tensor shape of each field element, () if scalar. """ def __init__(self, vars, tensor_shape): assert len(vars) == reduce(mul, tensor_shape, 1), "Tensor shape doesn't match number of vars" assert len(tensor_shape) <= 2, "Only scalars, vectors and matrices are supported" self.vars = vars self.tshape = tensor_shape + self.getter = None + self.setter = None @property def shape(self): - raise self.snode.shape + """Gets field shape. + + Returns: + Tuple[Int]: Field shape. + """ + return self.snode.shape @property def dtype(self): + """Gets data type of each individual value. + + Returns: + DataType: Data type of each individual value. + """ return self.snode.dtype @property def tensor_shape(self): + """Gets tensor shape of each field element. + + Returns: + Tuple[Int]: Tensor shape of each field element, () if scalar. + """ return self.tshape @property def snode(self): + """Gets representative SNode for info purposes. + + Returns: + SNode: Representative SNode (SNode of first field member). + """ from taichi.lang.snode import SNode return SNode(self.vars[0].ptr.snode()) + def parent(self, n=1): + '''XY: To be fixed: + Create another Expr instance which represents one of the ancestors in SNode tree. + + The class it self must represent GlobalVariableExpression (field) internally. + + Args: + n (int): levels of the target ancestor higher than the current field's snode + + Returns: + An Expr instance which represents the target SNode ancestor internally. + ''' + return self.snode.parent(n) + def get_field_members(self): + """Gets field members. + + Returns: + List[Expr]: Field members. + """ return self.vars + + @python_scope + def __setitem__(self, key, value): + """XY: To be fixed: + Set value with specified key when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. + + This will not be directly called from python for vector/matrix fields. + Python Matrix class will decompose operations into scalar-level first. + + Args: + key (Union[List[int], int, None]): indices to set + value (Union[int, float]): value to set + """ + self.initialize_accessor() + self.setter(value, *self.pad_key(key)) + + @python_scope + def __getitem__(self, key): + """XY: to fix + Get value with specified key when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. + + This will not be directly called from python for vector/matrix fields. + Python Matrix class will decompose operations into scalar-level first. + + Args: + key (Union[List[int], int, None]): indices to get. + + Returns: + Value retrieved with specified key. 
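+
+        Example (illustrative; assumes a scalar field, the only case handled here)::
+
+            >>> x = ti.field(ti.i32, shape=(4, 4))
+            >>> x[1, 2] = 3
+            >>> x[1, 2]
+            3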
+ """ + self.initialize_accessor() + return self.getter(*self.pad_key(key)) + + @python_scope + def pad_key(self, key): + if key is None: + key = () + if not isinstance(key, (tuple, list)): + key = (key, ) + return key + ((0, ) * (_ti_core.get_max_num_indices() - len(key))) + + @python_scope + def initialize_accessor(self): + if self.getter: + return + impl.get_runtime().materialize() + snode = self.snode.ptr + if _ti_core.is_real(self.dtype): + def getter(*key): + assert len(key) == _ti_core.get_max_num_indices() + return snode.read_float(key) + + def setter(value, *key): + assert len(key) == _ti_core.get_max_num_indices() + snode.write_float(key, value) + else: + if _ti_core.is_signed(self.dtype): + def getter(*key): + assert len(key) == _ti_core.get_max_num_indices() + return snode.read_int(key) + else: + def getter(*key): + assert len(key) == _ti_core.get_max_num_indices() + return snode.read_uint(key) + + def setter(value, *key): + assert len(key) == _ti_core.get_max_num_indices() + snode.write_int(key, value) + self.getter = getter + self.setter = setter diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py index c16e0f9f8e15d..6eeee3c4c37e6 100644 --- a/python/taichi/lang/impl.py +++ b/python/taichi/lang/impl.py @@ -72,16 +72,21 @@ def expr_init_func( return expr_init(rhs) -def begin_frontend_struct_for(group, loop_range): - if not isinstance(loop_range, Expr) or not loop_range.is_global(): - raise TypeError('Can only iterate through global variables/fields') - if group.size() != len(loop_range.shape): +def begin_frontend_struct_for(group, loop_var): +# if not isinstance(loop_range, Expr) or not loop_range.is_global(): +# raise TypeError('Can only iterate through global variables/fields') + if group.size() != len(loop_var.shape): raise IndexError( 'Number of struct-for indices does not match loop variable dimensionality ' - f'({group.size()} != {len(loop_range.shape)}). Maybe you wanted to ' + f'({group.size()} != {len(loop_var.shape)}). Maybe you wanted to ' 'use "for I in ti.grouped(x)" to group all indices into a single vector I?' 
) - _ti_core.begin_frontend_struct_for(group, loop_range.ptr) + if isinstance(loop_var, SNodeField): + _ti_core.begin_frontend_struct_for(group, loop_var.get_field_members()[0].ptr) + elif isinstance(loop_var, SNode): + _ti_core.begin_frontend_struct_for(group, _ti_core.global_var_expr_from_snode(loop_var.ptr)) + else: + raise Exception('Non-supported struct for') def begin_frontend_if(cond): @@ -121,7 +126,12 @@ def subscript(value, *indices): flattened_indices += ind indices = tuple(flattened_indices) - if is_taichi_class(value): + if isinstance(value, SNodeField): # XY: Not complete, see below + if isinstance(indices, tuple) and len(indices) == 1 and indices[0] is None: + indices = () + indices_expr_group = make_expr_group(*indices) + return Expr(_ti_core.subscript(value.get_field_members()[0].ptr, indices_expr_group)) + elif is_taichi_class(value): return value.subscript(*indices) elif isinstance(value, (Expr, SNode)): if isinstance(value, Expr): @@ -414,10 +424,6 @@ def parent(self, n=1): """Same as :func:`taichi.SNode.parent`""" return _root_fb.root.parent(n) - def loop_range(self, n=1): - """Same as :func:`taichi.SNode.loop_range`""" - return _root_fb.root.loop_range() - def get_children(self): """Same as :func:`taichi.SNode.get_children`""" return _root_fb.root.get_children() @@ -509,20 +515,23 @@ def field(dtype, shape=None, name="", offset=None, needs_grad=False, use_snode=T x.ptr.set_is_primal(True) pytaichi.global_vars.append(x) + x_grad = None if _ti_core.needs_grad(dtype): # adjoint x_grad = Expr(_ti_core.make_id_expr("")) x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype) x_grad.ptr.set_name(name + ".grad") x_grad.ptr.set_is_primal(False) - x.set_grad(x_grad) +# x.set_grad(x_grad) + x.ptr.set_grad(x_grad.ptr) + x = SNodeField([x], ()) if shape is not None: dim = len(shape) root.dense(index_nd(dim), shape).place(x, offset=offset) - if needs_grad: - root.dense(index_nd(dim), shape).place(x.grad) - return SNodeField([x], ()) + if needs_grad and x_grad is not None: + root.dense(index_nd(dim), shape).place(SNodeField([x_grad], ())) + return x class Layout: diff --git a/python/taichi/lang/snode.py b/python/taichi/lang/snode.py index f51fcd8f012e5..cbe6fd36cd4dd 100644 --- a/python/taichi/lang/snode.py +++ b/python/taichi/lang/snode.py @@ -238,14 +238,6 @@ def __call__(self): def get_shape(self, i): return self.shape[i] - def loop_range(self): - """Wraps `self` into an :class:`~taichi.lang.Expr` to serve as loop range. - - Returns: - Expr: The wrapped result. - """ - return Expr(_ti_core.global_var_expr_from_snode(self.ptr)) - @property def name(self): """Gets the name of `self`. @@ -255,18 +247,14 @@ def name(self): """ return self.ptr.name() - @deprecated('x.snode()', 'x.snode') - def __call__(self): # TODO: remove this after v0.7.0 - return self - - @property - def snode(self): - """Gets `self`. - - Returns: - SNode: `self`. - """ - return self +# @property +# def snode(self): +# """Gets `self`. +# +# Returns: +# SNode: `self`. 
+# """ +# return self @property def needs_grad(self): diff --git a/python/taichi/lang/stmt_builder.py b/python/taichi/lang/stmt_builder.py index 8fe09a22b96ea..bcd99106f5ba6 100644 --- a/python/taichi/lang/stmt_builder.py +++ b/python/taichi/lang/stmt_builder.py @@ -432,9 +432,9 @@ def build_struct_for(ctx, node, is_grouped): template = ''' if 1: ___loop_var = 0 - {} = ti.lang.expr.make_var_vector(size=len(___loop_var.loop_range().shape)) + {} = ti.lang.expr.make_var_vector(size=len(___loop_var.shape)) ___expr_group = ti.lang.expr.make_expr_group({}) - ti.begin_frontend_struct_for(___expr_group, ___loop_var.loop_range()) + ti.begin_frontend_struct_for(___expr_group, ___loop_var) ti.core.end_frontend_range_for() '''.format(vars, vars) t = ast.parse(template).body[0] @@ -447,7 +447,7 @@ def build_struct_for(ctx, node, is_grouped): {} ___loop_var = 0 ___expr_group = ti.lang.expr.make_expr_group({}) - ti.begin_frontend_struct_for(___expr_group, ___loop_var.loop_range()) + ti.begin_frontend_struct_for(___expr_group, ___loop_var) ti.core.end_frontend_range_for() '''.format(var_decl, vars) t = ast.parse(template).body[0] diff --git a/tests/python/test_struct_for_non_pot.py b/tests/python/test_struct_for_non_pot.py index a4516297c3de9..5426c1fed4e1f 100644 --- a/tests/python/test_struct_for_non_pot.py +++ b/tests/python/test_struct_for_non_pot.py @@ -13,7 +13,7 @@ def _test_1d(): @ti.kernel def accumulate(): for i in x: - ti.atomic_add(sum, i) + ti.atomic_add(sum[None], i) accumulate() @@ -44,7 +44,7 @@ def _test_2d(): @ti.kernel def accumulate(): for i, j in x: - ti.atomic_add(sum, i + j * 2) + ti.atomic_add(sum[None], i + j * 2) gt = 0 for i in range(n): From 34f908be956f3bc81ac8624f05ad62f77bd54757 Mon Sep 17 00:00:00 2001 From: Yi Xu Date: Tue, 3 Aug 2021 14:14:21 +0800 Subject: [PATCH 03/10] Finish Matrix field --- python/taichi/lang/expr.py | 102 +------- python/taichi/lang/field.py | 206 +++++++++++++++-- python/taichi/lang/impl.py | 54 +++-- python/taichi/lang/matrix.py | 309 ++++++------------------- python/taichi/lang/meta.py | 5 +- python/taichi/lang/snode.py | 16 +- tests/python/test_fill.py | 2 +- tests/python/test_tensor_reflection.py | 27 --- 8 files changed, 307 insertions(+), 414 deletions(-) diff --git a/python/taichi/lang/expr.py b/python/taichi/lang/expr.py index e09f82612b49c..0f3d2f4d684c8 100644 --- a/python/taichi/lang/expr.py +++ b/python/taichi/lang/expr.py @@ -1,8 +1,7 @@ from taichi.core.util import ti_core as _ti_core from taichi.lang import impl from taichi.lang.common_ops import TaichiOperations -from taichi.lang.util import (is_taichi_class, python_scope, to_numpy_type, - to_pytorch_type) +from taichi.lang.util import (is_taichi_class, python_scope) import taichi as ti @@ -36,29 +35,6 @@ def __init__(self, *args, tb=None): assert False if self.tb: self.ptr.set_tb(self.tb) - self.grad = None - self.val = self - - - @python_scope - def set_grad(self, grad): - self.grad = grad - self.ptr.set_grad(grad.ptr) - - @python_scope - def fill(self, val): - """Fill the whole field with value `val` when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - This is an unified interface to match :func:`taichi.lang.Matrix.fill`. 
- - Args: - val (Union[int, float]): value to fill - """ - # TODO: avoid too many template instantiations - from taichi.lang.meta import fill_tensor - fill_tensor(self, val) - - def is_global(self): """Check whether the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. @@ -91,82 +67,6 @@ def shape(self): return ret return self.snode.shape - @python_scope - def to_numpy(self): - """Create a numpy array containing the same elements when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - This is an unified interface to match :func:`taichi.lang.Matrix.to_numpy`. - - Returns: - The numpy array containing the same elements when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - """ - import numpy as np - from taichi.lang.meta import tensor_to_ext_arr - arr = np.zeros(shape=self.shape, dtype=to_numpy_type(self.dtype)) - tensor_to_ext_arr(self, arr) - ti.sync() - return arr - - @python_scope - def to_torch(self, device=None): - """Create a torch array containing the same elements when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - This is an unified interface to match :func:`taichi.lang.Matrix.to_torch`. - - Args: - device (DeviceType): The device type as a parameter passed into torch.zeros(). - - Returns: - The torch array containing the same elements when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - """ - import torch - from taichi.lang.meta import tensor_to_ext_arr - arr = torch.zeros(size=self.shape, - dtype=to_pytorch_type(self.dtype), - device=device) - tensor_to_ext_arr(self, arr) - ti.sync() - return arr - - @python_scope - def from_numpy(self, arr): - """Load all elements from a numpy array when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - This is an unified interface to match :func:`taichi.lang.Matrix.from_numpy`. - The numpy array's shape need to be the same as the internal data structure. - - Args: - arr (NumpyArray): The numpy array containing the elements to load. - """ - assert len(self.shape) == len(arr.shape) - s = self.shape - for i in range(len(self.shape)): - assert s[i] == arr.shape[i] - from taichi.lang.meta import ext_arr_to_tensor - if hasattr(arr, 'contiguous'): - arr = arr.contiguous() - ext_arr_to_tensor(arr, self) - ti.sync() - - @python_scope - def from_torch(self, arr): - """Load all elements from a torch array when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - This is an unified interface to match :func:`taichi.lang.Matrix.from_torch`. - The torch array's shape need to be the same as the internal data structure. - - Args: - arr (TorchArray): The torch array containing the elements to load. 
- """ - self.from_numpy(arr.contiguous()) - - @python_scope - def copy_from(self, other): - assert isinstance(other, Expr) - from taichi.lang.meta import tensor_to_tensor - assert len(self.shape) == len(other.shape) - tensor_to_tensor(self, other) - def __str__(self): """Python scope field print support.""" if impl.inside_kernel(): diff --git a/python/taichi/lang/field.py b/python/taichi/lang/field.py index c18872107e6a6..2623ed03eb8fa 100644 --- a/python/taichi/lang/field.py +++ b/python/taichi/lang/field.py @@ -2,7 +2,9 @@ from operator import mul from taichi.core.util import ti_core as _ti_core from taichi.lang import impl -from taichi.lang.util import python_scope +from taichi.lang.util import python_scope, to_numpy_type, to_pytorch_type +from taichi.misc.util import warning +import taichi as ti class Field: @@ -38,11 +40,11 @@ class SNodeField(Field): """ def __init__(self, vars, tensor_shape): assert len(vars) == reduce(mul, tensor_shape, 1), "Tensor shape doesn't match number of vars" - assert len(tensor_shape) <= 2, "Only scalars, vectors and matrices are supported" + assert len(tensor_shape) in [0, 2], "Only scalars, vectors and matrices are supported" self.vars = vars self.tshape = tensor_shape - self.getter = None - self.setter = None + self.host_accessors = None + self.grad = None @property def shape(self): @@ -71,6 +73,15 @@ def tensor_shape(self): """ return self.tshape + @property + def name(self): + """Gets field name. + + Returns: + str: Field name. + """ + return self.snode.name + @property def snode(self): """Gets representative SNode for info purposes. @@ -103,6 +114,148 @@ def get_field_members(self): """ return self.vars + @python_scope + def set_grad(self, grad): + self.grad = grad + + @property + def n(self): + assert self.is_tensor + return self.tensor_shape[0] + + @property + def m(self): + assert self.is_tensor + return self.tensor_shape[1] + + @python_scope + def fill(self, val): + """Fills the whole field with a specific value. + + Args: + val (Union[int, float]): Value to fill. + """ + # TODO: avoid too many template instantiations + from taichi.lang.meta import fill_tensor + fill_tensor(self, val) + + @python_scope + def to_numpy(self, keep_dims=False, as_vector=None, dtype=None): + """Converts the taichi field to a numpy array. + + Args: + keep_dims (bool, optional): Whether to keep the dimension after conversion. + When keep_dims=True, on an n-D matrix field, the numpy array always has n+2 dims, even for 1x1, 1xn, nx1 matrix fields. + When keep_dims=False, the resulting numpy array should skip the matrix dims with size 1. + For example, a 4x1 or 1x4 matrix field with 5x6x7 elements results in an array of shape 5x6x7x4. + as_vector (bool, deprecated): Whether to make the returned numpy array as a vector, i.e., with shape (n,) rather than (n, 1). + Note that this argument has been deprecated. + More discussion about `as_vector`: https://github.com/taichi-dev/taichi/pull/1046#issuecomment-633548858. + dtype (DataType, optional): The desired data type of returned numpy array. + + Returns: + numpy.ndarray: The result numpy array. 
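+
+        Example (an illustrative sketch of the ``keep_dims`` behavior described above)::
+
+            >>> v = ti.Vector.field(4, ti.f32, shape=(5, 6, 7))
+            >>> v.to_numpy().shape                # the size-1 matrix dim is dropped
+            (5, 6, 7, 4)
+            >>> v.to_numpy(keep_dims=True).shape
+            (5, 6, 7, 4, 1)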
+ """ + if as_vector is not None: + warning( + 'v.to_numpy(as_vector=True) is deprecated, ' + 'please use v.to_numpy() directly instead', + DeprecationWarning, + stacklevel=3) + if dtype is None: + dtype = to_numpy_type(self.dtype) + import numpy as np + if self.is_tensor: + as_vector = self.m == 1 and not keep_dims + shape_ext = (self.n, ) if as_vector else (self.n, self.m) + arr = np.zeros(self.shape + shape_ext, dtype=dtype) + from taichi.lang.meta import matrix_to_ext_arr + matrix_to_ext_arr(self, arr, as_vector) + else: + from taichi.lang.meta import tensor_to_ext_arr + arr = np.zeros(shape=self.shape, dtype=dtype) + tensor_to_ext_arr(self, arr) + ti.sync() + return arr + + @python_scope + def to_torch(self, device=None, keep_dims=False): + """Converts the taichi field to a torch tensor. + + Args: + device (torch.device, optional): The desired device of returned tensor. + keep_dims (bool, optional): Whether to keep the dimension after conversion. + See :meth:`~taichi.lang.field.Field.to_numpy` for more detailed explanation. + + Returns: + torch.tensor: The result torch tensor. + """ + import torch + if self.is_tensor: + as_vector = self.m == 1 and not keep_dims + shape_ext = (self.n, ) if as_vector else (self.n, self.m) + arr = torch.empty(self.shape + shape_ext, + dtype=to_pytorch_type(self.dtype), + device=device) + from taichi.lang.meta import matrix_to_ext_arr + matrix_to_ext_arr(self, arr, as_vector) + else: + arr = torch.zeros(size=self.shape, + dtype=to_pytorch_type(self.dtype), + device=device) + from taichi.lang.meta import tensor_to_ext_arr + tensor_to_ext_arr(self, arr) + ti.sync() + return arr + + @python_scope + def from_numpy(self, arr): + """Loads all elements from a numpy array. + + The shape of the numpy array needs to be the same as the internal data structure. + + Args: + arr (numpy.ndarray): The source numpy array. + """ + if self.is_tensor: + if len(arr.shape) == len(self.shape) + 1: + as_vector = True + assert self.m == 1, "This is not a vector field" + else: + as_vector = False + assert len(arr.shape) == len(self.shape) + 2 + dim_ext = 1 if as_vector else 2 + assert len(arr.shape) == len(self.shape) + dim_ext + from taichi.lang.meta import ext_arr_to_matrix + ext_arr_to_matrix(arr, self, as_vector) + else: + assert len(self.shape) == len(arr.shape) + for i in range(len(self.shape)): + assert self.shape[i] == arr.shape[i] + from taichi.lang.meta import ext_arr_to_tensor + if hasattr(arr, 'contiguous'): + arr = arr.contiguous() + ext_arr_to_tensor(arr, self) + ti.sync() + + @python_scope + def from_torch(self, arr): + """Loads all elements from a torch tensor. + + The shape of the torch tensor needs to be the same as the internal data structure. + + Args: + arr (torch.tensor): The source torch tensor. 
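+
+        Example (illustrative; assumes torch is available)::
+
+            >>> import torch
+            >>> x = ti.field(ti.f32, shape=(3, 3))
+            >>> x.from_torch(torch.ones(3, 3))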
+ """ + self.from_numpy(arr.contiguous()) + + @python_scope + def copy_from(self, other): + assert isinstance(other, SNodeField) + from taichi.lang.meta import tensor_to_tensor + assert len(self.shape) == len(other.shape) + tensor_to_tensor(self, other) + @python_scope def __setitem__(self, key, value): """XY: To be fixed: @@ -115,8 +268,17 @@ def __setitem__(self, key, value): key (Union[List[int], int, None]): indices to set value (Union[int, float]): value to set """ - self.initialize_accessor() - self.setter(value, *self.pad_key(key)) + self.initialize_host_accessors() + if self.is_tensor: + if not isinstance(value, (list, tuple)): + value = list(value) + if not isinstance(value[0], (list, tuple)): + value = [[i] for i in value] + for i in range(self.n): + for j in range(self.m): + self[key][i, j] = value[i][j] + else: + self.host_accessors[0].setter(value, *self.pad_key(key)) @python_scope def __getitem__(self, key): @@ -132,11 +294,15 @@ def __getitem__(self, key): Returns: Value retrieved with specified key. """ - self.initialize_accessor() - return self.getter(*self.pad_key(key)) + self.initialize_host_accessors() + key = self.pad_key(key) + if self.is_tensor: + return ti.Matrix.with_entries(*self.tensor_shape, [SNodeFieldHostAccess(e, key) for e in self.host_accessors]) + else: + return self.host_accessors[0].getter(*key) - @python_scope - def pad_key(self, key): + @classmethod + def pad_key(cls, key): if key is None: key = () if not isinstance(key, (tuple, list)): @@ -144,12 +310,16 @@ def pad_key(self, key): return key + ((0, ) * (_ti_core.get_max_num_indices() - len(key))) @python_scope - def initialize_accessor(self): - if self.getter: + def initialize_host_accessors(self): + if self.host_accessors: return impl.get_runtime().materialize() - snode = self.snode.ptr - if _ti_core.is_real(self.dtype): + self.host_accessors = [SNodeFieldHostAccessor(e.ptr.snode()) for e in self.vars] + + +class SNodeFieldHostAccessor: + def __init__(self, snode): + if _ti_core.is_real(snode.data_type()): def getter(*key): assert len(key) == _ti_core.get_max_num_indices() return snode.read_float(key) @@ -158,7 +328,7 @@ def setter(value, *key): assert len(key) == _ti_core.get_max_num_indices() snode.write_float(key, value) else: - if _ti_core.is_signed(self.dtype): + if _ti_core.is_signed(snode.data_type()): def getter(*key): assert len(key) == _ti_core.get_max_num_indices() return snode.read_int(key) @@ -172,3 +342,9 @@ def setter(value, *key): snode.write_int(key, value) self.getter = getter self.setter = setter + + +class SNodeFieldHostAccess: + def __init__(self, accessor, key): + self.accessor = accessor + self.key = key diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py index 1174e43333c5b..2034a26daff4f 100644 --- a/python/taichi/lang/impl.py +++ b/python/taichi/lang/impl.py @@ -130,7 +130,10 @@ def subscript(value, *indices): if isinstance(indices, tuple) and len(indices) == 1 and indices[0] is None: indices = () indices_expr_group = make_expr_group(*indices) - return Expr(_ti_core.subscript(value.get_field_members()[0].ptr, indices_expr_group)) + if value.is_tensor: + return ti.Matrix.with_entries(*value.tensor_shape, [Expr(_ti_core.subscript(e.ptr, indices_expr_group)) for e in value.get_field_members()]) + else: + return Expr(_ti_core.subscript(value.get_field_members()[0].ptr, indices_expr_group)) elif is_taichi_class(value): return value.subscript(*indices) elif isinstance(value, (Expr, SNode)): @@ -457,6 +460,28 @@ def __repr__(self): >>> ti.root.pointer(ti.ij, 
4).dense(ti.ij, 8).place(x) """ +@python_scope +def create_field_member(dtype, name): + dtype = cook_dtype(dtype) + + # primal + x = Expr(_ti_core.make_id_expr("")) + x.declaration_tb = get_traceback(stacklevel=2) + x.ptr = _ti_core.global_new(x.ptr, dtype) + x.ptr.set_name(name) + x.ptr.set_is_primal(True) + pytaichi.global_vars.append(x) + + x_grad = None + if _ti_core.needs_grad(dtype): + # adjoint + x_grad = Expr(_ti_core.make_id_expr("")) + x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype) + x_grad.ptr.set_name(name + ".grad") + x_grad.ptr.set_is_primal(False) + x.ptr.set_grad(x_grad.ptr) + + return x, x_grad @deprecated('ti.var', 'ti.field') def var(dt, shape=None, offset=None, needs_grad=False): @@ -495,8 +520,6 @@ def field(dtype, shape=None, name="", offset=None, needs_grad=False, use_snode=T """ _taichi_skip_traceback = 1 - dtype = cook_dtype(dtype) - if isinstance(shape, numbers.Number): shape = (shape, ) @@ -514,30 +537,15 @@ def field(dtype, shape=None, name="", offset=None, needs_grad=False, use_snode=T del _taichi_skip_traceback assert use_snode, "Only SNode Field is supported now" - # primal - x = Expr(_ti_core.make_id_expr("")) - x.declaration_tb = get_traceback(stacklevel=2) - x.ptr = _ti_core.global_new(x.ptr, dtype) - x.ptr.set_name(name) - x.ptr.set_is_primal(True) - pytaichi.global_vars.append(x) - - x_grad = None - if _ti_core.needs_grad(dtype): - # adjoint - x_grad = Expr(_ti_core.make_id_expr("")) - x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype) - x_grad.ptr.set_name(name + ".grad") - x_grad.ptr.set_is_primal(False) -# x.set_grad(x_grad) - x.ptr.set_grad(x_grad.ptr) + x, x_grad = create_field_member(dtype, name) + x, x_grad = SNodeField([x], ()), SNodeField([x_grad], ()) + x.set_grad(x_grad) - x = SNodeField([x], ()) if shape is not None: dim = len(shape) root.dense(index_nd(dim), shape).place(x, offset=offset) - if needs_grad and x_grad is not None: - root.dense(index_nd(dim), shape).place(SNodeField([x_grad], ())) + if needs_grad: + root.dense(index_nd(dim), shape).place(x_grad) return x diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py index 6f3339b9215cc..5086535888102 100644 --- a/python/taichi/lang/matrix.py +++ b/python/taichi/lang/matrix.py @@ -8,6 +8,7 @@ from taichi.lang import ops as ops_mod from taichi.lang.common_ops import TaichiOperations from taichi.lang.exception import TaichiSyntaxError +from taichi.lang.field import SNodeField, SNodeFieldHostAccess from taichi.lang.util import (in_python_scope, is_taichi_class, python_scope, taichi_scope, to_numpy_type, to_pytorch_type) from taichi.misc.util import deprecated, warning @@ -256,56 +257,36 @@ def linearize_entry_id(self, *args): def __call__(self, *args, **kwargs): _taichi_skip_traceback = 1 assert kwargs == {} - return self.entries[self.linearize_entry_id(*args)] - - def get_field_members(self): - """Get matrix elements list. - - Returns: - A list of matrix elements. 
- """ - return self.entries - - @deprecated('x.get_tensor_members()', 'x.get_field_members()') - def get_tensor_members(self): - return self.get_field_members() - - def get_entry(self, *args, **kwargs): - assert kwargs == {} - return self.entries[self.linearize_entry_id(*args)] + ret = self.entries[self.linearize_entry_id(*args)] + if isinstance(ret, SNodeFieldHostAccess): + ret = ret.accessor.getter(*ret.key) + return ret def set_entry(self, i, j, e): idx = self.linearize_entry_id(i, j) if impl.inside_kernel(): self.entries[idx].assign(e) else: - self.entries[idx] = e - - def place(self, snode): - for e in self.entries: - snode.place(e) + if isinstance(self.entries[idx], SNodeFieldHostAccess): + self.entries[idx].accessor.setter(e, *self.entries[idx].key) + else: + self.entries[idx] = e @taichi_scope def subscript(self, *indices): _taichi_skip_traceback = 1 - if self.is_global(): - ret = self.empty_copy() - for i, e in enumerate(self.entries): - ret.entries[i] = impl.subscript(e, *indices) - return ret + assert len(indices) in [1, 2] + i = indices[0] + j = 0 if len(indices) == 1 else indices[1] + # ptr.is_global_ptr() will check whether it's an element in the field (which is different from ptr.is_global_var()). + if isinstance(self.entries[0], + ti.Expr) and self.entries[0].ptr.is_global_ptr( + ) and ti.is_extension_supported( + ti.cfg.arch, ti.extension.dynamic_index): + return ti.subscript_with_offset(self.entries[0], (i, j), + self.m, True) else: - assert len(indices) in [1, 2] - i = indices[0] - j = 0 if len(indices) == 1 else indices[1] - # ptr.is_global_ptr() will check whether it's an element in the field (which is different from ptr.is_global_var()). - if isinstance(self.entries[0], - ti.Expr) and self.entries[0].ptr.is_global_ptr( - ) and ti.is_extension_supported( - ti.cfg.arch, ti.extension.dynamic_index): - return ti.subscript_with_offset(self.entries[0], (i, j), - self.m, True) - else: - return self(i, j) + return self(i, j) @property def x(self): @@ -368,64 +349,15 @@ def w(self, value): _taichi_skip_traceback = 1 self[3] = value - class Proxy: - def __init__(self, mat, index): - """Proxy when a tensor of Matrices is accessed by host.""" - self.mat = mat - self.index = index - - @python_scope - def __getitem__(self, item): - if not isinstance(item, (list, tuple)): - item = [item] - return self.mat(*item)[self.index] - - @python_scope - def __setitem__(self, key, value): - if not isinstance(key, (list, tuple)): - key = [key] - self.mat(*key)[self.index] = value - - @property - def x(self): - return self[0] - - @property - def y(self): - return self[1] - - @property - def z(self): - return self[2] - - @property - def w(self): - return self[3] - - @x.setter - def x(self, value): - self[0] = value - - @y.setter - def y(self, value): - self[1] = value - - @z.setter - def z(self, value): - self[2] = value - - @w.setter - def w(self, value): - self[3] = value - - @property - def value(self): - ret = self.mat.empty_copy() - for i in range(self.mat.n): - for j in range(self.mat.m): - ret.entries[i * self.mat.m + j] = self.mat(i, - j)[self.index] - return ret + @property + @python_scope + def value(self): + assert isinstance(self.entries[0], SNodeFieldHostAccess) + ret = self.empty_copy() + for i in range(self.n): + for j in range(self.m): + ret.entries[i * self.m + j] = self(i, j) + return ret # host access & python scope operation @python_scope @@ -439,9 +371,6 @@ def __getitem__(self, indices): The value of the element at a specific position of a matrix. 
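+
+        Example (illustrative, Python-scope access to a local matrix)::
+
+            >>> m = ti.Matrix([[1, 2], [3, 4]])
+            >>> m[1, 0]
+            3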
""" - if self.is_global(): - return Matrix.Proxy(self, indices) - if not isinstance(indices, (list, tuple)): indices = [indices] assert len(indices) in [1, 2] @@ -457,16 +386,6 @@ def __setitem__(self, indices, item): indices (Sequence[Expr]): the indices of a element. """ - if self.is_global(): - if not isinstance(item, (list, tuple)): - item = list(item) - if not isinstance(item[0], (list, tuple)): - item = [[i] for i in item] - for i in range(self.n): - for j in range(self.m): - self(i, j)[indices] = item[i][j] - return - if not isinstance(indices, (list, tuple)): indices = [indices] assert len(indices) in [1, 2] @@ -730,11 +649,11 @@ def data_type(self): def snode(self): return self.loop_range().snode - def make_grad(self): - ret = self.empty_copy() - for i in range(len(ret.entries)): - ret.entries[i] = self.entries[i].grad - return ret +# def make_grad(self): +# ret = self.empty_copy() +# for i in range(len(ret.entries)): +# ret.entries[i] = self.entries[i].grad +# return ret def sum(self): """Return the sum of all elements.""" @@ -852,7 +771,7 @@ def assign_renamed(x, y): for i in range(val.n): row = [] for j in range(val.m): - row.append(val.get_entry(i, j)) + row.append(val(i, j)) row = tuple(row) val_tuple.append(row) val = tuple(val_tuple) @@ -862,106 +781,19 @@ def assign_renamed(x, y): fill_matrix(self, val) @python_scope - def to_numpy(self, keep_dims=False, as_vector=None, dtype=None): - """Convert the taichi matrix to a numpy.ndarray. - - Args: - keep_dims (bool, optional): Whether keep the dimension after conversion. - When keep_dims=True, on an n-D matrix field, the numpy array always has n+2 dims, even for 1x1, 1xn, nx1 matrix fields. - When keep_dims=False, the resulting numpy array should skip the dimensionality with only 1 element, on the matrix shape dimensionalities. - For example, a 4x1 or 1x4 matrix field with 5x6x7 elements results in an array of shape 5x6x7x4. - as_vector (bool, deprecated): Make the returned numpy array as a vector i.e., has a shape (n,) rather than (n, 1) - Note that this argument has been deprecated. - More discussion about `as_vector`: https://github.com/taichi-dev/taichi/pull/1046#issuecomment-633548858. - dtype (DataType, optional): The desired data type of returned numpy array. - - Returns: - numpy.ndarray: The numpy array that converted from the matrix field. - - """ - # Discussion: https://github.com/taichi-dev/taichi/pull/1046#issuecomment-633548858 - if as_vector is not None: - warning( - 'v.to_numpy(as_vector=True) is deprecated, ' - 'please use v.to_numpy() directly instead', - DeprecationWarning, - stacklevel=3) - as_vector = self.m == 1 and not keep_dims - shape_ext = (self.n, ) if as_vector else (self.n, self.m) - - if not self.is_global(): - return np.array(self.entries).reshape(shape_ext) - - if dtype is None: - dtype = to_numpy_type(self.dtype) - ret = np.zeros(self.shape + shape_ext, dtype=dtype) - from taichi.lang.meta import matrix_to_ext_arr - matrix_to_ext_arr(self, ret, as_vector) - return ret - - @python_scope - def to_torch(self, device=None, keep_dims=False): - """Convert the taichi matrix to a torch tensor. + def to_numpy(self, keep_dims=False): + """Converts the Matrix to a numpy array. Args: - device (torch.device, optional): The desired device of returned tensor. - keep_dims (bool, optional): Whether keep the dimension after conversion. - See :meth:`~taichi.lang.matrix.Matrix.to_numpy` for more detailed explanation. + keep_dims (bool, optional): Whether to keep the dimension after conversion. 
+ When keep_dims=False, the resulting numpy array should skip the matrix dims with size 1. Returns: - torch.tensor: The torch tensor that converted from the matrix field. - + numpy.ndarray: The result numpy array. """ - import torch as_vector = self.m == 1 and not keep_dims shape_ext = (self.n, ) if as_vector else (self.n, self.m) - ret = torch.empty(self.shape + shape_ext, - dtype=to_pytorch_type(self.dtype), - device=device) - from taichi.lang.meta import matrix_to_ext_arr - matrix_to_ext_arr(self, ret, as_vector) - ti.sync() - return ret - - @python_scope - def from_numpy(self, ndarray): - """Copy the values of a numpy ndarray to the Matrix. - - Args: - ndarray (numpy.ndarray): The numpy array to copy. - - """ - if len(ndarray.shape) == len(self.loop_range().shape) + 1: - as_vector = True - assert self.m == 1, "This matrix is not a vector" - else: - as_vector = False - assert len(ndarray.shape) == len(self.loop_range().shape) + 2 - dim_ext = 1 if as_vector else 2 - assert len(ndarray.shape) == len(self.loop_range().shape) + dim_ext - from taichi.lang.meta import ext_arr_to_matrix - ext_arr_to_matrix(ndarray, self, as_vector) - ti.sync() - - @python_scope - def from_torch(self, torch_tensor): - """Copy the values of a torch tensor to the Matrix. - - Args: - torch_tensor (torch.tensor): The torch tensor to copy. - - Returns: - Call :meth:`~taichi.lang.matrix.Matrix.from_numpy` with the input torch tensor as the argument - - """ - return self.from_numpy(torch_tensor.contiguous()) - - @python_scope - def copy_from(self, other): - assert isinstance(other, Matrix) - from taichi.lang.meta import tensor_to_tensor - assert len(self.shape) == len(other.shape) - tensor_to_tensor(self, other) + return np.array(self.entries).reshape(shape_ext) @taichi_scope def __ti_repr__(self): @@ -1103,12 +935,7 @@ def field(cls, :class:`~taichi.lang.matrix.Matrix`: A :class:`~taichi.lang.matrix.Matrix` instance serves as the data container. """ - self = cls.empty(n, m) - self.entries = [] - self.n = n - self.m = m - self.dt = dtype - + entries = [] if isinstance(dtype, (list, tuple, np.ndarray)): # set different dtype for each element in Matrix # see #2135 @@ -1117,18 +944,20 @@ def field(cls, dtype ) == n, f'Please set correct dtype list for Vector. The shape of dtype list should be ({n}, ) instead of {np.shape(dtype)}' for i in range(n): - self.entries.append(impl.field(dtype[i], name=name)) + entries.append(impl.create_field_member(dtype[i], name=name)) else: assert len(np.shape(dtype)) == 2 and len(dtype) == n and len( dtype[0] ) == m, f'Please set correct dtype list for Matrix. 
The shape of dtype list should be ({n}, {m}) instead of {np.shape(dtype)}' for i in range(n): for j in range(m): - self.entries.append(impl.field(dtype[i][j], name=name)) + entries.append(impl.create_field_member(dtype[i][j], name=name)) else: for _ in range(n * m): - self.entries.append(impl.field(dtype, name=name)) - self.grad = self.make_grad() + entries.append(impl.create_field_member(dtype, name=name)) + entries, entries_grad = zip(*entries) + entries, entries_grad = SNodeField(entries, (n, m)), SNodeField(entries_grad, (n, m)) + entries.set_grad(entries_grad) if layout is not None: assert shape is not None, 'layout is useless without shape' @@ -1151,27 +980,16 @@ def field(cls, dim = len(shape) if layout.soa: - for i, e in enumerate(self.entries): - ti.root.dense(impl.index_nd(dim), - shape).place(e, offset=offset) + for e in entries.get_field_members(): + ti.root.dense(impl.index_nd(dim), shape).place(SNodeField(e, ()), offset=offset) if needs_grad: - for i, e in enumerate(self.entries): - ti.root.dense(impl.index_nd(dim), - shape).place(e.grad, offset=offset) + for e in entries_grad.get_field_members(): + ti.root.dense(impl.index_nd(dim), shape).place(SNodeField(e, ()), offset=offset) else: - var_list = [] - for i, e in enumerate(self.entries): - var_list.append(e) - ti.root.dense(impl.index_nd(dim), - shape).place(*tuple(var_list), offset=offset) - grad_var_list = [] + ti.root.dense(impl.index_nd(dim), shape).place(entries, offset=offset) if needs_grad: - for i, e in enumerate(self.entries): - grad_var_list.append(e.grad) - ti.root.dense(impl.index_nd(dim), - shape).place(*tuple(grad_var_list), - offset=offset) - return self + ti.root.dense(impl.index_nd(dim), shape).place(entries_grad, offset=offset) + return entries @classmethod @python_scope @@ -1254,6 +1072,23 @@ def empty(cls, n, m): """ return cls([[None] * m for _ in range(n)]) + @classmethod + def with_entries(cls, n, m, entries): + """Construct a Matrix instance by giving all entries. + + Args: + n (int): Number of rows of the matrix. + m (int): Number of columns of the matrix. + entries (List[Any]): Given entries. + + Returns: + Matrix: A :class:`~taichi.lang.matrix.Matrix` instance filled with given entries. + """ + assert n * m == len(entries), "Number of entries doesn't match n * m" + mat = cls.empty(n, m) + mat.entries = entries + return mat + @classmethod def new(cls, n, m): if impl.inside_kernel(): diff --git a/python/taichi/lang/meta.py b/python/taichi/lang/meta.py index 73250fd4178d6..34b51fb137ee6 100644 --- a/python/taichi/lang/meta.py +++ b/python/taichi/lang/meta.py @@ -1,6 +1,7 @@ from taichi.core import settings from taichi.lang import impl from taichi.lang.expr import Expr +from taichi.lang.field import SNodeField from taichi.lang.kernel_arguments import ext_arr, template from taichi.lang.kernel_impl import kernel @@ -101,9 +102,9 @@ def ext_arr_to_matrix(arr: ext_arr(), mat: template(), as_vector: template()): @kernel def clear_gradients(vars: template()): - for I in ti.grouped(Expr(vars[0])): + for I in ti.grouped(SNodeField([Expr(vars[0])], ())): for s in ti.static(vars): - Expr(s)[I] = 0 + SNodeField([Expr(s)], ())[I] = 0 @kernel diff --git a/python/taichi/lang/snode.py b/python/taichi/lang/snode.py index bfe1845ba4cae..8b05fff45b97a 100644 --- a/python/taichi/lang/snode.py +++ b/python/taichi/lang/snode.py @@ -256,14 +256,14 @@ def name(self): """ return self.ptr.name() -# @property -# def snode(self): -# """Gets `self`. -# -# Returns: -# SNode: `self`. 
-# """ -# return self + @property + def snode(self): + """Gets `self`. + + Returns: + SNode: `self`. + """ + return self @property def needs_grad(self): diff --git a/tests/python/test_fill.py b/tests/python/test_fill.py index 8509af547c5a8..3e2f5f9b90254 100644 --- a/tests/python/test_fill.py +++ b/tests/python/test_fill.py @@ -67,4 +67,4 @@ def test_fill_matrix_matrix(): for j in range(m): for p in range(2): for q in range(3): - assert val[i, j][p, q] == mat.get_entry(p, q) + assert val[i, j][p, q] == mat(p, q) diff --git a/tests/python/test_tensor_reflection.py b/tests/python/test_tensor_reflection.py index 2795fa8e5ebe7..31d16ba38bcbd 100644 --- a/tests/python/test_tensor_reflection.py +++ b/tests/python/test_tensor_reflection.py @@ -90,33 +90,6 @@ def test_unordered_matrix(): assert val.loop_range().snode.parent(4) == ti.root -@pytest.mark.filterwarnings('ignore') -@ti.host_arch_only -def test_deprecated(): - val = ti.field(ti.f32) - mat = ti.Matrix.field(3, 2, ti.i32) - - n = 3 - m = 7 - p = 11 - - blk1 = ti.root.dense(ti.k, n) - blk2 = blk1.dense(ti.i, m) - blk3 = blk2.dense(ti.j, p) - blk3.place(val, mat) - - assert val.dim() == 3 - assert val.data_type() == ti.f32 - assert val.shape() == (n, m, p) - assert mat.dim() == 3 - assert mat.data_type() == ti.i32 - assert mat.shape() == (n, m, p) - assert blk3.dim() == 3 - assert blk3.shape() == (n, m, p) - assert val.snode().parent() == blk3 - assert mat.snode().parent() == blk3 - - @ti.all_archs def test_parent_exceeded(): val = ti.field(ti.f32) From 609d1da8c137b0a2c8a53af9683632183b2a28eb Mon Sep 17 00:00:00 2001 From: Yi Xu Date: Wed, 4 Aug 2021 16:58:04 +0800 Subject: [PATCH 04/10] Make everything work --- python/taichi/aot/module.py | 7 +- python/taichi/lang/expr.py | 23 ++-- python/taichi/lang/field.py | 75 ++++++++++++- python/taichi/lang/impl.py | 68 ++++++------ python/taichi/lang/matrix.py | 101 ++---------------- python/taichi/lang/ops.py | 10 +- python/taichi/lang/snode.py | 8 ++ python/taichi/lang/stmt_builder.py | 6 +- python/taichi/misc/gui.py | 10 +- tests/python/test_compare.py | 52 ++++----- tests/python/test_custom_float.py | 4 +- .../test_custom_float_time_integration.py | 4 +- tests/python/test_kernel_templates.py | 2 +- tests/python/test_linalg.py | 18 ++-- tests/python/test_matrix_different_type.py | 12 +-- tests/python/test_no_grad.py | 2 +- tests/python/test_oop.py | 5 +- tests/python/test_tensor_reflection.py | 12 +-- 18 files changed, 196 insertions(+), 223 deletions(-) diff --git a/python/taichi/aot/module.py b/python/taichi/aot/module.py index 56ab7bf925571..e9697afcd3513 100644 --- a/python/taichi/aot/module.py +++ b/python/taichi/aot/module.py @@ -1,7 +1,7 @@ from contextlib import contextmanager from taichi.lang import expr, impl, kernel_arguments, kernel_impl, matrix - +from taichi.lang.field import SNodeField class KernelTemplate: def __init__(self, kernel_fn, aot_module): @@ -100,12 +100,11 @@ def add_field(self, name, field): self._fields[name] = field column_num = 1 row_num = 1 - if isinstance(field, matrix.Matrix): + assert isinstance(field, SNodeField) + if field.is_tensor: is_scalar = False row_num = field.m column_num = field.n - else: - assert isinstance(field, expr.Expr) self._aot_builder.add_field(name, is_scalar, field.dtype, field.snode.shape, row_num, column_num) diff --git a/python/taichi/lang/expr.py b/python/taichi/lang/expr.py index 0f3d2f4d684c8..e2e4ca77dca0f 100644 --- a/python/taichi/lang/expr.py +++ b/python/taichi/lang/expr.py @@ -36,6 +36,9 @@ def __init__(self, *args, tb=None): 
if self.tb: self.ptr.set_tb(self.tb) + def loop_range(self): + return self + def is_global(self): """Check whether the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. @@ -47,10 +50,6 @@ def is_global(self): def __hash__(self): return self.ptr.get_raw_address() - @property - def name(self): - return self.snode.name - @property def shape(self): """A list containing sizes for each dimension when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. @@ -65,22 +64,14 @@ def shape(self): for i in range(dim) ] return ret - return self.snode.shape + from taichi.lang.snode import SNode + return SNode(self.ptr.snode()).shape def __str__(self): - """Python scope field print support.""" - if impl.inside_kernel(): - return '' # make pybind11 happy, see Matrix.__str__ - else: - return str(self.to_numpy()) + return '' def __repr__(self): - # make interactive shell happy, prevent materialization - if self.is_global(): - # make interactive shell happy, prevent materialization - return '' - else: - return '' + return '' def make_var_vector(size): diff --git a/python/taichi/lang/field.py b/python/taichi/lang/field.py index 2623ed03eb8fa..736f4fef1001c 100644 --- a/python/taichi/lang/field.py +++ b/python/taichi/lang/field.py @@ -4,6 +4,7 @@ from taichi.lang import impl from taichi.lang.util import python_scope, to_numpy_type, to_pytorch_type from taichi.misc.util import warning +import numbers import taichi as ti @@ -114,10 +115,24 @@ def get_field_members(self): """ return self.vars + def loop_range(self): + return self.vars[0] + @python_scope def set_grad(self, grad): self.grad = grad + @python_scope + def get_scalar_field(self, *indices): + """Creates a scalar field using a field member + Only used for quant. + """ + assert self.is_tensor, "get_scalar_field can only be called on a Matrix field" + assert len(indices) in [1, 2] + i = indices[0] + j = 0 if len(indices) == 1 else indices[1] + return SNodeField([self.vars[i * self.m + j]], ()) + @property def n(self): assert self.is_tensor @@ -136,8 +151,46 @@ def fill(self, val): val (Union[int, float]): Value to fill. """ # TODO: avoid too many template instantiations - from taichi.lang.meta import fill_tensor - fill_tensor(self, val) + """Fill the element with values. + + Args: + val (Union[Number, List, Tuple, Matrix]): the dimension of val should be consistent with the dimension of element. 
+ + Examples: + + Fill a scalar field: + + >>> v = ti.field(float,10) + >>> v.fill(10.0) + + Fill a vector field: + + >>> v = ti.Vector.field(2, float,4) + >>> v.fill([10.0,11.0]) + + """ + if self.is_tensor: + if isinstance(val, numbers.Number): + val = tuple([tuple([val for _ in range(self.m)]) for _ in range(self.n)]) + elif isinstance(val, (list, tuple)) and isinstance(val[0], numbers.Number): + assert self.m == 1 + val = tuple([(v, ) for v in val]) + elif isinstance(val, ti.Matrix): + val_tuple = [] + for i in range(val.n): + row = [] + for j in range(val.m): + row.append(val(i, j)) + row = tuple(row) + val_tuple.append(row) + val = tuple(val_tuple) + assert len(val) == self.n + assert len(val[0]) == self.m + from taichi.lang.meta import fill_matrix + fill_matrix(self, val) + else: + from taichi.lang.meta import fill_tensor + fill_tensor(self, val) @python_scope def to_numpy(self, keep_dims=False, as_vector=None, dtype=None): @@ -256,6 +309,19 @@ def copy_from(self, other): assert len(self.shape) == len(other.shape) tensor_to_tensor(self, other) + def __str__(self): + if impl.inside_kernel(): + return self.__repr__() # make pybind11 happy, see Matrix.__str__ + else: + return str(self.to_numpy()) + + def __repr__(self): + # make interactive shell happy, prevent materialization + if self.is_tensor: + return f'<{self.n}x{self.m} ti.Matrix.field>' + else: + return '' + @python_scope def __setitem__(self, key, value): """XY: To be fixed: @@ -301,12 +367,13 @@ def __getitem__(self, key): else: return self.host_accessors[0].getter(*key) - @classmethod - def pad_key(cls, key): + @python_scope + def pad_key(self, key): if key is None: key = () if not isinstance(key, (tuple, list)): key = (key, ) + assert len(key) == len(self.shape) return key + ((0, ) * (_ti_core.get_max_num_indices() - len(key))) @python_scope diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py index 2034a26daff4f..82f16c8a05efd 100644 --- a/python/taichi/lang/impl.py +++ b/python/taichi/lang/impl.py @@ -65,28 +65,21 @@ def expr_init_list(xs, expected): @taichi_scope def expr_init_func( rhs): # temporary solution to allow passing in fields as arguments - if isinstance(rhs, Expr) and rhs.ptr.is_global_var(): - return rhs - if isinstance(rhs, ti.Matrix) and rhs.is_global(): + if isinstance(rhs, SNodeField): return rhs return expr_init(rhs) -def begin_frontend_struct_for(group, loop_var): -# if not isinstance(loop_range, Expr) or not loop_range.is_global(): -# raise TypeError('Can only iterate through global variables/fields') - if group.size() != len(loop_var.shape): +def begin_frontend_struct_for(group, loop_range): + if not isinstance(loop_range, Expr) or not loop_range.is_global(): + raise TypeError('Can only iterate through global variables/fields') + if group.size() != len(loop_range.shape): raise IndexError( 'Number of struct-for indices does not match loop variable dimensionality ' - f'({group.size()} != {len(loop_var.shape)}). Maybe you wanted to ' + f'({group.size()} != {len(loop_range.shape)}). Maybe you wanted to ' 'use "for I in ti.grouped(x)" to group all indices into a single vector I?' 
) - if isinstance(loop_var, SNodeField): - _ti_core.begin_frontend_struct_for(group, loop_var.get_field_members()[0].ptr) - elif isinstance(loop_var, SNode): - _ti_core.begin_frontend_struct_for(group, _ti_core.global_var_expr_from_snode(loop_var.ptr)) - else: - raise Exception('Non-supported struct for') + _ti_core.begin_frontend_struct_for(group, loop_range.ptr) def begin_frontend_if(cond): @@ -125,47 +118,44 @@ def subscript(value, *indices): ind = [indices[i]] flattened_indices += ind indices = tuple(flattened_indices) - - if isinstance(value, SNodeField): # XY: Not complete, see below - if isinstance(indices, tuple) and len(indices) == 1 and indices[0] is None: - indices = () - indices_expr_group = make_expr_group(*indices) + if isinstance(indices, tuple) and len(indices) == 1 and indices[0] is None: + indices = () + indices_expr_group = make_expr_group(*indices) + index_dim = indices_expr_group.size() + + if isinstance(value, SNodeField): + var = value.get_field_members()[0].ptr + if var.snode() is None: + if var.is_primal(): + raise RuntimeError(f"{var.get_expr_name()} has not been placed.") + else: + raise RuntimeError(f"Gradient {var.get_expr_name()} has not been placed, check whether `needs_grad=True`") + field_dim = int(var.get_attribute("dim")) + if field_dim != index_dim: + raise IndexError(f'Field with dim {field_dim} accessed with indices of dim {index_dim}') if value.is_tensor: return ti.Matrix.with_entries(*value.tensor_shape, [Expr(_ti_core.subscript(e.ptr, indices_expr_group)) for e in value.get_field_members()]) else: - return Expr(_ti_core.subscript(value.get_field_members()[0].ptr, indices_expr_group)) + return Expr(_ti_core.subscript(var, indices_expr_group)) elif is_taichi_class(value): return value.subscript(*indices) elif isinstance(value, (Expr, SNode)): if isinstance(value, Expr): - if not value.is_global(): + if not value.ptr.is_external_var(): raise TypeError( 'Subscription (e.g., "a[i, j]") only works on fields or external arrays.' ) - if not value.ptr.is_external_var() and value.ptr.snode() is None: - if not value.ptr.is_primal(): - raise RuntimeError( - f"Gradient {value.ptr.get_expr_name()} has not been placed, check whether `needs_grad=True`" - ) - else: - raise RuntimeError( - f"{value.ptr.get_expr_name()} has not been placed.") field_dim = int(value.ptr.get_attribute("dim")) else: # When reading bit structure we only support the 0-D case for now. 
field_dim = 0 - if isinstance(indices, - tuple) and len(indices) == 1 and indices[0] is None: - indices = [] - indices_expr_group = make_expr_group(*indices) - index_dim = indices_expr_group.size() if field_dim != index_dim: raise IndexError( f'Field with dim {field_dim} accessed with indices of dim {index_dim}' ) return Expr(_ti_core.subscript(value.ptr, indices_expr_group)) else: - return value[indices] + raise TypeError('Subscription (e.g., "a[i, j]") only works on fields or external arrays.') @taichi_scope @@ -434,6 +424,10 @@ def parent(self, n=1): """Same as :func:`taichi.SNode.parent`""" return _root_fb.root.parent(n) + def loop_range(self, n=1): + """Same as :func:`taichi.SNode.loop_range`""" + return _root_fb.root.loop_range() + def get_children(self): """Same as :func:`taichi.SNode.get_children`""" return _root_fb.root.get_children() @@ -780,7 +774,9 @@ def static(x, *xs): (bool, int, float, range, list, tuple, enumerate, ti.ndrange, ti.GroupedNDRange, zip, filter, map)) or x is None: return x - elif isinstance(x, (Expr, ti.Matrix)) and x.is_global(): + elif isinstance(x, Expr) and x.is_global(): + return x + elif isinstance(x, SNodeField): return x elif isinstance(x, (types.FunctionType, types.MethodType)): return x diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py index 5086535888102..57163d8b87bd1 100644 --- a/python/taichi/lang/matrix.py +++ b/python/taichi/lang/matrix.py @@ -135,16 +135,6 @@ def __init__(self, UserWarning, stacklevel=2) - def is_global(self): - results = [False for _ in self.entries] - for i, e in enumerate(self.entries): - if isinstance(e, expr.Expr): - if e.is_global(): - results[i] = True - assert results[i] == results[0], \ - "Matrices with mixed global/local entries are not allowed" - return results[0] - def element_wise_binary(self, foo, other): _taichi_skip_traceback = 1 ret = self.empty_copy() @@ -618,43 +608,6 @@ def diag(dim, val): # TODO: need a more systematic way to create a "0" with the right type return ret - def loop_range(self): - return self.entries[0] - - @property - def shape(self): - """Return the shape of a matrix.""" - # Took `self.entries[0]` as a representation of this tensor-of-matrices. - # https://github.com/taichi-dev/taichi/issues/1069#issuecomment-635712140 - return self.loop_range().shape - - @deprecated('x.dim()', 'len(x.shape)') - def dim(self): - return len(self.shape) - - @property - def name(self): - return self.loop_range().name - - @property - def dtype(self): - """Return the date type of matrix elements.""" - return self.loop_range().dtype - - @deprecated('x.data_type()', 'x.dtype') - def data_type(self): - return self.dtype - - @property - def snode(self): - return self.loop_range().snode - -# def make_grad(self): -# ret = self.empty_copy() -# for i in range(len(ret.entries)): -# ret.entries[i] = self.entries[i].grad -# return ret - def sum(self): """Return the sum of all elements.""" ret = self.entries[0] @@ -733,52 +686,16 @@ def all(self): ret = ret + ti.cmp_ne(self.entries[i], 0) return -ti.cmp_eq(ret, -len(self.entries)) + @taichi_scope def fill(self, val): - """Fill the element with values. + """Fills the matrix with a specific value in Taichi scope. Args: - val (Union[Number, List, Tuple, Matrix]): the dimension of val should be consistent with the dimension of element. 
- - Examples: - - Fill a scalar field: - - >>> v = ti.field(float,10) - >>> v.fill(10.0) - - Fill a vector field: - - >>> v = ti.Vector.field(2, float,4) - >>> v.fill([10.0,11.0]) - + val (Union[int, float]): Value to fill. """ - if impl.inside_kernel(): - - def assign_renamed(x, y): - return ti.assign(x, y) - - return self.element_wise_writeback_binary(assign_renamed, val) - - if isinstance(val, numbers.Number): - val = tuple( - [tuple([val for _ in range(self.m)]) for _ in range(self.n)]) - elif isinstance(val, - (list, tuple)) and isinstance(val[0], numbers.Number): - assert self.m == 1 - val = tuple([(v, ) for v in val]) - if isinstance(val, Matrix): - val_tuple = [] - for i in range(val.n): - row = [] - for j in range(val.m): - row.append(val(i, j)) - row = tuple(row) - val_tuple.append(row) - val = tuple(val_tuple) - assert len(val) == self.n - assert len(val[0]) == self.m - from taichi.lang.meta import fill_matrix - fill_matrix(self, val) + def assign_renamed(x, y): + return ti.assign(x, y) + return self.element_wise_writeback_binary(assign_renamed, val) @python_scope def to_numpy(self, keep_dims=False): @@ -830,11 +747,7 @@ def __str__(self): return str(self.to_numpy()) def __repr__(self): - if self.is_global(): - # make interactive shell happy, prevent materialization - return f'<{self.n}x{self.m} ti.Matrix.field>' - else: - return str(self.to_numpy()) + return str(self.to_numpy()) @staticmethod @taichi_scope diff --git a/python/taichi/lang/ops.py b/python/taichi/lang/ops.py index 0a65f66a1d324..a34acfabb6ec4 100644 --- a/python/taichi/lang/ops.py +++ b/python/taichi/lang/ops.py @@ -9,6 +9,7 @@ from taichi.lang import impl, matrix from taichi.lang.exception import TaichiSyntaxError from taichi.lang.expr import Expr, make_expr_group +from taichi.lang.field import SNodeField from taichi.lang.util import cook_dtype, is_taichi_class, taichi_scope unary_ops = [] @@ -929,12 +930,9 @@ def rescale_index(a, b, I): rescaled grouped loop index """ - assert isinstance(a, Expr) and a.is_global(), \ - f"first arguement must be a field" - assert isinstance(b, Expr) and b.is_global(), \ - f"second arguement must be a field" - assert isinstance(I, matrix.Matrix) and not I.is_global(), \ - f"third arguement must be a grouped index" + assert isinstance(a, SNodeField), f"first argument must be a field" + assert isinstance(b, SNodeField), f"second argument must be a field" + assert isinstance(I, matrix.Matrix), f"third argument must be a grouped index" Ib = I.copy() for n in range(min(I.n, min(len(a.shape), len(b.shape)))): if a.shape[n] > b.shape[n]: diff --git a/python/taichi/lang/snode.py b/python/taichi/lang/snode.py index 8b05fff45b97a..3c2efd55b27dc 100644 --- a/python/taichi/lang/snode.py +++ b/python/taichi/lang/snode.py @@ -247,6 +247,14 @@ def __call__(self): def get_shape(self, i): return self.shape[i] + def loop_range(self): + """Wraps `self` into an :class:`~taichi.lang.Expr` to serve as loop range. + + Returns: + Expr: The wrapped result. + """ + return Expr(_ti_core.global_var_expr_from_snode(self.ptr)) + @property def name(self): """Gets the name of `self`. 
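A minimal usage sketch (not part of the diffs above, and assuming a default ti.init() backend) of the user-facing behavior these commits keep intact: Python-scope field access and struct-for still target the field object itself, while the generated kernel code now obtains the underlying Expr through loop_range(), as the stmt_builder change below shows. Only public APIs already exercised by the tests in this series are used.

    import taichi as ti

    ti.init()

    x = ti.field(ti.f32, shape=(4, 4))            # scalar field backed by one SNode member
    v = ti.Vector.field(2, ti.f32, shape=(4, 4))  # vector field with two members per element

    @ti.kernel
    def inc():
        # Grouped struct-for over a field; the generated code calls
        # x.loop_range() to get the representative Expr for the loop range.
        for I in ti.grouped(x):
            x[I] += 1.0
            v[I] += ti.Vector([1.0, 2.0])

    inc()
    print(x.to_numpy())  # Python-scope to_numpy()/fill() stay on the field wrapper
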
diff --git a/python/taichi/lang/stmt_builder.py b/python/taichi/lang/stmt_builder.py index bcd99106f5ba6..8fe09a22b96ea 100644 --- a/python/taichi/lang/stmt_builder.py +++ b/python/taichi/lang/stmt_builder.py @@ -432,9 +432,9 @@ def build_struct_for(ctx, node, is_grouped): template = ''' if 1: ___loop_var = 0 - {} = ti.lang.expr.make_var_vector(size=len(___loop_var.shape)) + {} = ti.lang.expr.make_var_vector(size=len(___loop_var.loop_range().shape)) ___expr_group = ti.lang.expr.make_expr_group({}) - ti.begin_frontend_struct_for(___expr_group, ___loop_var) + ti.begin_frontend_struct_for(___expr_group, ___loop_var.loop_range()) ti.core.end_frontend_range_for() '''.format(vars, vars) t = ast.parse(template).body[0] @@ -447,7 +447,7 @@ def build_struct_for(ctx, node, is_grouped): {} ___loop_var = 0 ___expr_group = ti.lang.expr.make_expr_group({}) - ti.begin_frontend_struct_for(___expr_group, ___loop_var) + ti.begin_frontend_struct_for(___expr_group, ___loop_var.loop_range()) ti.core.end_frontend_range_for() '''.format(var_decl, vars) t = ast.parse(template).body[0] diff --git a/python/taichi/misc/gui.py b/python/taichi/misc/gui.py index a6d8a451983df..ec7a156323355 100644 --- a/python/taichi/misc/gui.py +++ b/python/taichi/misc/gui.py @@ -231,7 +231,7 @@ def set_image(self, img): import taichi as ti if self.fast_gui: - assert isinstance(img, ti.Matrix), \ + assert isinstance(img, ti.SNodeField) and img.is_tensor, \ "Only ti.Vector.field is supported in GUI.set_image when fast_gui=True" assert img.shape == self.res, \ "Image resolution does not match GUI resolution" @@ -244,7 +244,7 @@ def set_image(self, img): vector_to_fast_image(img, self.img) return - if isinstance(img, ti.Expr): + if isinstance(img, ti.SNodeField) and not img.is_tensor: if _ti_core.is_integral(img.dtype) or len(img.shape) != 2: # Images of uint is not optimized by xxx_to_image self.img = self.cook_image(img.to_numpy()) @@ -256,7 +256,7 @@ def set_image(self, img): tensor_to_image(img, self.img) ti.sync() - elif isinstance(img, ti.Matrix): + elif isinstance(img, ti.SNodeField) and img.is_tensor: if _ti_core.is_integral(img.dtype): self.img = self.cook_image(img.to_numpy()) else: @@ -336,9 +336,9 @@ def circles(self, if palette is not None: assert palette_indices is not None, 'palette must be used together with palette_indices' - from taichi.lang.expr import Expr + from taichi.lang.field import SNodeField - if isinstance(palette_indices, Expr): + if isinstance(palette_indices, SNodeField): ind_int = palette_indices.to_numpy().astype(np.uint32) elif isinstance(palette_indices, list) or isinstance( palette_indices, np.ndarray): diff --git a/tests/python/test_compare.py b/tests/python/test_compare.py index d2849c4e23154..98a4fec906d11 100644 --- a/tests/python/test_compare.py +++ b/tests/python/test_compare.py @@ -13,18 +13,18 @@ def test_compare_basics(): def func(): b[None] = 3 c[None] = 5 - a[0] = b < c - a[1] = b <= c - a[2] = b > c - a[3] = b >= c - a[4] = b == c - a[5] = b != c - a[6] = c < b - a[7] = c <= b - a[8] = c > b - a[9] = c >= b - a[10] = c == b - a[11] = c != b + a[0] = b[None] < c[None] + a[1] = b[None] <= c[None] + a[2] = b[None] > c[None] + a[3] = b[None] >= c[None] + a[4] = b[None] == c[None] + a[5] = b[None] != c[None] + a[6] = c[None] < b[None] + a[7] = c[None] <= b[None] + a[8] = c[None] > b[None] + a[9] = c[None] >= b[None] + a[10] = c[None] == b[None] + a[11] = c[None] != b[None] func() assert a[0] @@ -53,18 +53,18 @@ def test_compare_equality(): def func(): b[None] = 3 c[None] = 3 - a[0] = b < c - 
a[1] = b <= c - a[2] = b > c - a[3] = b >= c - a[4] = b == c - a[5] = b != c - a[6] = c < b - a[7] = c <= b - a[8] = c > b - a[9] = c >= b - a[10] = c == b - a[11] = c != b + a[0] = b[None] < c[None] + a[1] = b[None] <= c[None] + a[2] = b[None] > c[None] + a[3] = b[None] >= c[None] + a[4] = b[None] == c[None] + a[5] = b[None] != c[None] + a[6] = c[None] < b[None] + a[7] = c[None] <= b[None] + a[8] = c[None] > b[None] + a[9] = c[None] >= b[None] + a[10] = c[None] == b[None] + a[11] = c[None] != b[None] func() assert not a[0] @@ -132,8 +132,8 @@ def func(): b[None] = 2 c[None] = 3 d[None] = 3 - a[0] = c == d != b < d > b >= b <= c - a[1] = b <= c != d > b == b + a[0] = c[None] == d[None] != b[None] < d[None] > b[None] >= b[None] <= c[None] + a[1] = b[None] <= c[None] != d[None] > b[None] == b[None] func() assert a[0] diff --git a/tests/python/test_custom_float.py b/tests/python/test_custom_float.py index 5e3280007b711..6e34bd4fa25cc 100644 --- a/tests/python/test_custom_float.py +++ b/tests/python/test_custom_float.py @@ -32,8 +32,8 @@ def test_custom_matrix_rotation(): x = ti.Matrix.field(2, 2, dtype=cft) - ti.root.bit_struct(num_bits=32).place(x(0, 0), x(0, 1)) - ti.root.bit_struct(num_bits=32).place(x(1, 0), x(1, 1)) + ti.root.bit_struct(num_bits=32).place(x.get_scalar_field(0, 0), x.get_scalar_field(0, 1)) + ti.root.bit_struct(num_bits=32).place(x.get_scalar_field(1, 0), x.get_scalar_field(1, 1)) x[None] = [[1.0, 0.0], [0.0, 1.0]] diff --git a/tests/python/test_custom_float_time_integration.py b/tests/python/test_custom_float_time_integration.py index 4b8271a478c4a..f32a887172aa5 100644 --- a/tests/python/test_custom_float_time_integration.py +++ b/tests/python/test_custom_float_time_integration.py @@ -22,8 +22,8 @@ def test_custom_float_time_integration(use_cft, use_exponent, use_shared_exp): if use_shared_exp: ti.root.bit_struct(num_bits=32).place(x, shared_exponent=True) else: - ti.root.bit_struct(num_bits=32).place(x(0)) - ti.root.bit_struct(num_bits=32).place(x(1)) + ti.root.bit_struct(num_bits=32).place(x.get_scalar_field(0)) + ti.root.bit_struct(num_bits=32).place(x.get_scalar_field(1)) else: cit = ti.quant.int(16, True) cft = ti.type_factory.custom_float(significand_type=cit, diff --git a/tests/python/test_kernel_templates.py b/tests/python/test_kernel_templates.py index bde25504e1690..accf52cb603f3 100644 --- a/tests/python/test_kernel_templates.py +++ b/tests/python/test_kernel_templates.py @@ -51,7 +51,7 @@ def double(a: ti.template(), b: ti.template()): @ti.kernel def compute_loss(): for i in range(16): - ti.atomic_add(loss, z[i]) + ti.atomic_add(loss[None], z[i]) for i in range(16): x[i] = i diff --git a/tests/python/test_linalg.py b/tests/python/test_linalg.py index cf5431f511541..2ea3a48552cf0 100644 --- a/tests/python/test_linalg.py +++ b/tests/python/test_linalg.py @@ -113,11 +113,11 @@ def test_dot(): def init(): a[None] = ti.Vector([1.0, 2.0, 3.0]) b[None] = ti.Vector([4.0, 5.0, 6.0]) - c[None] = a.dot(b) + c[None] = a[None].dot(b[None]) a2[None] = ti.Vector([1.0, 2.0]) b2[None] = ti.Vector([4.0, 5.0]) - c2[None] = a2.dot(b2) + c2[None] = a2[None].dot(b2[None]) init() assert c[None] == 32.0 @@ -138,13 +138,13 @@ def transpose(): for i in range(dim): for j in range(dim): - m(i, j)[None] = i * 2 + j * 7 + m[None][i, j] = i * 2 + j * 7 transpose() for i in range(dim): for j in range(dim): - assert m(j, i)[None] == approx(i * 2 + j * 7) + assert m[None][j, i] == approx(i * 2 + j * 7) def _test_polar_decomp(dim, dt): @@ -170,7 +170,7 @@ def V(i, j): for i in range(dim): for 
j in range(dim): - m(i, j)[None] = V(i, j) + m[None][i, j] = V(i, j) polar() @@ -178,9 +178,9 @@ def V(i, j): for i in range(dim): for j in range(dim): - assert m(i, j)[None] == approx(V(i, j), abs=tol) - assert I(i, j)[None] == approx(int(i == j), abs=tol) - assert D(i, j)[None] == approx(0, abs=tol) + assert m[None][i, j] == approx(V(i, j), abs=tol) + assert I[None][i, j] == approx(int(i == j), abs=tol) + assert D[None][i, j] == approx(0, abs=tol) def test_polar_decomp(): @@ -202,7 +202,7 @@ def test_matrix(): @ti.kernel def inc(): - for i in x(0, 0): + for i in x: delta = ti.Matrix([[3, 0], [0, 0]]) x[i][1, 1] = x[i][0, 0] + 1 x[i] = x[i] + delta diff --git a/tests/python/test_matrix_different_type.py b/tests/python/test_matrix_different_type.py index ee2de6f7f20c1..e77176505bb6a 100644 --- a/tests/python/test_matrix_different_type.py +++ b/tests/python/test_matrix_different_type.py @@ -77,12 +77,12 @@ def test_custom_type(): a = ti.Matrix.field(len(type_list), len(type_list[0]), dtype=type_list) b = ti.Matrix.field(len(type_list), len(type_list[0]), dtype=type_list) c = ti.Matrix.field(len(type_list), len(type_list[0]), dtype=type_list) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(a(0, 0), a(0, 1)) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(a(1, 0), a(1, 1)) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(b(0, 0), b(0, 1)) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(b(1, 0), b(1, 1)) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(c(0, 0), c(0, 1)) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(c(1, 0), c(1, 1)) + ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(a.get_scalar_field(0, 0), a.get_scalar_field(0, 1)) + ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(a.get_scalar_field(1, 0), a.get_scalar_field(1, 1)) + ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(b.get_scalar_field(0, 0), b.get_scalar_field(0, 1)) + ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(b.get_scalar_field(1, 0), b.get_scalar_field(1, 1)) + ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(c.get_scalar_field(0, 0), c.get_scalar_field(0, 1)) + ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(c.get_scalar_field(1, 0), c.get_scalar_field(1, 1)) @ti.kernel def init(): diff --git a/tests/python/test_no_grad.py b/tests/python/test_no_grad.py index 4aba4558abef9..521a0d39c797c 100644 --- a/tests/python/test_no_grad.py +++ b/tests/python/test_no_grad.py @@ -18,7 +18,7 @@ def test_no_grad(): @ti.kernel def func(): for i in range(N): - ti.atomic_add(loss, x[i]**2) + ti.atomic_add(loss[None], x[i]**2) with ti.Tape(loss): func() diff --git a/tests/python/test_oop.py b/tests/python/test_oop.py index 83956f945dc03..0edbcb2174d4c 100644 --- a/tests/python/test_oop.py +++ b/tests/python/test_oop.py @@ -84,7 +84,7 @@ def reduce(self): @ti.kernel def double(): - double_total[None] = 2 * arr.total + double_total[None] = 2 * arr.total[None] with ti.Tape(loss=double_total): arr.reduce() @@ -171,7 +171,8 @@ def reduce(self): with ti.Tape(loss=arr.total): arr.reduce() for i in range(arr.n): - assert arr.val.grad[i] == 42 + for j in range(arr.n): + assert arr.val.grad[i, j] == 42 @ti.must_throw(ti.KernelDefError) diff --git a/tests/python/test_tensor_reflection.py b/tests/python/test_tensor_reflection.py index 31d16ba38bcbd..a992851ae5a82 100644 --- a/tests/python/test_tensor_reflection.py +++ b/tests/python/test_tensor_reflection.py @@ -82,12 +82,12 @@ def test_unordered_matrix(): assert val.shape == (n, m, p) assert val.dtype == ti.i32 - assert 
val.loop_range().snode.parent(0) == val.loop_range().snode - assert val.loop_range().snode.parent() == blk3 - assert val.loop_range().snode.parent(1) == blk3 - assert val.loop_range().snode.parent(2) == blk2 - assert val.loop_range().snode.parent(3) == blk1 - assert val.loop_range().snode.parent(4) == ti.root + assert val.snode.parent(0) == val.snode + assert val.snode.parent() == blk3 + assert val.snode.parent(1) == blk3 + assert val.snode.parent(2) == blk2 + assert val.snode.parent(3) == blk1 + assert val.snode.parent(4) == ti.root @ti.all_archs From daa370b23a8ede006c166592b57f37e87e71bc60 Mon Sep 17 00:00:00 2001 From: Yi Xu Date: Thu, 5 Aug 2021 13:30:34 +0800 Subject: [PATCH 05/10] Separate ScalarField and MatrixField --- python/taichi/aot/module.py | 9 +- python/taichi/lang/field.py | 508 +++++++++++++++++++---------------- python/taichi/lang/impl.py | 17 +- python/taichi/lang/matrix.py | 14 +- python/taichi/lang/meta.py | 6 +- python/taichi/lang/ops.py | 6 +- python/taichi/lang/snode.py | 5 +- python/taichi/misc/gui.py | 11 +- 8 files changed, 303 insertions(+), 273 deletions(-) diff --git a/python/taichi/aot/module.py b/python/taichi/aot/module.py index e9697afcd3513..78a6072686768 100644 --- a/python/taichi/aot/module.py +++ b/python/taichi/aot/module.py @@ -1,7 +1,7 @@ from contextlib import contextmanager -from taichi.lang import expr, impl, kernel_arguments, kernel_impl, matrix -from taichi.lang.field import SNodeField +from taichi.lang import impl, kernel_arguments, kernel_impl +from taichi.lang.field import ScalarField, MatrixField class KernelTemplate: def __init__(self, kernel_fn, aot_module): @@ -100,11 +100,12 @@ def add_field(self, name, field): self._fields[name] = field column_num = 1 row_num = 1 - assert isinstance(field, SNodeField) - if field.is_tensor: + if isinstance(field, MatrixField): is_scalar = False row_num = field.m column_num = field.n + else: + assert isinstance(field, ScalarField) self._aot_builder.add_field(name, is_scalar, field.dtype, field.snode.shape, row_num, column_num) diff --git a/python/taichi/lang/field.py b/python/taichi/lang/field.py index 736f4fef1001c..2ce44fc945063 100644 --- a/python/taichi/lang/field.py +++ b/python/taichi/lang/field.py @@ -1,5 +1,3 @@ -from functools import reduce -from operator import mul from taichi.core.util import ti_core as _ti_core from taichi.lang import impl from taichi.lang.util import python_scope, to_numpy_type, to_pytorch_type @@ -9,44 +7,31 @@ class Field: - """Taichi field abstract class.""" - @property - def shape(self): - raise Exception("Abstract Field class should not be directly used") - - @property - def dtype(self): - raise Exception("Abstract Field class should not be directly used") - - @property - def tensor_shape(self): - raise Exception("Abstract Field class should not be directly used") - - @property - def is_tensor(self): - return len(self.tensor_shape) > 0 - - -class SNodeField(Field): """Taichi field with SNode implementation. - Each field element is a scalar, a vector, or a matrix. - A scalar field has 1 field member. A 3x3 matrix field has 9 field members. + A field is constructed by a list of field members. + For example, a scalar field has 1 field member, while a 3x3 matrix field has 9 field members. A field member is a Python Expr wrapping a C++ GlobalVariableExpression. A C++ GlobalVariableExpression wraps the corresponding SNode. Args: vars (List[Expr]): Field members. - tensor_shape (Tuple[Int]): Tensor shape of each field element, () if scalar. 
""" - def __init__(self, vars, tensor_shape): - assert len(vars) == reduce(mul, tensor_shape, 1), "Tensor shape doesn't match number of vars" - assert len(tensor_shape) in [0, 2], "Only scalars, vectors and matrices are supported" + def __init__(self, vars): self.vars = vars - self.tshape = tensor_shape self.host_accessors = None self.grad = None + @property + def snode(self): + """Gets representative SNode for info purposes. + + Returns: + SNode: Representative SNode (SNode of first field member). + """ + from taichi.lang.snode import SNode + return SNode(self.vars[0].ptr.snode()) + @property def shape(self): """Gets field shape. @@ -65,15 +50,6 @@ def dtype(self): """ return self.snode.dtype - @property - def tensor_shape(self): - """Gets tensor shape of each field element. - - Returns: - Tuple[Int]: Tensor shape of each field element, () if scalar. - """ - return self.tshape - @property def name(self): """Gets field name. @@ -83,28 +59,15 @@ def name(self): """ return self.snode.name - @property - def snode(self): - """Gets representative SNode for info purposes. - - Returns: - SNode: Representative SNode (SNode of first field member). - """ - from taichi.lang.snode import SNode - return SNode(self.vars[0].ptr.snode()) - def parent(self, n=1): - '''XY: To be fixed: - Create another Expr instance which represents one of the ancestors in SNode tree. - - The class it self must represent GlobalVariableExpression (field) internally. + """Gets an ancestor of the representative SNode in the SNode tree. Args: - n (int): levels of the target ancestor higher than the current field's snode + n (int): the number of levels going up from the representative SNode. Returns: - An Expr instance which represents the target SNode ancestor internally. - ''' + SNode: The n-th parent of the representative SNode. + """ return self.snode.parent(n) def get_field_members(self): @@ -116,186 +79,70 @@ def get_field_members(self): return self.vars def loop_range(self): + """Gets representative field member for loop range info. + + Returns: + Expr: Representative (first) field member. + """ return self.vars[0] - @python_scope def set_grad(self, grad): - self.grad = grad + """Sets corresponding gradient field. - @python_scope - def get_scalar_field(self, *indices): - """Creates a scalar field using a field member - Only used for quant. + Args: + grad (Field): Corresponding gradient field. """ - assert self.is_tensor, "get_scalar_field can only be called on a Matrix field" - assert len(indices) in [1, 2] - i = indices[0] - j = 0 if len(indices) == 1 else indices[1] - return SNodeField([self.vars[i * self.m + j]], ()) - - @property - def n(self): - assert self.is_tensor - return self.tensor_shape[0] - - @property - def m(self): - assert self.is_tensor - return self.tensor_shape[1] + self.grad = grad @python_scope def fill(self, val): - """Fills the whole field with a specific value. + """Fills `self` with a specific value. Args: val (Union[int, float]): Value to fill. """ - # TODO: avoid too many template instantiations - """Fill the element with values. - - Args: - val (Union[Number, List, Tuple, Matrix]): the dimension of val should be consistent with the dimension of element. 
- - Examples: - - Fill a scalar field: - - >>> v = ti.field(float,10) - >>> v.fill(10.0) - - Fill a vector field: - - >>> v = ti.Vector.field(2, float,4) - >>> v.fill([10.0,11.0]) - - """ - if self.is_tensor: - if isinstance(val, numbers.Number): - val = tuple([tuple([val for _ in range(self.m)]) for _ in range(self.n)]) - elif isinstance(val, (list, tuple)) and isinstance(val[0], numbers.Number): - assert self.m == 1 - val = tuple([(v, ) for v in val]) - elif isinstance(val, ti.Matrix): - val_tuple = [] - for i in range(val.n): - row = [] - for j in range(val.m): - row.append(val(i, j)) - row = tuple(row) - val_tuple.append(row) - val = tuple(val_tuple) - assert len(val) == self.n - assert len(val[0]) == self.m - from taichi.lang.meta import fill_matrix - fill_matrix(self, val) - else: - from taichi.lang.meta import fill_tensor - fill_tensor(self, val) + raise NotImplementedError() @python_scope - def to_numpy(self, keep_dims=False, as_vector=None, dtype=None): - """Converts the taichi field to a numpy array. + def to_numpy(self, dtype=None): + """Converts `self` to a numpy array. Args: - keep_dims (bool, optional): Whether to keep the dimension after conversion. - When keep_dims=True, on an n-D matrix field, the numpy array always has n+2 dims, even for 1x1, 1xn, nx1 matrix fields. - When keep_dims=False, the resulting numpy array should skip the matrix dims with size 1. - For example, a 4x1 or 1x4 matrix field with 5x6x7 elements results in an array of shape 5x6x7x4. - as_vector (bool, deprecated): Whether to make the returned numpy array as a vector, i.e., with shape (n,) rather than (n, 1). - Note that this argument has been deprecated. - More discussion about `as_vector`: https://github.com/taichi-dev/taichi/pull/1046#issuecomment-633548858. dtype (DataType, optional): The desired data type of returned numpy array. Returns: numpy.ndarray: The result numpy array. """ - if as_vector is not None: - warning( - 'v.to_numpy(as_vector=True) is deprecated, ' - 'please use v.to_numpy() directly instead', - DeprecationWarning, - stacklevel=3) - if dtype is None: - dtype = to_numpy_type(self.dtype) - import numpy as np - if self.is_tensor: - as_vector = self.m == 1 and not keep_dims - shape_ext = (self.n, ) if as_vector else (self.n, self.m) - arr = np.zeros(self.shape + shape_ext, dtype=dtype) - from taichi.lang.meta import matrix_to_ext_arr - matrix_to_ext_arr(self, arr, as_vector) - else: - from taichi.lang.meta import tensor_to_ext_arr - arr = np.zeros(shape=self.shape, dtype=dtype) - tensor_to_ext_arr(self, arr) - ti.sync() - return arr + raise NotImplementedError() @python_scope - def to_torch(self, device=None, keep_dims=False): - """Converts the taichi field to a torch tensor. + def to_torch(self, device=None): + """Converts `self` to a torch tensor. Args: device (torch.device, optional): The desired device of returned tensor. - keep_dims (bool, optional): Whether to keep the dimension after conversion. - See :meth:`~taichi.lang.field.Field.to_numpy` for more detailed explanation. Returns: torch.tensor: The result torch tensor. 
""" - import torch - if self.is_tensor: - as_vector = self.m == 1 and not keep_dims - shape_ext = (self.n, ) if as_vector else (self.n, self.m) - arr = torch.empty(self.shape + shape_ext, - dtype=to_pytorch_type(self.dtype), - device=device) - from taichi.lang.meta import matrix_to_ext_arr - matrix_to_ext_arr(self, arr, as_vector) - else: - arr = torch.zeros(size=self.shape, - dtype=to_pytorch_type(self.dtype), - device=device) - from taichi.lang.meta import tensor_to_ext_arr - tensor_to_ext_arr(self, arr) - ti.sync() - return arr + raise NotImplementedError() @python_scope def from_numpy(self, arr): """Loads all elements from a numpy array. - The shape of the numpy array needs to be the same as the internal data structure. + The shape of the numpy array needs to be the same as `self`. Args: arr (numpy.ndarray): The source numpy array. """ - if self.is_tensor: - if len(arr.shape) == len(self.shape) + 1: - as_vector = True - assert self.m == 1, "This is not a vector field" - else: - as_vector = False - assert len(arr.shape) == len(self.shape) + 2 - dim_ext = 1 if as_vector else 2 - assert len(arr.shape) == len(self.shape) + dim_ext - from taichi.lang.meta import ext_arr_to_matrix - ext_arr_to_matrix(arr, self, as_vector) - else: - assert len(self.shape) == len(arr.shape) - for i in range(len(self.shape)): - assert self.shape[i] == arr.shape[i] - from taichi.lang.meta import ext_arr_to_tensor - if hasattr(arr, 'contiguous'): - arr = arr.contiguous() - ext_arr_to_tensor(arr, self) - ti.sync() + raise NotImplementedError() @python_scope def from_torch(self, arr): """Loads all elements from a torch tensor. - The shape of the torch tensor needs to be the same as the internal data structure. + The shape of the torch tensor needs to be the same as `self`. Args: arr (torch.tensor): The source torch tensor. @@ -304,70 +151,46 @@ def from_torch(self, arr): @python_scope def copy_from(self, other): - assert isinstance(other, SNodeField) + """Copies all elements from another field. + + The shape of the other field needs to be the same as `self`. + + Args: + other (Field): The source field. + """ + assert isinstance(other, Field) from taichi.lang.meta import tensor_to_tensor assert len(self.shape) == len(other.shape) tensor_to_tensor(self, other) - def __str__(self): - if impl.inside_kernel(): - return self.__repr__() # make pybind11 happy, see Matrix.__str__ - else: - return str(self.to_numpy()) - - def __repr__(self): - # make interactive shell happy, prevent materialization - if self.is_tensor: - return f'<{self.n}x{self.m} ti.Matrix.field>' - else: - return '' - @python_scope def __setitem__(self, key, value): - """XY: To be fixed: - Set value with specified key when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - This will not be directly called from python for vector/matrix fields. - Python Matrix class will decompose operations into scalar-level first. + """Sets field element in Python scope. Args: - key (Union[List[int], int, None]): indices to set - value (Union[int, float]): value to set + key (Union[List[int], int, None]): Coordinates of the field element. + value (element type): Value to set. 
""" - self.initialize_host_accessors() - if self.is_tensor: - if not isinstance(value, (list, tuple)): - value = list(value) - if not isinstance(value[0], (list, tuple)): - value = [[i] for i in value] - for i in range(self.n): - for j in range(self.m): - self[key][i, j] = value[i][j] - else: - self.host_accessors[0].setter(value, *self.pad_key(key)) + raise NotImplementedError() @python_scope def __getitem__(self, key): - """XY: to fix - Get value with specified key when the class itself represents GlobalVariableExpression (field) or ExternalTensorExpression internally. - - This will not be directly called from python for vector/matrix fields. - Python Matrix class will decompose operations into scalar-level first. + """Gets field element in Python scope. Args: - key (Union[List[int], int, None]): indices to get. + key (Union[List[int], int, None]): Coordinates of the field element. Returns: - Value retrieved with specified key. + element type: Value retrieved. """ - self.initialize_host_accessors() - key = self.pad_key(key) - if self.is_tensor: - return ti.Matrix.with_entries(*self.tensor_shape, [SNodeFieldHostAccess(e, key) for e in self.host_accessors]) + raise NotImplementedError() + + def __str__(self): + if impl.inside_kernel(): + return self.__repr__() # make pybind11 happy, see Matrix.__str__ else: - return self.host_accessors[0].getter(*key) + return str(self.to_numpy()) - @python_scope def pad_key(self, key): if key is None: key = () @@ -376,15 +199,222 @@ def pad_key(self, key): assert len(key) == len(self.shape) return key + ((0, ) * (_ti_core.get_max_num_indices() - len(key))) - @python_scope def initialize_host_accessors(self): if self.host_accessors: return impl.get_runtime().materialize() - self.host_accessors = [SNodeFieldHostAccessor(e.ptr.snode()) for e in self.vars] + self.host_accessors = [SNodeHostAccessor(e.ptr.snode()) for e in self.vars] + + +class ScalarField(Field): + """Taichi scalar field with SNode implementation. + + Args: + var (Expr): Field member. + """ + def __init__(self, var): + super().__init__([var]) + + @python_scope + def fill(self, val): + from taichi.lang.meta import fill_tensor + fill_tensor(self, val) + + @python_scope + def to_numpy(self, dtype=None): + if dtype is None: + dtype = to_numpy_type(self.dtype) + import numpy as np + arr = np.zeros(shape=self.shape, dtype=dtype) + from taichi.lang.meta import tensor_to_ext_arr + tensor_to_ext_arr(self, arr) + ti.sync() + return arr + + @python_scope + def to_torch(self, device=None): + import torch + arr = torch.zeros(size=self.shape, + dtype=to_pytorch_type(self.dtype), + device=device) + from taichi.lang.meta import tensor_to_ext_arr + tensor_to_ext_arr(self, arr) + ti.sync() + return arr + + @python_scope + def from_numpy(self, arr): + assert len(self.shape) == len(arr.shape) + for i in range(len(self.shape)): + assert self.shape[i] == arr.shape[i] + if hasattr(arr, 'contiguous'): + arr = arr.contiguous() + from taichi.lang.meta import ext_arr_to_tensor + ext_arr_to_tensor(arr, self) + ti.sync() + + @python_scope + def __setitem__(self, key, value): + self.initialize_host_accessors() + self.host_accessors[0].setter(value, *self.pad_key(key)) + + @python_scope + def __getitem__(self, key): + self.initialize_host_accessors() + return self.host_accessors[0].getter(*self.pad_key(key)) + + def __repr__(self): + # make interactive shell happy, prevent materialization + return '' + + +class MatrixField(Field): + """Taichi matrix field with SNode implementation. + + Args: + vars (Expr): Field members. 
+ n (Int): Number of rows. + m (Int): Number of columns. + """ + def __init__(self, vars, n, m): + assert len(vars) == n * m + super().__init__(vars) + self.n = n + self.m = m + + def get_scalar_field(self, *indices): + """Creates a ScalarField using a specific field member. Only used for quant. + + Args: + indices (Tuple[Int]): Specified indices of the field member. + + Returns: + ScalarField: The result ScalarField. + """ + assert len(indices) in [1, 2] + i = indices[0] + j = 0 if len(indices) == 1 else indices[1] + return ScalarField(self.vars[i * self.m + j]) + + @python_scope + def fill(self, val): + """Fills `self` with specific values. + + Args: + val (Union[Number, List, Tuple, Matrix]): Values to fill, which should have dimension consistent with `self`. + """ + if isinstance(val, numbers.Number): + val = tuple([tuple([val for _ in range(self.m)]) for _ in range(self.n)]) + elif isinstance(val, (list, tuple)) and isinstance(val[0], numbers.Number): + assert self.m == 1 + val = tuple([(v, ) for v in val]) + elif isinstance(val, ti.Matrix): + val_tuple = [] + for i in range(val.n): + row = [] + for j in range(val.m): + row.append(val(i, j)) + row = tuple(row) + val_tuple.append(row) + val = tuple(val_tuple) + assert len(val) == self.n + assert len(val[0]) == self.m + from taichi.lang.meta import fill_matrix + fill_matrix(self, val) + + @python_scope + def to_numpy(self, keep_dims=False, as_vector=None, dtype=None): + """Converts `self` to a numpy array. + + Args: + keep_dims (bool, optional): Whether to keep the dimension after conversion. + When keep_dims=True, on an n-D matrix field, the numpy array always has n+2 dims, even for 1x1, 1xn, nx1 matrix fields. + When keep_dims=False, the resulting numpy array should skip the matrix dims with size 1. + For example, a 4x1 or 1x4 matrix field with 5x6x7 elements results in an array of shape 5x6x7x4. + as_vector (bool, deprecated): Whether to make the returned numpy array as a vector, i.e., with shape (n,) rather than (n, 1). + Note that this argument has been deprecated. + More discussion about `as_vector`: https://github.com/taichi-dev/taichi/pull/1046#issuecomment-633548858. + dtype (DataType, optional): The desired data type of returned numpy array. + + Returns: + numpy.ndarray: The result numpy array. + """ + if as_vector is not None: + warning( + 'v.to_numpy(as_vector=True) is deprecated, ' + 'please use v.to_numpy() directly instead', + DeprecationWarning, + stacklevel=3) + if dtype is None: + dtype = to_numpy_type(self.dtype) + as_vector = self.m == 1 and not keep_dims + shape_ext = (self.n, ) if as_vector else (self.n, self.m) + import numpy as np + arr = np.zeros(self.shape + shape_ext, dtype=dtype) + from taichi.lang.meta import matrix_to_ext_arr + matrix_to_ext_arr(self, arr, as_vector) + ti.sync() + return arr + + def to_torch(self, device=None, keep_dims=False): + """Converts `self` to a torch tensor. + + Args: + device (torch.device, optional): The desired device of returned tensor. + keep_dims (bool, optional): Whether to keep the dimension after conversion. + See :meth:`~taichi.lang.field.MatrixField.to_numpy` for more detailed explanation. + + Returns: + torch.tensor: The result torch tensor. 
+ """ + import torch + as_vector = self.m == 1 and not keep_dims + shape_ext = (self.n, ) if as_vector else (self.n, self.m) + arr = torch.empty(self.shape + shape_ext, + dtype=to_pytorch_type(self.dtype), + device=device) + from taichi.lang.meta import matrix_to_ext_arr + matrix_to_ext_arr(self, arr, as_vector) + ti.sync() + return arr + + @python_scope + def from_numpy(self, arr): + if len(arr.shape) == len(self.shape) + 1: + as_vector = True + assert self.m == 1, "This is not a vector field" + else: + as_vector = False + assert len(arr.shape) == len(self.shape) + 2 + dim_ext = 1 if as_vector else 2 + assert len(arr.shape) == len(self.shape) + dim_ext + from taichi.lang.meta import ext_arr_to_matrix + ext_arr_to_matrix(arr, self, as_vector) + ti.sync() + + @python_scope + def __setitem__(self, key, value): + self.initialize_host_accessors() + if not isinstance(value, (list, tuple)): + value = list(value) + if not isinstance(value[0], (list, tuple)): + value = [[i] for i in value] + for i in range(self.n): + for j in range(self.m): + self[key][i, j] = value[i][j] + + @python_scope + def __getitem__(self, key): + self.initialize_host_accessors() + key = self.pad_key(key) + return ti.Matrix.with_entries(self.n, self.m, [SNodeHostAccess(e, key) for e in self.host_accessors]) + + def __repr__(self): + # make interactive shell happy, prevent materialization + return f'<{self.n}x{self.m} ti.Matrix.field>' -class SNodeFieldHostAccessor: +class SNodeHostAccessor: def __init__(self, snode): if _ti_core.is_real(snode.data_type()): def getter(*key): @@ -411,7 +441,7 @@ def setter(value, *key): self.setter = setter -class SNodeFieldHostAccess: +class SNodeHostAccess: def __init__(self, accessor, key): self.accessor = accessor self.key = key diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py index 82f16c8a05efd..9875580ccda73 100644 --- a/python/taichi/lang/impl.py +++ b/python/taichi/lang/impl.py @@ -6,7 +6,7 @@ from taichi.core.util import ti_core as _ti_core from taichi.lang.exception import InvalidOperationError, TaichiSyntaxError from taichi.lang.expr import Expr, make_expr_group -from taichi.lang.field import SNodeField +from taichi.lang.field import Field, ScalarField, MatrixField from taichi.lang.snode import SNode from taichi.lang.tape import TapeImpl from taichi.lang.util import (cook_dtype, is_taichi_class, python_scope, @@ -65,7 +65,7 @@ def expr_init_list(xs, expected): @taichi_scope def expr_init_func( rhs): # temporary solution to allow passing in fields as arguments - if isinstance(rhs, SNodeField): + if isinstance(rhs, Field): return rhs return expr_init(rhs) @@ -123,7 +123,7 @@ def subscript(value, *indices): indices_expr_group = make_expr_group(*indices) index_dim = indices_expr_group.size() - if isinstance(value, SNodeField): + if isinstance(value, Field): var = value.get_field_members()[0].ptr if var.snode() is None: if var.is_primal(): @@ -133,8 +133,8 @@ def subscript(value, *indices): field_dim = int(var.get_attribute("dim")) if field_dim != index_dim: raise IndexError(f'Field with dim {field_dim} accessed with indices of dim {index_dim}') - if value.is_tensor: - return ti.Matrix.with_entries(*value.tensor_shape, [Expr(_ti_core.subscript(e.ptr, indices_expr_group)) for e in value.get_field_members()]) + if isinstance(value, MatrixField): + return ti.Matrix.with_entries(value.n, value.m, [Expr(_ti_core.subscript(e.ptr, indices_expr_group)) for e in value.get_field_members()]) else: return Expr(_ti_core.subscript(var, indices_expr_group)) elif 
is_taichi_class(value): @@ -484,7 +484,7 @@ def var(dt, shape=None, offset=None, needs_grad=False): @python_scope -def field(dtype, shape=None, name="", offset=None, needs_grad=False, use_snode=True): +def field(dtype, shape=None, name="", offset=None, needs_grad=False): """Defines a Taichi field A Taichi field can be viewed as an abstract N-dimensional array, hiding away @@ -530,9 +530,8 @@ def field(dtype, shape=None, name="", offset=None, needs_grad=False, use_snode=T del _taichi_skip_traceback - assert use_snode, "Only SNode Field is supported now" x, x_grad = create_field_member(dtype, name) - x, x_grad = SNodeField([x], ()), SNodeField([x_grad], ()) + x, x_grad = ScalarField(x), ScalarField(x_grad) x.set_grad(x_grad) if shape is not None: @@ -776,7 +775,7 @@ def static(x, *xs): return x elif isinstance(x, Expr) and x.is_global(): return x - elif isinstance(x, SNodeField): + elif isinstance(x, Field): return x elif isinstance(x, (types.FunctionType, types.MethodType)): return x diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py index 57163d8b87bd1..a88678ce7bd3c 100644 --- a/python/taichi/lang/matrix.py +++ b/python/taichi/lang/matrix.py @@ -8,7 +8,7 @@ from taichi.lang import ops as ops_mod from taichi.lang.common_ops import TaichiOperations from taichi.lang.exception import TaichiSyntaxError -from taichi.lang.field import SNodeField, SNodeFieldHostAccess +from taichi.lang.field import ScalarField, MatrixField, SNodeHostAccess from taichi.lang.util import (in_python_scope, is_taichi_class, python_scope, taichi_scope, to_numpy_type, to_pytorch_type) from taichi.misc.util import deprecated, warning @@ -248,7 +248,7 @@ def __call__(self, *args, **kwargs): _taichi_skip_traceback = 1 assert kwargs == {} ret = self.entries[self.linearize_entry_id(*args)] - if isinstance(ret, SNodeFieldHostAccess): + if isinstance(ret, SNodeHostAccess): ret = ret.accessor.getter(*ret.key) return ret @@ -257,7 +257,7 @@ def set_entry(self, i, j, e): if impl.inside_kernel(): self.entries[idx].assign(e) else: - if isinstance(self.entries[idx], SNodeFieldHostAccess): + if isinstance(self.entries[idx], SNodeHostAccess): self.entries[idx].accessor.setter(e, *self.entries[idx].key) else: self.entries[idx] = e @@ -342,7 +342,7 @@ def w(self, value): @property @python_scope def value(self): - assert isinstance(self.entries[0], SNodeFieldHostAccess) + assert isinstance(self.entries[0], SNodeHostAccess) ret = self.empty_copy() for i in range(self.n): for j in range(self.m): @@ -869,7 +869,7 @@ def field(cls, for _ in range(n * m): entries.append(impl.create_field_member(dtype, name=name)) entries, entries_grad = zip(*entries) - entries, entries_grad = SNodeField(entries, (n, m)), SNodeField(entries_grad, (n, m)) + entries, entries_grad = MatrixField(entries, n, m), MatrixField(entries_grad, n, m) entries.set_grad(entries_grad) if layout is not None: @@ -894,10 +894,10 @@ def field(cls, dim = len(shape) if layout.soa: for e in entries.get_field_members(): - ti.root.dense(impl.index_nd(dim), shape).place(SNodeField(e, ()), offset=offset) + ti.root.dense(impl.index_nd(dim), shape).place(ScalarField(e), offset=offset) if needs_grad: for e in entries_grad.get_field_members(): - ti.root.dense(impl.index_nd(dim), shape).place(SNodeField(e, ()), offset=offset) + ti.root.dense(impl.index_nd(dim), shape).place(ScalarField(e), offset=offset) else: ti.root.dense(impl.index_nd(dim), shape).place(entries, offset=offset) if needs_grad: diff --git a/python/taichi/lang/meta.py b/python/taichi/lang/meta.py index 
34b51fb137ee6..899b06b07e9e0 100644 --- a/python/taichi/lang/meta.py +++ b/python/taichi/lang/meta.py @@ -1,7 +1,7 @@ from taichi.core import settings from taichi.lang import impl from taichi.lang.expr import Expr -from taichi.lang.field import SNodeField +from taichi.lang.field import ScalarField from taichi.lang.kernel_arguments import ext_arr, template from taichi.lang.kernel_impl import kernel @@ -102,9 +102,9 @@ def ext_arr_to_matrix(arr: ext_arr(), mat: template(), as_vector: template()): @kernel def clear_gradients(vars: template()): - for I in ti.grouped(SNodeField([Expr(vars[0])], ())): + for I in ti.grouped(ScalarField(Expr(vars[0]))): for s in ti.static(vars): - SNodeField([Expr(s)], ())[I] = 0 + ScalarField(Expr(s))[I] = 0 @kernel diff --git a/python/taichi/lang/ops.py b/python/taichi/lang/ops.py index a34acfabb6ec4..96fda35217d94 100644 --- a/python/taichi/lang/ops.py +++ b/python/taichi/lang/ops.py @@ -9,7 +9,7 @@ from taichi.lang import impl, matrix from taichi.lang.exception import TaichiSyntaxError from taichi.lang.expr import Expr, make_expr_group -from taichi.lang.field import SNodeField +from taichi.lang.field import Field from taichi.lang.util import cook_dtype, is_taichi_class, taichi_scope unary_ops = [] @@ -930,8 +930,8 @@ def rescale_index(a, b, I): rescaled grouped loop index """ - assert isinstance(a, SNodeField), f"first argument must be a field" - assert isinstance(b, SNodeField), f"second argument must be a field" + assert isinstance(a, Field), f"first argument must be a field" + assert isinstance(b, Field), f"second argument must be a field" assert isinstance(I, matrix.Matrix), f"third argument must be a grouped index" Ib = I.copy() for n in range(min(I.n, min(len(a.shape), len(b.shape)))): diff --git a/python/taichi/lang/snode.py b/python/taichi/lang/snode.py index 3c2efd55b27dc..0cd3cbeb75c86 100644 --- a/python/taichi/lang/snode.py +++ b/python/taichi/lang/snode.py @@ -7,8 +7,7 @@ from taichi.core.util import ti_core as _ti_core from taichi.lang import impl from taichi.lang.expr import Expr -from taichi.lang.field import SNodeField -from taichi.lang.util import is_taichi_class +from taichi.lang.field import Field from taichi.misc.util import deprecated @@ -156,7 +155,7 @@ def place(self, *args, offset=None, shared_exponent=False): self.ptr.begin_shared_exp_placement() for arg in args: - if isinstance(arg, SNodeField): + if isinstance(arg, Field): for var in arg.get_field_members(): self.ptr.place(var.ptr, offset) elif isinstance(arg, list): diff --git a/python/taichi/misc/gui.py b/python/taichi/misc/gui.py index ec7a156323355..922851ec99882 100644 --- a/python/taichi/misc/gui.py +++ b/python/taichi/misc/gui.py @@ -229,9 +229,10 @@ def set_image(self, img): import numpy as np import taichi as ti + from taichi.lang.field import ScalarField, MatrixField if self.fast_gui: - assert isinstance(img, ti.SNodeField) and img.is_tensor, \ + assert isinstance(img, MatrixField), \ "Only ti.Vector.field is supported in GUI.set_image when fast_gui=True" assert img.shape == self.res, \ "Image resolution does not match GUI resolution" @@ -244,7 +245,7 @@ def set_image(self, img): vector_to_fast_image(img, self.img) return - if isinstance(img, ti.SNodeField) and not img.is_tensor: + if isinstance(img, ScalarField): if _ti_core.is_integral(img.dtype) or len(img.shape) != 2: # Images of uint is not optimized by xxx_to_image self.img = self.cook_image(img.to_numpy()) @@ -256,7 +257,7 @@ def set_image(self, img): tensor_to_image(img, self.img) ti.sync() - elif isinstance(img, 
ti.SNodeField) and img.is_tensor: + elif isinstance(img, MatrixField): if _ti_core.is_integral(img.dtype): self.img = self.cook_image(img.to_numpy()) else: @@ -336,9 +337,9 @@ def circles(self, if palette is not None: assert palette_indices is not None, 'palette must be used together with palette_indices' - from taichi.lang.field import SNodeField + from taichi.lang.field import Field - if isinstance(palette_indices, SNodeField): + if isinstance(palette_indices, Field): ind_int = palette_indices.to_numpy().astype(np.uint32) elif isinstance(palette_indices, list) or isinstance( palette_indices, np.ndarray): From b0e9bd1c0c2c273fc80a889a29ed512100eaff44 Mon Sep 17 00:00:00 2001 From: Yi Xu Date: Thu, 5 Aug 2021 13:37:49 +0800 Subject: [PATCH 06/10] Nit --- python/taichi/lang/impl.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py index 9875580ccda73..058b53d3c57d1 100644 --- a/python/taichi/lang/impl.py +++ b/python/taichi/lang/impl.py @@ -501,7 +501,6 @@ def field(dtype, shape=None, name="", offset=None, needs_grad=False): offset (Union[int, tuple[int]], optional): offset of the field domain needs_grad (bool, optional): whether this field participates in autodiff and thus needs an adjoint field to store the gradients. - use_snode (bool, optional): whether to use SNode as underlying implementation Example: The code below shows how a Taichi field can be declared and defined:: From 7f6942be800243d848dfba56e77bfe34048323d9 Mon Sep 17 00:00:00 2001 From: Taichi Gardener Date: Thu, 5 Aug 2021 06:03:16 +0000 Subject: [PATCH 07/10] Auto Format --- python/taichi/aot/module.py | 3 ++- python/taichi/lang/expr.py | 2 +- python/taichi/lang/field.py | 22 ++++++++++++---- python/taichi/lang/impl.py | 24 +++++++++++++----- python/taichi/lang/matrix.py | 29 ++++++++++++++-------- python/taichi/lang/ops.py | 3 ++- python/taichi/misc/gui.py | 2 +- tests/python/test_compare.py | 3 ++- tests/python/test_custom_float.py | 6 +++-- tests/python/test_matrix_different_type.py | 24 +++++++++++++----- 10 files changed, 84 insertions(+), 34 deletions(-) diff --git a/python/taichi/aot/module.py b/python/taichi/aot/module.py index 78a6072686768..f4b13b7c053eb 100644 --- a/python/taichi/aot/module.py +++ b/python/taichi/aot/module.py @@ -1,7 +1,8 @@ from contextlib import contextmanager from taichi.lang import impl, kernel_arguments, kernel_impl -from taichi.lang.field import ScalarField, MatrixField +from taichi.lang.field import MatrixField, ScalarField + class KernelTemplate: def __init__(self, kernel_fn, aot_module): diff --git a/python/taichi/lang/expr.py b/python/taichi/lang/expr.py index e2e4ca77dca0f..6af204a2ece8a 100644 --- a/python/taichi/lang/expr.py +++ b/python/taichi/lang/expr.py @@ -1,7 +1,7 @@ from taichi.core.util import ti_core as _ti_core from taichi.lang import impl from taichi.lang.common_ops import TaichiOperations -from taichi.lang.util import (is_taichi_class, python_scope) +from taichi.lang.util import is_taichi_class, python_scope import taichi as ti diff --git a/python/taichi/lang/field.py b/python/taichi/lang/field.py index 2ce44fc945063..bd234e5bd1766 100644 --- a/python/taichi/lang/field.py +++ b/python/taichi/lang/field.py @@ -1,8 +1,10 @@ +import numbers + from taichi.core.util import ti_core as _ti_core from taichi.lang import impl from taichi.lang.util import python_scope, to_numpy_type, to_pytorch_type from taichi.misc.util import warning -import numbers + import taichi as ti @@ -203,7 +205,9 @@ def 
initialize_host_accessors(self): if self.host_accessors: return impl.get_runtime().materialize() - self.host_accessors = [SNodeHostAccessor(e.ptr.snode()) for e in self.vars] + self.host_accessors = [ + SNodeHostAccessor(e.ptr.snode()) for e in self.vars + ] class ScalarField(Field): @@ -304,8 +308,10 @@ def fill(self, val): val (Union[Number, List, Tuple, Matrix]): Values to fill, which should have dimension consistent with `self`. """ if isinstance(val, numbers.Number): - val = tuple([tuple([val for _ in range(self.m)]) for _ in range(self.n)]) - elif isinstance(val, (list, tuple)) and isinstance(val[0], numbers.Number): + val = tuple( + [tuple([val for _ in range(self.m)]) for _ in range(self.n)]) + elif isinstance(val, + (list, tuple)) and isinstance(val[0], numbers.Number): assert self.m == 1 val = tuple([(v, ) for v in val]) elif isinstance(val, ti.Matrix): @@ -407,7 +413,9 @@ def __setitem__(self, key, value): def __getitem__(self, key): self.initialize_host_accessors() key = self.pad_key(key) - return ti.Matrix.with_entries(self.n, self.m, [SNodeHostAccess(e, key) for e in self.host_accessors]) + return ti.Matrix.with_entries( + self.n, self.m, + [SNodeHostAccess(e, key) for e in self.host_accessors]) def __repr__(self): # make interactive shell happy, prevent materialization @@ -417,6 +425,7 @@ def __repr__(self): class SNodeHostAccessor: def __init__(self, snode): if _ti_core.is_real(snode.data_type()): + def getter(*key): assert len(key) == _ti_core.get_max_num_indices() return snode.read_float(key) @@ -426,10 +435,12 @@ def setter(value, *key): snode.write_float(key, value) else: if _ti_core.is_signed(snode.data_type()): + def getter(*key): assert len(key) == _ti_core.get_max_num_indices() return snode.read_int(key) else: + def getter(*key): assert len(key) == _ti_core.get_max_num_indices() return snode.read_uint(key) @@ -437,6 +448,7 @@ def getter(*key): def setter(value, *key): assert len(key) == _ti_core.get_max_num_indices() snode.write_int(key, value) + self.getter = getter self.setter = setter diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py index 951549db81481..059cf6619d0a5 100644 --- a/python/taichi/lang/impl.py +++ b/python/taichi/lang/impl.py @@ -6,7 +6,7 @@ from taichi.core.util import ti_core as _ti_core from taichi.lang.exception import InvalidOperationError, TaichiSyntaxError from taichi.lang.expr import Expr, make_expr_group -from taichi.lang.field import Field, ScalarField, MatrixField +from taichi.lang.field import Field, MatrixField, ScalarField from taichi.lang.snode import SNode from taichi.lang.tape import TapeImpl from taichi.lang.util import (cook_dtype, is_taichi_class, python_scope, @@ -127,14 +127,22 @@ def subscript(value, *indices): var = value.get_field_members()[0].ptr if var.snode() is None: if var.is_primal(): - raise RuntimeError(f"{var.get_expr_name()} has not been placed.") + raise RuntimeError( + f"{var.get_expr_name()} has not been placed.") else: - raise RuntimeError(f"Gradient {var.get_expr_name()} has not been placed, check whether `needs_grad=True`") + raise RuntimeError( + f"Gradient {var.get_expr_name()} has not been placed, check whether `needs_grad=True`" + ) field_dim = int(var.get_attribute("dim")) if field_dim != index_dim: - raise IndexError(f'Field with dim {field_dim} accessed with indices of dim {index_dim}') + raise IndexError( + f'Field with dim {field_dim} accessed with indices of dim {index_dim}' + ) if isinstance(value, MatrixField): - return ti.Matrix.with_entries(value.n, value.m, 
[Expr(_ti_core.subscript(e.ptr, indices_expr_group)) for e in value.get_field_members()]) + return ti.Matrix.with_entries(value.n, value.m, [ + Expr(_ti_core.subscript(e.ptr, indices_expr_group)) + for e in value.get_field_members() + ]) else: return Expr(_ti_core.subscript(var, indices_expr_group)) elif is_taichi_class(value): @@ -155,7 +163,9 @@ def subscript(value, *indices): ) return Expr(_ti_core.subscript(value.ptr, indices_expr_group)) else: - raise TypeError('Subscription (e.g., "a[i, j]") only works on fields or external arrays.') + raise TypeError( + 'Subscription (e.g., "a[i, j]") only works on fields or external arrays.' + ) @taichi_scope @@ -454,6 +464,7 @@ def __repr__(self): >>> ti.root.pointer(ti.ij, 4).dense(ti.ij, 8).place(x) """ + @python_scope def create_field_member(dtype, name): dtype = cook_dtype(dtype) @@ -477,6 +488,7 @@ def create_field_member(dtype, name): return x, x_grad + @deprecated('ti.var', 'ti.field') def var(dt, shape=None, offset=None, needs_grad=False): _taichi_skip_traceback = 1 diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py index a88678ce7bd3c..8db446050f780 100644 --- a/python/taichi/lang/matrix.py +++ b/python/taichi/lang/matrix.py @@ -8,7 +8,7 @@ from taichi.lang import ops as ops_mod from taichi.lang.common_ops import TaichiOperations from taichi.lang.exception import TaichiSyntaxError -from taichi.lang.field import ScalarField, MatrixField, SNodeHostAccess +from taichi.lang.field import MatrixField, ScalarField, SNodeHostAccess from taichi.lang.util import (in_python_scope, is_taichi_class, python_scope, taichi_scope, to_numpy_type, to_pytorch_type) from taichi.misc.util import deprecated, warning @@ -273,8 +273,8 @@ def subscript(self, *indices): ti.Expr) and self.entries[0].ptr.is_global_ptr( ) and ti.is_extension_supported( ti.cfg.arch, ti.extension.dynamic_index): - return ti.subscript_with_offset(self.entries[0], (i, j), - self.m, True) + return ti.subscript_with_offset(self.entries[0], (i, j), self.m, + True) else: return self(i, j) @@ -695,6 +695,7 @@ def fill(self, val): """ def assign_renamed(x, y): return ti.assign(x, y) + return self.element_wise_writeback_binary(assign_renamed, val) @python_scope @@ -857,19 +858,22 @@ def field(cls, dtype ) == n, f'Please set correct dtype list for Vector. The shape of dtype list should be ({n}, ) instead of {np.shape(dtype)}' for i in range(n): - entries.append(impl.create_field_member(dtype[i], name=name)) + entries.append( + impl.create_field_member(dtype[i], name=name)) else: assert len(np.shape(dtype)) == 2 and len(dtype) == n and len( dtype[0] ) == m, f'Please set correct dtype list for Matrix. 
The shape of dtype list should be ({n}, {m}) instead of {np.shape(dtype)}' for i in range(n): for j in range(m): - entries.append(impl.create_field_member(dtype[i][j], name=name)) + entries.append( + impl.create_field_member(dtype[i][j], name=name)) else: for _ in range(n * m): entries.append(impl.create_field_member(dtype, name=name)) entries, entries_grad = zip(*entries) - entries, entries_grad = MatrixField(entries, n, m), MatrixField(entries_grad, n, m) + entries, entries_grad = MatrixField(entries, n, m), MatrixField( + entries_grad, n, m) entries.set_grad(entries_grad) if layout is not None: @@ -894,14 +898,19 @@ def field(cls, dim = len(shape) if layout.soa: for e in entries.get_field_members(): - ti.root.dense(impl.index_nd(dim), shape).place(ScalarField(e), offset=offset) + ti.root.dense(impl.index_nd(dim), + shape).place(ScalarField(e), offset=offset) if needs_grad: for e in entries_grad.get_field_members(): - ti.root.dense(impl.index_nd(dim), shape).place(ScalarField(e), offset=offset) + ti.root.dense(impl.index_nd(dim), + shape).place(ScalarField(e), + offset=offset) else: - ti.root.dense(impl.index_nd(dim), shape).place(entries, offset=offset) + ti.root.dense(impl.index_nd(dim), shape).place(entries, + offset=offset) if needs_grad: - ti.root.dense(impl.index_nd(dim), shape).place(entries_grad, offset=offset) + ti.root.dense(impl.index_nd(dim), + shape).place(entries_grad, offset=offset) return entries @classmethod diff --git a/python/taichi/lang/ops.py b/python/taichi/lang/ops.py index 96fda35217d94..01a42e4d34bb9 100644 --- a/python/taichi/lang/ops.py +++ b/python/taichi/lang/ops.py @@ -932,7 +932,8 @@ def rescale_index(a, b, I): """ assert isinstance(a, Field), f"first argument must be a field" assert isinstance(b, Field), f"second argument must be a field" - assert isinstance(I, matrix.Matrix), f"third argument must be a grouped index" + assert isinstance(I, + matrix.Matrix), f"third argument must be a grouped index" Ib = I.copy() for n in range(min(I.n, min(len(a.shape), len(b.shape)))): if a.shape[n] > b.shape[n]: diff --git a/python/taichi/misc/gui.py b/python/taichi/misc/gui.py index 922851ec99882..5301afcab6b3a 100644 --- a/python/taichi/misc/gui.py +++ b/python/taichi/misc/gui.py @@ -227,9 +227,9 @@ def set_image(self, img): """ import numpy as np + from taichi.lang.field import MatrixField, ScalarField import taichi as ti - from taichi.lang.field import ScalarField, MatrixField if self.fast_gui: assert isinstance(img, MatrixField), \ diff --git a/tests/python/test_compare.py b/tests/python/test_compare.py index 98a4fec906d11..ed6dc1c16a1cb 100644 --- a/tests/python/test_compare.py +++ b/tests/python/test_compare.py @@ -132,7 +132,8 @@ def func(): b[None] = 2 c[None] = 3 d[None] = 3 - a[0] = c[None] == d[None] != b[None] < d[None] > b[None] >= b[None] <= c[None] + a[0] = c[None] == d[None] != b[None] < d[None] > b[None] >= b[ + None] <= c[None] a[1] = b[None] <= c[None] != d[None] > b[None] == b[None] func() diff --git a/tests/python/test_custom_float.py b/tests/python/test_custom_float.py index 6e34bd4fa25cc..1066396decc5d 100644 --- a/tests/python/test_custom_float.py +++ b/tests/python/test_custom_float.py @@ -32,8 +32,10 @@ def test_custom_matrix_rotation(): x = ti.Matrix.field(2, 2, dtype=cft) - ti.root.bit_struct(num_bits=32).place(x.get_scalar_field(0, 0), x.get_scalar_field(0, 1)) - ti.root.bit_struct(num_bits=32).place(x.get_scalar_field(1, 0), x.get_scalar_field(1, 1)) + ti.root.bit_struct(num_bits=32).place(x.get_scalar_field(0, 0), + x.get_scalar_field(0, 
1)) + ti.root.bit_struct(num_bits=32).place(x.get_scalar_field(1, 0), + x.get_scalar_field(1, 1)) x[None] = [[1.0, 0.0], [0.0, 1.0]] diff --git a/tests/python/test_matrix_different_type.py b/tests/python/test_matrix_different_type.py index e77176505bb6a..30aeebe7fc01e 100644 --- a/tests/python/test_matrix_different_type.py +++ b/tests/python/test_matrix_different_type.py @@ -77,12 +77,24 @@ def test_custom_type(): a = ti.Matrix.field(len(type_list), len(type_list[0]), dtype=type_list) b = ti.Matrix.field(len(type_list), len(type_list[0]), dtype=type_list) c = ti.Matrix.field(len(type_list), len(type_list[0]), dtype=type_list) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(a.get_scalar_field(0, 0), a.get_scalar_field(0, 1)) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(a.get_scalar_field(1, 0), a.get_scalar_field(1, 1)) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(b.get_scalar_field(0, 0), b.get_scalar_field(0, 1)) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(b.get_scalar_field(1, 0), b.get_scalar_field(1, 1)) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(c.get_scalar_field(0, 0), c.get_scalar_field(0, 1)) - ti.root.dense(ti.i, 1).bit_struct(num_bits=32).place(c.get_scalar_field(1, 0), c.get_scalar_field(1, 1)) + ti.root.dense(ti.i, + 1).bit_struct(num_bits=32).place(a.get_scalar_field(0, 0), + a.get_scalar_field(0, 1)) + ti.root.dense(ti.i, + 1).bit_struct(num_bits=32).place(a.get_scalar_field(1, 0), + a.get_scalar_field(1, 1)) + ti.root.dense(ti.i, + 1).bit_struct(num_bits=32).place(b.get_scalar_field(0, 0), + b.get_scalar_field(0, 1)) + ti.root.dense(ti.i, + 1).bit_struct(num_bits=32).place(b.get_scalar_field(1, 0), + b.get_scalar_field(1, 1)) + ti.root.dense(ti.i, + 1).bit_struct(num_bits=32).place(c.get_scalar_field(0, 0), + c.get_scalar_field(0, 1)) + ti.root.dense(ti.i, + 1).bit_struct(num_bits=32).place(c.get_scalar_field(1, 0), + c.get_scalar_field(1, 1)) @ti.kernel def init(): From efa7196f02ebc18e8875458eb621d55c0c274f18 Mon Sep 17 00:00:00 2001 From: Yi Xu Date: Fri, 6 Aug 2021 13:04:39 +0800 Subject: [PATCH 08/10] Put MatrixField in matrix.py --- python/taichi/aot/module.py | 3 +- python/taichi/lang/field.py | 153 ---------------------------------- python/taichi/lang/impl.py | 3 +- python/taichi/lang/matrix.py | 156 ++++++++++++++++++++++++++++++++++- python/taichi/misc/gui.py | 3 +- 5 files changed, 161 insertions(+), 157 deletions(-) diff --git a/python/taichi/aot/module.py b/python/taichi/aot/module.py index f4b13b7c053eb..1acce54131b97 100644 --- a/python/taichi/aot/module.py +++ b/python/taichi/aot/module.py @@ -1,7 +1,8 @@ from contextlib import contextmanager from taichi.lang import impl, kernel_arguments, kernel_impl -from taichi.lang.field import MatrixField, ScalarField +from taichi.lang.field import ScalarField +from taichi.lang.matrix import MatrixField class KernelTemplate: diff --git a/python/taichi/lang/field.py b/python/taichi/lang/field.py index bd234e5bd1766..659d240070fd2 100644 --- a/python/taichi/lang/field.py +++ b/python/taichi/lang/field.py @@ -1,9 +1,6 @@ -import numbers - from taichi.core.util import ti_core as _ti_core from taichi.lang import impl from taichi.lang.util import python_scope, to_numpy_type, to_pytorch_type -from taichi.misc.util import warning import taichi as ti @@ -272,156 +269,6 @@ def __repr__(self): return '' -class MatrixField(Field): - """Taichi matrix field with SNode implementation. - - Args: - vars (Expr): Field members. - n (Int): Number of rows. - m (Int): Number of columns. 
- """ - def __init__(self, vars, n, m): - assert len(vars) == n * m - super().__init__(vars) - self.n = n - self.m = m - - def get_scalar_field(self, *indices): - """Creates a ScalarField using a specific field member. Only used for quant. - - Args: - indices (Tuple[Int]): Specified indices of the field member. - - Returns: - ScalarField: The result ScalarField. - """ - assert len(indices) in [1, 2] - i = indices[0] - j = 0 if len(indices) == 1 else indices[1] - return ScalarField(self.vars[i * self.m + j]) - - @python_scope - def fill(self, val): - """Fills `self` with specific values. - - Args: - val (Union[Number, List, Tuple, Matrix]): Values to fill, which should have dimension consistent with `self`. - """ - if isinstance(val, numbers.Number): - val = tuple( - [tuple([val for _ in range(self.m)]) for _ in range(self.n)]) - elif isinstance(val, - (list, tuple)) and isinstance(val[0], numbers.Number): - assert self.m == 1 - val = tuple([(v, ) for v in val]) - elif isinstance(val, ti.Matrix): - val_tuple = [] - for i in range(val.n): - row = [] - for j in range(val.m): - row.append(val(i, j)) - row = tuple(row) - val_tuple.append(row) - val = tuple(val_tuple) - assert len(val) == self.n - assert len(val[0]) == self.m - from taichi.lang.meta import fill_matrix - fill_matrix(self, val) - - @python_scope - def to_numpy(self, keep_dims=False, as_vector=None, dtype=None): - """Converts `self` to a numpy array. - - Args: - keep_dims (bool, optional): Whether to keep the dimension after conversion. - When keep_dims=True, on an n-D matrix field, the numpy array always has n+2 dims, even for 1x1, 1xn, nx1 matrix fields. - When keep_dims=False, the resulting numpy array should skip the matrix dims with size 1. - For example, a 4x1 or 1x4 matrix field with 5x6x7 elements results in an array of shape 5x6x7x4. - as_vector (bool, deprecated): Whether to make the returned numpy array as a vector, i.e., with shape (n,) rather than (n, 1). - Note that this argument has been deprecated. - More discussion about `as_vector`: https://github.com/taichi-dev/taichi/pull/1046#issuecomment-633548858. - dtype (DataType, optional): The desired data type of returned numpy array. - - Returns: - numpy.ndarray: The result numpy array. - """ - if as_vector is not None: - warning( - 'v.to_numpy(as_vector=True) is deprecated, ' - 'please use v.to_numpy() directly instead', - DeprecationWarning, - stacklevel=3) - if dtype is None: - dtype = to_numpy_type(self.dtype) - as_vector = self.m == 1 and not keep_dims - shape_ext = (self.n, ) if as_vector else (self.n, self.m) - import numpy as np - arr = np.zeros(self.shape + shape_ext, dtype=dtype) - from taichi.lang.meta import matrix_to_ext_arr - matrix_to_ext_arr(self, arr, as_vector) - ti.sync() - return arr - - def to_torch(self, device=None, keep_dims=False): - """Converts `self` to a torch tensor. - - Args: - device (torch.device, optional): The desired device of returned tensor. - keep_dims (bool, optional): Whether to keep the dimension after conversion. - See :meth:`~taichi.lang.field.MatrixField.to_numpy` for more detailed explanation. - - Returns: - torch.tensor: The result torch tensor. 
- """ - import torch - as_vector = self.m == 1 and not keep_dims - shape_ext = (self.n, ) if as_vector else (self.n, self.m) - arr = torch.empty(self.shape + shape_ext, - dtype=to_pytorch_type(self.dtype), - device=device) - from taichi.lang.meta import matrix_to_ext_arr - matrix_to_ext_arr(self, arr, as_vector) - ti.sync() - return arr - - @python_scope - def from_numpy(self, arr): - if len(arr.shape) == len(self.shape) + 1: - as_vector = True - assert self.m == 1, "This is not a vector field" - else: - as_vector = False - assert len(arr.shape) == len(self.shape) + 2 - dim_ext = 1 if as_vector else 2 - assert len(arr.shape) == len(self.shape) + dim_ext - from taichi.lang.meta import ext_arr_to_matrix - ext_arr_to_matrix(arr, self, as_vector) - ti.sync() - - @python_scope - def __setitem__(self, key, value): - self.initialize_host_accessors() - if not isinstance(value, (list, tuple)): - value = list(value) - if not isinstance(value[0], (list, tuple)): - value = [[i] for i in value] - for i in range(self.n): - for j in range(self.m): - self[key][i, j] = value[i][j] - - @python_scope - def __getitem__(self, key): - self.initialize_host_accessors() - key = self.pad_key(key) - return ti.Matrix.with_entries( - self.n, self.m, - [SNodeHostAccess(e, key) for e in self.host_accessors]) - - def __repr__(self): - # make interactive shell happy, prevent materialization - return f'<{self.n}x{self.m} ti.Matrix.field>' - - class SNodeHostAccessor: def __init__(self, snode): if _ti_core.is_real(snode.data_type()): diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py index 059cf6619d0a5..77ea2fce5f2ae 100644 --- a/python/taichi/lang/impl.py +++ b/python/taichi/lang/impl.py @@ -6,7 +6,8 @@ from taichi.core.util import ti_core as _ti_core from taichi.lang.exception import InvalidOperationError, TaichiSyntaxError from taichi.lang.expr import Expr, make_expr_group -from taichi.lang.field import Field, MatrixField, ScalarField +from taichi.lang.field import Field, ScalarField +from taichi.lang.matrix import MatrixField from taichi.lang.snode import SNode from taichi.lang.tape import TapeImpl from taichi.lang.util import (cook_dtype, is_taichi_class, python_scope, diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py index 8db446050f780..46ac1b722943e 100644 --- a/python/taichi/lang/matrix.py +++ b/python/taichi/lang/matrix.py @@ -8,7 +8,7 @@ from taichi.lang import ops as ops_mod from taichi.lang.common_ops import TaichiOperations from taichi.lang.exception import TaichiSyntaxError -from taichi.lang.field import MatrixField, ScalarField, SNodeHostAccess +from taichi.lang.field import Field, ScalarField, SNodeHostAccess from taichi.lang.util import (in_python_scope, is_taichi_class, python_scope, taichi_scope, to_numpy_type, to_pytorch_type) from taichi.misc.util import deprecated, warning @@ -1124,3 +1124,157 @@ def Vector(n, dt=None, shape=None, offset=None, **kwargs): Vector.outer_product = Matrix.outer_product Vector.unit = Matrix.unit Vector.normalized = Matrix.normalized + + +class MatrixField(Field): + """Taichi matrix field with SNode implementation. + + Args: + vars (Expr): Field members. + n (Int): Number of rows. + m (Int): Number of columns. 
+ """ + def __init__(self, vars, n, m): + assert len(vars) == n * m + super().__init__(vars) + self.n = n + self.m = m + + @deprecated('x(i, j)', 'x.get_scalar_field(i, j)') + def __call__(self, *indices): + return self.get_scalar_field(*indices) + + def get_scalar_field(self, *indices): + """Creates a ScalarField using a specific field member. Only used for quant. + + Args: + indices (Tuple[Int]): Specified indices of the field member. + + Returns: + ScalarField: The result ScalarField. + """ + assert len(indices) in [1, 2] + i = indices[0] + j = 0 if len(indices) == 1 else indices[1] + return ScalarField(self.vars[i * self.m + j]) + + @python_scope + def fill(self, val): + """Fills `self` with specific values. + + Args: + val (Union[Number, List, Tuple, Matrix]): Values to fill, which should have dimension consistent with `self`. + """ + if isinstance(val, numbers.Number): + val = tuple( + [tuple([val for _ in range(self.m)]) for _ in range(self.n)]) + elif isinstance(val, + (list, tuple)) and isinstance(val[0], numbers.Number): + assert self.m == 1 + val = tuple([(v, ) for v in val]) + elif isinstance(val, ti.Matrix): + val_tuple = [] + for i in range(val.n): + row = [] + for j in range(val.m): + row.append(val(i, j)) + row = tuple(row) + val_tuple.append(row) + val = tuple(val_tuple) + assert len(val) == self.n + assert len(val[0]) == self.m + from taichi.lang.meta import fill_matrix + fill_matrix(self, val) + + @python_scope + def to_numpy(self, keep_dims=False, as_vector=None, dtype=None): + """Converts `self` to a numpy array. + + Args: + keep_dims (bool, optional): Whether to keep the dimension after conversion. + When keep_dims=True, on an n-D matrix field, the numpy array always has n+2 dims, even for 1x1, 1xn, nx1 matrix fields. + When keep_dims=False, the resulting numpy array should skip the matrix dims with size 1. + For example, a 4x1 or 1x4 matrix field with 5x6x7 elements results in an array of shape 5x6x7x4. + as_vector (bool, deprecated): Whether to make the returned numpy array as a vector, i.e., with shape (n,) rather than (n, 1). + Note that this argument has been deprecated. + More discussion about `as_vector`: https://github.com/taichi-dev/taichi/pull/1046#issuecomment-633548858. + dtype (DataType, optional): The desired data type of returned numpy array. + + Returns: + numpy.ndarray: The result numpy array. + """ + if as_vector is not None: + warning( + 'v.to_numpy(as_vector=True) is deprecated, ' + 'please use v.to_numpy() directly instead', + DeprecationWarning, + stacklevel=3) + if dtype is None: + dtype = to_numpy_type(self.dtype) + as_vector = self.m == 1 and not keep_dims + shape_ext = (self.n, ) if as_vector else (self.n, self.m) + import numpy as np + arr = np.zeros(self.shape + shape_ext, dtype=dtype) + from taichi.lang.meta import matrix_to_ext_arr + matrix_to_ext_arr(self, arr, as_vector) + ti.sync() + return arr + + def to_torch(self, device=None, keep_dims=False): + """Converts `self` to a torch tensor. + + Args: + device (torch.device, optional): The desired device of returned tensor. + keep_dims (bool, optional): Whether to keep the dimension after conversion. + See :meth:`~taichi.lang.field.MatrixField.to_numpy` for more detailed explanation. + + Returns: + torch.tensor: The result torch tensor. 
+ """ + import torch + as_vector = self.m == 1 and not keep_dims + shape_ext = (self.n, ) if as_vector else (self.n, self.m) + arr = torch.empty(self.shape + shape_ext, + dtype=to_pytorch_type(self.dtype), + device=device) + from taichi.lang.meta import matrix_to_ext_arr + matrix_to_ext_arr(self, arr, as_vector) + ti.sync() + return arr + + @python_scope + def from_numpy(self, arr): + if len(arr.shape) == len(self.shape) + 1: + as_vector = True + assert self.m == 1, "This is not a vector field" + else: + as_vector = False + assert len(arr.shape) == len(self.shape) + 2 + dim_ext = 1 if as_vector else 2 + assert len(arr.shape) == len(self.shape) + dim_ext + from taichi.lang.meta import ext_arr_to_matrix + ext_arr_to_matrix(arr, self, as_vector) + ti.sync() + + @python_scope + def __setitem__(self, key, value): + self.initialize_host_accessors() + if not isinstance(value, (list, tuple)): + value = list(value) + if not isinstance(value[0], (list, tuple)): + value = [[i] for i in value] + for i in range(self.n): + for j in range(self.m): + self[key][i, j] = value[i][j] + + @python_scope + def __getitem__(self, key): + self.initialize_host_accessors() + key = self.pad_key(key) + return Matrix.with_entries( + self.n, self.m, + [SNodeHostAccess(e, key) for e in self.host_accessors]) + + def __repr__(self): + # make interactive shell happy, prevent materialization + return f'<{self.n}x{self.m} ti.Matrix.field>' diff --git a/python/taichi/misc/gui.py b/python/taichi/misc/gui.py index 5301afcab6b3a..bf22e13997430 100644 --- a/python/taichi/misc/gui.py +++ b/python/taichi/misc/gui.py @@ -227,7 +227,8 @@ def set_image(self, img): """ import numpy as np - from taichi.lang.field import MatrixField, ScalarField + from taichi.lang.field import ScalarField + from taichi.lang.matrix import MatrixField import taichi as ti From f1dbc6f1d1ecb915856b227ded0e977dfe5e6730 Mon Sep 17 00:00:00 2001 From: Yi Xu Date: Fri, 6 Aug 2021 13:20:54 +0800 Subject: [PATCH 09/10] Fix examples --- examples/algorithm/print_offset.py | 4 ++-- examples/rendering/sdf2d.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/algorithm/print_offset.py b/examples/algorithm/print_offset.py index 1dcf758629e11..b905126bd4f9b 100644 --- a/examples/algorithm/print_offset.py +++ b/examples/algorithm/print_offset.py @@ -12,8 +12,8 @@ @ti.kernel def fill(): for i, j in a: - base = ti.get_addr(a.snode(), [0, 0]) - a[i, j] = int(ti.get_addr(a.snode(), [i, j]) - base) // 4 + base = ti.get_addr(a.snode, [0, 0]) + a[i, j] = int(ti.get_addr(a.snode, [i, j]) - base) // 4 fill() diff --git a/examples/rendering/sdf2d.py b/examples/rendering/sdf2d.py index 6b301fb2fc6d4..bd89696243a58 100644 --- a/examples/rendering/sdf2d.py +++ b/examples/rendering/sdf2d.py @@ -51,8 +51,8 @@ def subtract(a, b): @ti.func def sdf_moon(p): # EMI, RFL, RFR - d1 = vres((p - light_pos + vec2(0.05, 0.0)).norm() - 0.1, 1.0, 0.0, 0.0) - d2 = vres((p - light_pos - vec2(0.05, 0.0)).norm() - 0.1, 1.0, 0.0, 0.0) + d1 = vres((p - light_pos[None] + vec2(0.05, 0.0)).norm() - 0.1, 1.0, 0.0, 0.0) + d2 = vres((p - light_pos[None] - vec2(0.05, 0.0)).norm() - 0.1, 1.0, 0.0, 0.0) d3 = vres(p[1] - 0.6, 0.0, 1.0, 0.0) d4 = vres((p - vec2(0.5, 0.6)).norm() - 0.3, 0.0, 1.0, 0.0) return union(subtract(d1, d2), subtract(d3, d4)) @@ -63,7 +63,7 @@ def sdf_lens(p): # EMI, RFL, RFR d1 = vres((p - vec2(0.5, 0.28)).norm() - 0.2, 0.0, 0.3, 1.0) d2 = vres((p - vec2(0.5, 0.6)).norm() - 0.2, 0.0, 0.3, 1.0) - d3 = vres((p - light_pos).norm() - 0.05, 5.0, 0.0, 0.0) + d3 = 
vres((p - light_pos[None]).norm() - 0.05, 5.0, 0.0, 0.0) return union(intersect(d1, d2), d3) From a54c957825a3d4aa043f942bd5613a861a419008 Mon Sep 17 00:00:00 2001 From: Taichi Gardener Date: Fri, 6 Aug 2021 05:23:13 +0000 Subject: [PATCH 10/10] Auto Format --- examples/rendering/sdf2d.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/examples/rendering/sdf2d.py b/examples/rendering/sdf2d.py index bd89696243a58..6d004aa6d01a2 100644 --- a/examples/rendering/sdf2d.py +++ b/examples/rendering/sdf2d.py @@ -51,8 +51,10 @@ def subtract(a, b): @ti.func def sdf_moon(p): # EMI, RFL, RFR - d1 = vres((p - light_pos[None] + vec2(0.05, 0.0)).norm() - 0.1, 1.0, 0.0, 0.0) - d2 = vres((p - light_pos[None] - vec2(0.05, 0.0)).norm() - 0.1, 1.0, 0.0, 0.0) + d1 = vres((p - light_pos[None] + vec2(0.05, 0.0)).norm() - 0.1, 1.0, 0.0, + 0.0) + d2 = vres((p - light_pos[None] - vec2(0.05, 0.0)).norm() - 0.1, 1.0, 0.0, + 0.0) d3 = vres(p[1] - 0.6, 0.0, 1.0, 0.0) d4 = vres((p - vec2(0.5, 0.6)).norm() - 0.3, 0.0, 1.0, 0.0) return union(subtract(d1, d2), subtract(d3, d4))
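
Illustration (not part of the patch series): a minimal usage sketch of the field API these patches converge on -- ti.field() returning a ScalarField, ti.Matrix.field() returning a MatrixField now defined in matrix.py, and snode exposed as a property. The concrete shapes, dtypes, and the CPU backend below are arbitrary choices made for this example, not values taken from the patches.

    import taichi as ti

    ti.init(arch=ti.cpu)

    # Scalar field: ti.field() returns a ScalarField wrapping a single SNode-backed member.
    x = ti.field(ti.f32, shape=(4, 4))
    print(x.shape, x.dtype)   # field metadata exposed as properties on Field
    print(x.snode)            # snode is a property after this series, no longer a method call
    x[0, 0] = 1.0             # python-scope access goes through the SNode host accessors

    # Matrix field: ti.Matrix.field() returns a MatrixField.
    m = ti.Matrix.field(2, 2, dtype=ti.f32, shape=8)
    m.fill(1.0)                     # broadcasts a scalar to every matrix entry
    s01 = m.get_scalar_field(0, 1)  # per-member ScalarField, used e.g. for quant placement
    arr = m.to_numpy()              # numpy array of shape (8, 2, 2)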