From 4e9f1d56b9df21001589259bf58c12da72b6c389 Mon Sep 17 00:00:00 2001 From: archibate <1931127624@qq.com> Date: Mon, 29 Jun 2020 17:07:20 +0800 Subject: [PATCH 01/11] [Refactor] [lang] x.data_type() is deprecated, use x.dtype instead --- .gitignore | 2 ++ python/taichi/lang/expr.py | 15 ++++++++++----- python/taichi/lang/snode.py | 7 ++++++- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index 50ccd25a77145..da03592f06274 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ *.swp *.swo /.vs +/.*_localrc +/tags /Debug *.sdf /x64 diff --git a/python/taichi/lang/expr.py b/python/taichi/lang/expr.py index 9a6fb90f40dff..09befef209304 100644 --- a/python/taichi/lang/expr.py +++ b/python/taichi/lang/expr.py @@ -68,7 +68,7 @@ def initialize_accessor(self): return snode = self.ptr.snode() - if self.snode().data_type() == f32 or self.snode().data_type() == f64: + if self.dtype == f32 or self.dtype == f64: def getter(*key): assert len(key) == taichi_lang_core.get_max_num_indices() @@ -78,7 +78,7 @@ def setter(value, *key): assert len(key) == taichi_lang_core.get_max_num_indices() snode.write_float(key, value) else: - if taichi_lang_core.is_signed(self.snode().data_type()): + if taichi_lang_core.is_signed(self.dtype): def getter(*key): assert len(key) == taichi_lang_core.get_max_num_indices() @@ -135,15 +135,20 @@ def shape(self): def dim(self): return len(self.shape) + @property + def dtype(self): + return self.snode().dtype + + @deprecated('x.data_type()', 'x.dtype'): def data_type(self): - return self.snode().data_type() + return self.snode().dtype @python_scope def to_numpy(self): from .meta import tensor_to_ext_arr import numpy as np arr = np.zeros(shape=self.shape, - dtype=to_numpy_type(self.snode().data_type())) + dtype=to_numpy_type(self.dtype)) tensor_to_ext_arr(self, arr) import taichi as ti ti.sync() @@ -154,7 +159,7 @@ def to_torch(self, device=None): from .meta import tensor_to_ext_arr import torch arr = torch.zeros(size=self.shape, - dtype=to_pytorch_type(self.snode().data_type()), + dtype=to_pytorch_type(self.dtype), device=device) tensor_to_ext_arr(self, arr) import taichi as ti diff --git a/python/taichi/lang/snode.py b/python/taichi/lang/snode.py index 0c0cc54113493..3b84d328625ce 100644 --- a/python/taichi/lang/snode.py +++ b/python/taichi/lang/snode.py @@ -64,9 +64,14 @@ def parent(self, n=1): return impl.root return SNode(p) - def data_type(self): + @property + def dtype(self): return self.ptr.data_type() + @deprecated('x.data_type()', 'x.dtype') + def data_type(self): + return self.dtype + @deprecated('x.dim()', 'len(x.shape)') def dim(self): return len(self.shape) From 356999ec93b97b739db226659aeac43da3e304c0 Mon Sep 17 00:00:00 2001 From: archibate <1931127624@qq.com> Date: Mon, 29 Jun 2020 17:50:16 +0800 Subject: [PATCH 02/11] improve doc --- docs/meta.rst | 131 ++++++++++++++++++++++++++----------- docs/scalar_tensor.rst | 28 ++------ docs/snode.rst | 50 ++++---------- docs/vector.rst | 3 + python/taichi/lang/expr.py | 2 +- 5 files changed, 118 insertions(+), 96 deletions(-) diff --git a/docs/meta.rst b/docs/meta.rst index 3ced4c60ed97e..d79761de5a166 100644 --- a/docs/meta.rst +++ b/docs/meta.rst @@ -17,6 +17,9 @@ Taichi kernels are *lazily instantiated* and a lot of computation can happen at Template metaprogramming ------------------------ +Taichi tensors oftenly are used as globals. But you may use ``ti.template()`` +as type hint to pass a tensor as argument. For example: + .. 
code-block:: python @ti.kernel @@ -24,48 +27,114 @@ Template metaprogramming for i in x: y[i] = x[i] + a = ti.var(ti.f32, 4) + b = ti.var(ti.f32, 4) + c = ti.var(ti.f32, 12) + d = ti.var(ti.f32, 12) + copy(a, b) + copy(c, d) + + +As shown by the above example, template programming may enable us to reuse our +code and improve better flexibility. + Dimensionality-independent programming using grouped indices ------------------------------------------------------------ +However, the ``copy`` template shown above is not perfect, i.e., it can only be +used to copy 1D tensors. What if we want to copy 2D tensors? Do we have to write +another kernel? + +.. code-block:: python + + @ti.kernel + def copy2d(x: ti.template(), y: ti.template()): + for i, j in x: + y[i, j] = x[i, j] + +Not necessary! Taichi provides ``ti.grouped`` syntax which enable you to get +loop indices into a grouped vector, therefore unify different dimensionalities. +For example: + .. code-block:: python @ti.kernel def copy(x: ti.template(), y: ti.template()): for I in ti.grouped(y): + # I is a vector with same dimensionality with x and data type i32 + # If y is 0D, then I = None + # If y is 1D, then I = ti.Vector([i]) + # If y is 2D, then I = ti.Vector([i, j]) + # If y is 3D, then I = ti.Vector([i, j, k]) + # ... x[I] = y[I] @ti.kernel def array_op(x: ti.template(), y: ti.template()): - # If tensor x is 2D - for I in ti.grouped(x): # I is a vector of size x.dim() and data type i32 + # if tensor x is 2D: + for I in ti.grouped(x): # I is simply a 2D vector with x data type i32 y[I + ti.Vector([0, 1])] = I[0] + I[1] - # is equivalent to + + # then it is equivalent to: for i, j in x: y[i, j + 1] = i + j -Tensor size reflection ----------------------- -Sometimes it will be useful to get the dimensionality (``tensor.dim()``) and shape (``tensor.shape()``) of tensors. -These functions can be used in both Taichi kernels and python scripts. +Tensor meta data +---------------- + +Sometimes it will be useful to get the data type (``tensor.dtype``) and shape (``tensor.shape``) of tensors. +These attributes can be accessed in both Taichi kernels and python scripts. .. code-block:: python @ti.func - def print_tensor_size(x: ti.template()): - print(x.dim()) - for i in ti.static(range(x.dim())): - print(x.shape()[i]) + def print_tensor_info(x: ti.template()): + print('Tensor dimensionality is', len(x.shape)) + for i in ti.static(range(len(x.shape))): + print('Size alone dimension', i, 'is', x.shape[i]) + ti.static_print('Tensor data type is', x.dtype) + +See :ref:`scalar_tensor` for more details. + +.. note:: + + For sparse tensors, the full domain shape will be returned. + + +Matrix & vector meta data +------------------------- + +Sometimes it will also be useful to get the matrix column and row numbers when +you want to write dimensionality-independent code, such as reusing code between +2D/3D physical simulations. + +``matrix.m`` equals to the column number of matrix, while ``matrix.n`` equals to +the row number of matrix. +Since vectors are considered as matrices with one column, ``vector.n`` is simply +the dimensionality of vector. + +.. code-block:: python + + @ti.kernel + def foo(): + matrix = ti.Matrix([[1, 2], [3, 4], [5, 6]]) + print(matrix.n) # 2 + print(matrix.m) # 3 + vector = ti.Vector([7, 8, 9]) + print(vector.n) # 3 + print(vector.m) # 1 + -For sparse tensors, the full domain shape will be returned. 
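+
+Building on the metadata above, a helper that works for vectors of any
+dimensionality can simply unroll over ``vector.n``. This is only a sketch (the
+helper name ``total`` is illustrative, not an existing Taichi API):
+
+.. code-block:: python
+
+    @ti.func
+    def total(v):
+        # Unrolled at compile time over the vector's dimensionality ``v.n``,
+        # so the same function works for both 2D and 3D vectors.
+        s = 0.0
+        for i in ti.static(range(v.n)):
+            s += v[i]
+        return s
+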
Compile-time evaluations ------------------------ + Using compile-time evaluation will allow certain computations to happen when kernels are being instantiated. This saves the overhead of those computations at runtime. -* Use ``ti.static`` for compile-time branching (for those who come from C++17, this is `if constexpr `_.) +* Use ``ti.static`` for compile-time branching (for those who come from C++17, this is `if constexpr `_.): .. code-block:: python @@ -77,32 +146,20 @@ This saves the overhead of those computations at runtime. x[0] = 1 -* Use ``ti.static`` for forced loop unrolling +* Use ``ti.static`` for forced loop unrolling: .. code-block:: python - @ti.kernel - def g2p(f: ti.i32): - for p in range(0, n_particles): - base = ti.cast(x[f, p] * inv_dx - 0.5, ti.i32) - fx = x[f, p] * inv_dx - ti.cast(base, real) - w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1.0), - 0.5 * ti.sqr(fx - 0.5)] - new_v = ti.Vector([0.0, 0.0]) - new_C = ti.Matrix([[0.0, 0.0], [0.0, 0.0]]) - - # Unrolled 9 iterations for higher performance - for i in ti.static(range(3)): - for j in ti.static(range(3)): - dpos = ti.cast(ti.Vector([i, j]), real) - fx - g_v = grid_v_out[base(0) + i, base(1) + j] - weight = w[i](0) * w[j](1) - new_v += weight * g_v - new_C += 4 * weight * ti.outer_product(g_v, dpos) * inv_dx - - v[f + 1, p] = new_v - x[f + 1, p] = x[f, p] + dt * v[f + 1, p] - C[f + 1, p] = new_C + @ti.kernel + def func(): + for i in ti.static(range(4)): + print(i) + + # is equivalent to: + print(0) + print(1) + print(2) + print(3) When to use for loops with ``ti.static`` @@ -120,7 +177,7 @@ For example, code for resetting this tensor of vectors should be @ti.kernel def reset(): for i in x: - for j in ti.static(range(3)): + for j in ti.static(range(x.n)): # The inner loop must be unrolled since j is a vector index instead # of a global tensor index. x[i][j] = 0 diff --git a/docs/scalar_tensor.rst b/docs/scalar_tensor.rst index db27b56e62939..e1b326f3329fd 100644 --- a/docs/scalar_tensor.rst +++ b/docs/scalar_tensor.rst @@ -107,24 +107,8 @@ You can access an element of the Taichi tensor by an index or indices. Meta data --------- -.. function:: a.dim() - :parameter a: (Tensor) the tensor - :return: (scalar) the length of ``a`` - - :: - - x = ti.var(ti.i32, (6, 5)) - x.dim() # 2 - - y = ti.var(ti.i32, 6) - y.dim() # 1 - - z = ti.var(ti.i32, ()) - z.dim() # 0 - - -.. function:: a.shape() +.. attribute:: a.shape :parameter a: (Tensor) the tensor :return: (tuple) the shape of tensor ``a`` @@ -132,16 +116,16 @@ Meta data :: x = ti.var(ti.i32, (6, 5)) - x.shape() # (6, 5) + x.shape # (6, 5) y = ti.var(ti.i32, 6) - y.shape() # (6,) + y.shape # (6,) z = ti.var(ti.i32, ()) - z.shape() # () + z.shape # () -.. function:: a.data_type() +.. function:: a.dtype :parameter a: (Tensor) the tensor :return: (DataType) the data type of ``a`` @@ -149,7 +133,7 @@ Meta data :: x = ti.var(ti.i32, (2, 3)) - x.data_type() # ti.i32 + x.dtype # ti.i32 .. function:: a.parent(n = 1) diff --git a/docs/snode.rst b/docs/snode.rst index c8db73bd2ea6c..368c01f0a309b 100644 --- a/docs/snode.rst +++ b/docs/snode.rst @@ -33,32 +33,19 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str assert x.snode() == y.snode() -.. function:: tensor.shape() +.. function:: tensor.shape :parameter tensor: (Tensor) :return: (tuple of integers) the shape of tensor - Equivalent to ``tensor.snode().shape()``. + Equivalent to ``tensor.snode().shape``. 
For example, :: ti.root.dense(ti.ijk, (3, 5, 4)).place(x) - x.shape() # returns (3, 5, 4) - - -.. function:: tensor.dim() - - :parameter tensor: (Tensor) - :return: (scalar) the dimensionality of the tensor - - Equivalent to ``len(tensor.shape())``. - - :: - - ti.root.dense(ti.ijk, (8, 9, 10)).place(x) - x.dim() # 3 + x.shape # returns (3, 5, 4) .. function:: tensor.snode() @@ -74,7 +61,7 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str x.snode() -.. function:: snode.shape() +.. function:: snode.shape :parameter snode: (SNode) :return: (tuple) the size of node along that axis @@ -85,29 +72,16 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str blk2 = blk1.dense(ti.i, 3) blk3 = blk2.dense(ti.jk, (5, 2)) blk4 = blk3.dense(ti.k, 2) - blk1.shape() # () - blk2.shape() # (3, ) - blk3.shape() # (3, 5, 2) - blk4.shape() # (3, 5, 4) - - -.. function:: snode.dim() - - :parameter snode: (SNode) - :return: (scalar) the dimensionality of ``snode`` - - Equivalent to ``len(snode.shape())``. - - :: - - blk1 = ti.root.dense(ti.ijk, (8, 9, 10)) - ti.root.dim() # 0 - blk1.dim() # 3 + blk1.shape # () + blk2.shape # (3, ) + blk3.shape # (3, 5, 2) + blk4.shape # (3, 5, 4) -.. function:: snode.parent() +.. function:: snode.parent(n = 1) :parameter snode: (SNode) + :parameter n: (optional, scalar) the number of parent steps, i.e. ``n=1`` for parent, ``n=2`` grandparent, etc. :return: (SNode) the parent node of ``snode`` :: @@ -118,6 +92,10 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str blk1.parent() # ti.root blk2.parent() # blk1 blk3.parent() # blk2 + blk3.parent(1) # blk2 + blk3.parent(2) # blk1 + blk3.parent(3) # ti.root + blk3.parent(4) # None Node types diff --git a/docs/vector.rst b/docs/vector.rst index 7e944db5c2ba4..49c4db885f02b 100644 --- a/docs/vector.rst +++ b/docs/vector.rst @@ -191,6 +191,9 @@ Methods Vectors are special matrices with only 1 column. In fact, ``ti.Vector`` is just an alias of ``ti.Matrix``. +Meta data +--------- + .. 
attribute:: a.n :parameter a: (Vector or tensor of Vector) diff --git a/python/taichi/lang/expr.py b/python/taichi/lang/expr.py index 09befef209304..cb747f57e8b11 100644 --- a/python/taichi/lang/expr.py +++ b/python/taichi/lang/expr.py @@ -139,7 +139,7 @@ def dim(self): def dtype(self): return self.snode().dtype - @deprecated('x.data_type()', 'x.dtype'): + @deprecated('x.data_type()', 'x.dtype') def data_type(self): return self.snode().dtype From 987b7387f40882d1cff08f3eaf61a7ad68a70cb3 Mon Sep 17 00:00:00 2001 From: archibate <1931127624@qq.com> Date: Mon, 29 Jun 2020 17:54:16 +0800 Subject: [PATCH 03/11] did deprecate --- python/taichi/lang/matrix.py | 16 +++++++++------- python/taichi/misc/gui.py | 4 ++-- tests/python/test_tensor_reflection.py | 10 ++++++---- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py index 42392125f658b..8c787bd90828b 100644 --- a/python/taichi/lang/matrix.py +++ b/python/taichi/lang/matrix.py @@ -522,8 +522,13 @@ def shape(self): def dim(self): return len(self.shape) + @property + def dtype(self): + return self.loop_range().dtype + + @deprecated('x.data_type()', 'x.dtype') def data_type(self): - return self.loop_range().data_type() + return self.dtype def make_grad(self): ret = self.empty_copy() @@ -624,9 +629,7 @@ def to_numpy(self, keep_dims=False, as_vector=None): if not self.is_global(): return np.array(self.entries).reshape(shape_ext) - ret = np.empty(self.loop_range().shape + shape_ext, - dtype=to_numpy_type( - self.loop_range().snode().data_type())) + ret = np.empty(self.shape + shape_ext, dtype=to_numpy_type(self.dtype) from .meta import matrix_to_ext_arr matrix_to_ext_arr(self, ret, as_vector) import taichi as ti @@ -638,9 +641,8 @@ def to_torch(self, device=None, keep_dims=False): import torch as_vector = self.m == 1 and not keep_dims shape_ext = (self.n, ) if as_vector else (self.n, self.m) - ret = torch.empty(self.loop_range().shape + shape_ext, - dtype=to_pytorch_type( - self.loop_range().snode().data_type()), + ret = torch.empty(self.shape + shape_ext, + dtype=to_pytorch_type(self.dtype), device=device) from .meta import matrix_to_ext_arr matrix_to_ext_arr(self, ret, as_vector) diff --git a/python/taichi/misc/gui.py b/python/taichi/misc/gui.py index 809a49ba9202a..0c8d2925694c2 100644 --- a/python/taichi/misc/gui.py +++ b/python/taichi/misc/gui.py @@ -80,7 +80,7 @@ def set_image(self, img): import taichi as ti if isinstance(img, ti.Expr): - if ti.core.is_integral(img.data_type()) or len(img.shape) != 2: + if ti.core.is_integral(img.dtype) or len(img.shape) != 2: # Images of uint is not optimized by xxx_to_image self.img = self.cook_image(img.to_numpy()) else: @@ -92,7 +92,7 @@ def set_image(self, img): ti.sync() elif isinstance(img, ti.Matrix): - if ti.core.is_integral(img.data_type()): + if ti.core.is_integral(img.dtype): self.img = self.cook_image(img.to_numpy()) else: # Type matched! We can use an optimized copy kernel. 
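
For context, a rough sketch of the ``deprecated`` decorator these call sites
rely on (assumed behavior only; the real helper in Taichi's utility module may
be implemented differently):

.. code-block:: python

    import functools
    import warnings

    def deprecated(old, new):
        # Decorator factory: warn that ``old`` should be replaced by ``new``,
        # then forward the call to the wrapped function unchanged.
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                warnings.warn('{} is deprecated, use {} instead'.format(old, new),
                              DeprecationWarning, stacklevel=2)
                return func(*args, **kwargs)
            return wrapper
        return decorator
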
diff --git a/tests/python/test_tensor_reflection.py b/tests/python/test_tensor_reflection.py index 9d9f833d1084f..92a8e72059048 100644 --- a/tests/python/test_tensor_reflection.py +++ b/tests/python/test_tensor_reflection.py @@ -13,7 +13,7 @@ def test_POT(): ti.root.dense(ti.i, n).dense(ti.j, m).dense(ti.k, p).place(val) assert val.shape == (n, m, p) - assert val.data_type() == ti.i32 + assert val.dtype == ti.i32 @ti.all_archs @@ -30,7 +30,7 @@ def test_non_POT(): blk3.place(val) assert val.shape == (n, m, p) - assert val.data_type() == ti.i32 + assert val.dtype == ti.i32 @ti.all_archs @@ -46,7 +46,7 @@ def test_unordered(): blk3 = blk2.dense(ti.j, p) blk3.place(val) - assert val.data_type() == ti.i32 + assert val.dtype == ti.i32 assert val.shape == (n, m, p) assert val.snode().parent(0) == val.snode() assert val.snode().parent() == blk3 @@ -79,7 +79,7 @@ def test_unordered_matrix(): blk3.place(val) assert val.shape == (n, m, p) - assert val.data_type() == ti.i32 + assert val.dtype == ti.i32 assert val.loop_range().snode().parent(0) == val.loop_range().snode() assert val.loop_range().snode().parent() == blk3 assert val.loop_range().snode().parent(1) == blk3 @@ -104,8 +104,10 @@ def test_deprecated(): blk3.place(val, mat) assert val.dim() == 3 + assert val.data_type() == ti.f32 assert val.shape() == (n, m, p) assert mat.dim() == 3 + assert mat.data_type() == ti.i32 assert mat.shape() == (n, m, p) assert blk3.dim() == 3 assert blk3.shape() == (n, m, p) From f63b83cfd33e5053582955daa4c7d6c10be412f3 Mon Sep 17 00:00:00 2001 From: archibate <1931127624@qq.com> Date: Thu, 2 Jul 2020 12:56:43 +0800 Subject: [PATCH 04/11] fix --- python/taichi/lang/matrix.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py index 8c787bd90828b..086cd1619fc0a 100644 --- a/python/taichi/lang/matrix.py +++ b/python/taichi/lang/matrix.py @@ -629,7 +629,7 @@ def to_numpy(self, keep_dims=False, as_vector=None): if not self.is_global(): return np.array(self.entries).reshape(shape_ext) - ret = np.empty(self.shape + shape_ext, dtype=to_numpy_type(self.dtype) + ret = np.empty(self.shape + shape_ext, dtype=to_numpy_type(self.dtype)) from .meta import matrix_to_ext_arr matrix_to_ext_arr(self, ret, as_vector) import taichi as ti From 0f34e4fc2f7f1362492164c7e4d5a6f26b8f6d11 Mon Sep 17 00:00:00 2001 From: Taichi Gardener Date: Thu, 2 Jul 2020 00:58:22 -0400 Subject: [PATCH 05/11] [skip ci] enforce code format --- python/taichi/lang/expr.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/python/taichi/lang/expr.py b/python/taichi/lang/expr.py index cb747f57e8b11..9ab2e0c842304 100644 --- a/python/taichi/lang/expr.py +++ b/python/taichi/lang/expr.py @@ -147,8 +147,7 @@ def data_type(self): def to_numpy(self): from .meta import tensor_to_ext_arr import numpy as np - arr = np.zeros(shape=self.shape, - dtype=to_numpy_type(self.dtype)) + arr = np.zeros(shape=self.shape, dtype=to_numpy_type(self.dtype)) tensor_to_ext_arr(self, arr) import taichi as ti ti.sync() From 30d8f1cb1b9928b23154a71fca154dfa72d34cbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BD=AD=E4=BA=8E=E6=96=8C?= <1931127624@qq.com> Date: Fri, 3 Jul 2020 23:21:49 +0800 Subject: [PATCH 06/11] [skip ci] Update docs/vector.rst Co-authored-by: Yuanming Hu --- docs/vector.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/vector.rst b/docs/vector.rst index 49c4db885f02b..e7bcea087b18b 100644 --- a/docs/vector.rst +++ b/docs/vector.rst @@ -191,8 +191,8 @@ 
Methods Vectors are special matrices with only 1 column. In fact, ``ti.Vector`` is just an alias of ``ti.Matrix``. -Meta data ---------- +Metadata +-------- .. attribute:: a.n From 0d3efb6d37fe7e3e67f42f6e97bbc9805a6fb1f8 Mon Sep 17 00:00:00 2001 From: archibate <1931127624@qq.com> Date: Fri, 3 Jul 2020 23:23:24 +0800 Subject: [PATCH 07/11] revert doc --- docs/meta.rst | 131 ++++++++++++----------------------------- docs/scalar_tensor.rst | 28 +++++++-- docs/snode.rst | 50 +++++++++++----- docs/vector.rst | 3 - 4 files changed, 95 insertions(+), 117 deletions(-) diff --git a/docs/meta.rst b/docs/meta.rst index d79761de5a166..3ced4c60ed97e 100644 --- a/docs/meta.rst +++ b/docs/meta.rst @@ -17,9 +17,6 @@ Taichi kernels are *lazily instantiated* and a lot of computation can happen at Template metaprogramming ------------------------ -Taichi tensors oftenly are used as globals. But you may use ``ti.template()`` -as type hint to pass a tensor as argument. For example: - .. code-block:: python @ti.kernel @@ -27,114 +24,48 @@ as type hint to pass a tensor as argument. For example: for i in x: y[i] = x[i] - a = ti.var(ti.f32, 4) - b = ti.var(ti.f32, 4) - c = ti.var(ti.f32, 12) - d = ti.var(ti.f32, 12) - copy(a, b) - copy(c, d) - - -As shown by the above example, template programming may enable us to reuse our -code and improve better flexibility. - Dimensionality-independent programming using grouped indices ------------------------------------------------------------ -However, the ``copy`` template shown above is not perfect, i.e., it can only be -used to copy 1D tensors. What if we want to copy 2D tensors? Do we have to write -another kernel? - -.. code-block:: python - - @ti.kernel - def copy2d(x: ti.template(), y: ti.template()): - for i, j in x: - y[i, j] = x[i, j] - -Not necessary! Taichi provides ``ti.grouped`` syntax which enable you to get -loop indices into a grouped vector, therefore unify different dimensionalities. -For example: - .. code-block:: python @ti.kernel def copy(x: ti.template(), y: ti.template()): for I in ti.grouped(y): - # I is a vector with same dimensionality with x and data type i32 - # If y is 0D, then I = None - # If y is 1D, then I = ti.Vector([i]) - # If y is 2D, then I = ti.Vector([i, j]) - # If y is 3D, then I = ti.Vector([i, j, k]) - # ... x[I] = y[I] @ti.kernel def array_op(x: ti.template(), y: ti.template()): - # if tensor x is 2D: - for I in ti.grouped(x): # I is simply a 2D vector with x data type i32 + # If tensor x is 2D + for I in ti.grouped(x): # I is a vector of size x.dim() and data type i32 y[I + ti.Vector([0, 1])] = I[0] + I[1] - - # then it is equivalent to: + # is equivalent to for i, j in x: y[i, j + 1] = i + j +Tensor size reflection +---------------------- -Tensor meta data ----------------- - -Sometimes it will be useful to get the data type (``tensor.dtype``) and shape (``tensor.shape``) of tensors. -These attributes can be accessed in both Taichi kernels and python scripts. +Sometimes it will be useful to get the dimensionality (``tensor.dim()``) and shape (``tensor.shape()``) of tensors. +These functions can be used in both Taichi kernels and python scripts. .. code-block:: python @ti.func - def print_tensor_info(x: ti.template()): - print('Tensor dimensionality is', len(x.shape)) - for i in ti.static(range(len(x.shape))): - print('Size alone dimension', i, 'is', x.shape[i]) - ti.static_print('Tensor data type is', x.dtype) - -See :ref:`scalar_tensor` for more details. - -.. 
note:: - - For sparse tensors, the full domain shape will be returned. - - -Matrix & vector meta data -------------------------- - -Sometimes it will also be useful to get the matrix column and row numbers when -you want to write dimensionality-independent code, such as reusing code between -2D/3D physical simulations. - -``matrix.m`` equals to the column number of matrix, while ``matrix.n`` equals to -the row number of matrix. -Since vectors are considered as matrices with one column, ``vector.n`` is simply -the dimensionality of vector. - -.. code-block:: python - - @ti.kernel - def foo(): - matrix = ti.Matrix([[1, 2], [3, 4], [5, 6]]) - print(matrix.n) # 2 - print(matrix.m) # 3 - vector = ti.Vector([7, 8, 9]) - print(vector.n) # 3 - print(vector.m) # 1 - + def print_tensor_size(x: ti.template()): + print(x.dim()) + for i in ti.static(range(x.dim())): + print(x.shape()[i]) +For sparse tensors, the full domain shape will be returned. Compile-time evaluations ------------------------ - Using compile-time evaluation will allow certain computations to happen when kernels are being instantiated. This saves the overhead of those computations at runtime. -* Use ``ti.static`` for compile-time branching (for those who come from C++17, this is `if constexpr `_.): +* Use ``ti.static`` for compile-time branching (for those who come from C++17, this is `if constexpr `_.) .. code-block:: python @@ -146,20 +77,32 @@ This saves the overhead of those computations at runtime. x[0] = 1 -* Use ``ti.static`` for forced loop unrolling: +* Use ``ti.static`` for forced loop unrolling .. code-block:: python - @ti.kernel - def func(): - for i in ti.static(range(4)): - print(i) - - # is equivalent to: - print(0) - print(1) - print(2) - print(3) + @ti.kernel + def g2p(f: ti.i32): + for p in range(0, n_particles): + base = ti.cast(x[f, p] * inv_dx - 0.5, ti.i32) + fx = x[f, p] * inv_dx - ti.cast(base, real) + w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1.0), + 0.5 * ti.sqr(fx - 0.5)] + new_v = ti.Vector([0.0, 0.0]) + new_C = ti.Matrix([[0.0, 0.0], [0.0, 0.0]]) + + # Unrolled 9 iterations for higher performance + for i in ti.static(range(3)): + for j in ti.static(range(3)): + dpos = ti.cast(ti.Vector([i, j]), real) - fx + g_v = grid_v_out[base(0) + i, base(1) + j] + weight = w[i](0) * w[j](1) + new_v += weight * g_v + new_C += 4 * weight * ti.outer_product(g_v, dpos) * inv_dx + + v[f + 1, p] = new_v + x[f + 1, p] = x[f, p] + dt * v[f + 1, p] + C[f + 1, p] = new_C When to use for loops with ``ti.static`` @@ -177,7 +120,7 @@ For example, code for resetting this tensor of vectors should be @ti.kernel def reset(): for i in x: - for j in ti.static(range(x.n)): + for j in ti.static(range(3)): # The inner loop must be unrolled since j is a vector index instead # of a global tensor index. x[i][j] = 0 diff --git a/docs/scalar_tensor.rst b/docs/scalar_tensor.rst index e1b326f3329fd..db27b56e62939 100644 --- a/docs/scalar_tensor.rst +++ b/docs/scalar_tensor.rst @@ -107,8 +107,24 @@ You can access an element of the Taichi tensor by an index or indices. Meta data --------- +.. function:: a.dim() -.. attribute:: a.shape + :parameter a: (Tensor) the tensor + :return: (scalar) the length of ``a`` + + :: + + x = ti.var(ti.i32, (6, 5)) + x.dim() # 2 + + y = ti.var(ti.i32, 6) + y.dim() # 1 + + z = ti.var(ti.i32, ()) + z.dim() # 0 + + +.. 
function:: a.shape() :parameter a: (Tensor) the tensor :return: (tuple) the shape of tensor ``a`` @@ -116,16 +132,16 @@ Meta data :: x = ti.var(ti.i32, (6, 5)) - x.shape # (6, 5) + x.shape() # (6, 5) y = ti.var(ti.i32, 6) - y.shape # (6,) + y.shape() # (6,) z = ti.var(ti.i32, ()) - z.shape # () + z.shape() # () -.. function:: a.dtype +.. function:: a.data_type() :parameter a: (Tensor) the tensor :return: (DataType) the data type of ``a`` @@ -133,7 +149,7 @@ Meta data :: x = ti.var(ti.i32, (2, 3)) - x.dtype # ti.i32 + x.data_type() # ti.i32 .. function:: a.parent(n = 1) diff --git a/docs/snode.rst b/docs/snode.rst index 368c01f0a309b..c8db73bd2ea6c 100644 --- a/docs/snode.rst +++ b/docs/snode.rst @@ -33,19 +33,32 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str assert x.snode() == y.snode() -.. function:: tensor.shape +.. function:: tensor.shape() :parameter tensor: (Tensor) :return: (tuple of integers) the shape of tensor - Equivalent to ``tensor.snode().shape``. + Equivalent to ``tensor.snode().shape()``. For example, :: ti.root.dense(ti.ijk, (3, 5, 4)).place(x) - x.shape # returns (3, 5, 4) + x.shape() # returns (3, 5, 4) + + +.. function:: tensor.dim() + + :parameter tensor: (Tensor) + :return: (scalar) the dimensionality of the tensor + + Equivalent to ``len(tensor.shape())``. + + :: + + ti.root.dense(ti.ijk, (8, 9, 10)).place(x) + x.dim() # 3 .. function:: tensor.snode() @@ -61,7 +74,7 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str x.snode() -.. function:: snode.shape +.. function:: snode.shape() :parameter snode: (SNode) :return: (tuple) the size of node along that axis @@ -72,16 +85,29 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str blk2 = blk1.dense(ti.i, 3) blk3 = blk2.dense(ti.jk, (5, 2)) blk4 = blk3.dense(ti.k, 2) - blk1.shape # () - blk2.shape # (3, ) - blk3.shape # (3, 5, 2) - blk4.shape # (3, 5, 4) + blk1.shape() # () + blk2.shape() # (3, ) + blk3.shape() # (3, 5, 2) + blk4.shape() # (3, 5, 4) + + +.. function:: snode.dim() + + :parameter snode: (SNode) + :return: (scalar) the dimensionality of ``snode`` + + Equivalent to ``len(snode.shape())``. + + :: + + blk1 = ti.root.dense(ti.ijk, (8, 9, 10)) + ti.root.dim() # 0 + blk1.dim() # 3 -.. function:: snode.parent(n = 1) +.. function:: snode.parent() :parameter snode: (SNode) - :parameter n: (optional, scalar) the number of parent steps, i.e. ``n=1`` for parent, ``n=2`` grandparent, etc. :return: (SNode) the parent node of ``snode`` :: @@ -92,10 +118,6 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str blk1.parent() # ti.root blk2.parent() # blk1 blk3.parent() # blk2 - blk3.parent(1) # blk2 - blk3.parent(2) # blk1 - blk3.parent(3) # ti.root - blk3.parent(4) # None Node types diff --git a/docs/vector.rst b/docs/vector.rst index e7bcea087b18b..7e944db5c2ba4 100644 --- a/docs/vector.rst +++ b/docs/vector.rst @@ -191,9 +191,6 @@ Methods Vectors are special matrices with only 1 column. In fact, ``ti.Vector`` is just an alias of ``ti.Matrix``. -Metadata --------- - .. attribute:: a.n :parameter a: (Vector or tensor of Vector) From 38cf03e6e783ac702eea2911fbc806168836b9f1 Mon Sep 17 00:00:00 2001 From: archibate <1931127624@qq.com> Date: Fri, 3 Jul 2020 23:24:03 +0800 Subject: [PATCH 08/11] Revert "revert doc" This reverts commit 0d3efb6d37fe7e3e67f42f6e97bbc9805a6fb1f8. 
--- docs/meta.rst | 131 +++++++++++++++++++++++++++++------------ docs/scalar_tensor.rst | 28 ++------- docs/snode.rst | 50 +++++----------- docs/vector.rst | 3 + 4 files changed, 117 insertions(+), 95 deletions(-) diff --git a/docs/meta.rst b/docs/meta.rst index 3ced4c60ed97e..d79761de5a166 100644 --- a/docs/meta.rst +++ b/docs/meta.rst @@ -17,6 +17,9 @@ Taichi kernels are *lazily instantiated* and a lot of computation can happen at Template metaprogramming ------------------------ +Taichi tensors oftenly are used as globals. But you may use ``ti.template()`` +as type hint to pass a tensor as argument. For example: + .. code-block:: python @ti.kernel @@ -24,48 +27,114 @@ Template metaprogramming for i in x: y[i] = x[i] + a = ti.var(ti.f32, 4) + b = ti.var(ti.f32, 4) + c = ti.var(ti.f32, 12) + d = ti.var(ti.f32, 12) + copy(a, b) + copy(c, d) + + +As shown by the above example, template programming may enable us to reuse our +code and improve better flexibility. + Dimensionality-independent programming using grouped indices ------------------------------------------------------------ +However, the ``copy`` template shown above is not perfect, i.e., it can only be +used to copy 1D tensors. What if we want to copy 2D tensors? Do we have to write +another kernel? + +.. code-block:: python + + @ti.kernel + def copy2d(x: ti.template(), y: ti.template()): + for i, j in x: + y[i, j] = x[i, j] + +Not necessary! Taichi provides ``ti.grouped`` syntax which enable you to get +loop indices into a grouped vector, therefore unify different dimensionalities. +For example: + .. code-block:: python @ti.kernel def copy(x: ti.template(), y: ti.template()): for I in ti.grouped(y): + # I is a vector with same dimensionality with x and data type i32 + # If y is 0D, then I = None + # If y is 1D, then I = ti.Vector([i]) + # If y is 2D, then I = ti.Vector([i, j]) + # If y is 3D, then I = ti.Vector([i, j, k]) + # ... x[I] = y[I] @ti.kernel def array_op(x: ti.template(), y: ti.template()): - # If tensor x is 2D - for I in ti.grouped(x): # I is a vector of size x.dim() and data type i32 + # if tensor x is 2D: + for I in ti.grouped(x): # I is simply a 2D vector with x data type i32 y[I + ti.Vector([0, 1])] = I[0] + I[1] - # is equivalent to + + # then it is equivalent to: for i, j in x: y[i, j + 1] = i + j -Tensor size reflection ----------------------- -Sometimes it will be useful to get the dimensionality (``tensor.dim()``) and shape (``tensor.shape()``) of tensors. -These functions can be used in both Taichi kernels and python scripts. +Tensor meta data +---------------- + +Sometimes it will be useful to get the data type (``tensor.dtype``) and shape (``tensor.shape``) of tensors. +These attributes can be accessed in both Taichi kernels and python scripts. .. code-block:: python @ti.func - def print_tensor_size(x: ti.template()): - print(x.dim()) - for i in ti.static(range(x.dim())): - print(x.shape()[i]) + def print_tensor_info(x: ti.template()): + print('Tensor dimensionality is', len(x.shape)) + for i in ti.static(range(len(x.shape))): + print('Size alone dimension', i, 'is', x.shape[i]) + ti.static_print('Tensor data type is', x.dtype) + +See :ref:`scalar_tensor` for more details. + +.. note:: + + For sparse tensors, the full domain shape will be returned. + + +Matrix & vector meta data +------------------------- + +Sometimes it will also be useful to get the matrix column and row numbers when +you want to write dimensionality-independent code, such as reusing code between +2D/3D physical simulations. 
+ +``matrix.m`` equals to the column number of matrix, while ``matrix.n`` equals to +the row number of matrix. +Since vectors are considered as matrices with one column, ``vector.n`` is simply +the dimensionality of vector. + +.. code-block:: python + + @ti.kernel + def foo(): + matrix = ti.Matrix([[1, 2], [3, 4], [5, 6]]) + print(matrix.n) # 2 + print(matrix.m) # 3 + vector = ti.Vector([7, 8, 9]) + print(vector.n) # 3 + print(vector.m) # 1 + -For sparse tensors, the full domain shape will be returned. Compile-time evaluations ------------------------ + Using compile-time evaluation will allow certain computations to happen when kernels are being instantiated. This saves the overhead of those computations at runtime. -* Use ``ti.static`` for compile-time branching (for those who come from C++17, this is `if constexpr `_.) +* Use ``ti.static`` for compile-time branching (for those who come from C++17, this is `if constexpr `_.): .. code-block:: python @@ -77,32 +146,20 @@ This saves the overhead of those computations at runtime. x[0] = 1 -* Use ``ti.static`` for forced loop unrolling +* Use ``ti.static`` for forced loop unrolling: .. code-block:: python - @ti.kernel - def g2p(f: ti.i32): - for p in range(0, n_particles): - base = ti.cast(x[f, p] * inv_dx - 0.5, ti.i32) - fx = x[f, p] * inv_dx - ti.cast(base, real) - w = [0.5 * ti.sqr(1.5 - fx), 0.75 - ti.sqr(fx - 1.0), - 0.5 * ti.sqr(fx - 0.5)] - new_v = ti.Vector([0.0, 0.0]) - new_C = ti.Matrix([[0.0, 0.0], [0.0, 0.0]]) - - # Unrolled 9 iterations for higher performance - for i in ti.static(range(3)): - for j in ti.static(range(3)): - dpos = ti.cast(ti.Vector([i, j]), real) - fx - g_v = grid_v_out[base(0) + i, base(1) + j] - weight = w[i](0) * w[j](1) - new_v += weight * g_v - new_C += 4 * weight * ti.outer_product(g_v, dpos) * inv_dx - - v[f + 1, p] = new_v - x[f + 1, p] = x[f, p] + dt * v[f + 1, p] - C[f + 1, p] = new_C + @ti.kernel + def func(): + for i in ti.static(range(4)): + print(i) + + # is equivalent to: + print(0) + print(1) + print(2) + print(3) When to use for loops with ``ti.static`` @@ -120,7 +177,7 @@ For example, code for resetting this tensor of vectors should be @ti.kernel def reset(): for i in x: - for j in ti.static(range(3)): + for j in ti.static(range(x.n)): # The inner loop must be unrolled since j is a vector index instead # of a global tensor index. x[i][j] = 0 diff --git a/docs/scalar_tensor.rst b/docs/scalar_tensor.rst index db27b56e62939..e1b326f3329fd 100644 --- a/docs/scalar_tensor.rst +++ b/docs/scalar_tensor.rst @@ -107,24 +107,8 @@ You can access an element of the Taichi tensor by an index or indices. Meta data --------- -.. function:: a.dim() - :parameter a: (Tensor) the tensor - :return: (scalar) the length of ``a`` - - :: - - x = ti.var(ti.i32, (6, 5)) - x.dim() # 2 - - y = ti.var(ti.i32, 6) - y.dim() # 1 - - z = ti.var(ti.i32, ()) - z.dim() # 0 - - -.. function:: a.shape() +.. attribute:: a.shape :parameter a: (Tensor) the tensor :return: (tuple) the shape of tensor ``a`` @@ -132,16 +116,16 @@ Meta data :: x = ti.var(ti.i32, (6, 5)) - x.shape() # (6, 5) + x.shape # (6, 5) y = ti.var(ti.i32, 6) - y.shape() # (6,) + y.shape # (6,) z = ti.var(ti.i32, ()) - z.shape() # () + z.shape # () -.. function:: a.data_type() +.. function:: a.dtype :parameter a: (Tensor) the tensor :return: (DataType) the data type of ``a`` @@ -149,7 +133,7 @@ Meta data :: x = ti.var(ti.i32, (2, 3)) - x.data_type() # ti.i32 + x.dtype # ti.i32 .. 
function:: a.parent(n = 1) diff --git a/docs/snode.rst b/docs/snode.rst index c8db73bd2ea6c..368c01f0a309b 100644 --- a/docs/snode.rst +++ b/docs/snode.rst @@ -33,32 +33,19 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str assert x.snode() == y.snode() -.. function:: tensor.shape() +.. function:: tensor.shape :parameter tensor: (Tensor) :return: (tuple of integers) the shape of tensor - Equivalent to ``tensor.snode().shape()``. + Equivalent to ``tensor.snode().shape``. For example, :: ti.root.dense(ti.ijk, (3, 5, 4)).place(x) - x.shape() # returns (3, 5, 4) - - -.. function:: tensor.dim() - - :parameter tensor: (Tensor) - :return: (scalar) the dimensionality of the tensor - - Equivalent to ``len(tensor.shape())``. - - :: - - ti.root.dense(ti.ijk, (8, 9, 10)).place(x) - x.dim() # 3 + x.shape # returns (3, 5, 4) .. function:: tensor.snode() @@ -74,7 +61,7 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str x.snode() -.. function:: snode.shape() +.. function:: snode.shape :parameter snode: (SNode) :return: (tuple) the size of node along that axis @@ -85,29 +72,16 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str blk2 = blk1.dense(ti.i, 3) blk3 = blk2.dense(ti.jk, (5, 2)) blk4 = blk3.dense(ti.k, 2) - blk1.shape() # () - blk2.shape() # (3, ) - blk3.shape() # (3, 5, 2) - blk4.shape() # (3, 5, 4) - - -.. function:: snode.dim() - - :parameter snode: (SNode) - :return: (scalar) the dimensionality of ``snode`` - - Equivalent to ``len(snode.shape())``. - - :: - - blk1 = ti.root.dense(ti.ijk, (8, 9, 10)) - ti.root.dim() # 0 - blk1.dim() # 3 + blk1.shape # () + blk2.shape # (3, ) + blk3.shape # (3, 5, 2) + blk4.shape # (3, 5, 4) -.. function:: snode.parent() +.. function:: snode.parent(n = 1) :parameter snode: (SNode) + :parameter n: (optional, scalar) the number of parent steps, i.e. ``n=1`` for parent, ``n=2`` grandparent, etc. :return: (SNode) the parent node of ``snode`` :: @@ -118,6 +92,10 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str blk1.parent() # ti.root blk2.parent() # blk1 blk3.parent() # blk2 + blk3.parent(1) # blk2 + blk3.parent(2) # blk1 + blk3.parent(3) # ti.root + blk3.parent(4) # None Node types diff --git a/docs/vector.rst b/docs/vector.rst index 7e944db5c2ba4..e7bcea087b18b 100644 --- a/docs/vector.rst +++ b/docs/vector.rst @@ -191,6 +191,9 @@ Methods Vectors are special matrices with only 1 column. In fact, ``ti.Vector`` is just an alias of ``ti.Matrix``. +Metadata +-------- + .. attribute:: a.n :parameter a: (Vector or tensor of Vector) From 6c1d48fb6648f9ca925e4ac1000de097c566b717 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BD=AD=E4=BA=8E=E6=96=8C?= <1931127624@qq.com> Date: Thu, 16 Jul 2020 23:14:30 +0800 Subject: [PATCH 09/11] [skip ci] Apply suggestions from code review Co-authored-by: Yuanming Hu --- docs/meta.rst | 38 +++++++++++++++++++------------------- docs/snode.rst | 2 +- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/meta.rst b/docs/meta.rst index d79761de5a166..161d56519edd5 100644 --- a/docs/meta.rst +++ b/docs/meta.rst @@ -17,8 +17,8 @@ Taichi kernels are *lazily instantiated* and a lot of computation can happen at Template metaprogramming ------------------------ -Taichi tensors oftenly are used as globals. But you may use ``ti.template()`` -as type hint to pass a tensor as argument. For example: +You may use ``ti.template()`` +as a type hint to pass a tensor as an argument. For example: .. 
code-block:: python
 
     @ti.kernel
@@ -35,14 +35,14 @@ as type hint to pass a tensor as argument. For example:
     copy(c, d)
 
 
-As shown by the above example, template programming may enable us to reuse our
-code and improve better flexibility.
+As shown in the example above, template programming may enable us to reuse our
+code and provide more flexibility.
 
 
 Dimensionality-independent programming using grouped indices
 ------------------------------------------------------------
 
-However, the ``copy`` template shown above is not perfect, i.e., it can only be
+However, the ``copy`` template shown above is not perfect. For example, it can only be
 used to copy 1D tensors. What if we want to copy 2D tensors? Do we have to write
 another kernel?
 
@@ -53,8 +53,8 @@ another kernel?
         for i, j in x:
             y[i, j] = x[i, j]
 
-Not necessary! Taichi provides ``ti.grouped`` syntax which enable you to get
-loop indices into a grouped vector, therefore unify different dimensionalities.
+Not necessary! Taichi provides ``ti.grouped`` syntax which enables you to pack
+loop indices into a grouped vector to unify kernels of different dimensionalities.
 For example:
 
 .. code-block:: python
@@ -81,11 +81,11 @@ For example:
         y[i, j + 1] = i + j
 
 
-Tensor meta data
-----------------
+Tensor metadata
+---------------
 
-Sometimes it will be useful to get the data type (``tensor.dtype``) and shape (``tensor.shape``) of tensors.
-These attributes can be accessed in both Taichi kernels and python scripts.
+Sometimes it is useful to get the data type (``tensor.dtype``) and shape (``tensor.shape``) of tensors.
+These attributes can be accessed in both Taichi- and Python-scopes.
 
 .. code-block:: python
 
@@ -103,17 +103,17 @@ See :ref:`scalar_tensor` for more details.
     For sparse tensors, the full domain shape will be returned.
 
 
-Matrix & vector meta data
--------------------------
+Matrix & vector metadata
+------------------------
 
-Sometimes it will also be useful to get the matrix column and row numbers when
-you want to write dimensionality-independent code, such as reusing code between
-2D/3D physical simulations.
+Getting the number of matrix columns and rows will allow
+you to write dimensionality-independent code. For example, this can be used to unify
+2D and 3D physical simulators.
 
-``matrix.m`` equals to the column number of matrix, while ``matrix.n`` equals to
-the row number of matrix.
+``matrix.m`` equals the number of columns of a matrix, while ``matrix.n`` equals
+the number of rows of a matrix.
 Since vectors are considered as matrices with one column, ``vector.n`` is simply
-the dimensionality of vector.
+the dimensionality of the vector.
 
 .. code-block:: python
 
diff --git a/docs/snode.rst b/docs/snode.rst
index b89e676257194..648c5fe64b7a0 100644
--- a/docs/snode.rst
+++ b/docs/snode.rst
@@ -81,7 +81,7 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str
 .. function:: snode.parent(n = 1)
 
    :parameter snode: (SNode)
-   :parameter n: (optional, scalar) the number of parent steps, i.e. ``n=1`` for parent, ``n=2`` grandparent, etc.
+   :parameter n: (optional, scalar) the number of steps, i.e. ``n=1`` for parent, ``n=2`` for grandparent, etc.
:return: (SNode) the parent node of ``snode`` :: From de62ab50e5388496265409dd5f0e1c43b9b37488 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BD=AD=E4=BA=8E=E6=96=8C?= <1931127624@qq.com> Date: Thu, 16 Jul 2020 23:54:29 +0800 Subject: [PATCH 10/11] [skip ci] Update docs/meta.rst Co-authored-by: Yuanming Hu --- docs/meta.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/meta.rst b/docs/meta.rst index 161d56519edd5..8a44aec426a73 100644 --- a/docs/meta.rst +++ b/docs/meta.rst @@ -73,7 +73,7 @@ For example: @ti.kernel def array_op(x: ti.template(), y: ti.template()): # if tensor x is 2D: - for I in ti.grouped(x): # I is simply a 2D vector with x data type i32 + for I in ti.grouped(x): # I is simply a 2D vector with data type i32 y[I + ti.Vector([0, 1])] = I[0] + I[1] # then it is equivalent to: From 47cf398facc544eb2edf13910321a275013a7d99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BD=AD=E4=BA=8E=E6=96=8C?= <1931127624@qq.com> Date: Fri, 17 Jul 2020 11:54:34 +0800 Subject: [PATCH 11/11] Update docs/meta.rst --- docs/meta.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/meta.rst b/docs/meta.rst index 8a44aec426a73..28f891ab9f376 100644 --- a/docs/meta.rst +++ b/docs/meta.rst @@ -63,7 +63,7 @@ For example: def copy(x: ti.template(), y: ti.template()): for I in ti.grouped(y): # I is a vector with same dimensionality with x and data type i32 - # If y is 0D, then I = None + # If y is 0D, then I = ti.Vector([]), which is equivalent to `None` when used in x[I] # If y is 1D, then I = ti.Vector([i]) # If y is 2D, then I = ti.Vector([i, j]) # If y is 3D, then I = ti.Vector([i, j, k])
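
To summarize the reflection API this series settles on, here is a short usage
sketch (not part of the patches themselves; it mirrors the assertions in
tests/python/test_tensor_reflection.py):

.. code-block:: python

    import taichi as ti

    ti.init()

    x = ti.var(ti.f32)
    blk = ti.root.dense(ti.ij, (4, 8))
    blk.place(x)

    assert x.dtype == ti.f32  # property, replaces the deprecated x.data_type()
    assert x.shape == (4, 8)  # property, replaces the deprecated x.shape()
    assert len(x.shape) == 2  # replaces the deprecated x.dim()
    assert x.snode().parent() == blk  # walk one step up the SNode tree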