From 6dc41cee29599004bc0a74223b740fba096779eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BD=AD=E4=BA=8E=E6=96=8C?= <1931127624@qq.com> Date: Fri, 17 Jul 2020 14:29:09 +0800 Subject: [PATCH] [doc] Update documentaion on meta programming for #1374 (#1389) * [Refactor] [lang] x.data_type() is deprecated, use x.dtype instead * improve doc * did deprecate * fix * [skip ci] enforce code format * [skip ci] Update docs/vector.rst Co-authored-by: Yuanming Hu * revert doc * Revert "revert doc" This reverts commit 0d3efb6d37fe7e3e67f42f6e97bbc9805a6fb1f8. * [skip ci] Apply suggestions from code review Co-authored-by: Yuanming Hu * [skip ci] Update docs/meta.rst Co-authored-by: Yuanming Hu * Update docs/meta.rst Co-authored-by: Taichi Gardener Co-authored-by: Yuanming Hu --- docs/meta.rst | 131 +++++++++++++++++++++++++++++------------ docs/scalar_tensor.rst | 28 ++------- docs/snode.rst | 50 +++++----------- docs/vector.rst | 3 + 4 files changed, 117 insertions(+), 95 deletions(-) diff --git a/docs/meta.rst b/docs/meta.rst index 53871e6d6f26b..28f891ab9f376 100644 --- a/docs/meta.rst +++ b/docs/meta.rst @@ -17,6 +17,9 @@ Taichi kernels are *lazily instantiated* and a lot of computation can happen at Template metaprogramming ------------------------ +You may use ``ti.template()`` +as a type hint to pass a tensor as an argument. For example: + .. code-block:: python @ti.kernel @@ -24,48 +27,114 @@ Template metaprogramming for i in x: y[i] = x[i] + a = ti.var(ti.f32, 4) + b = ti.var(ti.f32, 4) + c = ti.var(ti.f32, 12) + d = ti.var(ti.f32, 12) + copy(a, b) + copy(c, d) + + +As shown in the example above, template programming may enable us to reuse our +code and provide more flexibility. + Dimensionality-independent programming using grouped indices ------------------------------------------------------------ +However, the ``copy`` template shown above is not perfect. For example, it can only be +used to copy 1D tensors. What if we want to copy 2D tensors? 
Do we have to write +another kernel? + +.. code-block:: python + +    @ti.kernel + def copy2d(x: ti.template(), y: ti.template()): + for i, j in x: + y[i, j] = x[i, j] + +Not necessary! Taichi provides ``ti.grouped`` syntax which enables you to pack +loop indices into a grouped vector to unify kernels of different dimensionalities. +For example: + .. code-block:: python @ti.kernel def copy(x: ti.template(), y: ti.template()): for I in ti.grouped(y): + # I is a vector with the same dimensionality as y and data type i32 + # If y is 0D, then I = ti.Vector([]), which is equivalent to `None` when used in x[I] + # If y is 1D, then I = ti.Vector([i]) + # If y is 2D, then I = ti.Vector([i, j]) + # If y is 3D, then I = ti.Vector([i, j, k]) + # ... x[I] = y[I] @ti.kernel def array_op(x: ti.template(), y: ti.template()): - # If tensor x is 2D - for I in ti.grouped(x): # I is a vector of size x.dim() and data type i32 + # if tensor x is 2D: + for I in ti.grouped(x): # I is simply a 2D vector with data type i32 y[I + ti.Vector([0, 1])] = I[0] + I[1] - # is equivalent to + + # then it is equivalent to: for i, j in x: y[i, j + 1] = i + j -Tensor size reflection ---------------------- -Sometimes it will be useful to get the dimensionality (``tensor.dim()``) and shape (``tensor.shape()``) of tensors. -These functions can be used in both Taichi kernels and python scripts. +Tensor metadata +--------------- + +Sometimes it is useful to get the data type (``tensor.dtype``) and shape (``tensor.shape``) of tensors. +These attributes can be accessed in both Taichi- and Python-scopes. .. 
code-block:: python @ti.func - def print_tensor_size(x: ti.template()): - print(x.dim()) - for i in ti.static(range(x.dim())): - print(x.shape()[i]) + def print_tensor_info(x: ti.template()): + print('Tensor dimensionality is', len(x.shape)) + for i in ti.static(range(len(x.shape))): + print('Size along dimension', i, 'is', x.shape[i]) + ti.static_print('Tensor data type is', x.dtype) + +See :ref:`scalar_tensor` for more details. + +.. note:: + + For sparse tensors, the full domain shape will be returned. + + +Matrix & vector metadata +------------------------ + +Getting the number of matrix columns and rows will allow +you to write dimensionality-independent code. For example, this can be used to unify +2D and 3D physical simulators. + +``matrix.m`` equals the number of columns of a matrix, while ``matrix.n`` equals +the number of rows of a matrix. +Since vectors are considered as matrices with one column, ``vector.n`` is simply +the dimensionality of the vector. + +.. code-block:: python + + @ti.kernel + def foo(): + matrix = ti.Matrix([[1, 2], [3, 4], [5, 6]]) + print(matrix.n) # 3 + print(matrix.m) # 2 + vector = ti.Vector([7, 8, 9]) + print(vector.n) # 3 + print(vector.m) # 1 + + -For sparse tensors, the full domain shape will be returned. Compile-time evaluations ------------------------ + Using compile-time evaluation will allow certain computations to happen when kernels are being instantiated. This saves the overhead of those computations at runtime. -* Use ``ti.static`` for compile-time branching (for those who come from C++17, this is `if constexpr `_.) +* Use ``ti.static`` for compile-time branching (for those who come from C++17, this is `if constexpr `_.): .. code-block:: python @@ -77,32 +146,20 @@ This saves the overhead of those computations at runtime. x[0] = 1 -* Use ``ti.static`` for forced loop unrolling +* Use ``ti.static`` for forced loop unrolling: .. 
code-block:: python - @ti.kernel - def g2p(f: ti.i32): - for p in range(0, n_particles): - base = ti.cast(x[f, p] * inv_dx - 0.5, ti.i32) - fx = x[f, p] * inv_dx - ti.cast(base, real) - w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1.0) ** 2, - 0.5 * (fx - 0.5) ** 2] - new_v = ti.Vector([0.0, 0.0]) - new_C = ti.Matrix([[0.0, 0.0], [0.0, 0.0]]) - - # Unrolled 9 iterations for higher performance - for i in ti.static(range(3)): - for j in ti.static(range(3)): - dpos = ti.cast(ti.Vector([i, j]), real) - fx - g_v = grid_v_out[base(0) + i, base(1) + j] - weight = w[i](0) * w[j](1) - new_v += weight * g_v - new_C += 4 * weight * ti.outer_product(g_v, dpos) * inv_dx - - v[f + 1, p] = new_v - x[f + 1, p] = x[f, p] + dt * v[f + 1, p] - C[f + 1, p] = new_C + @ti.kernel + def func(): + for i in ti.static(range(4)): + print(i) + + # is equivalent to: + print(0) + print(1) + print(2) + print(3) When to use for loops with ``ti.static`` @@ -120,7 +177,7 @@ For example, code for resetting this tensor of vectors should be @ti.kernel def reset(): for i in x: - for j in ti.static(range(3)): + for j in ti.static(range(x.n)): # The inner loop must be unrolled since j is a vector index instead # of a global tensor index. x[i][j] = 0 diff --git a/docs/scalar_tensor.rst b/docs/scalar_tensor.rst index db27b56e62939..e1b326f3329fd 100644 --- a/docs/scalar_tensor.rst +++ b/docs/scalar_tensor.rst @@ -107,24 +107,8 @@ You can access an element of the Taichi tensor by an index or indices. Meta data --------- -.. function:: a.dim() - :parameter a: (Tensor) the tensor - :return: (scalar) the length of ``a`` - - :: - - x = ti.var(ti.i32, (6, 5)) - x.dim() # 2 - - y = ti.var(ti.i32, 6) - y.dim() # 1 - - z = ti.var(ti.i32, ()) - z.dim() # 0 - - -.. function:: a.shape() +.. 
attribute:: a.shape :parameter a: (Tensor) the tensor :return: (tuple) the shape of tensor ``a`` @@ -132,16 +116,16 @@ Meta data :: x = ti.var(ti.i32, (6, 5)) - x.shape() # (6, 5) + x.shape # (6, 5) y = ti.var(ti.i32, 6) - y.shape() # (6,) + y.shape # (6,) z = ti.var(ti.i32, ()) - z.shape() # () + z.shape # () -.. function:: a.data_type() +.. function:: a.dtype :parameter a: (Tensor) the tensor :return: (DataType) the data type of ``a`` @@ -149,7 +133,7 @@ Meta data :: x = ti.var(ti.i32, (2, 3)) - x.data_type() # ti.i32 + x.dtype # ti.i32 .. function:: a.parent(n = 1) diff --git a/docs/snode.rst b/docs/snode.rst index f1ac731e15604..648c5fe64b7a0 100644 --- a/docs/snode.rst +++ b/docs/snode.rst @@ -33,32 +33,19 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str assert x.snode() == y.snode() -.. function:: tensor.shape() +.. function:: tensor.shape :parameter tensor: (Tensor) :return: (tuple of integers) the shape of tensor - Equivalent to ``tensor.snode().shape()``. + Equivalent to ``tensor.snode().shape``. For example, :: ti.root.dense(ti.ijk, (3, 5, 4)).place(x) - x.shape() # returns (3, 5, 4) - - -.. function:: tensor.dim() - - :parameter tensor: (Tensor) - :return: (scalar) the dimensionality of the tensor - - Equivalent to ``len(tensor.shape())``. - - :: - - ti.root.dense(ti.ijk, (8, 9, 10)).place(x) - x.dim() # 3 + x.shape # returns (3, 5, 4) .. function:: tensor.snode() @@ -74,7 +61,7 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str x.snode() -.. function:: snode.shape() +.. function:: snode.shape :parameter snode: (SNode) :return: (tuple) the size of node along that axis @@ -85,29 +72,16 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str blk2 = blk1.dense(ti.i, 3) blk3 = blk2.dense(ti.jk, (5, 2)) blk4 = blk3.dense(ti.k, 2) - blk1.shape() # () - blk2.shape() # (3, ) - blk3.shape() # (3, 5, 2) - blk4.shape() # (3, 5, 4) - - -.. 
function:: snode.dim() - - :parameter snode: (SNode) - :return: (scalar) the dimensionality of ``snode`` - - Equivalent to ``len(snode.shape())``. - - :: - - blk1 = ti.root.dense(ti.ijk, (8, 9, 10)) - ti.root.dim() # 0 - blk1.dim() # 3 + blk1.shape # () + blk2.shape # (3, ) + blk3.shape # (3, 5, 2) + blk4.shape # (3, 5, 4) -.. function:: snode.parent() +.. function:: snode.parent(n = 1) :parameter snode: (SNode) + :parameter n: (optional, scalar) the number of steps, i.e. ``n=1`` for parent, ``n=2`` grandparent, etc. :return: (SNode) the parent node of ``snode`` :: @@ -118,6 +92,10 @@ See :ref:`layout` for more details. ``ti.root`` is the root node of the data str blk1.parent() # ti.root blk2.parent() # blk1 blk3.parent() # blk2 + blk3.parent(1) # blk2 + blk3.parent(2) # blk1 + blk3.parent(3) # ti.root + blk3.parent(4) # None Node types diff --git a/docs/vector.rst b/docs/vector.rst index 0c58a107fdf09..d04190b5b9670 100644 --- a/docs/vector.rst +++ b/docs/vector.rst @@ -219,6 +219,9 @@ Methods Vectors are special matrices with only 1 column. In fact, ``ti.Vector`` is just an alias of ``ti.Matrix``. +Metadata +-------- + .. attribute:: a.n :parameter a: (Vector or tensor of Vector)