diff --git a/.travis.yml b/.travis.yml
index 4c2c63558fa98..c968937bee6d3 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -20,6 +20,7 @@ matrix:
 skip_commits:
   files:
     - '*.md'
+    - '*.rst'
     - docs
    - benchmarks
    - examples
diff --git a/appveyor.yml b/appveyor.yml
index 6bdea46a9c339..52597a0ed8739 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -28,6 +28,7 @@ environment:
 skip_commits:
   files:
     - '*.md'
+    - '*.rst'
    - docs
    - benchmarks
    - examples
diff --git a/docs/atomic.rst b/docs/atomic.rst
new file mode 100644
index 0000000000000..b418c939838a4
--- /dev/null
+++ b/docs/atomic.rst
@@ -0,0 +1,74 @@
+.. _atomic:
+
+Atomic operations
+=================
+
+In Taichi, augmented assignments (e.g., ``x[i] += 1``) are automatically `atomic `_.
+
+
+.. warning::
+
+    When accumulating to global variables in parallel, make sure you use atomic operations. For example, to compute the sum of all elements in ``x``:
+    ::
+
+        @ti.kernel
+        def sum():
+            for i in x:
+                # Approach 1: OK
+                total[None] += x[i]
+
+                # Approach 2: OK
+                ti.atomic_add(total[None], x[i])
+
+                # Approach 3: Wrong result, since the read-modify-write is not atomic.
+                total[None] = total[None] + x[i]
+
+
+.. note::
+    When atomic operations are applied to local values, the Taichi compiler will try to demote them into their non-atomic counterparts.
+
+Apart from augmented assignments, explicit atomic operations such as ``ti.atomic_add`` also do read-modify-write atomically.
+These operations additionally return the **old value** of the first argument. Below is the full list of explicit atomic operations:
+
+.. function:: ti.atomic_add(x, y)
+.. function:: ti.atomic_sub(x, y)
+
+    Atomically compute ``x + y`` / ``x - y`` and store the result to ``x``.
+
+    :return: The old value of ``x``.
+
+    For example,
+    ::
+
+        x = 3
+        y = 4
+        z = ti.atomic_add(x, y)
+        # now x = 7, y = 4, z = 3
+
+
+.. function:: ti.atomic_and(x, y)
+.. function:: ti.atomic_or(x, y)
+.. function:: ti.atomic_xor(x, y)
+
+    Atomically compute ``x & y`` (bitwise and), ``x | y`` (bitwise or), or ``x ^ y`` (bitwise xor), and store the result to ``x``.
+
+    :return: The old value of ``x``.
+
+
+.. note::
+
+    Supported atomic operations on each backend:
+
+    +----------+-----------+-----------+---------+
+    | type     | CPU/CUDA  | OpenGL    | Metal   |
+    +==========+===========+===========+=========+
+    | ``i32``  | OK        | OK        | OK      |
+    +----------+-----------+-----------+---------+
+    | ``f32``  | OK        | OK        | OK      |
+    +----------+-----------+-----------+---------+
+    | ``i64``  | OK        | EXT       | MISS    |
+    +----------+-----------+-----------+---------+
+    | ``f64``  | OK        | EXT       | MISS    |
+    +----------+-----------+-----------+---------+
+
+    (OK: supported; EXT: requires extension; MISS: not supported)
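+
+Putting this together, a minimal runnable sketch of a parallel, atomic sum
+(the declarations here are illustrative; any supported backend works)::
+
+    import taichi as ti
+
+    ti.init(arch=ti.x64)
+
+    n = 16
+    x = ti.var(ti.f32, shape=n)
+    total = ti.var(ti.f32, shape=())
+
+    @ti.kernel
+    def fill():
+        for i in x:
+            x[i] = i
+
+    @ti.kernel
+    def sum():
+        for i in x:
+            total[None] += x[i]  # Approach 1 above: atomic, hence safe in parallel
+
+    fill()
+    sum()
+    print(total[None])  # 0 + 1 + ... + 15 = 120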
diff --git a/docs/compilation.rst b/docs/compilation.rst
index 0f48a10103bd6..00496f90e26fa 100644
--- a/docs/compilation.rst
+++ b/docs/compilation.rst
@@ -101,7 +101,7 @@ which allows a series of further IR passes to happen, such as
 - Atomic operation demotion
 
 The just-in-time (JIT) compilation engine
-----------------------------------------
+-----------------------------------------
 
 Finally, the optimized SSA IR is fed into the LLVM IR codegen, and LLVM JIT generates high-performance executable CPU/GPU programs.
diff --git a/docs/contributor_guide.rst b/docs/contributor_guide.rst
index 87f52819e4493..49fc62915b0c6 100644
--- a/docs/contributor_guide.rst
+++ b/docs/contributor_guide.rst
@@ -77,7 +77,7 @@ Using continuous integration
 ----------------------------
 - Continuous Integration (CI), will **build** and **test** your commits in a PR against in environments.
-- Currently, Taichi uses `"Travis CI" `_(for OS X and Linux) and `"AppVeyor" `_(for Windows).
+- Currently, Taichi uses `Travis CI `_ (for OS X and Linux) and `AppVeyor `_ (for Windows).
 - CI will be triggered everytime you push commits to an open PR.
 - You can prepend ``[skip ci]`` to your commit message to avoid triggering CI. e.g. ``[skip ci] This commit will not trigger CI``
 - A tick on the right of commit hash means CI passed, a cross means CI failed.
diff --git a/docs/cpp_style.rst b/docs/cpp_style.rst
index 7079055543c86..88b0b6025aeff 100644
--- a/docs/cpp_style.rst
+++ b/docs/cpp_style.rst
@@ -28,3 +28,7 @@ Don'ts
 - ``NULL``, use ``nullptr`` instead.
 - ``using namespace std;`` in global scope.
 - ``typedef``. Use ``using`` instead.
+
+Automatic code formatting
+-------------------------
+- Please run ``ti format``
diff --git a/docs/dev_install.rst b/docs/dev_install.rst
index 96c30e2fef9b2..fcf77a28e4094 100644
--- a/docs/dev_install.rst
+++ b/docs/dev_install.rst
@@ -11,6 +11,9 @@ For precise build instructions on Windows, please check out `appveyor.yml `_).
 To do so, download and unzip the llvm source, move to the llvm folder, and execute
+* (If on Ubuntu) Execute ``sudo apt install libtinfo-dev clang-8`` (``clang-7`` should work as well).
+
+* (If on Arch Linux) Execute
 
 .. code-block:: bash
 
+    wget https://archive.archlinux.org/packages/c/clang/clang-8.0.1-1-x86_64.pkg.tar.xz
+    sudo pacman -U clang-8.0.1-1-x86_64.pkg.tar.xz
+
+  .. warning::
+    If you have installed ``clang`` (9.0.1) before, this command will override the existing ``clang``.
+    If you don't want to break dependencies, please build it from scratch and install it in ``/opt``. Then add ``/opt/clang/bin`` to your ``$PATH``.
+
+
+- Make sure you have LLVM 8.0.1 built from scratch. To do so:
+
+  .. code-block:: bash
+
+    wget https://github.com/llvm/llvm-project/releases/download/llvmorg-8.0.1/llvm-8.0.1.src.tar.xz
+    tar xvJf llvm-8.0.1.src.tar.xz
+    cd llvm-8.0.1.src
     mkdir build
     cd build
     cmake .. -DLLVM_ENABLE_RTTI:BOOL=ON -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_BUILD_TYPE=Release -DLLVM_TARGETS_TO_BUILD="X86;NVPTX" -DLLVM_ENABLE_ASSERTIONS=ON
@@ -31,27 +50,34 @@ Note that on Linux/OS X, ``clang`` is the only supported compiler for compiling
     make -j 8
     sudo make install
 
-- Clone the taichi repo, and then
+Setting up Taichi for development
+---------------------------------
+
+- Clone the taichi repo, and build:
 
 .. code-block:: bash
 
+    git clone https://github.com/taichi-dev/taichi --depth=1 --branch=master
+    git submodule update --init --recursive --depth=1
     cd taichi
     mkdir build
     cd build
     cmake ..
-    # if you are building with CUDA, say, 10.0, then please use "cmake .. -DCUDA_VERSION=10.0 -DTI_WITH_CUDA:BOOL=True"
+    # if you are building with CUDA 10.0, use the line below:
+    # cmake .. -DCUDA_VERSION=10.0 -DTI_WITH_CUDA:BOOL=True
     make -j 8
 
-- Add the following to your ``~/.bashrc`` (or ``~/.zshrc`` if you use ``zsh``)
+- Add the following script to your ``~/.bashrc``:
 
 .. code-block:: bash
 
     export TAICHI_REPO_DIR=/home/XXX/taichi  # Path to your taichi repository
     export PYTHONPATH=$TAICHI_REPO_DIR/python/:$PYTHONPATH
     export PATH=$TAICHI_REPO_DIR/bin/:$PATH
+    # export PATH=/opt/llvm/bin:$PATH  # Uncomment if your llvm-8 or clang-8 is in /opt
 
-- Execute ``source ~/.bashrc`` to reload shell config
-- Execute ``ti test`` to run all the tests. It may take up to 5 minutes to run all tests. (On Windows the ``ti`` command should be replaced by ``python -m taichi``)
+- Execute ``source ~/.bashrc`` to reload the shell config.
+- Execute ``python3 -m taichi test`` to run all the tests. It may take up to 5 minutes to run all tests.
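+- Optionally, verify the setup with a short script (a sketch; it assumes the
+  environment variables above have been loaded):
+
+  .. code-block:: python
+
+    import taichi as ti
+
+    ti.init(arch=ti.x64)
+
+    x = ti.var(ti.f32, shape=4)
+
+    @ti.kernel
+    def fill():
+        for i in x:
+            x[i] = i * 2
+
+    fill()
+    print(x[3])  # expect 6.0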
 - Check out ``examples`` for runnable examples. Run them with ``python3``.
diff --git a/docs/global_settings.rst b/docs/global_settings.rst
index bdef07e3de71f..84ecbc1e6cd5f 100644
--- a/docs/global_settings.rst
+++ b/docs/global_settings.rst
@@ -3,4 +3,11 @@ Global Settings
 
 - Restart the Taichi runtime system (clear memory, destroy all variables and kernels): ``ti.reset()``
 - Eliminate verbose outputs: ``ti.get_runtime().set_verbose(False)``
-- To specify which GPU to use: ``export CUDA_VISIBLE_DEVICES=0``
+- To avoid triggering GDB when the program crashes: ``export TI_GDB_TRIGGER=0``
+- To disable unified memory for CUDA: ``export TI_USE_UNIFIED_MEMORY=0``
+- To specify the pre-allocated memory size for CUDA: ``export TI_DEVICE_MEMORY_GB=0.5``
+- To show a more detailed log (``TI_TRACE``): ``export TI_LOG_LEVEL=trace``
+- To specify which GPU to use for CUDA: ``export CUDA_VISIBLE_DEVICES=0``
+- To specify which architecture (arch) to use: ``export TI_ARCH=cuda``
+- To print the intermediate IR generated: ``export TI_PRINT_IR=1``
+- To print verbose details: ``export TI_VERBOSE=1``
diff --git a/docs/hello.rst b/docs/hello.rst
index b8ef2b988a412..f79d879955876 100644
--- a/docs/hello.rst
+++ b/docs/hello.rst
@@ -3,13 +3,11 @@ Hello, world!
 
 We introduce the Taichi programming language through a very basic `fractal` example.
 
-If you haven't done so, please install Taichi via ``pip``.
-Depending on your hardware and OS, please execute one of the following commands:
+If you haven't done so, please install Taichi via ``pip``:
 
 .. code-block:: bash
 
   # Python 3.6+ needed
-
   python3 -m pip install taichi
 
 Now you are ready to run the Taichi code below (``python3 fractal.py``) to compute a
@@ -30,7 +28,7 @@ Now you are ready to run the Taichi code below (``python3 fractal.py``) to compu
 
 @ti.func
 def complex_sqr(z):
-  return ti.Vector([z[0] * z[0] - z[1] * z[1], z[1] * z[0] * 2])
+  return ti.Vector([z[0] ** 2 - z[1] ** 2, z[1] * z[0] * 2])
 
 @ti.kernel
 def paint(t: ti.f32):
@@ -65,16 +63,35 @@ You can also reuse the package management system, Python IDEs, and existing Pyth
 
 Portability
 -----------------
 
-Taichi supports both CPUs and NVIDIA GPUs.
+Taichi code can run on CPUs or GPUs. Initialize Taichi according to your hardware platform:
 
 .. code-block:: python
 
-  # Run on GPU
+  # Run on NVIDIA GPU, CUDA required
   ti.init(arch=ti.cuda)
+  # Run on GPU, with the OpenGL backend
+  ti.init(arch=ti.opengl)
+  # Run on GPU, with the Apple Metal backend, if you are on OS X
+  ti.init(arch=ti.metal)
   # Run on CPU (default)
   ti.init(arch=ti.x64)
 
-If the machine does not have CUDA support, Taichi will fall back to CPUs instead.
+.. note::
+    Supported backends on different platforms:
+
+    +----------+------+------+--------+-------+
+    | platform | CPU  | CUDA | OpenGL | Metal |
+    +==========+======+======+========+=======+
+    | Windows  | OK   | OK   | WIP    | N/A   |
+    +----------+------+------+--------+-------+
+    | Linux    | OK   | OK   | OK     | N/A   |
+    +----------+------+------+--------+-------+
+    | Mac OS X | OK   | N/A  | N/A    | OK    |
+    +----------+------+------+--------+-------+
+
+    (OK: supported, WIP: work in progress, N/A: not available)
+
+    If the machine does not have CUDA support, Taichi will fall back to CPUs instead.
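+
+For example, a program can pick one backend at startup and the rest of the
+code stays unchanged (a sketch):
+
+.. code-block:: python
+
+  import taichi as ti
+
+  ti.init(arch=ti.cuda)  # falls back to CPUs if CUDA is unavailable
+
+  x = ti.var(ti.f32, shape=8)
+
+  @ti.kernel
+  def double():
+    for i in x:  # parallelized on the chosen backend
+      x[i] = i * 2
+
+  double()
+  print(x[3])  # 6.0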
note:: + Supported backends on different platforms: + + +----------+------+------+--------+-------+ + | platform | CPU | CUDA | OpenGL | Metal | + +==========+======+======+========+=======+ + | Windows | OK | OK | WIP | N/A | + +----------+------+------+--------+-------+ + | Linux | OK | OK | OK | N/A | + +----------+------+------+--------+-------+ + | Mac OS X | OK | N/A | N/A | OK | + +----------+------+------+--------+-------+ + + (OK: supported, WIP: work in progress, N/A: not available) + + If the machine does not have CUDA support, Taichi will fall back to CPUs instead. .. note:: @@ -86,7 +103,7 @@ If the machine does not have CUDA support, Taichi will fall back to CPUs instead On other platforms Taichi will make use of its on-demand memory allocator to adaptively allocate memory. (Sparse) Tensors -------- +---------------- Taichi is a data-oriented programming language, where dense or spatially-sparse tensors are first-class citizens. See :ref:`sparse` for more details on sparse tensors. @@ -110,7 +127,7 @@ You can also define Taichi **functions** with ``ti.func``, which can be called a .. warning:: - Taichi kernels must be called in the Python-scope. I.e., **nested Taichi kernels are not supported**. + Taichi kernels must be called in the Python-scope. I.e., **nested kernels are not supported**. Nested functions are allowed. **Recursive functions are not supported for now**. Taichi functions can only be called in Taichi-scope. @@ -152,6 +169,9 @@ In the fractal code above, ``for i, j in pixels`` loops over all the pixel coord Struct-for is the key to :ref:`sparse` in Taichi, as it will only loop over active elements in a sparse tensor. In dense tensors, all elements are active. +.. note:: + Struct-for's must be at the outer-most scope of kernels. + .. note:: It is the loop **at the outermost scope** that gets parallelized, not the outermost loop. @@ -171,9 +191,23 @@ In the fractal code above, ``for i, j in pixels`` loops over all the pixel coord for i in x: ... -.. warning:: +.. note:: + ``break`` is not supported in **outermost (parallelized)** loops: - Struct-for's must be at the outer-most scope of kernels. + .. code-block:: python + + @ti.kernel + def foo(): + for i in x: + ... + break # ERROR! You cannot break a parallelized loop! + + @ti.kernel + def foo(): + for i in x: + for j in y: + ... + break # OK Interacting with Python diff --git a/docs/index.rst b/docs/index.rst index e5008b3acde66..90fe3c2207315 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,58 +6,65 @@ The Taichi Programming Language :maxdepth: 3 overview + hello + .. toctree:: :caption: Basic Concepts :maxdepth: 3 - hello - syntax - type + tensor_matrix + atomic + external - linalg - tensor_matrix +.. toctree:: + :caption: API References + :maxdepth: 3 - global_settings + scalar_tensor + vector + linalg - external .. toctree:: :caption: Advanced Programming :maxdepth: 3 meta - data_layout - sparse - differentiable_programming - odop - compilation - syntax_sugars .. toctree:: - :caption: Miscellaneous - :maxdepth: 3 + :caption: Contribution + :maxdepth: 1 - utilities dev_install contributor_guide cpp_style internal - faq + + +.. toctree:: + :caption: Miscellaneous + :maxdepth: 3 + + utilities + global_settings + performance acknowledgments + faq + .. 
 
@@ -152,6 +169,9 @@ In the fractal code above, ``for i, j in pixels`` loops over all the pixel coord
 Struct-for is the key to :ref:`sparse` in Taichi, as it will only loop over active elements in a sparse tensor.
 In dense tensors, all elements are active.
 
+.. note::
+    Struct-for's must be at the outer-most scope of kernels.
+
 .. note::
 
     It is the loop **at the outermost scope** that gets parallelized, not the outermost loop.
 
@@ -171,9 +191,23 @@ In the fractal code above, ``for i, j in pixels`` loops over all the pixel coord
         for i in x:
             ...
 
-.. warning::
+.. note::
+    ``break`` is not supported in **outermost (parallelized)** loops:
 
-    Struct-for's must be at the outer-most scope of kernels.
+    .. code-block:: python
+
+        @ti.kernel
+        def foo():
+            for i in x:
+                ...
+                break  # ERROR! You cannot break a parallelized loop!
+
+        @ti.kernel
+        def foo():
+            for i in x:
+                for j in y:
+                    ...
+                    break  # OK
 
 
 Interacting with Python
diff --git a/docs/index.rst b/docs/index.rst
index e5008b3acde66..90fe3c2207315 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -6,58 +6,65 @@ The Taichi Programming Language
    :maxdepth: 3
 
    overview
+   hello
+
 
 .. toctree::
    :caption: Basic Concepts
    :maxdepth: 3
 
-   hello
-
    syntax
    type
+   tensor_matrix
+   atomic
+   external
 
-   linalg
-   tensor_matrix
+.. toctree::
+   :caption: API References
+   :maxdepth: 3
 
-   global_settings
+   scalar_tensor
+   vector
+   linalg
 
-   external
 
 .. toctree::
    :caption: Advanced Programming
    :maxdepth: 3
 
    meta
-
    data_layout
    sparse
    differentiable_programming
   odop
   compilation
   syntax_sugars
 
 .. toctree::
-   :caption: Miscellaneous
-   :maxdepth: 3
+   :caption: Contribution
+   :maxdepth: 1
 
-   utilities
    dev_install
   contributor_guide
   cpp_style
  internal
-   faq
+
+
+.. toctree::
+   :caption: Miscellaneous
+   :maxdepth: 3
+
+   utilities
+   global_settings
+   performance
 
    acknowledgments
+   faq
+
 
 .. toctree::
    :caption: Legacy
    :maxdepth: 3
 
-   installation
+   legacy_installation
diff --git a/docs/internal.rst b/docs/internal.rst
index c425771d1a289..919468f244152 100644
--- a/docs/internal.rst
+++ b/docs/internal.rst
@@ -7,6 +7,7 @@ Vector type system
 
 Intermediate representation
 ---------------------------------------
+Use ``ti.init(print_ir=True)`` to print the IR on the console.
 
 Code generation
diff --git a/docs/installation.rst b/docs/legacy_installation.rst
similarity index 100%
rename from docs/installation.rst
rename to docs/legacy_installation.rst
diff --git a/docs/linalg.rst b/docs/linalg.rst
index 0ff214fbbbc74..4fbb25d05e1c8 100644
--- a/docs/linalg.rst
+++ b/docs/linalg.rst
@@ -1,13 +1,12 @@
 .. _linalg:
 
-Linear algebra
-===============================================
-
 Matrices
----------------------------------------
+========
+
 - ``ti.Matrix`` is for small matrices (e.g. `3x3`) only. If you have `64x64` matrices, you should consider using a 2D tensor of scalars.
 - ``ti.Vector`` is the same as ``ti.Matrix``, except that it has only one column.
 - Differentiate element-wise product ``*`` and matrix product ``@``.
+- ``ti.Vector(n, dt=ti.f32)`` or ``ti.Matrix(n, m, dt=ti.f32)`` to create tensors of vectors/matrices.
 - ``ti.transposed(A)`` or simply ``A.T()``
 - ``ti.inverse(A)``
 - ``ti.Matrix.abs(A)``
@@ -18,14 +17,4 @@
 - ``R, S = ti.polar_decompose(A, ti.f32)``
 - ``U, sigma, V = ti.svd(A, ti.f32)`` (Note that ``sigma`` is a ``3x3`` diagonal matrix)
 
-
-Vectors
----------------------------------------
-Vectors are special matrices with only 1 column. In fact, ``ti.Vector`` is just an alias of ``ti.Matrix``.
-
-- Dot product: ``a.dot(b)``, where ``a`` and ``b`` are vectors. ``ti.transposed(a) @ b`` will give you a ``matrix`` of size ``1x1``, which is not a `scalar`.
-- Outer product: ``ti.outer_product(a, b)``
-- l-2 norm: ``a.norm(eps = 0)``
-
-  - returns ``sqrt(\sum_i(x_i ^ 2) + eps)``
-  - Set ``eps = 1e-5`` for example, to safe guards the operator's gradient on zero vectors during differentiable programming.
+TODO: improve this documentation, in the same style as :ref:`vector`.
diff --git a/docs/scalar_tensor.rst b/docs/scalar_tensor.rst
new file mode 100644
index 0000000000000..0d69c849c2c2d
--- /dev/null
+++ b/docs/scalar_tensor.rst
@@ -0,0 +1,78 @@
+.. _scalar_tensor:
+
+Tensors of scalars
+==================
+
+
+Declaration
+-----------
+
+.. function:: ti.var(dt, shape = None)
+
+    :parameter dt: (DataType) type of the tensor element
+    :parameter shape: (optional, scalar or tuple) the shape of the tensor
+
+    For example, this creates a *dense* tensor with four ``int32`` elements:
+    ::
+
+        x = ti.var(ti.i32, shape=4)
+
+    This creates a 4x3 *dense* tensor with ``float32`` elements:
+    ::
+
+        x = ti.var(ti.f32, shape=(4, 3))
+
+    If ``shape`` is ``()`` (empty tuple), then a 0-D tensor (scalar) is created:
+    ::
+
+        x = ti.var(ti.f32, shape=())
+
+    Then access it by passing ``None`` as the index:
+    ::
+
+        x[None] = 2
+
+    If ``shape`` is **not provided** or ``None``, the user must manually ``place`` it afterwards:
+    ::
+
+        x = ti.var(ti.f32)
+        ti.root.dense(ti.ij, (4, 3)).place(x)
+        # equivalent to: x = ti.var(ti.f32, shape=(4, 3))
+
+.. note::
+
+    Not providing ``shape`` allows you to *place* the tensor as a *sparse* tensor; see :ref:`sparse` for more details.
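+
+For example, a declared tensor can be written in the Taichi-scope and read back
+from the Python-scope (a brief, self-contained sketch)::
+
+    import taichi as ti
+
+    ti.init()
+
+    x = ti.var(ti.i32, shape=(4, 3))
+
+    @ti.kernel
+    def fill():
+        for i, j in x:  # struct-for over all 4x3 elements
+            x[i, j] = i * 10 + j
+
+    fill()
+    print(x[2, 1])  # 21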
+
+
+.. warning::
+
+    All variables should be created and placed before any kernel invocation, and before any of them is accessed from the Python-scope. For example:
+
+    .. code-block:: python
+
+        x = ti.var(ti.f32)
+        x[None] = 1  # ERROR: x not placed!
+
+    .. code-block:: python
+
+        x = ti.var(ti.f32, shape=())
+
+        @ti.kernel
+        def func():
+            x[None] = 1
+
+        func()
+        y = ti.var(ti.f32, shape=())
+        # ERROR: cannot create tensor after kernel invocation!
+
+    .. code-block:: python
+
+        x = ti.var(ti.f32, shape=())
+        x[None] = 1
+        y = ti.var(ti.f32, shape=())
+        # ERROR: cannot create tensor after any tensor accesses from the Python-scope!
+
+
+Attribute
+---------
+
+TODO: WIP
diff --git a/docs/sparse.rst b/docs/sparse.rst
index ef0ad0be4d1f5..5ef44c4be0c2f 100644
--- a/docs/sparse.rst
+++ b/docs/sparse.rst
@@ -1,6 +1,6 @@
 .. _sparse:
 
-Sparse computation
+Sparse computation (WIP)
 ===============================================
 
 .. warning::
diff --git a/docs/syntax.rst b/docs/syntax.rst
index d364657864d91..cce3a65d04d83 100644
--- a/docs/syntax.rst
+++ b/docs/syntax.rst
@@ -77,11 +77,11 @@ Use ``@ti.func`` to decorate your Taichi functions. These functions are callable
 
 .. warning::
 
-    Functions with multiple return values are not supported for now. Use a **local** variable to store the results instead:
+    Functions with multiple ``return``'s are not supported for now. Use a **local** variable to store the results, so that you end up with only one ``return``:
 
     .. code-block:: python
 
-      # Bad function
+      # Bad function - two return's
       @ti.func
       def safe_sqrt(x):
         if x >= 0:
@@ -89,7 +89,7 @@ Use ``@ti.func`` to decorate your Taichi functions. These functions are callable
         else:
           return 0.0
 
-      # Good function
+      # Good function - single return
       @ti.func
       def safe_sqrt(x):
         rst = 0.0
@@ -101,7 +101,11 @@ Use ``@ti.func`` to decorate your Taichi functions. These functions are callable
 
 .. warning::
 
-    All functions are force-inlined. Function arguments are passed by value.
+    Currently, all functions are force-inlined. Therefore, no recursion is allowed.
+
+.. note::
+
+    Function arguments are passed by value.
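+
+    For example, modifying an argument inside a function does not affect the
+    caller's variable (a sketch; ``try_modify`` is a hypothetical helper):
+
+    .. code-block:: python
+
+      @ti.func
+      def try_modify(x):
+        x = 233  # only the local copy changes
+
+      @ti.kernel
+      def run():
+        a = 42
+        try_modify(a)
+        # a is still 42 here, since arguments are passed by value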
note:: + Supported types on each backends: + + +------+-----------+-----------+---------+ + | type | CPU/CUDA | OpenGL | Metal | + +======+===========+===========+=========+ + | i8 | OK | MISS | OK | + +------+-----------+-----------+---------+ + | i16 | OK | MISS | OK | + +------+-----------+-----------+---------+ + | i32 | OK | OK | OK | + +------+-----------+-----------+---------+ + | i64 | OK | EXT | MISS | + +------+-----------+-----------+---------+ + | u8 | OK | MISS | OK | + +------+-----------+-----------+---------+ + | u16 | OK | MISS | OK | + +------+-----------+-----------+---------+ + | u32 | OK | MISS | OK | + +------+-----------+-----------+---------+ + | u64 | OK | MISS | MISS | + +------+-----------+-----------+---------+ + | f32 | OK | OK | OK | + +------+-----------+-----------+---------+ + | f64 | OK | OK | MISS | + +------+-----------+-----------+---------+ + + (OK: supported, EXT: require extension, MISS: not supported) + + Boolean types are represented using ``ti.i32``. Binary operations on different types will give you a promoted type, following the C programming language, e.g. diff --git a/docs/utilities.rst b/docs/utilities.rst index 3e14f5fe53e56..42efcf8758b65 100644 --- a/docs/utilities.rst +++ b/docs/utilities.rst @@ -3,6 +3,30 @@ Utilities TODO: update +GUI system +---------- + +.. code-block:: python + + gui = ti.GUI('Title', (640, 480)) + while not gui.is_pressed(ti.GUI.ESCAPE): + gui.set_image(img) + gui.show() + + +Also checkout ``examples/keyboard.py`` for more advanced event processing. + + +Image I/O +--------- + +.. code-block:: python + + img = ti.imread('hello.png') + ti.imshow(img, 'Window Title') + ti.imwrite(img, 'hello2.png') + + Logging ------- @@ -23,7 +47,7 @@ The default logging level is ``ti.INFO``. You can also override default logging level by setting the environment variable ``TI_LOG_LEVEL`` to values such as ``trace`` and ``warn``. -Trigger GDB when the program crashes: +Trigger GDB when the program crashes -------------------------------------- .. code-block:: diff --git a/docs/vector.rst b/docs/vector.rst new file mode 100644 index 0000000000000..11ae7f2e7b9b1 --- /dev/null +++ b/docs/vector.rst @@ -0,0 +1,183 @@ +.. _vector: + +Vectors +======= + +A vector in Taichi can have two forms: + + - as a temporary local variable, an ``n``-D vector consists of ``n`` scalar values. + - as a global tensor, where each tensor element is a vector. In this case, an ``n``-D vector consists of ``n`` global tensors of scalars. + The tensors of scalars, when treated together, can be considered to be **a global tensor of vectors**. + +Declaration +----------- + +As global tensors of vectors +++++++++++++++++++++++++++++ + +.. function:: ti.Vector(n, dt=type, shape=shape) + + :parameter n: (scalar) the number of components in the vector + :parameter type: (DataType) data type of the components + :parameter shape: (scalar or tuple) shape the tensor of vectors, see :ref:`tensor` + + For example, this creates a 5x4 tensor of 3D vectors: + :: + + # Python-scope + a = ti.Vector(3, dt=ti.f32, shape=(5, 4)) + +.. note:: + + In Python-scope, ``ti.var`` declares :ref:`scalar_tensor`, while ``ti.Vector`` declares tensors of vectors. + + +As a temporary local variable ++++++++++++++++++++++++++++++ + +.. 
+
+As a temporary local variable
++++++++++++++++++++++++++++++
+
+.. function:: ti.Vector([x, y, ...])
+
+    :parameter x: (scalar) the first component of the vector
+    :parameter y: (scalar) the second component of the vector
+
+    For example, this creates a 3D vector with components (2, 3, 4):
+    ::
+
+        # Taichi-scope
+        a = ti.Vector([2, 3, 4])
+
+
+Accessing components
+--------------------
+
+As global tensors of vectors
+++++++++++++++++++++++++++++
+
+.. attribute:: a[p, q, ...][i]
+
+    :parameter a: (Vector) the vector
+    :parameter p: (scalar) index of the first tensor dimension
+    :parameter q: (scalar) index of the second tensor dimension
+    :parameter i: (scalar) index of the vector dimension
+
+    This extracts the first component of vector ``a[6, 3]``:
+    ::
+
+        x = a[6, 3][0]
+
+        # or
+        vec = a[6, 3]
+        x = vec[0]
+
+.. note::
+
+    **Always** use two pairs of square brackets to access scalar elements from tensors of vectors.
+
+    - The indices in the first pair of brackets locate the vector inside the tensor of vectors;
+    - The indices in the second pair of brackets locate the scalar element inside the vector.
+
+    For 0-D tensors of vectors, the indices in the first pair of brackets should be ``[None]``.
+
+
+
+As a temporary local variable
++++++++++++++++++++++++++++++
+
+.. attribute:: a[i]
+
+    :parameter a: (Vector) the vector
+    :parameter i: (scalar) index of the component
+
+    For example, this extracts the first component of vector ``a``:
+    ::
+
+        x = a[0]
+
+    This sets the second component of ``a`` to 4:
+    ::
+
+        a[1] = 4
+
+    TODO: add descriptions about ``a(i, j)``
+
+Methods
+-------
+
+.. function:: a.norm(eps = 0)
+
+    :parameter a: (Vector)
+    :parameter eps: (optional, scalar) a safe-guard value for ``sqrt``, usually 0. See the note below.
+    :return: (scalar) the magnitude / length / norm of the vector
+
+    For example,
+    ::
+
+        a = ti.Vector([3, 4])
+        a.norm()  # sqrt(3*3 + 4*4 + 0) = 5
+
+    ``a.norm(eps)`` is equivalent to ``ti.sqrt(a.dot(a) + eps)``
+
+.. note::
+    Set ``eps = 1e-5``, for example, to safeguard the operator's gradient on zero vectors during differentiable programming.
+
+
+.. function:: a.dot(b)
+
+    :parameter a: (Vector)
+    :parameter b: (Vector)
+    :return: (scalar) the dot (inner) product of ``a`` and ``b``
+
+    E.g.,
+    ::
+
+        a = ti.Vector([1, 3])
+        b = ti.Vector([2, 4])
+        a.dot(b)  # 1*2 + 3*4 = 14
+
+
+.. function:: ti.cross(a, b)
+
+    :parameter a: (Vector, 3D)
+    :parameter b: (Vector, 3D)
+    :return: (Vector, 3D) the cross product of ``a`` and ``b``
+
+    We use a right-handed coordinate system. E.g.,
+    ::
+
+        a = ti.Vector([1, 2, 3])
+        b = ti.Vector([4, 5, 6])
+        c = ti.cross(a, b)
+        # c = [2*6 - 5*3, 4*3 - 1*6, 1*5 - 4*2] = [-3, 6, -3]
+
+
+.. function:: ti.outer_product(a, b)
+
+    :parameter a: (Vector)
+    :parameter b: (Vector)
+    :return: (Matrix) the outer product of ``a`` and ``b``
+
+    E.g.,
+    ::
+
+        a = ti.Vector([1, 2, 3])
+        b = ti.Vector([4, 5, 6])
+        c = ti.outer_product(a, b)  # NOTE: c[i, j] = a[i] * b[j]
+        # c = [[1*4, 1*5, 1*6], [2*4, 2*5, 2*6], [3*4, 3*5, 3*6]]
+
+.. note::
+    This is not the same as ``ti.cross``. ``a`` and ``b`` do not have to be 3D vectors.
+
+
+.. function:: a.cast(dt)
+
+    :parameter a: (Vector)
+    :parameter dt: (DataType)
+    :return: (Vector) vector with all components of ``a`` cast into type ``dt``
+
+    E.g.,
+    ::
+
+        # Taichi-scope
+        a = ti.Vector([1.6, 2.3])
+        a.cast(ti.i32)  # [1, 2] (fractional parts truncated)
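+
+A short sketch combining the methods above (Taichi-scope, e.g. inside a kernel)::
+
+    a = ti.Vector([3.0, 4.0])
+    b = ti.Vector([1.0, 0.0])
+
+    length = a.norm()           # 5.0
+    d = a.dot(b)                # 3.0
+    m = ti.outer_product(a, b)  # [[3, 0], [4, 0]]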
+
+.. note::
+    Vectors are special matrices with only 1 column. In fact, ``ti.Vector`` is just an alias of ``ti.Matrix``.
diff --git a/examples/fractal.py b/examples/fractal.py
index 412adbcae585a..d83b8157f3db6 100644
--- a/examples/fractal.py
+++ b/examples/fractal.py
@@ -8,7 +8,7 @@
 
 @ti.func
 def complex_sqr(z):
-    return ti.Vector([z[0] * z[0] - z[1] * z[1], z[1] * z[0] * 2])  # z^2
+    return ti.Vector([z[0]**2 - z[1]**2, z[1] * z[0] * 2])
 
 
 @ti.kernel
diff --git a/examples/lists.py b/examples/lists.py
index 2a07865f18fc3..f0170c53e7967 100644
--- a/examples/lists.py
+++ b/examples/lists.py
@@ -1,10 +1,10 @@
 import taichi as ti
 
-x = ti.global_var(ti.i32)
-l = ti.global_var(ti.i32)
+x = ti.var(ti.i32)
+l = ti.var(ti.i32)
 n = 16
 
-# ti.runtime.print_preprocessed = True
+ti.init()  # print_preprocessed=True
 
 
 @ti.layout
diff --git a/python/taichi/misc/image.py b/python/taichi/misc/image.py
index b8ce4be3e9cde..704146b995d33 100644
--- a/python/taichi/misc/image.py
+++ b/python/taichi/misc/image.py
@@ -35,6 +35,6 @@ def imread(filename, channels=0):
 def imshow(img, winname='Taichi'):
     img = cook_image(img)
     gui = ti.GUI(winname, res=img.shape[:2])
-    while not gui.has_key_pressed():
+    while not gui.is_pressed(ti.GUI.ESCAPE):
         gui.set_image(img)
         gui.show()