diff --git a/docs/lang/articles/deployment/ndarray_android.md b/docs/lang/articles/deployment/ndarray_android.md
index 26ab619000a44..6f573f8e9c675 100644
--- a/docs/lang/articles/deployment/ndarray_android.md
+++ b/docs/lang/articles/deployment/ndarray_android.md
@@ -66,7 +66,7 @@ The following Python script defines a Taichi AOT module for generating and savin
 ```python
 import taichi as ti
 
-ti.init(arch=ti.opengl, use_gles=True, allow_nv_shader_extension=False)
+ti.init(arch=ti.gles, allow_nv_shader_extension=False)
 
 # Define constants for computation
 G = 1
@@ -126,7 +126,7 @@ aot()
 
 **In line 3, you initialize Taichi:**
 
-1. Set `use_gles` to `True` to generate GLES compute shaders for Android.
+1. Set `arch=ti.gles` to generate GLES compute shaders for Android.
 2. Set `allow_nv_shader_extension` to `False` to prevent the generated GLES compute shaders from using Nvidia GL extensions on Android.
 
 > This setting is because Android supports GLES APIs but GLES does not support `NV_SHADER_EXTENSION`.
diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py
index d2c863cb25eb5..ca99c9fa22465 100644
--- a/python/taichi/lang/impl.py
+++ b/python/taichi/lang/impl.py
@@ -627,7 +627,8 @@ def create_field_member(dtype, name, needs_grad, needs_dual):
         # adjoint checkbit
         x_grad_checkbit = Expr(get_runtime().prog.make_id_expr(""))
         dtype = u8
-        if prog.config().arch in (_ti_core.opengl, _ti_core.vulkan):
+        if prog.config().arch in (_ti_core.opengl, _ti_core.vulkan,
+                                  _ti_core.gles):
            dtype = i32
         x_grad_checkbit.ptr = _ti_core.expr_field(x_grad_checkbit.ptr,
                                                   cook_dtype(dtype))
diff --git a/python/taichi/lang/misc.py b/python/taichi/lang/misc.py
index 0ff541f659e58..621924cb075a3 100644
--- a/python/taichi/lang/misc.py
+++ b/python/taichi/lang/misc.py
@@ -129,6 +129,11 @@
 """
 # ----------------------
 
+gles = _ti_core.gles
+"""The OpenGL ES backend. OpenGL ES 3.1 required.
+"""
+# ----------------------
+
 # Skip annotating this one because it is barely maintained.
 cc = _ti_core.cc
 
@@ -154,9 +159,9 @@
 """
 # ----------------------
 
-gpu = [cuda, metal, vulkan, opengl, dx11, dx12]
+gpu = [cuda, metal, vulkan, opengl, dx11, dx12, gles]
 """A list of GPU backends supported on the current system.
-Currently contains 'cuda', 'metal', 'opengl', 'vulkan', 'dx11', 'dx12'.
+Currently contains 'cuda', 'metal', 'opengl', 'vulkan', 'dx11', 'dx12', 'gles'.
 
 When this is used, Taichi automatically picks the matching GPU backend. If no
 GPU is detected, Taichi falls back to the CPU backend.
@@ -454,7 +459,7 @@ def init(arch=None,
     if env_arch is not None:
         _logging.info(f'Following TI_ARCH setting up for arch={env_arch}')
         arch = _ti_core.arch_from_name(env_arch)
-    cfg.arch = adaptive_arch_select(arch, enable_fallback, cfg.use_gles)
+    cfg.arch = adaptive_arch_select(arch, enable_fallback)
     if cfg.arch == cc:
         _ti_core.set_tmp_dir(locale_encode(prepare_sandbox()))
     print(f'[Taichi] Starting on arch={_ti_core.arch_name(cfg.arch)}')
@@ -717,14 +722,11 @@ def mesh_patch_idx():
     )
 
 
-def is_arch_supported(arch, use_gles=False):
+def is_arch_supported(arch):
     """Checks whether an arch is supported on the machine.
 
     Args:
         arch (taichi_python.Arch): Specified arch.
-        use_gles (bool): If True, check is GLES is available otherwise
-            check if GLSL is available. Only effective when `arch` is `ti.opengl`.
-            Default is `False`.
 
     Returns:
         bool: Whether `arch` is supported on the machine.
@@ -733,7 +735,8 @@ def is_arch_supported(arch, use_gles=False):
     arch_table = {
         cuda: _ti_core.with_cuda,
         metal: _ti_core.with_metal,
-        opengl: functools.partial(_ti_core.with_opengl, use_gles),
+        opengl: functools.partial(_ti_core.with_opengl, False),
+        gles: functools.partial(_ti_core.with_opengl, True),
         cc: _ti_core.with_cc,
         vulkan: _ti_core.with_vulkan,
         dx11: _ti_core.with_dx11,
@@ -753,13 +756,13 @@
     return False
 
 
-def adaptive_arch_select(arch, enable_fallback, use_gles):
+def adaptive_arch_select(arch, enable_fallback):
     if arch is None:
         return cpu
     if not isinstance(arch, (list, tuple)):
         arch = [arch]
     for a in arch:
-        if is_arch_supported(a, use_gles):
+        if is_arch_supported(a):
             return a
     if not enable_fallback:
         raise RuntimeError(f'Arch={arch} is not supported')
@@ -778,7 +781,7 @@ def get_compute_stream_device_time_elapsed_us() -> float:
 __all__ = [
     'i', 'ij', 'ijk', 'ijkl', 'ijl', 'ik', 'ikl', 'il', 'j', 'jk', 'jkl',
     'jl', 'k', 'kl', 'l', 'x86_64', 'x64', 'dx11', 'dx12', 'wasm', 'arm64', 'cc',
-    'cpu', 'cuda', 'gpu', 'metal', 'opengl', 'vulkan', 'extension',
+    'cpu', 'cuda', 'gles', 'gpu', 'metal', 'opengl', 'vulkan', 'extension',
     'loop_config', 'global_thread_idx', 'assume_in_range', 'block_local',
     'cache_read_only', 'init', 'mesh_local', 'no_activate', 'reset',
     'mesh_patch_idx', 'get_compute_stream_device_time_elapsed_us'
diff --git a/taichi/analysis/offline_cache_util.cpp b/taichi/analysis/offline_cache_util.cpp
index aa5d7c05ee051..15c15110f4bd3 100644
--- a/taichi/analysis/offline_cache_util.cpp
+++ b/taichi/analysis/offline_cache_util.cpp
@@ -52,9 +52,8 @@ static std::vector<std::uint8_t> get_offline_cache_key_of_compile_config(
   if (config->arch == Arch::cc) {
     serializer(config->cc_compile_cmd);
     serializer(config->cc_link_cmd);
-  } else if (config->arch == Arch::opengl) {
+  } else if (config->arch == Arch::opengl || config->arch == Arch::gles) {
     serializer(config->allow_nv_shader_extension);
-    serializer(config->use_gles);
   }
   serializer(config->make_mesh_block_local);
   serializer(config->optimize_mesh_reordered_mapping);
diff --git a/taichi/aot/module_loader.cpp b/taichi/aot/module_loader.cpp
index a0cceca44c03c..4180ecd6020da 100644
--- a/taichi/aot/module_loader.cpp
+++ b/taichi/aot/module_loader.cpp
@@ -37,6 +37,10 @@ std::unique_ptr<Module> Module::load(Arch arch, std::any mod_params) {
   } else if (arch == Arch::opengl) {
 #ifdef TI_WITH_OPENGL
     return gfx::make_aot_module(mod_params, arch);
+#endif
+  } else if (arch == Arch::gles) {
+#ifdef TI_WITH_OPENGL
+    return gfx::make_aot_module(mod_params, arch);
 #endif
   } else if (arch == Arch::dx11) {
 #ifdef TI_WITH_DX11
diff --git a/taichi/inc/archs.inc.h b/taichi/inc/archs.inc.h
index 076cf7b899ff9..d478981ef502c 100644
--- a/taichi/inc/archs.inc.h
+++ b/taichi/inc/archs.inc.h
@@ -16,3 +16,4 @@ PER_ARCH(dx12)  // Microsoft DirectX 12, WIP
 PER_ARCH(opencl)  // OpenCL, N/A
 PER_ARCH(amdgpu)  // AMD GPU, N/A
 PER_ARCH(vulkan)  // Vulkan
+PER_ARCH(gles)  // OpenGL ES
diff --git a/taichi/program/compile_config.h b/taichi/program/compile_config.h
index 1ec34359663dd..4f8652418d052 100644
--- a/taichi/program/compile_config.h
+++ b/taichi/program/compile_config.h
@@ -77,7 +77,6 @@ struct CompileConfig {
 
   // Opengl backend options:
   bool allow_nv_shader_extension{true};
-  bool use_gles{false};
 
   bool quant_opt_store_fusion{true};
   bool quant_opt_atomic_demotion{true};
diff --git a/taichi/program/extension.cpp b/taichi/program/extension.cpp
index 1c7b86315ffb5..283bce01b4492 100644
--- a/taichi/program/extension.cpp
+++ b/taichi/program/extension.cpp
@@ -23,6 +23,7 @@ bool is_extension_supported(Arch arch, Extension ext) {
       {Arch::metal,
        {Extension::adstack, Extension::assertion, Extension::sparse}},
       {Arch::opengl, {Extension::extfunc}},
+      {Arch::gles, {}},
       {Arch::cc, {Extension::data64, Extension::extfunc, Extension::adstack}},
   };
   // if (with_opengl_extension_data64())
diff --git a/taichi/program/program.cpp b/taichi/program/program.cpp
index 3540a61cb32ec..0243dbd93ef68 100644
--- a/taichi/program/program.cpp
+++ b/taichi/program/program.cpp
@@ -123,7 +123,14 @@ Program::Program(Arch desired_arch) : snode_rw_accessors_bank_(this) {
 #endif
   } else if (config.arch == Arch::opengl) {
 #ifdef TI_WITH_OPENGL
-    TI_ASSERT(opengl::initialize_opengl(config.use_gles));
+    TI_ASSERT(opengl::initialize_opengl(false));
+    program_impl_ = std::make_unique<OpenglProgramImpl>(config);
+#else
+    TI_ERROR("This taichi is not compiled with OpenGL");
+#endif
+  } else if (config.arch == Arch::gles) {
+#ifdef TI_WITH_OPENGL
+    TI_ASSERT(opengl::initialize_opengl(true));
     program_impl_ = std::make_unique<OpenglProgramImpl>(config);
 #else
     TI_ERROR("This taichi is not compiled with OpenGL");
@@ -552,6 +559,7 @@ std::unique_ptr<AotModuleBuilder> Program::make_aot_module_builder(
       this_thread_config().arch == Arch::metal ||
       this_thread_config().arch == Arch::vulkan ||
       this_thread_config().arch == Arch::opengl ||
+      this_thread_config().arch == Arch::gles ||
       this_thread_config().arch == Arch::dx12) {
     return program_impl_->make_aot_module_builder(cfg);
   }
diff --git a/taichi/python/export_lang.cpp b/taichi/python/export_lang.cpp
index f6e214da70ddd..24709e899edf0 100644
--- a/taichi/python/export_lang.cpp
+++ b/taichi/python/export_lang.cpp
@@ -209,7 +209,6 @@ void export_lang(py::module &m) {
                      &CompileConfig::quant_opt_atomic_demotion)
       .def_readwrite("allow_nv_shader_extension",
                      &CompileConfig::allow_nv_shader_extension)
-      .def_readwrite("use_gles", &CompileConfig::use_gles)
       .def_readwrite("make_mesh_block_local",
                      &CompileConfig::make_mesh_block_local)
       .def_readwrite("mesh_localize_to_end_mapping",
diff --git a/taichi/transforms/offload.cpp b/taichi/transforms/offload.cpp
index 68b26c0ea1fb4..6423f79c21c48 100644
--- a/taichi/transforms/offload.cpp
+++ b/taichi/transforms/offload.cpp
@@ -112,7 +112,8 @@ class Offloader {
       offloaded->const_end = true;
       offloaded->end_value = val->val.val_int32();
     } else {
-      if ((arch == Arch::opengl || arch == Arch::vulkan) &&
+      if ((arch == Arch::opengl || arch == Arch::vulkan ||
+           arch == Arch::gles) &&
          demotable_axis_load(s->end)) {
         // TODO: We need to update codegen for each backend gradually so
         // let's limit it to opengl backend for now.
@@ -385,7 +386,8 @@ class IdentifyValuesUsedInOtherOffloads : public BasicStmtVisitor {
     if (top_level_ptr->is<GlobalPtrStmt>() || stmt->is<ExternalPtrStmt>() ||
         (stmt->is<ArgLoadStmt>() && stmt->as<ArgLoadStmt>()->is_ptr))
       return;
-    if ((config_.arch == Arch::opengl || config_.arch == Arch::vulkan) &&
+    if ((config_.arch == Arch::opengl || config_.arch == Arch::vulkan ||
+         config_.arch == Arch::gles) &&
         demotable_axis_load(stmt))
       return;
     // Not yet allocated
diff --git a/taichi/util/offline_cache.cpp b/taichi/util/offline_cache.cpp
index 2b9150a09ba69..e3ba5e00f3717 100644
--- a/taichi/util/offline_cache.cpp
+++ b/taichi/util/offline_cache.cpp
@@ -22,7 +22,8 @@ std::string get_cache_path_by_arch(const std::string &base_path, Arch arch) {
   std::string subdir;
   if (arch_uses_llvm(arch)) {
     subdir = kLlvmCachSubPath;
-  } else if (arch == Arch::vulkan || arch == Arch::opengl) {
+  } else if (arch == Arch::vulkan || arch == Arch::opengl ||
+             arch == Arch::gles) {
     subdir = kSpirvCacheSubPath;
   } else if (arch == Arch::metal) {
     subdir = kMetalCacheSubPath;
diff --git a/tests/python/test_ad_grad_check.py b/tests/python/test_ad_grad_check.py
index 4fe767e9087d9..118aae7b1531b 100644
--- a/tests/python/test_ad_grad_check.py
+++ b/tests/python/test_ad_grad_check.py
@@ -6,7 +6,7 @@
 
 
 @test_utils.test(default_fp=ti.f64,
-                 exclude=[ti.cc, ti.vulkan, ti.opengl, ti.metal])
+                 exclude=[ti.cc, ti.vulkan, ti.opengl, ti.gles, ti.metal])
 def test_general():
     x1 = ti.field(dtype=float, shape=(2, 2), needs_grad=True)
     y1 = ti.field(dtype=float, shape=(), needs_grad=True)
@@ -71,6 +71,6 @@ def func():
     lambda x: ti.atan2(x, 0.4), lambda x: 0.4**x, lambda x: x**0.4
 ])
 @test_utils.test(default_fp=ti.f64,
-                 exclude=[ti.cc, ti.vulkan, ti.opengl, ti.metal])
+                 exclude=[ti.cc, ti.vulkan, ti.opengl, ti.gles, ti.metal])
 def test_basics(tifunc):
     grad_test(tifunc)
diff --git a/tests/python/test_aot.py b/tests/python/test_aot.py
index da9d325e6480c..4019a84e2fab7 100644
--- a/tests/python/test_aot.py
+++ b/tests/python/test_aot.py
@@ -138,10 +138,8 @@ def test_non_dense_snode():
     m.add_field('y', y)
 
 
-@pytest.mark.parametrize('use_gles', [True, False])
-@test_utils.test(arch=[ti.opengl, ti.vulkan])
-def test_mpm88_aot(use_gles):
-    ti.init(ti.lang.impl.current_cfg().arch, use_gles=use_gles)
+@test_utils.test(arch=[ti.opengl, ti.gles, ti.vulkan])
+def test_mpm88_aot():
     n_particles = 8192
     n_grid = 128
     dx = 1 / n_grid
diff --git a/tests/python/test_api.py b/tests/python/test_api.py
index d712f4f045672..962dbf6dd22e1 100644
--- a/tests/python/test_api.py
+++ b/tests/python/test_api.py
@@ -72,20 +72,20 @@ def _get_expected_matrix_apis():
     'deactivate_all_snodes', 'dx11', 'dx12', 'eig', 'exp', 'experimental',
     'extension', 'f16', 'f32', 'f64', 'field', 'float16', 'float32',
     'float64', 'floor', 'func', 'get_addr', 'get_compute_stream_device_time_elapsed_us',
-    'global_thread_idx', 'gpu', 'graph', 'grouped', 'hex_to_rgb', 'i', 'i16',
-    'i32', 'i64', 'i8', 'ij', 'ijk', 'ijkl', 'ijl', 'ik', 'ikl', 'il', 'init',
-    'int16', 'int32', 'int64', 'int8', 'is_active', 'is_logging_effective',
-    'j', 'jk', 'jkl', 'jl', 'k', 'kernel', 'kl', 'l', 'lang', 'length',
-    'linalg', 'log', 'loop_config', 'math', 'max', 'mesh_local',
-    'mesh_patch_idx', 'metal', 'min', 'ndarray', 'ndrange', 'no_activate',
-    'one', 'opengl', 'polar_decompose', 'pow', 'profiler', 'randn', 'random',
-    'raw_div', 'raw_mod', 'ref', 'rescale_index', 'reset', 'rgb_to_hex',
-    'root', 'round', 'rsqrt', 'select', 'set_logging_level', 'simt', 'sin',
-    'solve', 'sparse_matrix_builder', 'sqrt', 'static', 'static_assert',
-    'static_print', 'stop_grad', 'svd', 'swizzle_generator', 'sym_eig', 'sync',
-    'tan', 'tanh', 'template', 'tools', 'types', 'u16', 'u32', 'u64', 'u8',
-    'ui', 'uint16', 'uint32', 'uint64', 'uint8', 'vulkan', 'wasm', 'x64',
-    'x86_64', 'zero'
+    'gles', 'global_thread_idx', 'gpu', 'graph', 'grouped', 'hex_to_rgb', 'i',
+    'i16', 'i32', 'i64', 'i8', 'ij', 'ijk', 'ijkl', 'ijl', 'ik', 'ikl', 'il',
+    'init', 'int16', 'int32', 'int64', 'int8', 'is_active',
+    'is_logging_effective', 'j', 'jk', 'jkl', 'jl', 'k', 'kernel', 'kl', 'l',
+    'lang', 'length', 'linalg', 'log', 'loop_config', 'math', 'max',
+    'mesh_local', 'mesh_patch_idx', 'metal', 'min', 'ndarray', 'ndrange',
+    'no_activate', 'one', 'opengl', 'polar_decompose', 'pow', 'profiler',
+    'randn', 'random', 'raw_div', 'raw_mod', 'ref', 'rescale_index', 'reset',
+    'rgb_to_hex', 'root', 'round', 'rsqrt', 'select', 'set_logging_level',
+    'simt', 'sin', 'solve', 'sparse_matrix_builder', 'sqrt', 'static',
+    'static_assert', 'static_print', 'stop_grad', 'svd', 'swizzle_generator',
+    'sym_eig', 'sync', 'tan', 'tanh', 'template', 'tools', 'types', 'u16',
+    'u32', 'u64', 'u8', 'ui', 'uint16', 'uint32', 'uint64', 'uint8', 'vulkan',
+    'wasm', 'x64', 'x86_64', 'zero'
 ]
 user_api[ti.ad] = [
     'FwdMode', 'Tape', 'clear_all_gradients', 'grad_for', 'grad_replaced',
diff --git a/tests/python/test_arg_alignment.py b/tests/python/test_arg_alignment.py
index 4320f219ee6ae..fb49b2934b42e 100644
--- a/tests/python/test_arg_alignment.py
+++ b/tests/python/test_arg_alignment.py
@@ -2,7 +2,7 @@
 from tests import test_utils
 
 
-@test_utils.test(exclude=[ti.opengl])
+@test_utils.test(exclude=[ti.opengl, ti.gles])
 def test_ret_write():
     @ti.kernel
     def func(a: ti.i16) -> ti.f32:
@@ -11,7 +11,7 @@ def func(a: ti.i16) -> ti.f32:
     assert func(255) == 3.0
 
 
-@test_utils.test(exclude=[ti.opengl])
+@test_utils.test(exclude=[ti.opengl, ti.gles])
 def test_arg_read():
     x = ti.field(ti.i32, shape=())
diff --git a/tests/python/test_argument.py b/tests/python/test_argument.py
index 837eb9fc4c34e..bee390c5ce944 100644
--- a/tests/python/test_argument.py
+++ b/tests/python/test_argument.py
@@ -26,7 +26,7 @@ def foo2(a: ti.i32, b: ti.i32, c: ti.i32, d: ti.i32, e: ti.i32, f: ti.i32,
     foo2(1, 2, 3, 4, 5, 6, 7, 8, 9)
 
 
-@test_utils.test(exclude=[ti.opengl, ti.cc])
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.cc])
 def test_exceed_max_64():
     N = 64
diff --git a/tests/python/test_cast.py b/tests/python/test_cast.py
index fd1ad63fa4bb8..0f38383e346a5 100644
--- a/tests/python/test_cast.py
+++ b/tests/python/test_cast.py
@@ -5,7 +5,7 @@
 
 
 @pytest.mark.parametrize('dtype', [ti.u8, ti.u16, ti.u32])
-@test_utils.test(exclude=ti.opengl)
+@test_utils.test(exclude=[ti.opengl, ti.gles])
 def test_cast_uint_to_float(dtype):
     @ti.kernel
     def func(a: dtype) -> ti.f32:
@@ -19,7 +19,7 @@ def func_sugar(a: dtype) -> ti.f32:
 
 
 @pytest.mark.parametrize('dtype', [ti.u8, ti.u16, ti.u32])
-@test_utils.test(exclude=ti.opengl)
+@test_utils.test(exclude=[ti.opengl, ti.gles])
 def test_cast_float_to_uint(dtype):
     @ti.kernel
     def func(a: ti.f32) -> dtype:
diff --git a/tests/python/test_cli.py b/tests/python/test_cli.py
index 2ab78624408d0..9e0e17fcf5f67 100644
--- a/tests/python/test_cli.py
+++ b/tests/python/test_cli.py
@@ -213,7 +213,7 @@ def test_cli_run():
 
 
 def test_cli_cache():
-    archs = {ti.cpu, ti.cuda, ti.opengl, ti.vulkan, ti.metal}
+    archs = {ti.cpu, ti.cuda, ti.opengl, ti.vulkan, ti.metal, ti.gles}
     archs = {v for v in archs if v in test_utils.expected_archs()}
     exts = ('ll', 'bc', 'spv', 'metal', 'tcb', 'lock')
     tmp_path = tempfile.mkdtemp()
diff --git a/tests/python/test_fields_builder.py b/tests/python/test_fields_builder.py
index 22aff8e50143d..dc4a7a2ac5d85 100644
--- a/tests/python/test_fields_builder.py
+++ b/tests/python/test_fields_builder.py
@@ -196,7 +196,7 @@ def test_field_initialize_zero():
     assert b[0] == 0
 
 
-@test_utils.test(exclude=[ti.opengl, ti.cc])
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.cc])
 def test_field_builder_place_grad():
     @ti.kernel
     def mul(arr: ti.template(), out: ti.template()):
diff --git a/tests/python/test_internal_func.py b/tests/python/test_internal_func.py
index 78dcd8eef90f9..e691022ed386d 100644
--- a/tests/python/test_internal_func.py
+++ b/tests/python/test_internal_func.py
@@ -6,7 +6,8 @@
 from tests import test_utils
 
 
-@test_utils.test(exclude=[ti.metal, ti.opengl, ti.cuda, ti.vulkan, ti.cc])
+@test_utils.test(
+    exclude=[ti.metal, ti.opengl, ti.gles, ti.cuda, ti.vulkan, ti.cc])
 def test_basic():
     @ti.kernel
     def test():
@@ -16,7 +17,8 @@ def test():
     test()
 
 
-@test_utils.test(exclude=[ti.metal, ti.opengl, ti.cuda, ti.vulkan, ti.cc])
+@test_utils.test(
+    exclude=[ti.metal, ti.opengl, ti.gles, ti.cuda, ti.vulkan, ti.cc])
 def test_host_polling():
     return
 
@@ -30,7 +32,8 @@ def test():
         time.sleep(0.1)
 
 
-@test_utils.test(exclude=[ti.metal, ti.opengl, ti.cuda, ti.vulkan, ti.cc])
+@test_utils.test(
+    exclude=[ti.metal, ti.opengl, ti.gles, ti.cuda, ti.vulkan, ti.cc])
 def test_list_manager():
     @ti.kernel
     def test():
@@ -40,7 +43,8 @@ def test():
     test()
 
 
-@test_utils.test(exclude=[ti.metal, ti.opengl, ti.cuda, ti.vulkan, ti.cc])
+@test_utils.test(
+    exclude=[ti.metal, ti.opengl, ti.gles, ti.cuda, ti.vulkan, ti.cc])
 def test_node_manager():
     @ti.kernel
     def test():
@@ -50,7 +54,8 @@ def test():
     test()
 
 
-@test_utils.test(exclude=[ti.metal, ti.opengl, ti.cuda, ti.vulkan, ti.cc])
+@test_utils.test(
+    exclude=[ti.metal, ti.opengl, ti.gles, ti.cuda, ti.vulkan, ti.cc])
 def test_node_manager_gc():
     @ti.kernel
     def test_cpu():
diff --git a/tests/python/test_linalg.py b/tests/python/test_linalg.py
index cdad4e3325e8e..e7c8d7fabba3f 100644
--- a/tests/python/test_linalg.py
+++ b/tests/python/test_linalg.py
@@ -185,7 +185,7 @@ def V(i, j):
 
 
 @pytest.mark.parametrize("dim", [2, 3])
-@test_utils.test(default_fp=ti.f32, exclude=ti.opengl)
+@test_utils.test(default_fp=ti.f32, exclude=[ti.opengl, ti.gles])
 def test_polar_decomp_f32(dim):
     _test_polar_decomp(dim, ti.f32)
diff --git a/tests/python/test_parallel_range_for.py b/tests/python/test_parallel_range_for.py
index 45339b905059c..d009146601056 100644
--- a/tests/python/test_parallel_range_for.py
+++ b/tests/python/test_parallel_range_for.py
@@ -3,7 +3,7 @@
 
 
 # such small block_dim will cause grid_dim too large for OpenGL...
-@test_utils.test(exclude=ti.opengl)
+@test_utils.test(exclude=[ti.opengl, ti.gles])
 def test_parallel_range_for():
     n = 1024 * 1024
     val = ti.field(ti.i32, shape=(n))
@@ -37,7 +37,7 @@ def foo() -> ti.i32:
     assert foo() == 50
 
 
-@test_utils.test(exclude=ti.opengl)
+@test_utils.test(exclude=[ti.opengl, ti.gles])
 def test_loop_config_parallel_range_for():
     n = 1024 * 1024
     val = ti.field(ti.i32, shape=(n))
diff --git a/tests/python/test_pow.py b/tests/python/test_pow.py
index a923e36f770a4..d7a5dc430c44e 100644
--- a/tests/python/test_pow.py
+++ b/tests/python/test_pow.py
@@ -65,7 +65,7 @@ def foo(x: dt, y: ti.template()):
 
 @test_utils.test(debug=True,
                  advanced_optimization=False,
-                 exclude=[ti.vulkan, ti.opengl, ti.cc])
+                 exclude=[ti.vulkan, ti.opengl, ti.gles, ti.cc])
 def test_ipow_negative_exp_i32():
     _ipow_negative_exp(ti.i32)
 
@@ -73,7 +73,7 @@ def test_ipow_negative_exp_i32():
 @test_utils.test(debug=True,
                  advanced_optimization=False,
                  require=ti.extension.data64,
-                 exclude=[ti.vulkan, ti.opengl, ti.cc])
+                 exclude=[ti.vulkan, ti.opengl, ti.gles, ti.cc])
 def test_ipow_negative_exp_i64():
     _ipow_negative_exp(ti.i64)
diff --git a/tests/python/test_reduction.py b/tests/python/test_reduction.py
index d193a7918d39d..f03043af6683e 100644
--- a/tests/python/test_reduction.py
+++ b/tests/python/test_reduction.py
@@ -35,6 +35,7 @@ def _test_reduction_single(dtype, criterion, op):
     N = 1024 * 1024
     if (ti.lang.impl.current_cfg().arch == ti.opengl
             or ti.lang.impl.current_cfg().arch == ti.vulkan
+            or ti.lang.impl.current_cfg().arch == ti.gles
             or ti.lang.impl.current_cfg().arch == ti.dx11) and dtype == ti.f32:
         # OpenGL/Vulkan are not capable of such large number in its float32...
         N = 1024 * 16
@@ -88,7 +89,7 @@ def test_reduction_single_i32(op):
 
 
 @pytest.mark.parametrize('op', [OP_ADD])
-@test_utils.test(exclude=ti.opengl)
+@test_utils.test(exclude=[ti.opengl, ti.gles])
 def test_reduction_single_u32(op):
     _test_reduction_single(ti.u32, lambda x, y: x % 2**32 == y % 2**32, op)
@@ -106,7 +107,7 @@ def test_reduction_single_i64(op):
 
 
 @pytest.mark.parametrize('op', [OP_ADD])
-@test_utils.test(exclude=ti.opengl, require=ti.extension.data64)
+@test_utils.test(exclude=[ti.opengl, ti.gles], require=ti.extension.data64)
 def test_reduction_single_u64(op):
     _test_reduction_single(ti.u64, lambda x, y: x % 2**64 == y % 2**64, op)
diff --git a/tests/python/test_return.py b/tests/python/test_return.py
index 30a456b3e89cc..5e8c94f41c429 100644
--- a/tests/python/test_return.py
+++ b/tests/python/test_return.py
@@ -165,7 +165,7 @@ def foo():
     foo()
 
 
-@test_utils.test(exclude=[ti.metal, ti.vulkan])
+@test_utils.test(exclude=[ti.metal, ti.vulkan, ti.gles])
 def test_return_uint64():
     @ti.kernel
     def foo() -> ti.u64:
@@ -174,7 +174,7 @@ def foo() -> ti.u64:
     assert (foo() == 2**64 - 1)
 
 
-@test_utils.test(exclude=[ti.metal, ti.vulkan])
+@test_utils.test(exclude=[ti.metal, ti.vulkan, ti.gles])
 def test_return_uint64_vec():
     @ti.kernel
     def foo() -> ti.types.vector(2, ti.u64):
diff --git a/tests/python/test_scalar_op.py b/tests/python/test_scalar_op.py
index 49fd7b16f8f54..c45e706e888ba 100644
--- a/tests/python/test_scalar_op.py
+++ b/tests/python/test_scalar_op.py
@@ -124,7 +124,7 @@ def max_i16(a: ti.i16, b: ti.i16) -> ti.i16:
     assert max_i16(a, b) == max(a, b)
 
 
-@test_utils.test(exclude=[ti.opengl, ti.cc])
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.cc])
 def test_32_min_max():
     @ti.kernel
     def min_u32(a: ti.u32, b: ti.u32) -> ti.u32:
diff --git a/tests/python/test_struct_for_dynamic.py b/tests/python/test_struct_for_dynamic.py
index dd3451ca84304..daf62f0426aa0 100644
--- a/tests/python/test_struct_for_dynamic.py
+++ b/tests/python/test_struct_for_dynamic.py
@@ -2,7 +2,7 @@
 from tests import test_utils
 
 
-@test_utils.test(exclude=[ti.opengl, ti.cc, ti.vulkan, ti.metal])
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.cc, ti.vulkan, ti.metal])
 def test_dynamic():
     x = ti.field(ti.i32)
     y = ti.field(ti.i32, shape=())
@@ -23,7 +23,7 @@ def count():
     assert y[None] == n // 3 + 1
 
 
-@test_utils.test(exclude=[ti.opengl, ti.cc, ti.vulkan, ti.metal])
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.cc, ti.vulkan, ti.metal])
 def test_dense_dynamic():
     n = 128
diff --git a/tests/python/test_torch_ad.py b/tests/python/test_torch_ad.py
index b7a6e5cf7c8bb..7b15fbc20983f 100644
--- a/tests/python/test_torch_ad.py
+++ b/tests/python/test_torch_ad.py
@@ -64,7 +64,7 @@ def backward(ctx, outp_grad):
 
 @pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
 @pytest.mark.skipif(sys.platform == 'win32', reason='not working on Windows.')
 # FIXME: crashes at glCreateShader when arch=ti.opengl
-@test_utils.test(exclude=ti.opengl)
+@test_utils.test(exclude=[ti.opengl, ti.gles])
 def test_torch_ad_gpu():
     if not torch.cuda.is_available():
         return
diff --git a/tests/python/test_types.py b/tests/python/test_types.py
index 893112ce546c8..6be92acb4d99b 100644
--- a/tests/python/test_types.py
+++ b/tests/python/test_types.py
@@ -20,13 +20,13 @@ def func(value: dt):
 
 
 @pytest.mark.parametrize('dt', _TI_TYPES)
-@test_utils.test(exclude=[ti.opengl, ti.vulkan, ti.dx11])
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.vulkan, ti.dx11])
 def test_type_assign_argument(dt):
     _test_type_assign_argument(dt)
 
 
 @pytest.mark.parametrize('dt', _TI_64_TYPES)
-@test_utils.test(exclude=[ti.opengl, ti.vulkan, ti.dx11],
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.vulkan, ti.dx11],
                  require=ti.extension.data64)
 def test_type_assign_argument64(dt):
     _test_type_assign_argument(dt)
@@ -53,13 +53,13 @@ def func():
 
 
 @pytest.mark.parametrize('dt', _TI_TYPES)
-@test_utils.test(exclude=[ti.opengl, ti.vulkan, ti.dx11])
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.vulkan, ti.dx11])
 def test_type_operator(dt):
     _test_type_operator(dt)
 
 
 @pytest.mark.parametrize('dt', _TI_64_TYPES)
-@test_utils.test(exclude=[ti.opengl, ti.vulkan, ti.dx11],
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.vulkan, ti.dx11],
                  require=ti.extension.data64)
 def test_type_operator64(dt):
     _test_type_operator(dt)
@@ -79,13 +79,13 @@ def func(i: ti.i32, j: ti.i32):
 
 
 @pytest.mark.parametrize('dt', _TI_TYPES)
-@test_utils.test(exclude=[ti.opengl, ti.vulkan, ti.dx11])
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.vulkan, ti.dx11])
 def test_type_field(dt):
     _test_type_field(dt)
 
 
 @pytest.mark.parametrize('dt', _TI_64_TYPES)
-@test_utils.test(exclude=[ti.opengl, ti.vulkan, ti.dx11],
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.vulkan, ti.dx11],
                  require=ti.extension.data64)
 def test_type_field64(dt):
     _test_type_field(dt)
@@ -122,7 +122,7 @@ def func():
     (ti.i32, 32),
     (ti.u32, 32),
 ])
-@test_utils.test(exclude=[ti.opengl, ti.vulkan, ti.dx11])
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.vulkan, ti.dx11])
 def test_overflow(dt, n):
     _test_overflow(dt, n)
@@ -131,7 +131,7 @@ def test_overflow(dt, n):
     (ti.i64, 64),
     (ti.u64, 64),
 ])
-@test_utils.test(exclude=[ti.opengl, ti.vulkan, ti.dx11],
+@test_utils.test(exclude=[ti.opengl, ti.gles, ti.vulkan, ti.dx11],
                  require=ti.extension.data64)
 def test_overflow64(dt, n):
     _test_overflow(dt, n)
diff --git a/tests/python/test_unary_ops.py b/tests/python/test_unary_ops.py
index 7af5fed484c4d..b88cd615a43d9 100644
--- a/tests/python/test_unary_ops.py
+++ b/tests/python/test_unary_ops.py
@@ -29,8 +29,7 @@ def fill():
         else:
             assert abs(np_op(float(f(i))) - val[i]) < 1e-6 if ti.lang.impl.current_cfg(
-            ).arch != ti.opengl and ti.lang.impl.current_cfg(
-            ).arch != ti.vulkan else 1e-5
+            ).arch not in (ti.opengl, ti.gles, ti.vulkan) else 1e-5
 
 
 op_pairs = [
diff --git a/tests/test_utils.py b/tests/test_utils.py
index e072431f8d626..f08c61efbb62a 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -11,7 +11,7 @@
 import numpy as np
 import pytest
 from taichi._lib import core as _ti_core
-from taichi.lang import cc, cpu, cuda, dx11, gpu, metal, opengl, vulkan
+from taichi.lang import cc, cpu, cuda, dx11, gles, gpu, metal, opengl, vulkan
 from taichi.lang.misc import is_arch_supported
 
 import taichi as ti
@@ -216,10 +216,9 @@ def expected_archs():
     Returns:
         List[taichi_python.Arch]: All expected archs on the machine.
     """
-    archs = set([cpu, cuda, metal, vulkan, opengl, cc])
+    archs = set([cpu, cuda, metal, vulkan, opengl, cc, gles])
     # TODO: now expected_archs is not called per test so we cannot test it
-    archs = set(
-        filter(functools.partial(is_arch_supported, use_gles=False), archs))
+    archs = set(filter(is_arch_supported, archs))
 
     wanted_archs = os.environ.get('TI_WANTED_ARCHS', '')
     want_exclude = wanted_archs.startswith('^')
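
Usage sketch (not part of the patch): after this change, GLES is no longer a flag on the OpenGL arch but an arch of its own. The init call below is taken from the updated `ndarray_android.md` above; the comment about `ti.gpu` reflects the new `gpu` list in `misc.py`.

```python
import taichi as ti

# GLES is now selected as its own arch instead of ti.opengl + use_gles=True.
# allow_nv_shader_extension=False keeps the generated GLES compute shaders
# free of Nvidia-only GL extensions, which Android's GLES drivers lack.
ti.init(arch=ti.gles, allow_nv_shader_extension=False)

# ti.init(arch=ti.gpu) also considers GLES now, since 'gles' was appended to
# the gpu backend list; Taichi picks the first supported backend in that list
# and falls back to CPU when none is available.
```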
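Likewise, a small illustrative sketch (not from the patch) of probing backend availability with the refactored helper: the `use_gles` parameter is gone, and both `ti.opengl` and `ti.gles` now route to `_ti_core.with_opengl`, with `False`/`True` selecting desktop GL or GLES respectively.

```python
from taichi._lib import core as _ti_core
from taichi.lang.misc import is_arch_supported

import taichi as ti

# Probe which of the GL-family backends this machine can run; with the new
# signature, is_arch_supported takes only the arch.
for arch in (ti.opengl, ti.gles, ti.vulkan):
    print(_ti_core.arch_name(arch), is_arch_supported(arch))
```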