Merge branch 'master' into types-ndarray
strongoier authored Mar 22, 2022
2 parents e0fb40e + 3ab838d commit 33c332e
Showing 39 changed files with 378 additions and 158 deletions.
33 changes: 33 additions & 0 deletions ci/scripts/release_test.sh
@@ -236,6 +236,36 @@ function taichi::test::voxel_editor {
     cd "${WORKDIR}"
 }
 
+function taichi::test::generate_videos {
+    local WORKDIR=${1}
+    local PATTERN="test_*.py"
+    local ORG="taichi-dev"
+    local REPO="taichi"
+
+    # divider
+    taichi::utils::line
+    taichi::utils::logger::info "Generating examples videos"
+
+    # clone the repo
+    taichi::utils::git_clone "${ORG}" "${REPO}"
+    # mkdir "${REPO}/misc/output_videos"
+
+    # run tests
+    cd "${REPO}/tests/python/examples"
+    for directory in $(find ./ -mindepth 1 -maxdepth 1 -name "*" ! -name "__*" -type d); do
+        cd "${directory}"
+        for match in $(find ./ -maxdepth 1 -name "${PATTERN}" -type f); do
+            pytest -v "${match}"
+            taichi::utils::line
+            # taichi::utils::pause
+        done
+        cd ..
+    done
+
+    # go back to workdir
+    cd "${WORKDIR}"
+}
+
 function taichi::test::main {
     # set debugging flag
     DEBUG="false"
@@ -267,6 +297,9 @@ function taichi::test::main {
 
     # voxel editor tests
     taichi::test::voxel_editor "${WORKDIR}"
+
+    # generating example videos
+    taichi::test::generate_videos "${WORKDIR}"
 }
 
 taichi::test::main
@@ -155,7 +155,7 @@ Taichi partially supports list comprehension and dictionary comprehension,
 but does not support set comprehension.
 
 For list comprehensions and dictionary comprehensions, the `if`s and `for`s in them are evaluated at compile time.
-The iterators and conditions are implicitly in [static scope](/lang/articles/advanced/meta#static-scope).
+The iterators and conditions are implicitly in [static scope](/lang/articles/advanced/meta.md#static-scope).
 
 ### Operator `is`
 
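Since the comprehension's `for`s and `if`s run in static scope, a comprehension inside a kernel must be fully unrollable at compile time. A minimal sketch of what that looks like in practice (the field and kernel names are illustrative, not taken from this page):

```python
import taichi as ti

ti.init(arch=ti.cpu)

vecs = ti.Vector.field(3, dtype=ti.f32, shape=8)

@ti.kernel
def fill():
    for i in vecs:
        # The comprehension's `for j in range(3)` is evaluated at
        # compile time (static scope), so it unrolls into three stores.
        vecs[i] = ti.Vector([j * 0.5 for j in range(3)])

fill()
print(vecs.to_numpy())  # shape (8, 3)
```
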
22 changes: 18 additions & 4 deletions python/taichi/_kernels.py
@@ -47,14 +47,21 @@ def ndarray_to_ext_arr(ndarray: ndarray_type.ndarray(),
 @kernel
 def ndarray_matrix_to_ext_arr(ndarray: ndarray_type.ndarray(),
                               arr: ndarray_type.ndarray(),
+                              layout_is_aos: template(),
                               as_vector: template()):
     for I in grouped(ndarray):
         for p in static(range(ndarray[I].n)):
             for q in static(range(ndarray[I].m)):
                 if static(as_vector):
-                    arr[I, p] = ndarray[I][p]
+                    if static(layout_is_aos):
+                        arr[I, p] = ndarray[I][p]
+                    else:
+                        arr[p, I] = ndarray[I][p]
                 else:
-                    arr[I, p, q] = ndarray[I][p, q]
+                    if static(layout_is_aos):
+                        arr[I, p, q] = ndarray[I][p, q]
+                    else:
+                        arr[p, q, I] = ndarray[I][p, q]
 
 
 @kernel
@@ -130,14 +137,21 @@ def ext_arr_to_ndarray(arr: ndarray_type.ndarray(),
 @kernel
 def ext_arr_to_ndarray_matrix(arr: ndarray_type.ndarray(),
                               ndarray: ndarray_type.ndarray(),
+                              layout_is_aos: template(),
                               as_vector: template()):
     for I in grouped(ndarray):
         for p in static(range(ndarray[I].n)):
             for q in static(range(ndarray[I].m)):
                 if static(as_vector):
-                    ndarray[I][p] = arr[I, p]
+                    if static(layout_is_aos):
+                        ndarray[I][p] = arr[I, p]
+                    else:
+                        ndarray[I][p] = arr[p, I]
                 else:
-                    ndarray[I][p, q] = arr[I, p, q]
+                    if static(layout_is_aos):
+                        ndarray[I][p, q] = arr[I, p, q]
+                    else:
+                        ndarray[I][p, q] = arr[p, q, I]
 
 
 @kernel
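In these kernels, `layout_is_aos` decides where the component index lands in the external array: AOS keeps the element index `I` first and the component indices `p` (and `q`) last, while SOA flips them. The same index mapping in plain NumPy, for the vector case (all names here are illustrative):

```python
import numpy as np

# A "vector ndarray": 4 elements, 3 components each.
n_elems, n_comps = 4, 3
vectors = np.arange(n_elems * n_comps, dtype=np.float32).reshape(n_elems, n_comps)

# AOS: element index I first, component index p last -> arr[I, p].
aos = np.empty((n_elems, n_comps), dtype=np.float32)
# SOA: component index p first, element index I last -> arr[p, I].
soa = np.empty((n_comps, n_elems), dtype=np.float32)

for I in range(n_elems):
    for p in range(n_comps):
        aos[I, p] = vectors[I, p]
        soa[p, I] = vectors[I, p]

assert (soa == aos.T).all()  # same data, transposed axis order
```
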
2 changes: 2 additions & 0 deletions python/taichi/_version_check.py
@@ -91,6 +91,8 @@ def try_check_version():
                                cur_date)
     else:
         cur_uuid = str(uuid.uuid4())
+        write_version_info({'status': 0}, cur_uuid, version_info_path,
+                           cur_date)
     response = check_version(cur_uuid)
     write_version_info(response, cur_uuid, version_info_path, cur_date)
     # Wildcard exception to catch potential file writing errors.
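The added `write_version_info({'status': 0}, ...)` persists a placeholder record before the network request goes out, presumably so that if `check_version` raises or the process dies, the attempt is already on disk and is not retried on every launch. A minimal sketch of that write-before-check pattern (the helpers here are illustrative, not Taichi's actual ones):

```python
import datetime
import json
import uuid

def write_record(path, payload, uid, date):
    # Persist whatever is known about this check attempt so far.
    with open(path, 'w') as f:
        json.dump(dict(payload, uuid=uid, last_check=date), f)

def try_check(path, check_version):
    uid = str(uuid.uuid4())
    today = datetime.date.today().isoformat()
    # Write a placeholder first: if the call below raises or hangs,
    # the attempt is already recorded and will not repeat immediately.
    write_record(path, {'status': 0}, uid, today)
    response = check_version(uid)  # network call; may fail
    write_record(path, response, uid, today)
```
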
10 changes: 6 additions & 4 deletions python/taichi/lang/_ndarray.py
@@ -90,7 +90,7 @@ def _ndarray_to_numpy(self):
         impl.get_runtime().sync()
         return arr
 
-    def _ndarray_matrix_to_numpy(self, as_vector):
+    def _ndarray_matrix_to_numpy(self, layout, as_vector):
         """Converts matrix ndarray to a numpy array.
 
         Returns:
@@ -99,7 +99,8 @@ def _ndarray_matrix_to_numpy(self, as_vector):
         arr = np.zeros(shape=self.arr.shape, dtype=to_numpy_type(self.dtype))
         from taichi._kernels import \
             ndarray_matrix_to_ext_arr  # pylint: disable=C0415
-        ndarray_matrix_to_ext_arr(self, arr, as_vector)
+        layout_is_aos = 1 if layout == Layout.AOS else 0
+        ndarray_matrix_to_ext_arr(self, arr, layout_is_aos, as_vector)
         impl.get_runtime().sync()
         return arr
 
@@ -122,7 +123,7 @@ def _ndarray_from_numpy(self, arr):
         ext_arr_to_ndarray(arr, self)
         impl.get_runtime().sync()
 
-    def _ndarray_matrix_from_numpy(self, arr, as_vector):
+    def _ndarray_matrix_from_numpy(self, arr, layout, as_vector):
         """Loads all values from a numpy array.
 
         Args:
@@ -139,7 +140,8 @@ def _ndarray_matrix_from_numpy(self, arr, as_vector):
 
         from taichi._kernels import \
             ext_arr_to_ndarray_matrix  # pylint: disable=C0415
-        ext_arr_to_ndarray_matrix(arr, self, as_vector)
+        layout_is_aos = 1 if layout == Layout.AOS else 0
+        ext_arr_to_ndarray_matrix(arr, self, layout_is_aos, as_vector)
         impl.get_runtime().sync()
 
     @python_scope
 
8 changes: 4 additions & 4 deletions python/taichi/lang/matrix.py
@@ -1416,11 +1416,11 @@ def __getitem__(self, key):
 
     @python_scope
     def to_numpy(self):
-        return self._ndarray_matrix_to_numpy(as_vector=0)
+        return self._ndarray_matrix_to_numpy(self.layout, as_vector=0)
 
     @python_scope
     def from_numpy(self, arr):
-        self._ndarray_matrix_from_numpy(arr, as_vector=0)
+        self._ndarray_matrix_from_numpy(arr, self.layout, as_vector=0)
 
     def __deepcopy__(self, memo=None):
         ret_arr = MatrixNdarray(self.n, self.m, self.dtype, self.shape,
@@ -1474,11 +1474,11 @@ def __getitem__(self, key):
 
     @python_scope
     def to_numpy(self):
-        return self._ndarray_matrix_to_numpy(as_vector=1)
+        return self._ndarray_matrix_to_numpy(self.layout, as_vector=1)
 
     @python_scope
     def from_numpy(self, arr):
-        self._ndarray_matrix_from_numpy(arr, as_vector=1)
+        self._ndarray_matrix_from_numpy(arr, self.layout, as_vector=1)
 
     def __deepcopy__(self, memo=None):
         ret_arr = VectorNdarray(self.n, self.dtype, self.shape, self.layout)
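With the layout threaded through, an SOA ndarray now round-trips through NumPy in its own axis order instead of being assumed AOS. A usage sketch, assuming the `ti.Vector.ndarray` constructor and SOA shape conventions of this release:

```python
import taichi as ti

ti.init(arch=ti.cpu)

# 4 elements, 3 components each, stored component-major (SOA).
v = ti.Vector.ndarray(3, dtype=ti.f32, shape=(4,), layout=ti.Layout.SOA)

arr = v.to_numpy()  # SOA puts components first: arr.shape == (3, 4)
v.from_numpy(arr)   # from_numpy expects the same SOA axis order back
```
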
32 changes: 3 additions & 29 deletions taichi/backends/vulkan/aot_module_builder_impl.cpp
@@ -46,12 +46,12 @@ class AotDataConverter {
     for (const auto &arg : in.ctx_attribs.args()) {
       if (!arg.is_array) {
         aot::ScalarArg scalar_arg{};
-        scalar_arg.dtype_name = arg.dt.to_string();
+        scalar_arg.dtype_name = PrimitiveType::get(arg.dtype).to_string();
         scalar_arg.offset_in_args_buf = arg.offset_in_mem;
         res.scalar_args[arg.index] = scalar_arg;
       } else {
         aot::ArrayArg arr_arg{};
-        arr_arg.dtype_name = arg.dt.to_string();
+        arr_arg.dtype_name = PrimitiveType::get(arg.dtype).to_string();
         arr_arg.field_dim = arg.field_dim;
         arr_arg.element_shape = arg.element_shape;
         arr_arg.shape_offset_in_args_buf = arg.index * sizeof(int32_t);
@@ -105,32 +105,6 @@ AotModuleBuilderImpl::AotModuleBuilderImpl(
   }
 }
 
-uint32_t AotModuleBuilderImpl::to_vk_dtype_enum(DataType dt) {
-  if (dt == PrimitiveType::u64) {
-    return 0;
-  } else if (dt == PrimitiveType::i64) {
-    return 1;
-  } else if (dt == PrimitiveType::u32) {
-    return 2;
-  } else if (dt == PrimitiveType::i32) {
-    return 3;
-  } else if (dt == PrimitiveType::u16) {
-    return 4;
-  } else if (dt == PrimitiveType::i16) {
-    return 5;
-  } else if (dt == PrimitiveType::u8) {
-    return 6;
-  } else if (dt == PrimitiveType::i8) {
-    return 7;
-  } else if (dt == PrimitiveType::f64) {
-    return 8;
-  } else if (dt == PrimitiveType::f32) {
-    return 9;
-  } else {
-    TI_NOT_IMPLEMENTED
-  }
-}
-
 std::string AotModuleBuilderImpl::write_spv_file(
     const std::string &output_dir,
     const TaskAttributes &k,
@@ -194,7 +168,7 @@ void AotModuleBuilderImpl::add_field_per_backend(const std::string &identifier,
   aot::CompiledFieldData field_data;
   field_data.field_name = identifier;
   field_data.is_scalar = is_scalar;
-  field_data.dtype = to_vk_dtype_enum(dt);
+  field_data.dtype = static_cast<int>(dt->cast<PrimitiveType>()->type);
   field_data.dtype_name = dt.to_string();
   field_data.shape = shape;
   field_data.mem_offset_in_parent = dense_desc.mem_offset_in_parent_cell;
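Deleting `to_vk_dtype_enum` removes a hand-maintained copy of the dtype table: the serialized `dtype` field now carries the canonical `PrimitiveTypeID` value, so the AOT metadata can no longer drift from the compiler's own enum. The design idea, sketched in Python with made-up members (the real ordinals live in the C++ `PrimitiveTypeID`):

```python
from enum import Enum

class PrimitiveTypeID(Enum):
    # Illustrative members only -- not Taichi's actual ordinals.
    i32 = 0
    f32 = 1
    f64 = 2

def serialize_dtype(dtype: PrimitiveTypeID) -> int:
    # Single source of truth: reuse the enum's own value instead of
    # maintaining a parallel if/else table by hand.
    return dtype.value

assert serialize_dtype(PrimitiveTypeID.f64) == 2
```
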
2 changes: 0 additions & 2 deletions taichi/backends/vulkan/aot_module_builder_impl.h
@@ -40,8 +40,6 @@ class AotModuleBuilderImpl : public AotModuleBuilder {
                              const TaskAttributes &k,
                              const std::vector<uint32_t> &source_code) const;
 
-  uint32_t to_vk_dtype_enum(DataType dt);
-
   const std::vector<CompiledSNodeStructs> &compiled_structs_;
   TaichiAotData ti_aot_data_;
   std::unique_ptr<Device> aot_target_device_;
 
5 changes: 3 additions & 2 deletions taichi/backends/vulkan/aot_module_loader_impl.h
@@ -1,5 +1,6 @@
 #pragma once
 
+#include <any>
 #include <string>
 #include <vector>
 
@@ -15,12 +16,12 @@ namespace vulkan {
 
 class VkRuntime;
 
-struct AotModuleParams {
+struct TI_DLL_EXPORT AotModuleParams {
   std::string module_path;
   VkRuntime *runtime{nullptr};
 };
 
-std::unique_ptr<aot::Module> make_aot_module(std::any mod_params);
+TI_DLL_EXPORT std::unique_ptr<aot::Module> make_aot_module(std::any mod_params);
 
 }  // namespace vulkan
 }  // namespace lang
 
22 changes: 11 additions & 11 deletions taichi/backends/vulkan/runtime.cpp
@@ -67,16 +67,15 @@ class HostDeviceContextBlitter {
     char *const device_base =
         reinterpret_cast<char *>(device_->map(*device_args_buffer_));
 
-#define TO_DEVICE(short_type, type)                    \
-  if (dt->is_primitive(PrimitiveTypeID::short_type)) { \
-    auto d = host_ctx_->get_arg<type>(i);              \
-    reinterpret_cast<type *>(device_ptr)[0] = d;       \
-    break;                                             \
+#define TO_DEVICE(short_type, type)               \
+  if (arg.dtype == PrimitiveTypeID::short_type) { \
+    auto d = host_ctx_->get_arg<type>(i);         \
+    reinterpret_cast<type *>(device_ptr)[0] = d;  \
+    break;                                        \
   }
 
     for (int i = 0; i < ctx_attribs_->args().size(); ++i) {
       const auto &arg = ctx_attribs_->args()[i];
-      const auto dt = arg.dt;
       char *device_ptr = device_base + arg.offset_in_mem;
       do {
         if (arg.is_array) {
@@ -118,13 +117,14 @@
             TO_DEVICE(f64, float64)
           }
           if (device_->get_cap(DeviceCapability::spirv_has_float16)) {
-            if (dt->is_primitive(PrimitiveTypeID::f16)) {
+            if (arg.dtype == PrimitiveTypeID::f16) {
               auto d = fp16_ieee_from_fp32_value(host_ctx_->get_arg<float>(i));
               reinterpret_cast<uint16 *>(device_ptr)[0] = d;
               break;
             }
           }
-          TI_ERROR("Vulkan does not support arg type={}", data_type_name(arg.dt));
+          TI_ERROR("Vulkan does not support arg type={}",
+                   PrimitiveType::get(arg.dtype).to_string());
         } while (0);
       }

@@ -196,8 +196,8 @@
       // *arg* on the host context.
       const auto &ret = ctx_attribs_->rets()[i];
       char *device_ptr = device_base + ret.offset_in_mem;
-      const auto dt = ret.dt;
-      const auto num = ret.stride / data_type_size(ret.dt);
+      const auto dt = PrimitiveType::get(ret.dtype);
+      const auto num = ret.stride / data_type_size(dt);
       for (int j = 0; j < num; ++j) {
         if (device_->get_cap(DeviceCapability::spirv_has_int8)) {
           TO_HOST(i8, int8, j)
@@ -227,7 +227,7 @@
         }
       }
       TI_ERROR("Vulkan does not support return value type={}",
-               data_type_name(ret.dt));
+               data_type_name(PrimitiveType::get(ret.dtype)));
     }
   }
 #undef TO_HOST
25 changes: 16 additions & 9 deletions taichi/codegen/spirv/kernel_utils.cpp
@@ -53,10 +53,13 @@ KernelContextAttributes::KernelContextAttributes(const Kernel &kernel)
       rets_bytes_(0),
       extra_args_bytes_(RuntimeContext::extra_args_size) {
   arg_attribs_vec_.reserve(kernel.args.size());
+  // TODO: We should be able to limit Kernel args and rets to be primitive types
+  // as well but let's leave that as a followup up PR.
   for (const auto &ka : kernel.args) {
     ArgAttributes aa;
-    aa.dt = ka.dt;
-    const size_t dt_bytes = data_type_size(aa.dt);
+    TI_ASSERT(ka.dt->is<PrimitiveType>());
+    aa.dtype = ka.dt->cast<PrimitiveType>()->type;
+    const size_t dt_bytes = data_type_size(ka.dt);
     aa.is_array = ka.is_array;
     if (aa.is_array) {
       aa.field_dim = ka.total_dim - ka.element_shape.size();
@@ -70,13 +73,16 @@ KernelContextAttributes::KernelContextAttributes(const Kernel &kernel)
     RetAttributes ra;
     size_t dt_bytes{0};
     if (auto tensor_type = kr.dt->cast<TensorType>()) {
-      ra.dt = tensor_type->get_element_type();
-      dt_bytes = data_type_size(ra.dt);
+      auto tensor_dtype = tensor_type->get_element_type();
+      TI_ASSERT(tensor_dtype->is<PrimitiveType>());
+      ra.dtype = tensor_dtype->cast<PrimitiveType>()->type;
+      dt_bytes = data_type_size(tensor_dtype);
       ra.is_array = true;
       ra.stride = tensor_type->get_num_elements() * dt_bytes;
     } else {
-      ra.dt = kr.dt;
-      dt_bytes = data_type_size(ra.dt);
+      TI_ASSERT(kr.dt->is<PrimitiveType>());
+      ra.dtype = kr.dt->cast<PrimitiveType>()->type;
+      dt_bytes = data_type_size(kr.dt);
       ra.is_array = false;
       ra.stride = dt_bytes;
     }
@@ -88,9 +94,10 @@ KernelContextAttributes::KernelContextAttributes(const Kernel &kernel)
   size_t bytes = offset;
   for (int i = 0; i < vec->size(); ++i) {
     auto &attribs = (*vec)[i];
-    const size_t dt_bytes = (attribs.is_array && !is_ret)
-                                ? sizeof(uint64_t)
-                                : data_type_size(attribs.dt);
+    const size_t dt_bytes =
+        (attribs.is_array && !is_ret)
+            ? sizeof(uint64_t)
+            : data_type_size(PrimitiveType::get(attribs.dtype));
     // Align bytes to the nearest multiple of dt_bytes
     bytes = (bytes + dt_bytes - 1) / dt_bytes * dt_bytes;
     attribs.offset_in_mem = bytes;
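The `bytes = (bytes + dt_bytes - 1) / dt_bytes * dt_bytes` line above is the usual round-up-to-multiple trick for aligning each member to its own size. A quick Python check of that arithmetic (the member sizes are illustrative):

```python
def align_up(offset: int, dt_bytes: int) -> int:
    # Same integer arithmetic as the C++ above.
    return (offset + dt_bytes - 1) // dt_bytes * dt_bytes

# Pack an i16 (2 bytes) followed by an f64 (8 bytes):
offset = align_up(0, 2)           # i16 lands at offset 0
offset = align_up(offset + 2, 8)  # 2 rounds up to 8, so f64 lands at 8
assert offset == 8
```
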
10 changes: 8 additions & 2 deletions taichi/codegen/spirv/kernel_utils.h
@@ -140,12 +140,18 @@ class KernelContextAttributes {
     size_t offset_in_mem{0};
     // Index of the input arg or the return value in the host `Context`
     int index{-1};
-    DataType dt;
+    PrimitiveTypeID dtype{PrimitiveTypeID::unknown};
     bool is_array{false};
     std::vector<int> element_shape;
     std::size_t field_dim{0};
 
-    TI_IO_DEF(stride, offset_in_mem, index, is_array, element_shape, field_dim);
+    TI_IO_DEF(stride,
+              offset_in_mem,
+              index,
+              dtype,
+              is_array,
+              element_shape,
+              field_dim);
   };
 
  public:
 
(The remaining changed files in this commit are not rendered on this page.)