[refactor] Update Ndarray constructor used in AOT runtime.
This constructor is mainly used to construct an Ndarray out of an
existing device allocation. This PR updates the behavior of this
constructor to separate element_shape from shape.
Ailing Zhang committed Jun 6, 2022
1 parent e1ae06e commit 094d9b3
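
In practice, the updated constructor is called the way the new test below does. A minimal usage sketch (assuming an in-tree Taichi build at this commit and an already created DeviceAllocation; the function name wrap_existing_allocation is illustrative, not part of the commit):

#include "taichi/program/ndarray.h"

using namespace taichi::lang;

// Wrap an existing device allocation as an Ndarray view; the Ndarray itself
// does not allocate or free the underlying memory.
void wrap_existing_allocation(DeviceAllocation &devalloc) {
  // element_shape given, layout omitted: defaults to AOS, total shape {10, 4}.
  Ndarray arr_aos(devalloc, PrimitiveType::i32, /*shape=*/{10},
                  /*element_shape=*/{4});

  // Explicit SOA layout: element axes come first, total shape {4, 10}.
  Ndarray arr_soa(devalloc, PrimitiveType::i32, /*shape=*/{10},
                  /*element_shape=*/{4}, ExternalArrayLayout::kSOA);

  // Old call sites that pass only (devalloc, dtype, shape) still compile,
  // because element_shape and layout now have default arguments.
  Ndarray arr_scalar(devalloc, PrimitiveType::i32, /*shape=*/{40});
}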
Showing 3 changed files with 103 additions and 4 deletions.
26 changes: 24 additions & 2 deletions taichi/program/ndarray.cpp
@@ -48,15 +48,37 @@ Ndarray::Ndarray(Program *prog,

Ndarray::Ndarray(DeviceAllocation &devalloc,
const DataType type,
const std::vector<int> &shape)
const std::vector<int> &shape,
const std::vector<int> &element_shape,
ExternalArrayLayout layout)
: ndarray_alloc_(devalloc),
dtype(type),
element_shape(element_shape),
shape(shape),
layout(layout),
nelement_(std::accumulate(std::begin(shape),
std::end(shape),
1,
std::multiplies<>())),
element_size_(data_type_size(dtype)) {
element_size_(data_type_size(dtype) *
std::accumulate(std::begin(element_shape),
std::end(element_shape),
1,
std::multiplies<>())) {
// When element_shape is specified but layout is not, the default layout is AOS.
if (!element_shape.empty() && layout == ExternalArrayLayout::kNull) {
layout = ExternalArrayLayout::kAOS;
}
// Now that we have two shapes that may be concatenated differently
// depending on layout, total_shape_ comes in handy.
total_shape_ = shape;
if (layout == ExternalArrayLayout::kAOS) {
total_shape_.insert(total_shape_.end(), element_shape.begin(),
element_shape.end());
} else if (layout == ExternalArrayLayout::kSOA) {
total_shape_.insert(total_shape_.begin(), element_shape.begin(),
element_shape.end());
}
}

Ndarray::~Ndarray() {
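
The effect of the layout branch above on total_shape_ can be reproduced in isolation. A standalone sketch of the same concatenation logic (the enum is redeclared locally so the snippet compiles on its own, and compose_total_shape is a hypothetical helper, not a Taichi API):

#include <iostream>
#include <vector>

// Local stand-in for taichi::lang::ExternalArrayLayout.
enum class ExternalArrayLayout { kNull, kAOS, kSOA };

// Mirrors how the constructor builds total_shape_ from shape and element_shape.
std::vector<int> compose_total_shape(std::vector<int> shape,
                                     const std::vector<int> &element_shape,
                                     ExternalArrayLayout layout) {
  // Default to AOS when element_shape is given but layout is not.
  if (!element_shape.empty() && layout == ExternalArrayLayout::kNull) {
    layout = ExternalArrayLayout::kAOS;
  }
  std::vector<int> total = std::move(shape);
  if (layout == ExternalArrayLayout::kAOS) {
    // AOS appends the element axes: {10} + {4} -> {10, 4}.
    total.insert(total.end(), element_shape.begin(), element_shape.end());
  } else if (layout == ExternalArrayLayout::kSOA) {
    // SOA prepends the element axes: {10} + {4} -> {4, 10}.
    total.insert(total.begin(), element_shape.begin(), element_shape.end());
  }
  return total;
}

int main() {
  for (int d : compose_total_shape({10}, {4}, ExternalArrayLayout::kNull)) {
    std::cout << d << ' ';  // prints: 10 4 (defaults to AOS)
  }
  std::cout << '\n';
  for (int d : compose_total_shape({10}, {4}, ExternalArrayLayout::kSOA)) {
    std::cout << d << ' ';  // prints: 4 10
  }
  std::cout << '\n';
  return 0;
}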
9 changes: 7 additions & 2 deletions taichi/program/ndarray.h
@@ -26,12 +26,17 @@ class TI_DLL_EXPORT Ndarray {
const std::vector<int> &element_shape = {},
ExternalArrayLayout layout = ExternalArrayLayout::kNull);

/* Constructs a Ndarray from an existing DeviceAllocation
/* Constructs a Ndarray from an existing DeviceAllocation.
* It doesn't handle the allocation and deallocation.
* You can see an Ndarray as a view or interpretation of a DeviceAllocation
* with the specified element_shape, dtype, and layout.
*/
explicit Ndarray(DeviceAllocation &devalloc,
const DataType type,
const std::vector<int> &shape);
const std::vector<int> &shape,
const std::vector<int> &element_shape = {},
ExternalArrayLayout layout = ExternalArrayLayout::kNull);

DeviceAllocation ndarray_alloc_{kDeviceNullAllocation};
DataType dtype;
std::vector<int> element_shape;
72 changes: 72 additions & 0 deletions tests/cpp/aot/runtime_test.cpp
@@ -0,0 +1,72 @@
#include "gtest/gtest.h"
#define TI_RUNTIME_HOST
#include "taichi/common/core.h"
#include "taichi/program/ndarray.h"
#include "taichi/program/context.h"
#include "taichi/system/memory_pool.h"
#include "taichi/runtime/gfx/runtime.h"
#ifdef TI_WITH_VULKAN
#include "taichi/backends/device.h"
#include "taichi/backends/vulkan/vulkan_device.h"
#include "taichi/backends/vulkan/vulkan_device_creator.h"
#include "taichi/backends/vulkan/vulkan_loader.h"
#include "taichi/backends/vulkan/vulkan_utils.h"
#endif

using namespace taichi;
using namespace lang;

#ifdef TI_WITH_VULKAN
TEST(RuntimeTest, ViewDevAllocAsNdarray) {
// Otherwise this will segfault on a macOS VM,
// where Vulkan is installed but no devices are present.
if (!vulkan::is_vulkan_api_available()) {
return;
}

// API based on proposal https://github.com/taichi-dev/taichi/issues/3642
// Initialize Vulkan program
taichi::uint64 *result_buffer{nullptr};
auto memory_pool =
std::make_unique<taichi::lang::MemoryPool>(Arch::vulkan, nullptr);
result_buffer = (taichi::uint64 *)memory_pool->allocate(
sizeof(taichi::uint64) * taichi_result_buffer_entries, 8);

// Create Taichi Device for computation
lang::vulkan::VulkanDeviceCreator::Params evd_params;
evd_params.api_version =
taichi::lang::vulkan::VulkanEnvSettings::kApiVersion();
auto embedded_device =
std::make_unique<taichi::lang::vulkan::VulkanDeviceCreator>(evd_params);
taichi::lang::vulkan::VulkanDevice *device_ =
static_cast<taichi::lang::vulkan::VulkanDevice *>(
embedded_device->device());
// Create Vulkan runtime
gfx::GfxRuntime::Params params;
params.host_result_buffer = result_buffer;
params.device = device_;
auto vulkan_runtime =
std::make_unique<taichi::lang::gfx::GfxRuntime>(std::move(params));

const int size = 40;
taichi::lang::Device::AllocParams alloc_params;
alloc_params.host_write = true;
alloc_params.size = size * sizeof(int);
alloc_params.usage = taichi::lang::AllocUsage::Storage;
DeviceAllocation devalloc_arr_ = device_->allocate_memory(alloc_params);

std::vector<int> element_shape = {4};
auto arr1 = Ndarray(devalloc_arr_, PrimitiveType::i32, {10}, element_shape);
EXPECT_TRUE(arr1.element_shape == element_shape);
EXPECT_EQ(arr1.total_shape()[0], 10);
EXPECT_EQ(arr1.total_shape()[1], 4);

auto arr2 = Ndarray(devalloc_arr_, PrimitiveType::i32, {10}, element_shape,
ExternalArrayLayout::kSOA);
EXPECT_TRUE(arr2.element_shape == element_shape);
EXPECT_EQ(arr2.total_shape()[0], 4);
EXPECT_EQ(arr2.total_shape()[1], 10);

device_->dealloc_memory(devalloc_arr_);
}
#endif
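
As a quick sanity check on the sizes used in the test (a standalone sketch, not part of the commit): element_size_ is now data_type_size(dtype) times the product of element_shape, so a {10} x {4} i32 view covers exactly the 40 ints the test allocates.

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const std::size_t nelement = 10;                            // product of shape {10}
  const std::size_t element_size = sizeof(std::int32_t) * 4;  // dtype size * product of element_shape {4}
  const std::size_t alloc_size = 40 * sizeof(std::int32_t);   // alloc_params.size in the test (sizeof(int) there)
  assert(nelement * element_size == alloc_size);              // 10 * 16 == 160 bytes
  return 0;
}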
