Skip to content

Commit

Permalink
Addressed compilation errors
Browse files Browse the repository at this point in the history
  • Loading branch information
jim19930609 committed Jun 8, 2022
1 parent c12fb2b commit e3f1ab8
Show file tree
Hide file tree
Showing 2 changed files with 73 additions and 82 deletions.
3 changes: 2 additions & 1 deletion taichi/llvm/llvm_offline_cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,14 +45,15 @@ struct LlvmOfflineCache {
struct FieldCacheData {
struct SNodeCacheData {
int id;
int type;
SNodeType type;
size_t cell_size_bytes;
size_t chunk_size;

TI_IO_DEF(id, type, cell_size_bytes, chunk_size);
};

int tree_id;
int root_id;
size_t root_size;
std::vector<SNodeCacheData> snode_metas;

Expand Down
152 changes: 71 additions & 81 deletions taichi/llvm/llvm_program.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -160,103 +160,94 @@ LlvmProgramImpl::clone_struct_compiler_initial_context(

void LlvmProgramImpl::initialize_llvm_runtime_snodes(
const LlvmOfflineCache::FieldCacheData &field_cache_data,
uint64 *result_buffer);
TaichiLLVMContext *tlctx = nullptr;
if (config->arch == Arch::cuda) {
uint64 *result_buffer) {
TaichiLLVMContext *tlctx = nullptr;
if (config->arch == Arch::cuda) {
#if defined(TI_WITH_CUDA)
tlctx = llvm_context_device_.get();
tlctx = llvm_context_device_.get();
#else
TI_NOT_IMPLEMENTED
TI_NOT_IMPLEMENTED
#endif
} else {
tlctx = llvm_context_host_.get();
}
} else {
tlctx = llvm_context_host_.get();
}

auto *const runtime_jit = tlctx -> runtime_jit_module;
// By the time this creator is called, "this" is already destroyed.
// Therefore it is necessary to capture members by values.
size_t root_size = field_cache_data.root_size;
const auto snode_metas = field_cache_data.snode_metas;
const int root_id = field_cache_data.tree_id;

TI_TRACE("Allocating data structure of size {} bytes", root_size);
std::size_t rounded_size = taichi::iroundup(root_size, taichi_page_size);

Ptr root_buffer = snode_tree_buffer_manager_->allocate(runtime_jit,
llvm_runtime_,
rounded_size,
taichi_page_size,
root_id,
result_buffer);
if (config->arch == Arch::cuda) {
auto *const runtime_jit = tlctx->runtime_jit_module;
// By the time this creator is called, "this" is already destroyed.
// Therefore it is necessary to capture members by values.
size_t root_size = field_cache_data.root_size;
const auto snode_metas = field_cache_data.snode_metas;
const int tree_id = field_cache_data.tree_id;
const int root_id = field_cache_data.root_id;

TI_TRACE("Allocating data structure of size {} bytes", root_size);
std::size_t rounded_size = taichi::iroundup(root_size, taichi_page_size);

Ptr root_buffer = snode_tree_buffer_manager_->allocate(
runtime_jit, llvm_runtime_, rounded_size, taichi_page_size, tree_id,
result_buffer);
if (config->arch == Arch::cuda) {
#if defined(TI_WITH_CUDA)
CUDADriver::get_instance().memset(root_buffer, 0, rounded_size);
CUDADriver::get_instance().memset(root_buffer, 0, rounded_size);
#else
TI_NOT_IMPLEMENTED
TI_NOT_IMPLEMENTED
#endif
} else {
std::memset(root_buffer, 0, rounded_size);
}
} else {
std::memset(root_buffer, 0, rounded_size);
}

DeviceAllocation alloc{kDeviceNullAllocation};
DeviceAllocation alloc{kDeviceNullAllocation};

if (config->arch == Arch::cuda) {
if (config->arch == Arch::cuda) {
#if defined(TI_WITH_CUDA)
alloc = cuda_device()->import_memory(root_buffer, rounded_size);
alloc = cuda_device()->import_memory(root_buffer, rounded_size);
#else
TI_NOT_IMPLEMENTED
TI_NOT_IMPLEMENTED
#endif
} else {
alloc = cpu_device()->import_memory(root_buffer, rounded_size);
}
} else {
alloc = cpu_device()->import_memory(root_buffer, rounded_size);
}

snode_tree_allocs_[tree->id()] = alloc;
snode_tree_allocs_[tree_id] = alloc;

bool all_dense = config->demote_dense_struct_fors;
for (size_t i = 0; i < snode_metas.size(); i++) {
if (snode_metas[i]->type != SNodeType::dense &&
snode_metas[i]->type != SNodeType::place &&
snode_metas[i]->type != SNodeType::root) {
all_dense = false;
break;
bool all_dense = config->demote_dense_struct_fors;
for (size_t i = 0; i < snode_metas.size(); i++) {
if (snode_metas[i].type != SNodeType::dense &&
snode_metas[i].type != SNodeType::place &&
snode_metas[i].type != SNodeType::root) {
all_dense = false;
break;
}
}
}

runtime_jit->call<void *, std::size_t, int, int, int, std::size_t, Ptr>(
"runtime_initialize_snodes",
llvm_runtime_,
root_size,
root_id,
(int)snode_metas.size(),
root_id,
rounded_size,
root_buffer,
all_dense);

for (size_t i = 0; i < snode_metas.size(); i++) {
if (is_gc_able(snode_metas[i]->type)) {
const auto snode_id = snode_metas[i].id;
std::size_t node_size;
auto element_size = snode_metas[i].cell_size_bytes;
if (snode_metas[i].type == SNodeType::pointer) {
// pointer. Allocators are for single elements
node_size = element_size;
} else {
// dynamic. Allocators are for the chunks
node_size = sizeof(void *) + element_size * snode_metas[i].chunk_size;
runtime_jit->call<void *, std::size_t, int, int, int, std::size_t, Ptr>(
"runtime_initialize_snodes", llvm_runtime_, root_size, root_id,
(int)snode_metas.size(), tree_id, rounded_size, root_buffer, all_dense);

for (size_t i = 0; i < snode_metas.size(); i++) {
if (is_gc_able(snode_metas[i].type)) {
const auto snode_id = snode_metas[i].id;
std::size_t node_size;
auto element_size = snode_metas[i].cell_size_bytes;
if (snode_metas[i].type == SNodeType::pointer) {
// pointer. Allocators are for single elements
node_size = element_size;
} else {
// dynamic. Allocators are for the chunks
node_size = sizeof(void *) + element_size * snode_metas[i].chunk_size;
}
TI_TRACE("Initializing allocator for snode {} (node size {})", snode_id,
node_size);
auto rt = llvm_runtime_;
runtime_jit->call<void *, int, std::size_t>(
"runtime_NodeAllocator_initialize", rt, snode_id, node_size);
TI_TRACE("Allocating ambient element for snode {} (node size {})",
snode_id, node_size);
runtime_jit->call<void *, int>("runtime_allocate_ambient", rt, snode_id,
node_size);
}
TI_TRACE("Initializing allocator for snode {} (node size {})", snode_id,
node_size);
auto rt = llvm_runtime_;
runtime_jit->call<void *, int, std::size_t>(
"runtime_NodeAllocator_initialize", rt, snode_id, node_size);
TI_TRACE("Allocating ambient element for snode {} (node size {})", snode_id,
node_size);
runtime_jit->call<void *, int>("runtime_allocate_ambient", rt, snode_id,
node_size);
}
}
}

std::unique_ptr<StructCompiler> LlvmProgramImpl::compile_snode_tree_types_impl(
SNodeTree *tree) {
Expand Down Expand Up @@ -288,10 +279,9 @@ void LlvmProgramImpl::compile_snode_tree_types(SNodeTree *tree) {
static LlvmOfflineCache::FieldCacheData construct_filed_cache_data(
const SNodeTree &tree,
const StructCompiler &struct_compiler) {
TI_ASSERT(tree.id == tree.root()->id);

LlvmOfflineCache::FieldCacheData ret;
ret.tree_id = tree.id;
ret.tree_id = tree.id();
ret.root_id = tree.root()->id;
ret.root_size = struct_compiler.root_size;

const auto &snodes = struct_compiler.snodes;
Expand Down

0 comments on commit e3f1ab8

Please sign in to comment.