Skip to content

Commit

Permalink
update CMakeLists.txt
Browse files Browse the repository at this point in the history
  • Loading branch information
galeselee committed Dec 23, 2022
1 parent fddddcd commit 56d0168
Show file tree
Hide file tree
Showing 3 changed files with 132 additions and 0 deletions.
4 changes: 4 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,10 @@ if (TI_WITH_CUDA)
set(CUDA_ARCH "cuda")
endif()

# Record the backend architecture tag used when composing the list of
# enabled archs for the build (mirrors the CUDA/DX12 blocks above/below).
if (TI_WITH_AMDGPU)
set(AMDGPU_ARCH "amdgpu")
endif()

# Record the DirectX 12 architecture tag when that backend is enabled.
if (TI_WITH_DX12)
set(DX12_ARCH "dx12")
endif()
Expand Down
26 changes: 26 additions & 0 deletions cmake/TaichiCore.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ option(TI_LLVM_15 "Switch to LLVM 15" OFF)
option(TI_WITH_METAL "Build with the Metal backend" ON)
option(TI_WITH_CUDA "Build with the CUDA backend" ON)
option(TI_WITH_CUDA_TOOLKIT "Build with the CUDA toolkit" OFF)
option(TI_WITH_AMDGPU "Build with the AMDGPU backend" OFF)
option(TI_WITH_OPENGL "Build with the OpenGL backend" ON)
option(TI_WITH_CC "Build with the C backend" ON)
option(TI_WITH_VULKAN "Build with the Vulkan backend" OFF)
Expand Down Expand Up @@ -35,6 +36,12 @@ if(ANDROID)
set(TI_WITH_DX12 OFF)
endif()

# Convenience switch: configuring with the bare AMDGPU variable (e.g.
# -DAMDGPU=ON) turns on the LLVM-based AMDGPU backend and disables CUDA,
# since the two GPU backends are not built together here.
# NOTE(review): every other backend in this file is gated on a TI_WITH_*
# option; confirm that the bare `AMDGPU` cache variable (rather than
# TI_WITH_AMDGPU) is the intended trigger for this block.
if(AMDGPU)
set(TI_WITH_LLVM ON)
set(TI_WITH_CUDA OFF)
set(TI_WITH_AMDGPU ON)
endif()

if(UNIX AND NOT APPLE)
# Handy helper for Linux
# https://stackoverflow.com/a/32259072/12003165
Expand All @@ -54,13 +61,21 @@ if (APPLE)
set(TI_WITH_CC OFF)
message(WARNING "C backend not supported on OS X. Setting TI_WITH_CC to OFF.")
endif()
# The AMDGPU (ROCm/HIP) backend is not available on macOS: downgrade the
# request to a warning instead of failing the configure step.
if (TI_WITH_AMDGPU)
set(TI_WITH_AMDGPU OFF)
message(WARNING "AMDGPU backend not supported on OS X. Setting TI_WITH_AMDGPU to OFF.")
endif()
endif()

# Windows: neither the C backend nor the AMDGPU (ROCm/HIP) backend is
# supported, so force both off with a warning rather than erroring out.
if (WIN32)
if (TI_WITH_CC)
set(TI_WITH_CC OFF)
message(WARNING "C backend not supported on Windows. Setting TI_WITH_CC to OFF.")
endif()
if (TI_WITH_AMDGPU)
set(TI_WITH_AMDGPU OFF)
message(WARNING "AMDGPU backend not supported on Windows. Setting TI_WITH_AMDGPU to OFF.")
endif()
endif()

if(TI_WITH_VULKAN)
Expand Down Expand Up @@ -226,6 +241,17 @@ if(TI_WITH_LLVM)
target_link_libraries(${CORE_LIBRARY_NAME} PRIVATE cuda_rhi)
endif()

# Wire the AMDGPU backend into the core library. Only the RHI layer exists
# at this point; codegen and runtime subdirectories are stubbed out below
# and expected to be enabled by follow-up commits.
if (TI_WITH_AMDGPU)
# Resolve the LLVM component libraries for the AMDGPU target.
# NOTE(review): llvm_amdgpu_libs is computed but never passed to
# target_link_libraries in this hunk — confirm whether it should be linked
# here or is consumed elsewhere.
llvm_map_components_to_libnames(llvm_amdgpu_libs AMDGPU)
# add_subdirectory(taichi/codegen/amdgpu)
# add_subdirectory(taichi/runtime/amdgpu)
add_subdirectory(taichi/rhi/amdgpu)

# target_link_libraries(${CORE_LIBRARY_NAME} PRIVATE amdgpu_codegen)
# target_link_libraries(${CORE_LIBRARY_NAME} PRIVATE amdgpu_runtime)
target_link_libraries(${CORE_LIBRARY_NAME} PRIVATE amdgpu_rhi)
endif()

if (TI_WITH_DX12)
llvm_map_components_to_libnames(llvm_directx_libs DirectX)

Expand Down
102 changes: 102 additions & 0 deletions tests/cpp/backends/amdgpu_device_test.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
#include "gtest/gtest.h"

#ifdef TI_WITH_AMDGPU
#include "taichi/ir/ir_builder.h"
#include "taichi/rhi/amdgpu/amdgpu_driver.h"
#include "taichi/rhi/amdgpu/amdgpu_context.h"
#include "taichi/rhi/amdgpu/amdgpu_device.h"
#include "tests/cpp/program/test_program.h"

namespace taichi {
namespace lang {
TEST(AMDGPU, CreateDeviceAndAlloc) {
  // Allocate a 1 MiB device buffer, write a ramp pattern through a host
  // mapping, then remap and verify the pattern survived the unmap cycle.
  auto amdgpu_device = std::make_unique<amdgpu::AmdgpuDevice>();
  EXPECT_TRUE(amdgpu_device != nullptr);

  taichi::lang::Device::AllocParams alloc_params;
  alloc_params.size = 1048576;  // 1 MiB
  alloc_params.host_read = false;
  alloc_params.host_write = false;
  const taichi::lang::DeviceAllocation buffer =
      amdgpu_device->allocate_memory(alloc_params);

  // The guard owns the allocation and rules out a double free.
  const taichi::lang::DeviceAllocationGuard buffer_guard(buffer);

  // Map to the host and write 0..99.
  int *host_view = static_cast<int *>(amdgpu_device->map(buffer));
  for (int value = 0; value < 100; value++) {
    host_view[value] = value;
  }
  amdgpu_device->unmap(buffer);

  // Remap and confirm every value persisted.
  host_view = static_cast<int *>(amdgpu_device->map(buffer));
  for (int value = 0; value < 100; value++) {
    EXPECT_EQ(host_view[value], value);
  }
  amdgpu_device->unmap(buffer);
}

TEST(AMDGPU, ImportMemory) {
  // Import externally allocated HIP managed memory, copy it into a
  // device-owned buffer, and check the copied contents on the host.
  auto amdgpu_device = std::make_unique<amdgpu::AmdgpuDevice>();
  EXPECT_TRUE(amdgpu_device != nullptr);

  int *managed = nullptr;
  AMDGPUDriver::get_instance().malloc_managed(
      reinterpret_cast<void **>(&managed), 400, HIP_MEM_ATTACH_GLOBAL);
  const taichi::lang::DeviceAllocation imported =
      amdgpu_device->import_memory(managed, 400);

  // Fill the managed buffer with 0..99 from the host side.
  for (int value = 0; value < 100; value++) {
    managed[value] = value;
  }

  taichi::lang::Device::AllocParams alloc_params;
  alloc_params.size = 400;
  alloc_params.host_read = false;
  alloc_params.host_write = false;
  const taichi::lang::DeviceAllocation dest =
      amdgpu_device->allocate_memory(alloc_params);
  const taichi::lang::DeviceAllocationGuard dest_guard(dest);

  // Ensure the host writes are visible before the device-side copy.
  AMDGPUDriver::get_instance().stream_synchronize(nullptr);
  amdgpu_device->memcpy_internal(dest.get_ptr(0), imported.get_ptr(0), 400);
  int *host_view = static_cast<int *>(amdgpu_device->map(dest));

  for (int value = 0; value < 100; value++) {
    EXPECT_EQ(host_view[value], value);
  }
  amdgpu_device->unmap(dest);
  // Imported memory is not owned by the device: free it manually here.
  AMDGPUDriver::get_instance().mem_free(managed);
}

TEST(AMDGPU, CreateContextAndGetMemInfo) {
  // Sanity-check the context memory queries: free memory can never exceed
  // total memory.
  const auto total_mem = AMDGPUContext::get_instance().get_total_memory();
  const auto free_mem = AMDGPUContext::get_instance().get_free_memory();
  EXPECT_GE(total_mem, free_mem);
  // NOTE(review): if get_free_memory() returns an unsigned type this check
  // is vacuously true — confirm the return type.
  EXPECT_GE(free_mem, 0);
}

TEST(AMDGPU, LaunchKernel) {
  // TODO: implement once the AMDGPU runtime lands — launch a simple
  // element-wise kernel and verify its output.
  // NOT_IMPLEMENTED
  // runtime part
  // vec kernel
}

TEST(AMDGPU, FetchResult) {
  // TODO: implement once the AMDGPU runtime lands — run a reduction kernel
  // and fetch the scalar result back to the host.
  // NOT_IMPLEMENTED
  // runtime part
  // reduce kernel
}

TEST(AMDGPU, CodeGen) {
  // TODO: implement once taichi/codegen/amdgpu exists — compile a kernel
  // IR module through the AMDGPU codegen path.
  // NOT_IMPLEMENTED
}

} // namespace lang
} // namespace taichi
#endif

0 comments on commit 56d0168

Please sign in to comment.