Skip to content

Commit

Permalink
implement MG test
Browse files Browse the repository at this point in the history
  • Loading branch information
ChuckHastings committed Jul 11, 2022
1 parent 43aa009 commit e238939
Show file tree
Hide file tree
Showing 3 changed files with 114 additions and 5 deletions.
1 change: 1 addition & 0 deletions cpp/tests/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -632,6 +632,7 @@ if(BUILD_CUGRAPH_MG_TESTS)
ConfigureCTestMG(MG_CAPI_UNIFORM_NEIGHBOR_SAMPLE c_api/mg_uniform_neighbor_sample_test.c c_api/mg_test_utils.cpp)
ConfigureCTestMG(MG_CAPI_TRIANGLE_COUNT c_api/mg_triangle_count_test.c c_api/mg_test_utils.cpp)
ConfigureCTestMG(MG_CAPI_LOUVAIN c_api/mg_louvain_test.c c_api/mg_test_utils.cpp)
ConfigureCTestMG(MG_CAPI_CORE_NUMBER c_api/mg_core_number_test.c c_api/mg_test_utils.cpp)
else()
message(FATAL_ERROR "OpenMPI NOT found, cannot build MG tests.")
endif()
Expand Down
4 changes: 0 additions & 4 deletions cpp/tests/c_api/core_number_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -73,10 +73,6 @@ int generic_core_number_test(vertex_t* h_src,
p_handle, (byte_t*)h_core_numbers, core_numbers, &ret_error);
TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

for (int i = 0; i < num_vertices; ++i) {
printf("%d: %d - expected = %d\n", h_vertices[i], h_core_numbers[i], h_result[h_vertices[i]]);
}

for (int i = 0; (i < num_vertices) && (test_ret_value == 0); ++i) {
TEST_ASSERT(test_ret_value,
nearlyEqual(h_result[h_vertices[i]], h_core_numbers[i], 0.001),
Expand Down
114 changes: 113 additions & 1 deletion cpp/tests/c_api/mg_core_number_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,4 +23,116 @@

/* Graph element types used throughout this MG test: 32-bit vertex and edge
 * identifiers, single-precision edge weights.  (The capture showed
 * `typedef float weight_t;` twice — the duplicate is removed here.) */
typedef int32_t vertex_t;
typedef int32_t edge_t;
typedef float weight_t;

/*
 * Build a multi-GPU test graph from the given COO edge list, run
 * cugraph_core_number on it, and compare this rank's local results against
 * the expected per-vertex core numbers.
 *
 * p_handle         - cugraph resource handle (already bound to a raft handle)
 * h_src/h_dst      - host edge list (num_edges entries each)
 * h_wgt            - host edge weights (num_edges entries)
 * h_result         - expected core number for every global vertex id,
 *                    indexed by vertex id (num_vertices entries)
 * num_vertices     - global vertex count (only used to size h_result;
 *                    the check loop runs over this rank's local vertices)
 * num_edges        - number of entries in h_src/h_dst/h_wgt
 * store_transposed - graph storage orientation passed through to creation
 *
 * Returns 0 on success, non-zero on the first failed assertion.
 */
int generic_core_number_test(const cugraph_resource_handle_t* p_handle,
                             vertex_t* h_src,
                             vertex_t* h_dst,
                             weight_t* h_wgt,
                             vertex_t* h_result,
                             size_t num_vertices,
                             size_t num_edges,
                             bool_t store_transposed)
{
  int test_ret_value = 0;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error;

  cugraph_graph_t* p_graph        = NULL;
  cugraph_core_result_t* p_result = NULL;

  ret_code = create_mg_test_graph(
    p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, TRUE, &p_graph, &ret_error);

  /* Fixed: message names the MG helper actually being called. */
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_mg_test_graph failed.");
  TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));

  /* NOTE(review): third argument is the degree type selector — 0 presumably
   * selects the default (incoming) degree; confirm against the C API header. */
  ret_code = cugraph_core_number(p_handle, p_graph, 0, FALSE, &p_result, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_core_number failed.");
  TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));

  cugraph_type_erased_device_array_view_t* vertices;
  cugraph_type_erased_device_array_view_t* core_numbers;

  vertices     = cugraph_core_result_get_vertices(p_result);
  core_numbers = cugraph_core_result_get_core_numbers(p_result);

  /* In an MG run each rank holds only its partition of the vertices. */
  size_t num_local_vertices = cugraph_type_erased_device_array_view_size(vertices);

  vertex_t h_vertices[num_local_vertices];
  vertex_t h_core_numbers[num_local_vertices];

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    p_handle, (byte_t*)h_vertices, vertices, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    p_handle, (byte_t*)h_core_numbers, core_numbers, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  /* Compare each local vertex's core number against the expected table,
   * indexed by global vertex id. */
  for (int i = 0; (i < num_local_vertices) && (test_ret_value == 0); ++i) {
    TEST_ASSERT(test_ret_value,
                nearlyEqual(h_result[h_vertices[i]], h_core_numbers[i], 0.001),
                "core number results don't match");
  }

  cugraph_core_result_free(p_result);
  /* Fixed: the graph was created by create_mg_test_graph, so it must be
   * released with the MG free, not cugraph_sg_graph_free. */
  cugraph_mg_graph_free(p_graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

/*
 * Core-number test case: a 7-vertex graph whose 22 directed entries encode
 * each undirected edge in both orientations, with unit weights.  The
 * expected array gives the core number of vertex i at expected[i].
 *
 * Returns 0 on success, non-zero on failure (propagated from the generic
 * test driver).
 */
int test_core_number(const cugraph_resource_handle_t* p_handle)
{
  const size_t num_edges    = 22;
  const size_t num_vertices = 7;

  vertex_t src[]      = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5, 3, 1, 4, 5, 5, 6};
  vertex_t dst[]      = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 3, 1, 6, 5};
  weight_t wgt[]      = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
  vertex_t expected[] = {2, 3, 2, 3, 3, 3, 1};

  return generic_core_number_test(
    p_handle, src, dst, wgt, expected, num_vertices, num_edges, FALSE);
}

/******************************************************************************/

/*
 * MG test entry point: initialize MPI, bind this rank to a GPU
 * (round-robin over the GPUs visible on the node), build a raft/cugraph
 * resource handle, run the core-number test, then tear everything down.
 *
 * Returns 0 if the test passed on this rank, non-zero otherwise.
 * (Removed unused locals `status`, `mpi_status`, `ret_error`, `ret_code`,
 * and made resource cleanup unconditional — previously the handle was only
 * freed inside an `if (result == 0)` guard and would leak on failure.)
 */
int main(int argc, char** argv)
{
  int comm_rank;
  int comm_size;
  int num_gpus_per_node;
  int result                        = 0;
  cugraph_resource_handle_t* handle = NULL;
  int prows                         = 1;  /* single row of GPUs in the 2D partition */

  C_MPI_TRY(MPI_Init(&argc, &argv));
  C_MPI_TRY(MPI_Comm_rank(MPI_COMM_WORLD, &comm_rank));
  C_MPI_TRY(MPI_Comm_size(MPI_COMM_WORLD, &comm_size));
  /* One GPU per rank, assigned round-robin within the node. */
  C_CUDA_TRY(cudaGetDeviceCount(&num_gpus_per_node));
  C_CUDA_TRY(cudaSetDevice(comm_rank % num_gpus_per_node));

  void* raft_handle = create_raft_handle(prows);
  handle            = cugraph_create_resource_handle(raft_handle);

  result |= RUN_MG_TEST(test_core_number, handle);

  /* Always release resources, regardless of test outcome. */
  cugraph_free_resource_handle(handle);
  free_raft_handle(raft_handle);

  C_MPI_TRY(MPI_Finalize());

  return result;
}

0 comments on commit e238939

Please sign in to comment.