Merge pull request rapidsai#286 from kaatish/fea-analytic-reorg
[REVIEW] Reorganized cugraph source directory
BradReesWork authored May 14, 2019
2 parents 6452758 + 82ce0e5 commit f2f6f91
Showing 31 changed files with 1,080 additions and 952 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -5,6 +5,7 @@

## Improvements
- PR #291 nvGraph is updated to use RMM instead of directly invoking cnmem functions.
- PR #286 Reorganized cugraph source directory


## Bug Fixes
21 changes: 13 additions & 8 deletions cpp/CMakeLists.txt
@@ -229,14 +229,19 @@ link_directories("${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES}" # CMAKE_CUDA_IMPLICIT
###################################################################################################
# - library targets -------------------------------------------------------------------------------
add_library(cugraph SHARED
src/grmat.cu
src/cugraph.cu
src/pagerank.cu
src/bfs.cu
src/jaccard.cu
src/overlap.cu
src/nvgraph_gdf.cu
src/two_hop_neighbors.cu
src/utilities/grmat.cu
src/utilities/degree.cu
src/structure/cugraph.cu
src/link_analysis/pagerank.cu
src/traversal/bfs.cu
src/link_prediction/jaccard.cu
src/link_prediction/overlap.cu
src/converters/nvgraph.cu
src/converters/renumber.cu
src/community/nvgraph_gdf.cu
src/traversal/nvgraph_sssp.cu
src/traversal/two_hop_neighbors.cu
src/snmg/blas/spmv.cu
${CMAKE_CURRENT_BINARY_DIR}/gunrock/gunrock/util/test_utils.cu
${CMAKE_CURRENT_BINARY_DIR}/gunrock/gunrock/util/error_utils.cu
${CMAKE_CURRENT_BINARY_DIR}/gunrock/gunrock/util/misc_utils.cu
272 changes: 47 additions & 225 deletions cpp/src/nvgraph_gdf.cu → cpp/src/community/nvgraph_gdf.cu
@@ -21,242 +21,19 @@
* @file nvgraph_gdf.cu
* ---------------------------------------------------------------------------**/

#include <cugraph.h>
#include <nvgraph_gdf.h>
#include <nvgraph/nvgraph.h>
#include <thrust/device_vector.h>
#include <ctime>
#include "utilities/error_utils.h"
#include "converters/nvgraph.cuh"

//RMM:
//

#include <rmm_utils.h>

gdf_error nvgraph2gdf_error(nvgraphStatus_t nvg_stat) {
switch (nvg_stat) {
case NVGRAPH_STATUS_SUCCESS:
return GDF_SUCCESS;
case NVGRAPH_STATUS_NOT_INITIALIZED:
return GDF_INVALID_API_CALL;
case NVGRAPH_STATUS_INVALID_VALUE:
return GDF_INVALID_API_CALL;
case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED:
return GDF_UNSUPPORTED_DTYPE;
case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED:
return GDF_INVALID_API_CALL;
default:
return GDF_CUDA_ERROR;
}
}

gdf_error nvgraph2gdf_error_verbose(nvgraphStatus_t nvg_stat) {
switch (nvg_stat) {
case NVGRAPH_STATUS_NOT_INITIALIZED:
std::cerr << "nvGRAPH not initialized";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_ALLOC_FAILED:
std::cerr << "nvGRAPH alloc failed";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_INVALID_VALUE:
std::cerr << "nvGRAPH invalid value";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_ARCH_MISMATCH:
std::cerr << "nvGRAPH arch mismatch";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_MAPPING_ERROR:
std::cerr << "nvGRAPH mapping error";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_EXECUTION_FAILED:
std::cerr << "nvGRAPH execution failed";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_INTERNAL_ERROR:
std::cerr << "nvGRAPH internal error";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_TYPE_NOT_SUPPORTED:
std::cerr << "nvGRAPH type not supported";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_NOT_CONVERGED:
std::cerr << "nvGRAPH algorithm failed to converge";
return GDF_CUDA_ERROR;
case NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED:
std::cerr << "nvGRAPH graph type not supported";
return GDF_CUDA_ERROR;
default:
std::cerr << "Unknown nvGRAPH Status";
return GDF_CUDA_ERROR;
}
}

#ifdef VERBOSE
#define NVG_TRY(call) \
{ \
nvgraphStatus_t err_code = (call); \
if (err_code != NVGRAPH_STATUS_SUCCESS) \
return nvgraph2gdf_error_verbose(err_code); \
}
#else
#define NVG_TRY(call) \
{ \
nvgraphStatus_t err_code = (call); \
if (err_code != NVGRAPH_STATUS_SUCCESS) \
return nvgraph2gdf_error(err_code); \
}
#endif
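
/* Hedged usage sketch (editorial illustration, not part of this commit): NVG_TRY is
 * intended for use inside functions that return gdf_error; on failure it early-returns
 * the mapped error. The helper name below is hypothetical. */
gdf_error example_create_nvgraph_handle(nvgraphHandle_t *handle) {
  NVG_TRY(nvgraphCreate(handle)); // converts any nvgraphStatus_t failure into a gdf_error and returns it
  return GDF_SUCCESS;
}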

gdf_error gdf_createGraph_nvgraph(nvgraphHandle_t nvg_handle,
gdf_graph* gdf_G,
nvgraphGraphDescr_t* nvg_G,
bool use_transposed) {

// check input
GDF_REQUIRE(!((gdf_G->edgeList == nullptr) &&
(gdf_G->adjList == nullptr) &&
(gdf_G->transposedAdjList == nullptr)),
GDF_INVALID_API_CALL);
nvgraphTopologyType_t TT;
cudaDataType_t settype;
// create an nvgraph graph handle
NVG_TRY(nvgraphCreateGraphDescr(nvg_handle, nvg_G));
// setup nvgraph variables
if (use_transposed) {
// convert edgeList to transposedAdjList
if (gdf_G->transposedAdjList == nullptr) {
GDF_TRY(gdf_add_transposed_adj_list(gdf_G));
}
// use the existing transposedAdjList if it exists and adjList is missing
TT = NVGRAPH_CSC_32;
nvgraphCSCTopology32I_st topoData;
topoData.nvertices = gdf_G->transposedAdjList->offsets->size - 1;
topoData.nedges = gdf_G->transposedAdjList->indices->size;
topoData.destination_offsets = (int *) gdf_G->transposedAdjList->offsets->data;
topoData.source_indices = (int *) gdf_G->transposedAdjList->indices->data;
// attach the transposed adj list
NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvg_G, (void * )&topoData, TT));
//attach edge values
if (gdf_G->transposedAdjList->edge_data) {
switch (gdf_G->transposedAdjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = CUDA_R_32F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvg_G,
0,
settype,
(float * ) gdf_G->transposedAdjList->edge_data->data))
break;
case GDF_FLOAT64:
settype = CUDA_R_64F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvg_G,
0,
settype,
(double * ) gdf_G->transposedAdjList->edge_data->data))
break;
default:
return GDF_UNSUPPORTED_DTYPE;
}
}

}
else {
// convert edgeList to adjList
if (gdf_G->adjList == nullptr) {
GDF_TRY(gdf_add_adj_list(gdf_G));
}
TT = NVGRAPH_CSR_32;
nvgraphCSRTopology32I_st topoData;
topoData.nvertices = gdf_G->adjList->offsets->size - 1;
topoData.nedges = gdf_G->adjList->indices->size;
topoData.source_offsets = (int *) gdf_G->adjList->offsets->data;
topoData.destination_indices = (int *) gdf_G->adjList->indices->data;

// attach adj list
NVG_TRY(nvgraphAttachGraphStructure(nvg_handle, *nvg_G, (void * )&topoData, TT));
//attach edge values
if (gdf_G->adjList->edge_data) {
switch (gdf_G->adjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = CUDA_R_32F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvg_G,
0,
settype,
(float * ) gdf_G->adjList->edge_data->data))
break;
case GDF_FLOAT64:
settype = CUDA_R_64F;
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
*nvg_G,
0,
settype,
(double * ) gdf_G->adjList->edge_data->data))
break;
default:
return GDF_UNSUPPORTED_DTYPE;
}
}
}
return GDF_SUCCESS;
}

gdf_error gdf_sssp_nvgraph(gdf_graph *gdf_G,
const int *source_vert,
gdf_column *sssp_distances) {
// validate pointers before dereferencing them
GDF_REQUIRE(gdf_G != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(source_vert != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(sssp_distances != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(sssp_distances->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(!sssp_distances->valid, GDF_VALIDITY_UNSUPPORTED);
GDF_REQUIRE(sssp_distances->size > 0, GDF_INVALID_API_CALL);
GDF_REQUIRE(*source_vert >= 0, GDF_INVALID_API_CALL);
GDF_REQUIRE(*source_vert < sssp_distances->size, GDF_INVALID_API_CALL);

// init nvgraph
// TODO : time this call
nvgraphHandle_t nvg_handle = 0;
nvgraphGraphDescr_t nvgraph_G = 0;
cudaDataType_t settype;

NVG_TRY(nvgraphCreate(&nvg_handle));
GDF_TRY(gdf_createGraph_nvgraph(nvg_handle, gdf_G, &nvgraph_G, true));

int sssp_index = 0;
int weight_index = 0;
rmm::device_vector<float> d_val;

cudaStream_t stream{nullptr};

if (gdf_G->transposedAdjList->edge_data == nullptr) {
// use a fp32 vector [1,...,1]
settype = CUDA_R_32F;
d_val.resize(gdf_G->transposedAdjList->indices->size);
thrust::fill(rmm::exec_policy(stream)->on(stream), d_val.begin(), d_val.end(), 1.0);
NVG_TRY(nvgraphAttachEdgeData(nvg_handle,
nvgraph_G,
weight_index,
settype,
(void * ) thrust::raw_pointer_cast(d_val.data())));
}
else {
switch (gdf_G->transposedAdjList->edge_data->dtype) {
case GDF_FLOAT32:
settype = CUDA_R_32F;
break;
case GDF_FLOAT64:
settype = CUDA_R_64F;
break;
default:
return GDF_UNSUPPORTED_DTYPE;
}
}

NVG_TRY(nvgraphAttachVertexData(nvg_handle, nvgraph_G, 0, settype, sssp_distances->data));

NVG_TRY(nvgraphSssp(nvg_handle, nvgraph_G, weight_index, source_vert, sssp_index));

NVG_TRY(nvgraphDestroyGraphDescr(nvg_handle, nvgraph_G));
NVG_TRY(nvgraphDestroy(nvg_handle));

return GDF_SUCCESS;
}
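
/* Hedged usage sketch (editorial illustration, not part of this commit): a caller
 * provides a pre-allocated FP32 distance column of length |V|; the helper name and
 * the use of rmm::device_vector here are hypothetical. */
gdf_error example_sssp(gdf_graph *gdf_G, int source, int num_vertices) {
  rmm::device_vector<float> d_distances(num_vertices);
  gdf_column distances{};  // zero-initialize: no validity mask, no name
  distances.data = thrust::raw_pointer_cast(d_distances.data());
  distances.size = num_vertices;
  distances.dtype = GDF_FLOAT32;
  GDF_TRY(gdf_sssp_nvgraph(gdf_G, &source, &distances));
  // d_distances now holds the shortest-path distance from `source` to every vertex
  return GDF_SUCCESS;
}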

gdf_error gdf_balancedCutClustering_nvgraph(gdf_graph* gdf_G,
const int num_clusters,
const int num_eigen_vects,
@@ -592,3 +369,48 @@ gdf_error gdf_triangle_count_nvgraph(gdf_graph* G, uint64_t* result) {
NVG_TRY(nvgraphTriangleCount(nvg_handle, nvg_G, result));
return GDF_SUCCESS;
}
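
/* Hedged usage sketch (editorial illustration, not part of this commit): the count is
 * returned through the uint64_t out-parameter; the wrapper name is hypothetical. */
gdf_error example_triangle_count(gdf_graph *G) {
  uint64_t num_triangles = 0;
  GDF_TRY(gdf_triangle_count_nvgraph(G, &num_triangles));
  // num_triangles now holds the total triangle count of G
  return GDF_SUCCESS;
}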

gdf_error gdf_louvain(gdf_graph *graph, void *final_modularity, void *num_level, gdf_column *louvain_parts) {
GDF_REQUIRE(graph != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(louvain_parts != nullptr && louvain_parts->data != nullptr, GDF_INVALID_API_CALL);
GDF_REQUIRE(graph->adjList != nullptr || graph->edgeList != nullptr, GDF_INVALID_API_CALL);
GDF_TRY(gdf_add_adj_list(graph));

size_t n = graph->adjList->offsets->size - 1;
size_t e = graph->adjList->indices->size;

void* offsets_ptr = graph->adjList->offsets->data;
void* indices_ptr = graph->adjList->indices->data;

void* value_ptr;
rmm::device_vector<float> d_values;
if(graph->adjList->edge_data) {
value_ptr = graph->adjList->edge_data->data;
}
else {
cudaStream_t stream {nullptr};
d_values.resize(graph->adjList->indices->size);
thrust::fill(rmm::exec_policy(stream)->on(stream), d_values.begin(), d_values.end(), 1.0);
value_ptr = (void * ) thrust::raw_pointer_cast(d_values.data());
}

void* louvain_parts_ptr = louvain_parts->data;

auto gdf_to_cudadtype = [](gdf_column *col) {
cudaDataType_t cuda_dtype;
switch (col->dtype) {
case GDF_INT8: cuda_dtype = CUDA_R_8I; break;
case GDF_INT32: cuda_dtype = CUDA_R_32I; break;
case GDF_FLOAT32: cuda_dtype = CUDA_R_32F; break;
case GDF_FLOAT64: cuda_dtype = CUDA_R_64F; break;
default: throw std::invalid_argument("Cannot convert data type");  // throw by value, not by pointer
}
return cuda_dtype;
};

cudaDataType_t index_type = gdf_to_cudadtype(graph->adjList->indices);
cudaDataType_t val_type = graph->adjList->edge_data ? gdf_to_cudadtype(graph->adjList->edge_data) : CUDA_R_32F;

// 1, 0, NULL: use edge weights, no initial clustering supplied; propagate any nvgraph error
NVG_TRY(nvgraphLouvain(index_type, val_type, n, e, offsets_ptr, indices_ptr, value_ptr, 1, 0, NULL,
final_modularity, louvain_parts_ptr, num_level));
return GDF_SUCCESS;
}
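
/* Hedged usage sketch (editorial illustration, not part of this commit): louvain_parts
 * must be a vertex-sized integer column. With the FP32 default weights used above,
 * final_modularity is assumed to be written back as a float and num_level as an int;
 * all names below are hypothetical. */
gdf_error example_louvain(gdf_graph *graph, int num_vertices) {
  rmm::device_vector<int> d_parts(num_vertices);
  gdf_column parts{};
  parts.data = thrust::raw_pointer_cast(d_parts.data());
  parts.size = num_vertices;
  parts.dtype = GDF_INT32;
  float modularity = 0.0f;
  int num_levels = 0;
  GDF_TRY(gdf_louvain(graph, &modularity, &num_levels, &parts));
  // d_parts holds the community id of each vertex; modularity and num_levels are filled in
  return GDF_SUCCESS;
}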
File renamed without changes.