Commit ba9ef44: merge changelog

ChuckHastings committed Jul 19, 2019
2 parents: 60d8bf0 + b9679ee
Showing 5 changed files with 53 additions and 33 deletions.
CHANGELOG.md (6 changes: 4 additions & 2 deletions)

@@ -11,6 +11,7 @@
- PR #353 Change snmg python wrapper in accordance to cpp api
- PR #362 Restructured python/cython directories and files.
- PR #365 Updates for setting device and vertex ids for snmg pagerank
+- PR #383 Exposed MG pagerank solver parameters

## Bug Fixes
- PR #368 Bump cudf dependency versions for cugraph conda packages
@@ -19,11 +20,12 @@
- PR #364 Fixed bug building or installing cugraph when conda isn't installed
- PR #375 Added a function to initialize gdf columns in cugraph #375
- PR #378 cugraph was unable to import device_of_gpu_pointer
+- PR #384 Fixed bug in snmg coo2csr causing error in dask-cugraph tests.
- PR #382 Disabled vertex id check to allow Azure deployment
- PR #395 run omp_ge_num_threads in a parallel context



-# cuGraph 0.8.0 (Date TBD)
+# cuGraph 0.8.0 (27 June 2019)

## New Features
- PR #287 SNMG power iteration step1
cpp/src/snmg/COO2CSR/COO2CSR.cu (59 changes: 33 additions & 26 deletions)

@@ -290,20 +290,28 @@ gdf_error snmg_coo2csr_impl(size_t* part_offsets,

#pragma omp barrier

+    int myRowCount = 0;
+    for (int j = 0; j < p; j++){
+      idx_t* otherRowCounts = comm->rowCounts + (j * p);
+      myRowCount += otherRowCounts[i];
+    }

    // Each thread allocates space to receive their rows from others
    idx_t *cooRowNew, *cooColNew;
    val_t *cooValNew;
-    ALLOC_TRY(&cooRowNew, sizeof(idx_t) * myEdgeCount, nullptr);
-    ALLOC_TRY(&cooColNew, sizeof(idx_t) * myEdgeCount, nullptr);
+    ALLOC_TRY(&cooRowNew, sizeof(idx_t) * myRowCount, nullptr);
+    ALLOC_TRY(&cooColNew, sizeof(idx_t) * myRowCount, nullptr);
    if (cooValTemp != nullptr) {
-      ALLOC_TRY(&cooValNew, sizeof(val_t) * myEdgeCount, nullptr);
+      ALLOC_TRY(&cooValNew, sizeof(val_t) * myRowCount, nullptr);
    }
    else {
      cooValNew = nullptr;
    }
    comm->rowPtrs[i] = cooRowNew;
    comm->colPtrs[i] = cooColNew;
    comm->valPtrs[i] = cooValNew;
    cudaCheckError();
+    cudaDeviceSynchronize();
#pragma omp barrier

    // Each thread copies the rows needed by other threads to them
@@ -314,22 +322,22 @@ gdf_error snmg_coo2csr_impl(size_t* part_offsets,
        idx_t* prevRowCounts = comm->rowCounts + (prev * p);
        offset += prevRowCounts[other];
      }
-     cudaMemcpyPeer(comm->rowPtrs[other] + offset,
-                    other,
-                    cooRowTemp + positions[other],
-                    i,
-                    rowCount * sizeof(idx_t));
-     cudaMemcpyPeer(comm->colPtrs[other] + offset,
-                    other,
-                    cooColTemp + positions[other],
-                    i,
-                    rowCount * sizeof(idx_t));
-     if (cooValTemp != nullptr) {
-       cudaMemcpyPeer(comm->valPtrs[other] + offset,
-                      other,
-                      cooValTemp + positions[other],
-                      i,
-                      rowCount * sizeof(idx_t));
+
+     if (rowCount > 0) {
+       cudaMemcpy(comm->rowPtrs[other] + offset,
+                  cooRowTemp + positions[other],
+                  rowCount * sizeof(idx_t),
+                  cudaMemcpyDefault);
+       cudaMemcpy(comm->colPtrs[other] + offset,
+                  cooColTemp + positions[other],
+                  rowCount * sizeof(idx_t),
+                  cudaMemcpyDefault);
+       if (cooValTemp != nullptr) {
+         cudaMemcpy(comm->valPtrs[other],
+                    cooValTemp + positions[other],
+                    rowCount * sizeof(idx_t),
+                    cudaMemcpyDefault);
+       }
      }
    }
    cudaCheckError();
@@ -353,7 +361,7 @@ gdf_error snmg_coo2csr_impl(size_t* part_offsets,
    idx_t myOffset = part_offsets[i];
    thrust::transform(rmm::exec_policy(nullptr)->on(nullptr),
                      cooRowNew,
-                     cooRowNew + myEdgeCount,
+                     cooRowNew + myRowCount,
                      thrust::make_constant_iterator(myOffset * -1),
                      cooRowNew,
                      thrust::plus<idx_t>());
@@ -363,7 +371,7 @@ gdf_error snmg_coo2csr_impl(size_t* part_offsets,
      auto zippy = thrust::make_zip_iterator(thrust::make_tuple(cooRowNew, cooColNew));
      thrust::sort_by_key(rmm::exec_policy(nullptr)->on(nullptr),
                          zippy,
-                         zippy + myEdgeCount,
+                         zippy + myRowCount,
                          cooValNew);
    }
    else {
@@ -389,15 +397,15 @@ gdf_error snmg_coo2csr_impl(size_t* part_offsets,
                                       unique,
                                       counts,
                                       runcount,
-                                      myEdgeCount);
+                                      myRowCount);
    ALLOC_TRY(&tmpStorage, tmpBytes, nullptr);
    cub::DeviceRunLengthEncode::Encode(tmpStorage,
                                       tmpBytes,
                                       cooRowNew,
                                       unique,
                                       counts,
                                       runcount,
-                                      myEdgeCount);
+                                      myRowCount);
    ALLOC_FREE_TRY(tmpStorage, nullptr);

    cudaDeviceSynchronize();
@@ -426,15 +434,14 @@ gdf_error snmg_coo2csr_impl(size_t* part_offsets,
    csrOff->dtype = cooRow->dtype;
    csrOff->size = localMaxId + 2;
    csrOff->data = offsets;
-
    cugraph::gdf_col_set_defaults(csrInd);
    csrInd->dtype = cooRow->dtype;
-    csrInd->size = myEdgeCount;
+    csrInd->size = myRowCount;
    csrInd->data = cooColNew;
    if (cooValNew != nullptr) {
      cugraph::gdf_col_set_defaults(cooVal);
      csrVal->dtype = cooVal->dtype;
-      csrVal->size = myEdgeCount;
+      csrVal->size = myRowCount;
      csrVal->data = cooValNew;
    }
#pragma omp barrier
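The bulk of this diff replaces explicit peer-to-peer copies with plain cudaMemcpy using cudaMemcpyDefault, sizes the receive buffers by the row total actually received (myRowCount) rather than the local edge count, and skips empty transfers. As a minimal standalone sketch of the copy idiom (not cuGraph code; assumes 2 visible GPUs with unified virtual addressing and omits error checking):

#include <cuda_runtime.h>
#include <cstdio>

int main() {
  const size_t count = 1 << 20;
  int *src = nullptr, *dst = nullptr;

  cudaSetDevice(0);
  cudaMalloc(&src, count * sizeof(int));
  cudaSetDevice(1);
  cudaMalloc(&dst, count * sizeof(int));

  // Old idiom: cudaMemcpyPeer needs explicit destination/source device ids.
  cudaMemcpyPeer(dst, 1, src, 0, count * sizeof(int));

  // New idiom from this diff: under UVA, cudaMemcpyDefault lets the runtime
  // infer which device owns each pointer; guard against zero-length copies.
  if (count > 0) {
    cudaMemcpy(dst, src, count * sizeof(int), cudaMemcpyDefault);
  }

  cudaFree(src);
  cudaFree(dst);
  printf("copies issued\n");
  return 0;
}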
cpp/src/utilities/validation.cuh (2 changes: 2 additions & 0 deletions)

@@ -30,6 +30,7 @@ namespace cugraph {
// Function for checking 0-based indexing
template <typename T>
gdf_error indexing_check (T* srcs, T* dests, int64_t nnz) {
+#if 0
  cudaStream_t stream {nullptr};

  // min from srcs
@@ -58,6 +59,7 @@ gdf_error indexing_check (T* srcs, T* dests, int64_t nnz) {
std::cerr<< "If this is not intended, please refer to ";
std::cerr<< "cuGraph renumbering feature." << std::endl;
}
#endif
return GDF_SUCCESS;
}
} //namespace cugraph
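Together, the two added lines wrap the entire body of indexing_check in #if 0 / #endif, which matches the changelog entry for PR #382 (vertex id check disabled to allow Azure deployment). A sketch of the compiled result under that reading:

// Effective behavior once the preprocessor drops the #if 0 region:
// the check becomes a no-op that always reports success.
template <typename T>
gdf_error indexing_check(T* srcs, T* dests, int64_t nnz) {
  return GDF_SUCCESS;  // validation body compiled out; see PR #382
}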
python/cugraph/snmg/link_analysis/mg_pagerank.py (10 changes: 8 additions & 2 deletions)

@@ -14,7 +14,13 @@
from cugraph.snmg.link_analysis import mg_pagerank_wrapper


-def mg_pagerank(src_ptrs_info, dest_ptrs_info):
-    df = mg_pagerank_wrapper.mg_pagerank(src_ptrs_info, dest_ptrs_info)
+def mg_pagerank(src_ptrs_info,
+                dest_ptrs_info,
+                alpha=0.85,
+                max_iter=30):
+    df = mg_pagerank_wrapper.mg_pagerank(src_ptrs_info,
+                                         dest_ptrs_info,
+                                         alpha,
+                                         max_iter)

    return df
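The Python entry point now forwards PageRank's damping factor and iteration cap instead of relying on values hardcoded in the Cython layer below it. A hypothetical call site, assuming src_ptrs_info and dest_ptrs_info are the per-GPU device-pointer descriptors this module already expects:

from cugraph.snmg.link_analysis.mg_pagerank import mg_pagerank

# src_ptrs_info / dest_ptrs_info: prepared per-GPU column descriptors (assumed).
# Defaults are alpha=0.85 and max_iter=30; both can now be overridden.
pr_df = mg_pagerank(src_ptrs_info,
                    dest_ptrs_info,
                    alpha=0.9,    # damping factor
                    max_iter=50)  # iteration limit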
python/cugraph/snmg/link_analysis/mg_pagerank_wrapper.pyx (9 changes: 6 additions & 3 deletions)

@@ -27,7 +27,10 @@ from librmm_cffi import librmm as rmm
import numpy as np


-def mg_pagerank(src_ptrs_info, dest_ptrs_info):
+def mg_pagerank(src_ptrs_info,
+                dest_ptrs_info,
+                alpha=0.85,
+                max_iter=30,):
    cdef gdf_column** src_column_ptr = <gdf_column**>malloc(len(src_ptrs_info) * sizeof(gdf_column*))
    cdef gdf_column** dest_column_ptr = <gdf_column**>malloc(len(dest_ptrs_info) * sizeof(gdf_column*))

@@ -42,8 +45,8 @@ def mg_pagerank(src_ptrs_info, dest_ptrs_info):
                   <gdf_column**> dest_column_ptr,
                   <gdf_column*> pr_ptr,
                   <const size_t>n_gpus,
-                  <float> 0.85,#damping_factor,
-                  <int> 10 #max_iter
+                  <float> alpha,
+                  <int> max_iter
                   )

    data = rmm.device_array_from_ptr(<uintptr_t> pr_ptr.data,
