Warnings are errors (#4075)
Depends on rapidsai/raft#299
Depends on rapidsai/raft#311

Fixes #4086

This PR enables `-Wall` with warnings treated as errors (`-Werror`) and fixes all current compiler warnings.

Compiler warnings should almost never be ignored. -Wall helps prevent hard-to-find bugs.
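
As a concrete illustration (my example, not code from this PR), `-Wall` in C++ mode enables `-Wsign-compare`, which catches the same class of bug fixed in the `dbscan.cuh` diff below:

```cpp
#include <cstddef>

// With -Wall, GCC and Clang flag this mixed signed/unsigned comparison:
// a negative `i` converts to a huge unsigned value, so the check quietly
// returns the wrong answer instead of failing loudly.
bool in_range(int i, std::size_t n)
{
  return i < n;  // warning: comparison of integers of different signedness
}
```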

Tested with both Release and Debug builds, since more warnings are typically reported in Debug builds.

In any file I touched, I also cleaned up the includes so that they are grouped, sorted, and ordered from "near" to "far" (relative to the including file).
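
For illustration, the reordered includes in `cpp/src/common/cuml_api.cpp` (shown in the diff below) end up grouped like this; the grouping comments are mine:

```cpp
#include "cumlHandle.hpp"  // nearest: header from the same component

#include <cuml/common/utils.hpp>  // this project (cuML)
#include <cuml/cuml_api.h>

#include <raft/cudart_utils.h>  // direct dependencies (RAFT)
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/host/allocator.hpp>

#include <cstddef>  // farthest: the standard library
#include <functional>
```

Besides readability, this ordering surfaces missing-include bugs: a header that only compiles because something "farther" was included first gets caught when it is included "nearest" on its own.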

Also cleaned up many instances of `size_t` --> `std::size_t`; however, a global search-and-replace is left for a separate PR to make this one easier to review.
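
For context: unqualified `size_t` in a C++ file only compiles because some other header happens to place the alias in the global namespace, whereas `std::size_t` plus an explicit `<cstddef>` include states the dependency directly. A minimal sketch of the target style (adapting the `johnson_lindenstrauss_min_dim` declaration from `rproj_c.h` below):

```cpp
#include <cstddef>  // declares std::size_t

std::size_t johnson_lindenstrauss_min_dim(std::size_t n_samples, double eps);
```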

Authors:
  - Mark Harris (https://github.com/harrism)

Approvers:
  - Robert Maynard (https://github.com/robertmaynard)
  - William Hicks (https://github.com/wphicks)
  - Dante Gama Dessavre (https://github.com/dantegd)

URL: #4075
harrism authored Aug 10, 2021
1 parent 3b439a0 commit e977f3e
Showing 62 changed files with 845 additions and 664 deletions.
2 changes: 1 addition & 1 deletion cpp/bench/prims/gram_matrix.cu
@@ -58,7 +58,7 @@ struct GramMatrix : public Fixture {
std::unique_ptr<GramMatrixBase<T>>(KernelFactory<T>::create(p.kernel_params, cublas_handle));
}

-~GramMatrix() { CUBLAS_CHECK(cublasDestroy(cublas_handle)); }
+~GramMatrix() { CUBLAS_CHECK_NO_THROW(cublasDestroy(cublas_handle)); }

protected:
void allocateBuffers(const ::benchmark::State& state) override
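
The one-line change above is worth a note: `CUBLAS_CHECK` throws on failure, and destructors are implicitly `noexcept`, so an exception escaping `~GramMatrix()` would call `std::terminate` (GCC warns about visible cases via `-Wterminate`). A minimal sketch of the non-throwing pattern, with the error handling reduced to a log line and a hypothetical `CublasFixture` type (the real `CUBLAS_CHECK_NO_THROW` macro lives in RAFT and uses the project logger):

```cpp
#include <cublas_v2.h>

#include <cstdio>

struct CublasFixture {
  cublasHandle_t cublas_handle{};

  CublasFixture() { cublasCreate(&cublas_handle); }

  // A throwing status check here could call std::terminate during stack
  // unwinding, so the failure is only reported, never thrown.
  ~CublasFixture()
  {
    if (cublasDestroy(cublas_handle) != CUBLAS_STATUS_SUCCESS) {
      std::fprintf(stderr, "cublasDestroy failed\n");
    }
  }
};
```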
6 changes: 4 additions & 2 deletions cpp/cmake/modules/ConfigureCUDA.cmake
@@ -25,8 +25,10 @@ endif()
list(APPEND CUML_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)

# set warnings as errors
-# list(APPEND CUML_CUDA_FLAGS -Werror=cross-execution-space-call)
-# list(APPEND CUML_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
+if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.2.0)
+  list(APPEND CUML_CUDA_FLAGS -Werror=all-warnings)
+endif()
+list(APPEND CUML_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)

if(DISABLE_DEPRECATION_WARNING)
list(APPEND CUML_CXX_FLAGS -Wno-deprecated-declarations)
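
For reference: `-Werror=all-warnings` is the nvcc spelling of warnings-as-errors for device code and, to the best of my knowledge, first shipped with the CUDA 11.2 toolkit, hence the version gate; warnings from the host compiler are promoted separately via the `-Xcompiler=-Wall,-Werror` pass-through on the last line.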
5 changes: 5 additions & 0 deletions cpp/include/cuml/ensemble/randomforest.hpp
@@ -19,8 +19,13 @@
#include <cuml/common/logger.hpp>
#include <cuml/ensemble/treelite_defs.hpp>
#include <cuml/tree/decisiontree.hpp>

+#include <map>

+namespace raft {
+class handle_t; // forward decl
+}

namespace ML {

enum RF_type {
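
The forward declaration added above is the standard way to keep a public header light: this header only passes `raft::handle_t` by reference, so the complete type, and the heavyweight `<raft/handle.hpp>` include, is not needed. A sketch of the idea (the `fit` signature here is hypothetical):

```cpp
namespace raft {
class handle_t;  // forward declaration: no #include <raft/handle.hpp> required
}

namespace ML {
// References and pointers to an incomplete type are fine in a declaration;
// only translation units that call handle_t members need the full header.
void fit(const raft::handle_t& handle, const float* X, int n_rows, int n_cols);
}
```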
3 changes: 2 additions & 1 deletion cpp/include/cuml/random_projection/rproj_c.h
@@ -17,6 +17,7 @@
#pragma once

#include <cuml/common/device_buffer.hpp>

#include <raft/handle.hpp>
#include <raft/mr/device/allocator.hpp>

@@ -95,4 +96,4 @@ void RPROJtransform(const raft::handle_t& handle,

size_t johnson_lindenstrauss_min_dim(size_t n_samples, double eps);

-} // namespace ML
\ No newline at end of file
+} // namespace ML
7 changes: 3 additions & 4 deletions cpp/include/cuml/tree/decisiontree.hpp
@@ -15,13 +15,12 @@
*/

#pragma once
-#include <vector>

#include "algo_helper.h"
#include "flatnode.h"

-namespace raft {
-class handle_t;
-}
+#include <string>
+#include <vector>

namespace ML {

9 changes: 5 additions & 4 deletions cpp/src/common/allocatorAdapter.hpp
@@ -16,14 +16,15 @@

#pragma once

-#include <limits>

-#include <thrust/system/cuda/execution_policy.h>

#include <raft/cudart_utils.h>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/host/allocator.hpp>

+#include <thrust/system/cuda/execution_policy.h>

+#include <cstddef>
+#include <limits>

namespace ML {

template <typename T>
1 change: 1 addition & 0 deletions cpp/src/common/cumlHandle.hpp
@@ -17,6 +17,7 @@
#pragma once

#include <cuml/cuml_api.h>

#include <raft/handle.hpp>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/host/allocator.hpp>
10 changes: 5 additions & 5 deletions cpp/src/common/cuml_api.cpp
@@ -14,17 +14,17 @@
* limitations under the License.
*/

#include "cumlHandle.hpp"

#include <cuml/cuml_api.h>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/host/allocator.hpp>
#include <cuml/common/utils.hpp>

#include <raft/cudart_utils.h>
#include <cuml/common/utils.hpp>
#include <functional>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/host/allocator.hpp>
#include "cumlHandle.hpp"

#include <cstddef>
#include <functional>
namespace ML {
namespace detail {

7 changes: 4 additions & 3 deletions cpp/src/common/tensor.hpp
@@ -19,6 +19,7 @@
#include <raft/cudart_utils.h>
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/host/allocator.hpp>

#include <vector>

namespace ML {
@@ -171,17 +172,17 @@ class Tensor {
std::shared_ptr<raft::mr::host::allocator> _hAllocator;

/// Raw pointer to where the tensor data begins
-DataPtrT _data;
+DataPtrT _data{};

/// Array of strides (in sizeof(T) terms) per each dimension
IndexT _stride[Dim];

/// Size per each dimension
IndexT _size[Dim];

-AllocState _state;
+AllocState _state{};

-cudaStream_t _stream;
+cudaStream_t _stream{};
};

}; // end namespace ML
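
The `{}` initializers added above value-initialize the members (null pointer, zeroed handle, zero enum value), which removes a real hazard when a constructor path forgets to set one of them and silences the corresponding `-Wmaybe-uninitialized`-style warnings. A minimal sketch of the idiom (the struct is illustrative, not the real `Tensor`):

```cpp
#include <cuda_runtime_api.h>

struct MembersSketch {
  float* data{};          // value-initialized: nullptr
  cudaStream_t stream{};  // value-initialized: zeroed handle
  int state{};            // value-initialized: 0
};

// MembersSketch m;  // every member has a well-defined value immediately
```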
14 changes: 7 additions & 7 deletions cpp/src/dbscan/adjgraph/algo.cuh
@@ -16,24 +16,24 @@

#pragma once

-#include <thrust/device_ptr.h>
-#include <thrust/scan.h>
-#include <common/allocatorAdapter.hpp>
-#include <raft/cuda_utils.cuh>
#include "../common.cuh"
#include "pack.h"

+#include <common/allocatorAdapter.hpp>

+#include <raft/cuda_utils.cuh>
+#include <raft/sparse/convert/csr.cuh>

+#include <thrust/device_ptr.h>
+#include <thrust/scan.h>

using namespace thrust;

namespace ML {
namespace Dbscan {
namespace AdjGraph {
namespace Algo {

using namespace MLCommon;

static const int TPB_X = 256;

/**
@@ -61,4 +61,4 @@ void launcher(const raft::handle_t& handle,
} // namespace Algo
} // namespace AdjGraph
} // namespace Dbscan
-} // namespace ML
\ No newline at end of file
+} // namespace ML
23 changes: 13 additions & 10 deletions cpp/src/dbscan/dbscan.cuh
@@ -16,13 +16,16 @@

#pragma once

#include "runner.cuh"

#include <common/nvtx.hpp>

#include <cuml/cluster/dbscan.hpp>
#include <cuml/common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
#include "runner.cuh"

+#include <algorithm>
+#include <cstddef>

namespace ML {
namespace Dbscan {
@@ -65,7 +68,7 @@ size_t compute_batch_size(size_t& estimated_memory,

// To avoid overflow, we need: batch_size <= MAX_LABEL / n_rows (floor div)
Index_ MAX_LABEL = std::numeric_limits<Index_>::max();
-if (batch_size > MAX_LABEL / n_rows) {
+if (batch_size > static_cast<std::size_t>(MAX_LABEL / n_rows)) {
Index_ new_batch_size = MAX_LABEL / n_rows;
CUML_LOG_WARN(
"Batch size limited by the chosen integer type (%d bytes). %d -> %d. "
@@ -77,7 +80,8 @@ size_t compute_batch_size(size_t& estimated_memory,
}

// Warn when a smaller index type could be used
-if (sizeof(Index_) > sizeof(int) && batch_size < std::numeric_limits<int>::max() / n_rows) {
+if ((sizeof(Index_) > sizeof(int)) &&
+    (batch_size < std::numeric_limits<int>::max() / static_cast<std::size_t>(n_rows))) {
CUML_LOG_WARN(
"You are using an index type of size (%d bytes) but a smaller index "
"type (%d bytes) would be sufficient. Using the smaller integer type "
@@ -110,8 +114,11 @@ void dbscanFitImpl(const raft::handle_t& handle,
int algo_adj = 1;
int algo_ccl = 2;

-int my_rank, n_rank;
-Index_ start_row, n_owned_rows;
+int my_rank{0};
+int n_rank{1};
+Index_ start_row{0};
+Index_ n_owned_rows{n_rows};

if (opg) {
const auto& comm = handle.get_comms();
my_rank = comm.get_rank();
@@ -122,10 +129,6 @@ void dbscanFitImpl(const raft::handle_t& handle,
n_owned_rows = max(Index_(0), end_row - start_row);
// Note: it is possible for a node to have no work in theory. It won't
// happen in practice (because n_rows is much greater than n_rank)
-} else {
-  my_rank = 0;
-  n_rank = 1;
-  n_owned_rows = n_rows;
}

CUML_LOG_DEBUG("#%d owns %ld rows", (int)my_rank, (unsigned long)n_owned_rows);
@@ -200,4 +203,4 @@
}

} // namespace Dbscan
-} // namespace ML
\ No newline at end of file
+} // namespace ML
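
Two recurring fixes appear in this file. Comparisons between the unsigned `std::size_t` and signed `Index_` values now carry an explicit `static_cast`, so `-Wsign-compare` is satisfied by a deliberate conversion rather than an implicit one; and locals like `my_rank` are initialized at declaration with their single-process defaults, which deletes the `else` branch and leaves `-Wmaybe-uninitialized` nothing to flag. A sketch of the cast pattern (the helper name is mine):

```cpp
#include <cstddef>
#include <limits>

// batch_size is unsigned; Index_ may be signed. MAX_LABEL / n_rows is
// non-negative, so casting it to std::size_t is safe and makes the
// comparison well-defined and warning-free.
template <typename Index_>
bool exceeds_label_limit(std::size_t batch_size, Index_ n_rows)
{
  Index_ MAX_LABEL = std::numeric_limits<Index_>::max();
  return batch_size > static_cast<std::size_t>(MAX_LABEL / n_rows);
}
```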
1 change: 1 addition & 0 deletions cpp/src/dbscan/mergelabels/runner.cuh
@@ -18,6 +18,7 @@

#include <label/merge_labels.cuh>

+#include <raft/handle.hpp>

namespace ML {
namespace Dbscan {
namespace MergeLabels {
4 changes: 4 additions & 0 deletions cpp/src/dbscan/mergelabels/tree_reduction.cuh
@@ -18,6 +18,10 @@

#include "runner.cuh"

+#include <common/nvtx.hpp>

+#include <cuml/common/logger.hpp>

namespace ML {
namespace Dbscan {
namespace MergeLabels {