From e977f3e4194313a857da96fdd810ee5ef1d742b3 Mon Sep 17 00:00:00 2001
From: Mark Harris
Date: Tue, 10 Aug 2021 14:06:57 +1000
Subject: [PATCH] Warnings are errors (#4075)

Depends on rapidsai/raft#299
Depends on rapidsai/raft#311

Fixes #4086

This PR enables -Wall, treats warnings as errors, and fixes all current
compiler warnings. Compiler warnings should almost never be ignored; -Wall
helps prevent hard-to-find bugs.

Tested with both Release and Debug builds, since more warnings are typically
reported in Debug builds.

In any file I touched, I also cleaned up the includes so that they are
grouped, sorted, and ordered from "near" to "far" (relative to the file doing
the including).

Also cleaned up many instances of `size_t` --> `std::size_t`; the global
search-and-replace is left for a separate PR to keep this one easier to
review.
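To illustrate the include-ordering and `std::size_t` conventions described
above, here is a hypothetical source file laid out the same way (the file and
header names below are placeholders for illustration only, not files from
this diff):

    // cpp/src/example/example.cu -- hypothetical file, for illustration only
    #include "example.cuh"          // 1. "near": the header paired with this source file

    #include <cuml/cuml_api.h>      // 2. public cuML headers (placeholder name)

    #include <raft/cudart_utils.h>  // 3. headers from dependencies such as RAFT/RMM (placeholder name)

    #include <cstddef>              // 4. "far": the C++ standard library

    // Prefer std::size_t over the unqualified size_t.
    void example(std::size_t n_rows);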
Authors:
  - Mark Harris (https://github.com/harrism)

Approvers:
  - Robert Maynard (https://github.com/robertmaynard)
  - William Hicks (https://github.com/wphicks)
  - Dante Gama Dessavre (https://github.com/dantegd)

URL: https://github.com/rapidsai/cuml/pull/4075
---
 cpp/bench/prims/gram_matrix.cu                 |   2 +-
 cpp/cmake/modules/ConfigureCUDA.cmake          |   6 +-
 cpp/include/cuml/ensemble/randomforest.hpp     |   5 +
 cpp/include/cuml/random_projection/rproj_c.h   |   3 +-
 cpp/include/cuml/tree/decisiontree.hpp         |   7 +-
 cpp/src/common/allocatorAdapter.hpp            |   9 +-
 cpp/src/common/cumlHandle.hpp                  |   1 +
 cpp/src/common/cuml_api.cpp                    |  10 +-
 cpp/src/common/tensor.hpp                      |   7 +-
 cpp/src/dbscan/adjgraph/algo.cuh               |  14 +-
 cpp/src/dbscan/dbscan.cuh                      |  23 +--
 cpp/src/dbscan/mergelabels/runner.cuh          |   1 +
 cpp/src/dbscan/mergelabels/tree_reduction.cuh  |   4 +
 cpp/src/dbscan/runner.cuh                      |  86 +++++-----
 cpp/src/decisiontree/decisiontree.cuh          |  29 ++--
 cpp/src/fil/fil.cu                             |  52 +++---
 cpp/src/fil/infer.cu                           |  56 ++++---
 cpp/src/glm/ols_mg.cu                          |  11 +-
 cpp/src/glm/ridge_mg.cu                        |  23 ++-
 cpp/src/hdbscan/detail/extract.cuh             |  23 ++-
 cpp/src/hdbscan/detail/reachability_faiss.cuh  |  25 +--
 cpp/src/holtwinters/internal/hw_eval.cuh       |  10 +-
 cpp/src/holtwinters/internal/hw_optim.cuh      |   9 +-
 cpp/src/kmeans/common.cuh                      |  32 ++--
 cpp/src/knn/knn.cu                             |   6 +-
 cpp/src/knn/knn_opg_common.cuh                 |  64 ++++----
 cpp/src/pca/pca_mg.cu                          |  22 +--
 cpp/src/pca/sign_flip_mg.cu                    |  15 +-
 cpp/src/random_projection/rproj.cuh            |  55 ++++---
 cpp/src/random_projection/rproj_utils.cuh      |   6 +-
 cpp/src/randomforest/randomforest.cu           |  22 ++-
 cpp/src/randomforest/randomforest.cuh          |  26 +--
 cpp/src/solver/cd_mg.cu                        |  33 ++--
 cpp/src/solver/lars_impl.cuh                   |   4 +-
 cpp/src/solver/shuffle.h                       |   7 +-
 cpp/src/svm/kernelcache.cuh                    |  15 +-
 cpp/src/svm/smosolver.cuh                      |  36 ++---
 cpp/src/svm/workingset.cuh                     |  48 +++---
 cpp/src/tsvd/tsvd_mg.cu                        |  17 +-
 .../simpl_set_embed/optimize_batch_kernel.cuh  |  10 +-
 cpp/src_prims/cache/cache.cuh                  |  13 +-
 cpp/src_prims/cache/cache_util.cuh             |   6 +-
 cpp/src_prims/linalg/batched/matrix.cuh        | 148 +++++++++---------
 cpp/src_prims/linalg/init.h                    |   1 +
 cpp/src_prims/linalg/reduce_rows_by_key.cuh    |  15 +-
 .../metrics/batched/information_criterion.cuh  |   7 +-
 cpp/src_prims/metrics/contingencyMatrix.cuh    |  28 ++--
 cpp/src_prims/selection/haversine_knn.cuh      |   4 +-
 cpp/src_prims/selection/knn.cuh                |  50 +++---
 cpp/src_prims/sparse/batched/csr.cuh           |  74 ++++-----
 cpp/src_prims/stats/minmax.cuh                 |  25 ++-
 cpp/test/prims/batched/csr.cu                  |  25 +--
 .../prims/batched/information_criterion.cu     |  16 +-
 cpp/test/prims/batched/matrix.cu               |  30 ++--
 cpp/test/prims/rand_index.cu                   |  20 ++-
 cpp/test/sg/fil_test.cu                        |  23 ++-
 cpp/test/sg/multi_sum_test.cu                  |  18 ++-
 cpp/test/sg/rf_test.cu                         |  30 ++--
 cpp/test/sg/rf_treelite_test.cu                |  22 ++-
 cpp/test/sg/shap_kernel.cu                     |  11 +-
 cpp/test/sg/svc_test.cu                        |  67 ++++----
 cpp/test/sg/umap_parametrizable_test.cu        |  42 ++---
 62 files changed, 845 insertions(+), 664 deletions(-)

diff --git a/cpp/bench/prims/gram_matrix.cu b/cpp/bench/prims/gram_matrix.cu
index 5ce6a21032..c561a875c2 100644
--- a/cpp/bench/prims/gram_matrix.cu
+++ b/cpp/bench/prims/gram_matrix.cu
@@ -58,7 +58,7 @@ struct GramMatrix : public Fixture {
       std::unique_ptr>(KernelFactory::create(p.kernel_params, cublas_handle));
   }
 
-  ~GramMatrix() { CUBLAS_CHECK(cublasDestroy(cublas_handle)); }
+  ~GramMatrix() { CUBLAS_CHECK_NO_THROW(cublasDestroy(cublas_handle)); }
 
 protected:
  void allocateBuffers(const ::benchmark::State& state) override
diff --git a/cpp/cmake/modules/ConfigureCUDA.cmake b/cpp/cmake/modules/ConfigureCUDA.cmake
index 0d43f702d2..8dbab5fbfc 100644
--- a/cpp/cmake/modules/ConfigureCUDA.cmake
+++ b/cpp/cmake/modules/ConfigureCUDA.cmake
@@ -25,8 +25,10 @@ endif()
 list(APPEND CUML_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
 
 # set warnings as errors
-# list(APPEND CUML_CUDA_FLAGS -Werror=cross-execution-space-call)
-# list(APPEND CUML_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
+if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.2.0)
+  list(APPEND CUML_CUDA_FLAGS -Werror=all-warnings)
+endif()
+list(APPEND CUML_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
 
 if(DISABLE_DEPRECATION_WARNING)
   list(APPEND CUML_CXX_FLAGS -Wno-deprecated-declarations)
diff --git a/cpp/include/cuml/ensemble/randomforest.hpp b/cpp/include/cuml/ensemble/randomforest.hpp
index 20a95d1c1b..4f82396dfc 100644
--- a/cpp/include/cuml/ensemble/randomforest.hpp
+++ b/cpp/include/cuml/ensemble/randomforest.hpp
@@ -19,8 +19,13 @@
 #include
 #include
 #include
+
 #include
 
+namespace raft {
+class handle_t;  // forward decl
+}
+
 namespace ML {
 
 enum RF_type {
diff --git a/cpp/include/cuml/random_projection/rproj_c.h b/cpp/include/cuml/random_projection/rproj_c.h
index 20a8d2cc62..d4f1702b54 100644
--- a/cpp/include/cuml/random_projection/rproj_c.h
+++ b/cpp/include/cuml/random_projection/rproj_c.h
@@ -17,6 +17,7 @@
 #pragma once
 
 #include
+
 #include
 #include
 
@@ -95,4 +96,4 @@ void RPROJtransform(const raft::handle_t& handle,
 
 size_t johnson_lindenstrauss_min_dim(size_t n_samples, double eps);
 
-}  // namespace ML
\ No newline at end of file
+}  // namespace ML
diff --git a/cpp/include/cuml/tree/decisiontree.hpp b/cpp/include/cuml/tree/decisiontree.hpp
index 54020c45ec..827859f816 100644
--- a/cpp/include/cuml/tree/decisiontree.hpp
+++ b/cpp/include/cuml/tree/decisiontree.hpp
@@ -15,13 +15,12 @@
  */
 #pragma once
 
-#include
+
 #include "algo_helper.h"
 #include "flatnode.h"
 
-namespace raft {
-class handle_t;
-}
+#include
+#include
 
 namespace ML {
diff --git a/cpp/src/common/allocatorAdapter.hpp b/cpp/src/common/allocatorAdapter.hpp
index f0f41d9d28..ca3f5c2cc5 100644
--- a/cpp/src/common/allocatorAdapter.hpp
+++ b/cpp/src/common/allocatorAdapter.hpp
@@ -16,14 +16,15 @@
 
 #pragma once
 
-#include
-
-#include
-
 #include
 #include
 #include
+#include
+
+#include
+#include
+
 namespace ML {
 
 template
diff --git a/cpp/src/common/cumlHandle.hpp b/cpp/src/common/cumlHandle.hpp
index 4120df9e41..4b0a4793fc 100644
--- a/cpp/src/common/cumlHandle.hpp
+++ b/cpp/src/common/cumlHandle.hpp
@@ -17,6 +17,7 @@
 #pragma once
 
 #include
+
 #include
 #include
 #include
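Note on the randomforest.hpp hunk above: it forward-declares raft::handle_t
rather than pulling the full handle definition into a public header. A
minimal sketch of the pattern (hypothetical header and function names, not
the actual cuML API):

    // my_public_api.hpp -- hypothetical header, for illustration only
    #pragma once

    namespace raft {
    class handle_t;  // forward declaration; the full definition is not needed here
    }

    namespace ML {
    // An incomplete type is fine as long as it is only used by reference or pointer.
    void fit(const raft::handle_t& handle, const float* X, int n_rows, int n_cols);
    }  // namespace ML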
diff --git a/cpp/src/common/cuml_api.cpp b/cpp/src/common/cuml_api.cpp
index e5fe9a6646..cca2793bca 100644
--- a/cpp/src/common/cuml_api.cpp
+++ b/cpp/src/common/cuml_api.cpp
@@ -14,17 +14,17 @@
  * limitations under the License.
  */
 
+#include "cumlHandle.hpp"
+
 #include
-#include
-#include
+#include
 #include
-#include
-#include
 #include
 #include
 
-#include "cumlHandle.hpp"
+#include
+#include
 
 namespace ML {
 namespace detail {
diff --git a/cpp/src/common/tensor.hpp b/cpp/src/common/tensor.hpp
index 8bb4b17221..8578556199 100644
--- a/cpp/src/common/tensor.hpp
+++ b/cpp/src/common/tensor.hpp
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+
 #include
 
 namespace ML {
@@ -171,7 +172,7 @@ class Tensor {
   std::shared_ptr _hAllocator;
 
   /// Raw pointer to where the tensor data begins
-  DataPtrT _data;
+  DataPtrT _data{};
 
   /// Array of strides (in sizeof(T) terms) per each dimension
   IndexT _stride[Dim];
@@ -179,9 +180,9 @@
   /// Size per each dimension
   IndexT _size[Dim];
 
-  AllocState _state;
+  AllocState _state{};
 
-  cudaStream_t _stream;
+  cudaStream_t _stream{};
 };
 
 };  // end namespace ML
diff --git a/cpp/src/dbscan/adjgraph/algo.cuh b/cpp/src/dbscan/adjgraph/algo.cuh
index effafb6c7f..13cbf3eae6 100644
--- a/cpp/src/dbscan/adjgraph/algo.cuh
+++ b/cpp/src/dbscan/adjgraph/algo.cuh
@@ -16,15 +16,17 @@
 
 #pragma once
 
-#include
-#include
-#include
-#include
 #include "../common.cuh"
 #include "pack.h"
 
+#include
+
+#include
 #include
+#include
+#include
+
 using namespace thrust;
 
 namespace ML {
@@ -32,8 +34,6 @@ namespace Dbscan {
 namespace AdjGraph {
 namespace Algo {
 
-using namespace MLCommon;
-
 static const int TPB_X = 256;
 
 /**
@@ -61,4 +61,4 @@ void launcher(const raft::handle_t& handle,
 }  // namespace Algo
 }  // namespace AdjGraph
 }  // namespace Dbscan
-}  // namespace ML
\ No newline at end of file
+}  // namespace ML
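Note on the tensor.hpp hunk above: members that were previously left
uninitialized now get default member initializers ({}), which silences
uninitialized-member warnings under -Wall and gives the object a
deterministic default state. A small sketch of the pattern (hypothetical
class, not the actual Tensor):

    #include <cstddef>

    #include <cuda_runtime_api.h>

    // Hypothetical example: value-initialize members in-class instead of
    // leaving them indeterminate until a constructor body assigns them.
    class DeviceBuffer {
      float* data_{};          // nullptr until allocated
      std::size_t size_{};     // 0 elements by default
      cudaStream_t stream_{};  // the default (null) stream

     public:
      std::size_t size() const { return size_; }
    };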
diff --git a/cpp/src/dbscan/dbscan.cuh b/cpp/src/dbscan/dbscan.cuh
index 5250536aae..467ecb0839 100644
--- a/cpp/src/dbscan/dbscan.cuh
+++ b/cpp/src/dbscan/dbscan.cuh
@@ -16,13 +16,16 @@
 
 #pragma once
 
+#include "runner.cuh"
+
 #include
+
 #include
 #include
 #include
-#include "runner.cuh"
 
 #include
+#include
 
 namespace ML {
 namespace Dbscan {
@@ -65,7 +68,7 @@ size_t compute_batch_size(size_t& estimated_memory,
 
   // To avoid overflow, we need: batch_size <= MAX_LABEL / n_rows (floor div)
   Index_ MAX_LABEL = std::numeric_limits<Index_>::max();
-  if (batch_size > MAX_LABEL / n_rows) {
+  if (batch_size > static_cast<std::size_t>(MAX_LABEL / n_rows)) {
     Index_ new_batch_size = MAX_LABEL / n_rows;
     CUML_LOG_WARN(
       "Batch size limited by the chosen integer type (%d bytes). %d -> %d. "
@@ -77,7 +80,8 @@
   }
 
   // Warn when a smaller index type could be used
-  if (sizeof(Index_) > sizeof(int) && batch_size < std::numeric_limits<int>::max() / n_rows) {
+  if ((sizeof(Index_) > sizeof(int)) &&
+      (batch_size < std::numeric_limits<int>::max() / static_cast<std::size_t>(n_rows))) {
     CUML_LOG_WARN(
       "You are using an index type of size (%d bytes) but a smaller index "
       "type (%d bytes) would be sufficient. Using the smaller integer type "
@@ -110,8 +114,11 @@ void dbscanFitImpl(const raft::handle_t& handle,
   int algo_adj = 1;
   int algo_ccl = 2;
 
-  int my_rank, n_rank;
-  Index_ start_row, n_owned_rows;
+  int my_rank{0};
+  int n_rank{1};
+  Index_ start_row{0};
+  Index_ n_owned_rows{n_rows};
+
   if (opg) {
     const auto& comm = handle.get_comms();
     my_rank = comm.get_rank();
@@ -122,10 +129,6 @@
     n_owned_rows = max(Index_(0), end_row - start_row);
     // Note: it is possible for a node to have no work in theory. It won't
     // happen in practice (because n_rows is much greater than n_rank)
-  } else {
-    my_rank = 0;
-    n_rank = 1;
-    n_owned_rows = n_rows;
   }
 
   CUML_LOG_DEBUG("#%d owns %ld rows", (int)my_rank, (unsigned long)n_owned_rows);
@@ -200,4 +203,4 @@ void dbscanFitImpl(const raft::handle_t& handle,
 }
 
 }  // namespace Dbscan
-}  // namespace ML
\ No newline at end of file
+}  // namespace ML
diff --git a/cpp/src/dbscan/mergelabels/runner.cuh b/cpp/src/dbscan/mergelabels/runner.cuh
index e43ba382d1..506239e04a 100644
--- a/cpp/src/dbscan/mergelabels/runner.cuh
+++ b/cpp/src/dbscan/mergelabels/runner.cuh
@@ -18,6 +18,7 @@
 #include
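Note on the compute_batch_size hunks above: comparing a std::size_t against a
signed expression triggers -Wsign-compare once warnings are errors, so the
signed side is cast to std::size_t before the comparison. A standalone sketch
of the warning and the fix (hypothetical values, not the DBSCAN code itself):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    int main() {
      std::size_t batch_size = std::size_t{1} << 20;
      std::int64_t n_rows    = 1 << 16;
      std::int64_t max_label = std::numeric_limits<std::int64_t>::max();

      // if (batch_size > max_label / n_rows) {}  // warns: comparison of integer
      //                                          // expressions of different signedness
      if (batch_size > static_cast<std::size_t>(max_label / n_rows)) {  // warning-free
        std::puts("batch size would overflow the chosen label index type");
      }
      return 0;
    }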