Warnings are errors #4075

Merged (29 commits, Aug 10, 2021)

Commits (the diff below shows changes from 26 of the 29 commits):
07b557f  Enable warnings as errors and fix all current warnings (harrism, Jul 21, 2021)
74fadac  Merge branch 'branch-21.08' into fix-cuml-warnings-as-errors (harrism, Jul 21, 2021)
694a038  Merge branch 'branch-21.10' into fix-cuml-warnings-as-errors (harrism, Jul 21, 2021)
1d73c48  Use is_nan (harrism, Jul 21, 2021)
6ae8fae  std::size_t (harrism, Jul 22, 2021)
0ba156f  Eliminate #pragma unroll warnings (harrism, Jul 22, 2021)
173311c  Don't ignore unknown pragma warnings anymore. (harrism, Jul 22, 2021)
eb3373d  Respond to review feedback, std::size_t, include cstddef (harrism, Jul 27, 2021)
d48cf6c  Add `detail::bit_cast` (harrism, Jul 27, 2021)
bf8bf32  Merge branch 'branch-21.10' into fix-cuml-warnings-as-errors (harrism, Jul 28, 2021)
fea66f6  Fix new warning in rf_test.cu (harrism, Jul 28, 2021)
07f1efe  Merge branch 'branch-21.10' into fix-cuml-warnings-as-errors (harrism, Aug 3, 2021)
5f065fd  Fix warning in runner.cuh (harrism, Aug 3, 2021)
80c572a  Enable CUDA device code warnings as errors. (harrism, Aug 3, 2021)
06b44b1  Fix warning in contingencyMatrix.cuh (harrism, Aug 3, 2021)
60f04e6  Use size_t in batched matrix class. (harrism, Aug 3, 2021)
0d6c8e8  More auto (harrism, Aug 3, 2021)
fed871f  Next warnings fix... (harrism, Aug 4, 2021)
7a83c7d  NVCC Warnings as errors only for 11.2+ (harrism, Aug 4, 2021)
4d82043  Another fix (harrism, Aug 4, 2021)
3a6a24f  More fixes in csr.cuh (harrism, Aug 4, 2021)
e1aee79  Next attempt (harrism, Aug 4, 2021)
1506dce  Temporarily disable -Werror (harrism, Aug 4, 2021)
d3ac9b9  Fix uninitialized variables. (harrism, Aug 5, 2021)
df2c6e2  Fix more warnings (harrism, Aug 5, 2021)
32939d2  Re-enable -Werror (harrism, Aug 5, 2021)
71bb6d5  Update cpp/cmake/modules/ConfigureCUDA.cmake (harrism, Aug 7, 2021)
b9bfa80  Remove unused parameter name. (harrism, Aug 9, 2021)
da342bf  Merge branch 'fix-cuml-warnings-as-errors' of github.com:harrism/cuml… (harrism, Aug 9, 2021)
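Commit d48cf6c adds a `detail::bit_cast` helper. The helper's actual definition is not part of the diff shown below, but the usual pre-C++20 pattern is a memcpy-based cast that replaces pointer reinterpretation, which violates strict aliasing and draws exactly the kind of warning this PR turns into an error. A sketch under that assumption, not the PR's exact code:

#include <cstdint>
#include <cstring>
#include <type_traits>

namespace detail {

// Pre-C++20 stand-in for std::bit_cast: copies the object representation
// instead of reinterpret_cast-ing a pointer, so there is no aliasing UB.
template <typename To, typename From>
To bit_cast(const From& from) noexcept
{
  static_assert(sizeof(To) == sizeof(From), "bit_cast requires equal sizes");
  static_assert(std::is_trivially_copyable<From>::value, "From must be trivially copyable");
  static_assert(std::is_trivially_copyable<To>::value, "To must be trivially copyable");
  To to;
  std::memcpy(&to, &from, sizeof(To));
  return to;
}

}  // namespace detail

// Example: inspect the bit pattern of a float without an aliasing cast.
int main()
{
  float f            = 1.0f;
  std::uint32_t bits = detail::bit_cast<std::uint32_t>(f);
  return bits == 0x3f800000u ? 0 : 1;
}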
2 changes: 1 addition & 1 deletion cpp/bench/prims/gram_matrix.cu
@@ -58,7 +58,7 @@ struct GramMatrix : public Fixture {
       std::unique_ptr<GramMatrixBase<T>>(KernelFactory<T>::create(p.kernel_params, cublas_handle));
   }

-  ~GramMatrix() { CUBLAS_CHECK(cublasDestroy(cublas_handle)); }
+  ~GramMatrix() { CUBLAS_CHECK_NO_THROW(cublasDestroy(cublas_handle)); }

 protected:
   void allocateBuffers(const ::benchmark::State& state) override
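This change swaps the throwing CUBLAS_CHECK for CUBLAS_CHECK_NO_THROW inside a destructor. Destructors are implicitly noexcept in C++11, so an error macro that throws from one risks std::terminate and draws compiler warnings. A minimal sketch of the pattern with simplified stand-in macros, not the real cuML/RAFT definitions:

#include <cublas_v2.h>

#include <cstdio>
#include <stdexcept>

// Simplified stand-ins for the real macros.
#define CUBLAS_CHECK(call)                                                          \
  do {                                                                              \
    cublasStatus_t status = (call);                                                 \
    if (status != CUBLAS_STATUS_SUCCESS) throw std::runtime_error("cuBLAS error");  \
  } while (0)

#define CUBLAS_CHECK_NO_THROW(call)                                                 \
  do {                                                                              \
    cublasStatus_t status = (call);                                                 \
    if (status != CUBLAS_STATUS_SUCCESS)                                            \
      std::fprintf(stderr, "cuBLAS error %d (ignored in destructor)\n",             \
                   static_cast<int>(status));                                       \
  } while (0)

struct GramFixture {
  cublasHandle_t cublas_handle{};

  GramFixture() { CUBLAS_CHECK(cublasCreate(&cublas_handle)); }

  // Destructors are noexcept by default: throwing here would terminate the
  // program, so the non-throwing variant logs the failure and moves on.
  ~GramFixture() { CUBLAS_CHECK_NO_THROW(cublasDestroy(cublas_handle)); }
};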
6 changes: 4 additions & 2 deletions cpp/cmake/modules/ConfigureCUDA.cmake
@@ -25,8 +25,10 @@ endif()
 list(APPEND CUML_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)

 # set warnings as errors
-# list(APPEND CUML_CUDA_FLAGS -Werror=cross-execution-space-call)
-# list(APPEND CUML_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
+if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.2.0)
+    list(APPEND RAFT_CUDA_FLAGS -Werror=all-warnings)
+endif()
+list(APPEND CUML_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)

 if(DISABLE_DEPRECATION_WARNING)
   list(APPEND CUML_CXX_FLAGS -Wno-deprecated-declarations)
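With -Wall -Werror on the host compiler and -Werror=all-warnings for nvcc 11.2+, previously tolerated patterns stop compiling. One example this PR itself runs into (commit b9bfa80, "Remove unused parameter name."): an unused named parameter, which compilers can flag (GCC's -Wunused-parameter, and nvcc's "was declared but never referenced" diagnostics). A hypothetical illustration, not code from this PR:

// Before: the named-but-unused parameter can draw a warning, now a hard error.
int clamp_before(int value, int debug_level) { return value < 0 ? 0 : value; }

// After: leaving the parameter unnamed (or commenting out the name) marks it
// as intentionally unused, so the compiler stays quiet.
int clamp_after(int value, int /*debug_level*/) { return value < 0 ? 0 : value; }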
5 changes: 5 additions & 0 deletions cpp/include/cuml/ensemble/randomforest.hpp
@@ -19,8 +19,13 @@
 #include <cuml/common/logger.hpp>
 #include <cuml/ensemble/treelite_defs.hpp>
 #include <cuml/tree/decisiontree.hpp>

+#include <map>
+
+namespace raft {
+class handle_t;  // forward decl
+}

 namespace ML {

 enum RF_type {
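The forward declaration added here lets the public header name raft::handle_t in signatures without pulling in <raft/handle.hpp> and everything behind it, cutting compile times and include coupling. The pattern in isolation, with hypothetical names:

// rf_api_demo.hpp
namespace raft {
class handle_t;  // forward declaration: the full definition is not needed here
}

namespace ML {
// References and pointers to an incomplete type are fine in declarations.
void fit_demo(const raft::handle_t& handle, const float* X, int n_rows, int n_cols);
}  // namespace ML

// Only the .cpp that actually calls methods on the handle must include the
// real <raft/handle.hpp>.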
3 changes: 2 additions & 1 deletion cpp/include/cuml/random_projection/rproj_c.h
@@ -17,6 +17,7 @@
 #pragma once

 #include <cuml/common/device_buffer.hpp>
+
 #include <raft/handle.hpp>
 #include <raft/mr/device/allocator.hpp>

@@ -95,4 +96,4 @@ void RPROJtransform(const raft::handle_t& handle,

 size_t johnson_lindenstrauss_min_dim(size_t n_samples, double eps);

-}  // namespace ML
\ No newline at end of file
+}  // namespace ML
7 changes: 3 additions & 4 deletions cpp/include/cuml/tree/decisiontree.hpp
@@ -15,13 +15,12 @@
  */

 #pragma once
-#include <vector>

 #include "algo_helper.h"
 #include "flatnode.h"

-namespace raft {
-class handle_t;
-}
+#include <string>
+#include <vector>

 namespace ML {
9 changes: 5 additions & 4 deletions cpp/src/common/allocatorAdapter.hpp
@@ -16,14 +16,15 @@

 #pragma once

-#include <limits>
-
-#include <thrust/system/cuda/execution_policy.h>
-
 #include <raft/cudart_utils.h>
 #include <raft/mr/device/allocator.hpp>
 #include <raft/mr/host/allocator.hpp>

+#include <thrust/system/cuda/execution_policy.h>
+
+#include <cstddef>
+#include <limits>
+
 namespace ML {

 template <typename T>
1 change: 1 addition & 0 deletions cpp/src/common/cumlHandle.hpp
@@ -17,6 +17,7 @@
 #pragma once

 #include <cuml/cuml_api.h>
+
 #include <raft/handle.hpp>
 #include <raft/mr/device/allocator.hpp>
 #include <raft/mr/host/allocator.hpp>
10 changes: 5 additions & 5 deletions cpp/src/common/cuml_api.cpp
@@ -14,17 +14,17 @@
  * limitations under the License.
  */

+#include "cumlHandle.hpp"

 #include <cuml/cuml_api.h>
-#include <raft/mr/device/allocator.hpp>
-#include <raft/mr/host/allocator.hpp>
+#include <cuml/common/utils.hpp>

 #include <raft/cudart_utils.h>
-#include <cuml/common/utils.hpp>
-#include <functional>
-#include "cumlHandle.hpp"
+#include <raft/mr/device/allocator.hpp>
+#include <raft/mr/host/allocator.hpp>

 #include <cstddef>
+#include <functional>
 namespace ML {
 namespace detail {
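This reshuffle, like several others in the PR, converges on one include-grouping convention: the implementation's own header first, then project headers, then dependency headers, then the C++ standard library, each group separated by a blank line. Annotated for illustration (the annotations are mine, not part of the diff):

#include "cumlHandle.hpp"                // own/local headers first

#include <cuml/cuml_api.h>               // project (cuML) headers
#include <cuml/common/utils.hpp>

#include <raft/cudart_utils.h>           // dependencies (RAFT, thrust, ...)
#include <raft/mr/device/allocator.hpp>
#include <raft/mr/host/allocator.hpp>

#include <cstddef>                       // C++ standard library last
#include <functional>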
7 changes: 4 additions & 3 deletions cpp/src/common/tensor.hpp
@@ -19,6 +19,7 @@
 #include <raft/cudart_utils.h>
 #include <raft/mr/device/allocator.hpp>
 #include <raft/mr/host/allocator.hpp>
+
 #include <vector>

 namespace ML {
@@ -171,17 +172,17 @@ class Tensor {
   std::shared_ptr<raft::mr::host::allocator> _hAllocator;

   /// Raw pointer to where the tensor data begins
-  DataPtrT _data;
+  DataPtrT _data{};

   /// Array of strides (in sizeof(T) terms) per each dimension
   IndexT _stride[Dim];

   /// Size per each dimension
   IndexT _size[Dim];

-  AllocState _state;
+  AllocState _state{};

-  cudaStream_t _stream;
+  cudaStream_t _stream{};
 };

 };  // end namespace ML
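Commit d3ac9b9 fixes uninitialized-variable warnings, and the `{}` default member initializers above are the same idea: value-initialize at the point of declaration so no code path can read an indeterminate value. A self-contained sketch with a hypothetical type mirroring the Tensor members:

#include <cstddef>

// With "T member{};", every constructor that does not set the member
// explicitly leaves it value-initialized (null/zero) rather than
// indeterminate, which is what -Wuninitialized-style warnings complain about.
struct TensorStateDemo {
  float* data{};        // nullptr, not garbage
  std::size_t size{};   // 0
  int state{};          // 0
};

int main()
{
  TensorStateDemo t;  // default construction is now well-defined
  return (t.data == nullptr && t.size == 0 && t.state == 0) ? 0 : 1;
}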
14 changes: 7 additions & 7 deletions cpp/src/dbscan/adjgraph/algo.cuh
@@ -16,24 +16,24 @@

 #pragma once

-#include <thrust/device_ptr.h>
-#include <thrust/scan.h>
-#include <common/allocatorAdapter.hpp>
-#include <raft/cuda_utils.cuh>
 #include "../common.cuh"
 #include "pack.h"

+#include <common/allocatorAdapter.hpp>
+
+#include <raft/cuda_utils.cuh>
 #include <raft/sparse/convert/csr.cuh>

+#include <thrust/device_ptr.h>
+#include <thrust/scan.h>
+
 using namespace thrust;

 namespace ML {
 namespace Dbscan {
 namespace AdjGraph {
 namespace Algo {

-using namespace MLCommon;
-
 static const int TPB_X = 256;

 /**
@@ -61,4 +61,4 @@ void launcher(const raft::handle_t& handle,
 } // namespace Algo
 } // namespace AdjGraph
 } // namespace Dbscan
-} // namespace ML
\ No newline at end of file
+} // namespace ML
23 changes: 13 additions & 10 deletions cpp/src/dbscan/dbscan.cuh
@@ -16,13 +16,16 @@

 #pragma once

+#include "runner.cuh"
+
 #include <common/nvtx.hpp>

 #include <cuml/cluster/dbscan.hpp>
 #include <cuml/common/device_buffer.hpp>
 #include <cuml/common/logger.hpp>
-#include "runner.cuh"

+#include <algorithm>
+#include <cstddef>

 namespace ML {
 namespace Dbscan {
@@ -65,7 +68,7 @@ size_t compute_batch_size(size_t& estimated_memory,

   // To avoid overflow, we need: batch_size <= MAX_LABEL / n_rows (floor div)
   Index_ MAX_LABEL = std::numeric_limits<Index_>::max();
-  if (batch_size > MAX_LABEL / n_rows) {
+  if (batch_size > static_cast<std::size_t>(MAX_LABEL / n_rows)) {
     Index_ new_batch_size = MAX_LABEL / n_rows;
     CUML_LOG_WARN(
       "Batch size limited by the chosen integer type (%d bytes). %d -> %d. "
@@ -77,7 +80,8 @@ size_t compute_batch_size(size_t& estimated_memory,
   }

   // Warn when a smaller index type could be used
-  if (sizeof(Index_) > sizeof(int) && batch_size < std::numeric_limits<int>::max() / n_rows) {
+  if ((sizeof(Index_) > sizeof(int)) &&
+      (batch_size < std::numeric_limits<int>::max() / static_cast<std::size_t>(n_rows))) {
     CUML_LOG_WARN(
       "You are using an index type of size (%d bytes) but a smaller index "
       "type (%d bytes) would be sufficient. Using the smaller integer type "
@@ -110,8 +114,11 @@ void dbscanFitImpl(const raft::handle_t& handle,
   int algo_adj = 1;
   int algo_ccl = 2;

-  int my_rank, n_rank;
-  Index_ start_row, n_owned_rows;
+  int my_rank{0};
+  int n_rank{1};
+  Index_ start_row{0};
+  Index_ n_owned_rows{n_rows};
+
   if (opg) {
     const auto& comm = handle.get_comms();
     my_rank = comm.get_rank();
@@ -122,10 +129,6 @@ void dbscanFitImpl(const raft::handle_t& handle,
     n_owned_rows = max(Index_(0), end_row - start_row);
     // Note: it is possible for a node to have no work in theory. It won't
     // happen in practice (because n_rows is much greater than n_rank)
-  } else {
-    my_rank = 0;
-    n_rank = 1;
-    n_owned_rows = n_rows;
   }

   CUML_LOG_DEBUG("#%d owns %ld rows", (int)my_rank, (unsigned long)n_owned_rows);
@@ -200,4 +203,4 @@ void dbscanFitImpl(const raft::handle_t& handle,
 }

 }  // namespace Dbscan
-}  // namespace ML
\ No newline at end of file
+}  // namespace ML
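Both static_casts above silence signed/unsigned comparison warnings: comparing the unsigned std::size_t batch_size against a signed Index_ expression converts the signed side to unsigned, so a negative value would wrap to a huge positive one, and the cast documents that the value is known non-negative. The dbscanFitImpl change likewise replaces assign-in-every-branch with initialize-at-declaration, so the variables are never observably uninitialized. A standalone sketch of the comparison fix, a hypothetical reduction of compute_batch_size rather than its actual code:

#include <cstddef>
#include <cstdint>
#include <limits>

template <typename Index_>
bool batch_exceeds_label_range(std::size_t batch_size, Index_ n_rows)
{
  Index_ max_label = std::numeric_limits<Index_>::max();
  // max_label / n_rows is non-negative, so widening it to std::size_t is
  // lossless; the comparison is now unsigned vs. unsigned and warning-free.
  return batch_size > static_cast<std::size_t>(max_label / n_rows);
}

int main()
{
  // A batch of 2^20 rows with int32 labels over 10^6 total rows would
  // overflow the label space, so this returns true (exit code 0).
  return batch_exceeds_label_range<std::int32_t>(1u << 20, 1000000) ? 0 : 1;
}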
1 change: 1 addition & 0 deletions cpp/src/dbscan/mergelabels/runner.cuh
@@ -18,6 +18,7 @@

 #include <label/merge_labels.cuh>

+#include <raft/handle.hpp>
 namespace ML {
 namespace Dbscan {
 namespace MergeLabels {
4 changes: 4 additions & 0 deletions cpp/src/dbscan/mergelabels/tree_reduction.cuh
@@ -18,6 +18,10 @@

 #include "runner.cuh"

+#include <common/nvtx.hpp>
+
+#include <cuml/common/logger.hpp>
+
 namespace ML {
 namespace Dbscan {
 namespace MergeLabels {