Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement sketching with Hessian on GPU. #9399

Merged
merged 2 commits
Jul 24, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions include/xgboost/data.h
Original file line number Diff line number Diff line change
Expand Up @@ -185,10 +185,10 @@ class MetaInfo {
return data_split_mode == DataSplitMode::kRow;
}

/*! \brief Whether the data is split column-wise. */
bool IsColumnSplit() const {
return data_split_mode == DataSplitMode::kCol;
}
/** @brief Whether the data is split column-wise. */
bool IsColumnSplit() const { return data_split_mode == DataSplitMode::kCol; }
/** @brief Whether this is a learning to rank data. */
bool IsRanking() const { return !group_ptr_.empty(); }

/*!
* \brief A convenient method to check if we are doing vertical federated learning, which requires
Expand Down Expand Up @@ -249,7 +249,7 @@ struct BatchParam {
/**
* \brief Hessian, used for sketching with future approx implementation.
*/
common::Span<float> hess;
common::Span<float const> hess;
/**
* \brief Whether should we force DMatrix to regenerate the batch. Only used for
* GHistIndex.
Expand Down Expand Up @@ -279,7 +279,7 @@ struct BatchParam {
* Get batch with sketch weighted by hessian. The batch will be regenerated if the
* span is changed, so caller should keep the span for each iteration.
*/
BatchParam(bst_bin_t max_bin, common::Span<float> hessian, bool regenerate)
BatchParam(bst_bin_t max_bin, common::Span<float const> hessian, bool regenerate)
: max_bin{max_bin}, hess{hessian}, regen{regenerate} {}

[[nodiscard]] bool ParamNotEqual(BatchParam const& other) const {
Expand Down
8 changes: 5 additions & 3 deletions include/xgboost/host_device_vector.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,11 +49,12 @@
#ifndef XGBOOST_HOST_DEVICE_VECTOR_H_
#define XGBOOST_HOST_DEVICE_VECTOR_H_

#include <xgboost/context.h> // for DeviceOrd
#include <xgboost/span.h> // for Span

#include <initializer_list>
#include <vector>
#include <type_traits>

#include "span.h"
#include <vector>

namespace xgboost {

Expand Down Expand Up @@ -133,6 +134,7 @@ class HostDeviceVector {
GPUAccess DeviceAccess() const;

void SetDevice(int device) const;
void SetDevice(DeviceOrd device) const;

void Resize(size_t new_size, T v = T());

Expand Down
10 changes: 5 additions & 5 deletions src/common/hist_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,12 @@

#include <vector>

#include "../common/common.h"
#include "column_matrix.h"
#include "../data/adapter.h" // for SparsePageAdapterBatch
#include "../data/gradient_index.h" // for GHistIndexMatrix
#include "quantile.h"
#include "xgboost/base.h"
#include "xgboost/context.h" // Context
#include "xgboost/data.h" // SparsePage, SortedCSCPage
#include "xgboost/context.h" // for Context
#include "xgboost/data.h" // for SparsePage, SortedCSCPage

#if defined(XGBOOST_MM_PREFETCH_PRESENT)
#include <xmmintrin.h>
Expand All @@ -32,7 +32,7 @@ HistogramCuts::HistogramCuts() {
}

HistogramCuts SketchOnDMatrix(Context const *ctx, DMatrix *m, bst_bin_t max_bins, bool use_sorted,
Span<float> const hessian) {
Span<float const> hessian) {
HistogramCuts out;
auto const &info = m->Info();
auto n_threads = ctx->Threads();
Expand Down
Loading