Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Re enable IVF random sampling #2225

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
3f5e149
Make subsampling use less memory
tfeher Feb 5, 2024
1d2a681
Add subsample benchmark
tfeher Feb 5, 2024
cabd94f
Merge branch 'branch-24.04' into ivf_subsample2
tfeher Mar 11, 2024
4040a96
debug
tfeher Mar 12, 2024
e09c9f7
Fix bug
tfeher Mar 12, 2024
a6f9083
add tests
tfeher Mar 12, 2024
941e165
cleanup
tfeher Mar 13, 2024
eb73ef5
added sample_rows to matrix namespace
tfeher Mar 13, 2024
cc2cf24
add test for sample rows
tfeher Mar 13, 2024
eb7e6d1
Add mdspan input API, fix cmakelists
tfeher Mar 13, 2024
7857f2f
corrections
tfeher Mar 13, 2024
93ff94f
Add test to sample_rows
tfeher Mar 13, 2024
f2c28ce
Revert "[HOTFIX] 24.02 Revert Random Sampling (#2144)"
tfeher Mar 14, 2024
47eefd4
Use the new matrix::sample_rows API
tfeher Mar 14, 2024
3f9cbc3
Address issues
tfeher Mar 15, 2024
57cb99c
change member variables in test to local vars
tfeher Mar 15, 2024
84e307e
Fix omp gather and add bench
tfeher Mar 18, 2024
1dd9e13
Merge branch 'branch-24.04' into ivf_subsample2
tfeher Mar 18, 2024
c369149
Merge remote-tracking branch 'tfeher/ivf_subsample2' into re_enable_ivf_random_sampling
tfeher Mar 18, 2024
4ced8c4
Merge remote-tracking branch 'origin/branch-24.04' into re_enable_ivf_random_sampling
tfeher Mar 18, 2024
84609de
Adjust comment
tfeher Mar 18, 2024
6ab5f9a
Fix params for sample_rows
tfeher Mar 19, 2024
f01fa61
Change IVF cluster warning messages to debug msg
tfeher Mar 19, 2024
739ff05
Merge branch 'branch-24.04' into re_enable_ivf_random_sampling
tfeher Mar 19, 2024
28a0ed7
Remove changes from ann_utils.cuh
tfeher Mar 19, 2024
d88e2d3
allocate trainset using default allocator
tfeher Mar 19, 2024
66e696a
Merge branch 'branch-24.06' into re_enable_ivf_random_sampling
tfeher Mar 21, 2024
4d7d7dd
Merge branch 'branch-24.06' into re_enable_ivf_random_sampling
tfeher Apr 8, 2024
bf67643
Merge branch 'branch-24.06' into re_enable_ivf_random_sampling
tfeher Apr 16, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions cpp/bench/ann/src/raft/raft_ann_bench_param_parser.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,9 @@ void parse_build_param(const nlohmann::json& conf,
"', should be either 'cluster' or 'subspace'");
}
}
if (conf.contains("max_train_points_per_pq_code")) {
param.max_train_points_per_pq_code = conf.at("max_train_points_per_pq_code");
}
}

template <typename T, typename IdxT>
Expand Down
10 changes: 5 additions & 5 deletions cpp/include/raft/cluster/detail/kmeans_balanced.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -881,10 +881,10 @@ auto build_fine_clusters(const raft::resources& handle,
if (labels_mptr[j] == LabelT(i)) { mc_trainset_ids[k++] = j; }
}
if (k != static_cast<IdxT>(mesocluster_sizes[i]))
RAFT_LOG_WARN("Incorrect mesocluster size at %d. %zu vs %zu",
static_cast<int>(i),
static_cast<size_t>(k),
static_cast<size_t>(mesocluster_sizes[i]));
RAFT_LOG_DEBUG("Incorrect mesocluster size at %d. %zu vs %zu",
tfeher marked this conversation as resolved.
Show resolved Hide resolved
static_cast<int>(i),
static_cast<size_t>(k),
static_cast<size_t>(mesocluster_sizes[i]));
if (k == 0) {
RAFT_LOG_DEBUG("Empty cluster %d", i);
RAFT_EXPECTS(fine_clusters_nums[i] == 0,
Expand Down Expand Up @@ -1030,7 +1030,7 @@ void build_hierarchical(const raft::resources& handle,
const IdxT mesocluster_size_max_balanced = div_rounding_up_safe<size_t>(
2lu * size_t(n_rows), std::max<size_t>(size_t(n_mesoclusters), 1lu));
if (mesocluster_size_max > mesocluster_size_max_balanced) {
RAFT_LOG_WARN(
RAFT_LOG_DEBUG(
"build_hierarchical: built unbalanced mesoclusters (max_mesocluster_size == %u > %u). "
"At most %u points will be used for training within each mesocluster. "
"Consider increasing the number of training iterations `n_iters`.",
Expand Down
25 changes: 11 additions & 14 deletions cpp/include/raft/neighbors/detail/ivf_flat_build.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,13 @@
#include <raft/linalg/add.cuh>
#include <raft/linalg/map.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/matrix/detail/sample_rows.cuh>
#include <raft/neighbors/detail/ivf_common.cuh>
#include <raft/neighbors/ivf_flat_codepacker.hpp>
#include <raft/neighbors/ivf_flat_types.hpp>
#include <raft/neighbors/ivf_list.hpp>
#include <raft/neighbors/ivf_list_types.hpp>
#include <raft/random/rng.cuh>
#include <raft/spatial/knn/detail/ann_utils.cuh>
#include <raft/stats/histogram.cuh>
#include <raft/util/pow2_utils.cuh>
Expand Down Expand Up @@ -364,28 +366,23 @@ inline auto build(raft::resources const& handle,

// Train the kmeans clustering
{
raft::random::RngState random_state{137};
auto trainset_ratio = std::max<size_t>(
1, n_rows / std::max<size_t>(params.kmeans_trainset_fraction * n_rows, index.n_lists()));
auto n_rows_train = n_rows / trainset_ratio;
rmm::device_uvector<T> trainset(n_rows_train * index.dim(), stream);
// TODO: a proper sampling
RAFT_CUDA_TRY(cudaMemcpy2DAsync(trainset.data(),
sizeof(T) * index.dim(),
dataset,
sizeof(T) * index.dim() * trainset_ratio,
sizeof(T) * index.dim(),
n_rows_train,
cudaMemcpyDefault,
stream));
auto trainset_const_view =
raft::make_device_matrix_view<const T, IdxT>(trainset.data(), n_rows_train, index.dim());
auto trainset = make_device_matrix<T, IdxT>(handle, n_rows_train, index.dim());
raft::matrix::detail::sample_rows(handle, random_state, dataset, n_rows, trainset.view());

auto centers_view = raft::make_device_matrix_view<float, IdxT>(
index.centers().data_handle(), index.n_lists(), index.dim());
raft::cluster::kmeans_balanced_params kmeans_params;
kmeans_params.n_iters = params.kmeans_n_iters;
kmeans_params.metric = index.metric();
raft::cluster::kmeans_balanced::fit(
handle, kmeans_params, trainset_const_view, centers_view, utils::mapping<float>{});
raft::cluster::kmeans_balanced::fit(handle,
kmeans_params,
make_const_mdspan(trainset.view()),
centers_view,
utils::mapping<float>{});
}

// add the data if necessary
Expand Down
157 changes: 46 additions & 111 deletions cpp/include/raft/neighbors/detail/ivf_pq_build.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@

#include <raft/cluster/kmeans_balanced.cuh>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/core/operators.hpp>
Expand All @@ -31,6 +32,7 @@
#include <raft/linalg/map.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/matrix/detail/sample_rows.cuh>
#include <raft/matrix/gather.cuh>
#include <raft/matrix/linewise_op.cuh>
#include <raft/neighbors/detail/ivf_common.cuh>
Expand Down Expand Up @@ -63,51 +65,6 @@ using namespace raft::spatial::knn::detail; // NOLINT

using internal_extents_t = int64_t; // The default mdspan extent type used internally.

template <uint32_t BlockDim, typename T, typename S>
__launch_bounds__(BlockDim) RAFT_KERNEL copy_warped_kernel(
T* out, uint32_t ld_out, const S* in, uint32_t ld_in, uint32_t n_cols, size_t n_rows)
{
using warp = Pow2<WarpSize>;
size_t row_ix = warp::div(size_t(threadIdx.x) + size_t(BlockDim) * size_t(blockIdx.x));
uint32_t i = warp::mod(threadIdx.x);
if (row_ix >= n_rows) return;
out += row_ix * ld_out;
in += row_ix * ld_in;
auto f = utils::mapping<T>{};
for (uint32_t col_ix = i; col_ix < n_cols; col_ix += warp::Value) {
auto x = f(in[col_ix]);
__syncwarp();
out[col_ix] = x;
}
}

/**
* Copy the data one warp-per-row:
*
* 1. load the data per-warp
* 2. apply the `utils::mapping<T>{}`
* 3. sync within warp
* 4. store the data.
*
* Assuming sizeof(T) >= sizeof(S) and the data is properly aligned (see the usage in `build`), this
* allows to re-structure the data within rows in-place.
*/
template <typename T, typename S>
void copy_warped(T* out,
uint32_t ld_out,
const S* in,
uint32_t ld_in,
uint32_t n_cols,
size_t n_rows,
rmm::cuda_stream_view stream)
{
constexpr uint32_t kBlockDim = 128;
dim3 threads(kBlockDim, 1, 1);
dim3 blocks(div_rounding_up_safe<size_t>(n_rows, kBlockDim / WarpSize), 1, 1);
copy_warped_kernel<kBlockDim, T, S>
<<<blocks, threads, 0, stream>>>(out, ld_out, in, ld_in, n_cols, n_rows);
}

/**
* @brief Fill-in a random orthogonal transformation matrix.
*
Expand Down Expand Up @@ -397,14 +354,19 @@ void train_per_subset(raft::resources const& handle,
const float* trainset, // [n_rows, dim]
const uint32_t* labels, // [n_rows]
uint32_t kmeans_n_iters,
uint32_t max_train_points_per_pq_code,
rmm::mr::device_memory_resource* managed_memory)
{
auto stream = resource::get_cuda_stream(handle);
auto device_memory = resource::get_workspace_resource(handle);

rmm::device_uvector<float> pq_centers_tmp(index.pq_centers().size(), stream, device_memory);
rmm::device_uvector<float> sub_trainset(n_rows * size_t(index.pq_len()), stream, device_memory);
rmm::device_uvector<uint32_t> sub_labels(n_rows, stream, device_memory);
// Subsampling the train set for codebook generation based on max_train_points_per_pq_code.
size_t big_enough = max_train_points_per_pq_code * size_t(index.pq_book_size());
auto pq_n_rows = uint32_t(std::min(big_enough, n_rows));
rmm::device_uvector<float> sub_trainset(
pq_n_rows * size_t(index.pq_len()), stream, device_memory);
rmm::device_uvector<uint32_t> sub_labels(pq_n_rows, stream, device_memory);

rmm::device_uvector<uint32_t> pq_cluster_sizes(index.pq_book_size(), stream, device_memory);

Expand All @@ -415,7 +377,7 @@ void train_per_subset(raft::resources const& handle,
// Get the rotated cluster centers for each training vector.
// This will be subtracted from the input vectors afterwards.
utils::copy_selected<float, float, size_t, uint32_t>(
n_rows,
pq_n_rows,
index.pq_len(),
index.centers_rot().data_handle() + index.pq_len() * j,
labels,
Expand All @@ -431,7 +393,7 @@ void train_per_subset(raft::resources const& handle,
true,
false,
index.pq_len(),
n_rows,
pq_n_rows,
index.dim(),
&alpha,
index.rotation_matrix().data_handle() + index.dim() * index.pq_len() * j,
Expand All @@ -445,13 +407,13 @@ void train_per_subset(raft::resources const& handle,

// train PQ codebook for this subspace
auto sub_trainset_view = raft::make_device_matrix_view<const float, internal_extents_t>(
sub_trainset.data(), n_rows, index.pq_len());
sub_trainset.data(), pq_n_rows, index.pq_len());
auto centers_tmp_view = raft::make_device_matrix_view<float, internal_extents_t>(
pq_centers_tmp.data() + index.pq_book_size() * index.pq_len() * j,
index.pq_book_size(),
index.pq_len());
auto sub_labels_view =
raft::make_device_vector_view<uint32_t, internal_extents_t>(sub_labels.data(), n_rows);
raft::make_device_vector_view<uint32_t, internal_extents_t>(sub_labels.data(), pq_n_rows);
auto cluster_sizes_view = raft::make_device_vector_view<uint32_t, internal_extents_t>(
pq_cluster_sizes.data(), index.pq_book_size());
raft::cluster::kmeans_balanced_params kmeans_params;
Expand All @@ -475,6 +437,7 @@ void train_per_cluster(raft::resources const& handle,
const float* trainset, // [n_rows, dim]
const uint32_t* labels, // [n_rows]
uint32_t kmeans_n_iters,
uint32_t max_train_points_per_pq_code,
rmm::mr::device_memory_resource* managed_memory)
{
auto stream = resource::get_cuda_stream(handle);
Expand Down Expand Up @@ -522,9 +485,11 @@ void train_per_cluster(raft::resources const& handle,
indices + cluster_offsets[l],
device_memory);

// limit the cluster size to bound the training time.
// limit the cluster size to bound the training time based on max_train_points_per_pq_code
// If pq_book_size is less than pq_dim, use max_train_points_per_pq_code per pq_dim instead
// [sic] we interpret the data as pq_len-dimensional
size_t big_enough = 256ul * std::max<size_t>(index.pq_book_size(), index.pq_dim());
size_t big_enough =
max_train_points_per_pq_code * std::max<size_t>(index.pq_book_size(), index.pq_dim());
size_t available_rows = size_t(cluster_size) * size_t(index.pq_dim());
auto pq_n_rows = uint32_t(std::min(big_enough, available_rows));
// train PQ codebook for this cluster
Expand Down Expand Up @@ -1702,77 +1667,45 @@ auto build(raft::resources const& handle,
utils::memzero(index.inds_ptrs().data_handle(), index.inds_ptrs().size(), stream);

{
raft::random::RngState random_state{137};
auto trainset_ratio = std::max<size_t>(
1,
size_t(n_rows) / std::max<size_t>(params.kmeans_trainset_fraction * n_rows, index.n_lists()));
size_t n_rows_train = n_rows / trainset_ratio;

auto* device_memory = resource::get_workspace_resource(handle);
rmm::mr::managed_memory_resource managed_memory_upstream;
auto* device_mr = resource::get_workspace_resource(handle);
tfeher marked this conversation as resolved.
Show resolved Hide resolved
rmm::mr::managed_memory_resource managed_mr;
tfeher marked this conversation as resolved.
Show resolved Hide resolved

// Besides just sampling, we transform the input dataset into floats to make it easier
// to use gemm operations from cublas.
rmm::device_uvector<float> trainset(n_rows_train * index.dim(), stream, device_memory);
// TODO: a proper sampling
auto trainset = make_device_matrix<float, internal_extents_t>(handle, n_rows_train, dim);

if constexpr (std::is_same_v<T, float>) {
RAFT_CUDA_TRY(cudaMemcpy2DAsync(trainset.data(),
sizeof(T) * index.dim(),
dataset,
sizeof(T) * index.dim() * trainset_ratio,
sizeof(T) * index.dim(),
n_rows_train,
cudaMemcpyDefault,
stream));
raft::matrix::detail::sample_rows<T, int64_t>(
handle, random_state, dataset, n_rows, trainset.view());
} else {
size_t dim = index.dim();
cudaPointerAttributes dataset_attr;
RAFT_CUDA_TRY(cudaPointerGetAttributes(&dataset_attr, dataset));
if (dataset_attr.devicePointer != nullptr) {
// data is available on device: just run the kernel to copy and map the data
auto p = reinterpret_cast<T*>(dataset_attr.devicePointer);
auto trainset_view =
raft::make_device_vector_view<float, IdxT>(trainset.data(), dim * n_rows_train);
linalg::map_offset(handle, trainset_view, [p, trainset_ratio, dim] __device__(size_t i) {
auto col = i % dim;
return utils::mapping<float>{}(p[(i - col) * size_t(trainset_ratio) + col]);
});
} else {
// data is not available: first copy, then map inplace
auto trainset_tmp = reinterpret_cast<T*>(reinterpret_cast<uint8_t*>(trainset.data()) +
(sizeof(float) - sizeof(T)) * index.dim());
// We copy the data in strides, one row at a time, and place the smaller rows of type T
// at the end of float rows.
RAFT_CUDA_TRY(cudaMemcpy2DAsync(trainset_tmp,
sizeof(float) * index.dim(),
dataset,
sizeof(T) * index.dim() * trainset_ratio,
sizeof(T) * index.dim(),
n_rows_train,
cudaMemcpyDefault,
stream));
// Transform the input `{T -> float}`, one row per warp.
// The threads in each warp copy the data synchronously; this and the layout of the data
// (content is aligned to the end of the rows) together allow doing the transform in-place.
copy_warped(trainset.data(),
index.dim(),
trainset_tmp,
index.dim() * sizeof(float) / sizeof(T),
index.dim(),
n_rows_train,
stream);
}
// TODO(tfeher): Enable codebook generation with any type T, and then remove trainset tmp.
auto trainset_tmp = make_device_mdarray<T>(
handle, &managed_mr, make_extents<internal_extents_t>(n_rows_train, dim));
raft::matrix::detail::sample_rows<T, int64_t>(
tfeher marked this conversation as resolved.
Show resolved Hide resolved
handle, random_state, dataset, n_rows, trainset_tmp.view());

raft::linalg::unaryOp(trainset.data_handle(),
trainset_tmp.data_handle(),
trainset.size(),
utils::mapping<float>{},
raft::resource::get_cuda_stream(handle));
}

// NB: here cluster_centers is used as if it is [n_clusters, data_dim] not [n_clusters,
// dim_ext]!
rmm::device_uvector<float> cluster_centers_buf(
index.n_lists() * index.dim(), stream, device_memory);
index.n_lists() * index.dim(), stream, device_mr);
auto cluster_centers = cluster_centers_buf.data();

// Train balanced hierarchical kmeans clustering
auto trainset_const_view = raft::make_device_matrix_view<const float, internal_extents_t>(
trainset.data(), n_rows_train, index.dim());
auto centers_view = raft::make_device_matrix_view<float, internal_extents_t>(
auto trainset_const_view = raft::make_const_mdspan(trainset.view());
auto centers_view = raft::make_device_matrix_view<float, internal_extents_t>(
cluster_centers, index.n_lists(), index.dim());
raft::cluster::kmeans_balanced_params kmeans_params;
kmeans_params.n_iters = params.kmeans_n_iters;
Expand All @@ -1781,7 +1714,7 @@ auto build(raft::resources const& handle,
handle, kmeans_params, trainset_const_view, centers_view, utils::mapping<float>{});

// Trainset labels are needed for training PQ codebooks
rmm::device_uvector<uint32_t> labels(n_rows_train, stream, device_memory);
rmm::device_uvector<uint32_t> labels(n_rows_train, stream, device_mr);
auto centers_const_view = raft::make_device_matrix_view<const float, internal_extents_t>(
cluster_centers, index.n_lists(), index.dim());
auto labels_view =
Expand All @@ -1808,19 +1741,21 @@ auto build(raft::resources const& handle,
train_per_subset(handle,
index,
n_rows_train,
trainset.data(),
trainset.data_handle(),
labels.data(),
params.kmeans_n_iters,
&managed_memory_upstream);
params.max_train_points_per_pq_code,
&managed_mr);
break;
case codebook_gen::PER_CLUSTER:
train_per_cluster(handle,
index,
n_rows_train,
trainset.data(),
trainset.data_handle(),
labels.data(),
params.kmeans_n_iters,
&managed_memory_upstream);
params.max_train_points_per_pq_code,
&managed_mr);
break;
default: RAFT_FAIL("Unreachable code");
}
Expand Down
8 changes: 8 additions & 0 deletions cpp/include/raft/neighbors/ivf_pq_types.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,14 @@ struct index_params : ann::index_params {
* flag to `true` if you prefer to use as little GPU memory for the database as possible.
*/
bool conservative_memory_allocation = false;
/**
* The max number of data points to use per PQ code during PQ codebook training. Using more data
* points per PQ code may increase the quality of PQ codebook but may also increase the build
* time. The parameter is applied to both PQ codebook generation methods, i.e., PER_SUBSPACE and
* PER_CLUSTER. In both cases, we will use `pq_book_size * max_train_points_per_pq_code` training
* points to train each codebook.
*/
uint32_t max_train_points_per_pq_code = 256;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why 256 here? Have we tested this empirically across many datasets to verify this is a good default?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The default value is inspired by FAISS, which also has 256 as default. We tested on DEEP-100M here #2052 (comment). I will share results on other datasets.

};

struct search_params : ann::search_params {
Expand Down
2 changes: 1 addition & 1 deletion cpp/test/neighbors/ann_ivf_pq.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ struct ivf_pq_inputs {
ivf_pq_inputs()
{
index_params.n_lists = max(32u, min(1024u, num_db_vecs / 128u));
index_params.kmeans_trainset_fraction = 1.0;
index_params.kmeans_trainset_fraction = 0.95;
}
};

Expand Down
Loading
Loading