Enable random subsampling (#122)
Random sampling of the training set for IVF methods was reverted in rapidsai/raft#2144 due to the large memory usage of the subsampling method.

Since then, PR rapidsai/raft#2155 has implemented a new random sampling method with improved memory utilization. Using that, we can now re-enable random sampling for the IVF methods (rapidsai/raft#2052 and rapidsai/raft#2077).

Random subsampling has measurable overhead for IVF-Flat, so it is enabled only for IVF-PQ.
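
For context, a minimal sketch of how the new knob is reached from the C++ API. This is illustrative only: the handle/dataset setup, `n_lists`, and the `kmeans_trainset_fraction` value are assumptions, not part of this change.

    #include <cstdint>
    #include <cuvs/neighbors/ivf_pq.hpp>
    #include <raft/core/device_mdspan.hpp>
    #include <raft/core/resources.hpp>

    // Sketch: build an IVF-PQ index with the new codebook-training cap (default shown).
    void build_with_subsampling(raft::resources const& handle,
                                raft::device_matrix_view<const float, int64_t> dataset)
    {
      cuvs::neighbors::ivf_pq::index_params params;
      params.n_lists                      = 1024;  // hypothetical cluster count
      params.kmeans_trainset_fraction     = 0.1;   // trainset is now drawn by random subsampling
      params.max_train_points_per_pq_code = 256;   // the new parameter (default value)
      auto index = cuvs::neighbors::ivf_pq::build(handle, params, dataset);
      (void)index;
    }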

Authors:
  - Tamas Bela Feher (https://github.com/tfeher)
  - Corey J. Nolet (https://github.com/cjnolet)

Approvers:
  - Corey J. Nolet (https://github.com/cjnolet)

URL: #122
tfeher authored Aug 1, 2024
1 parent 2ebbeca commit e599ab2
Showing 7 changed files with 109 additions and 113 deletions.
9 changes: 9 additions & 0 deletions cpp/include/cuvs/neighbors/ivf_pq.h
@@ -116,6 +116,15 @@ struct cuvsIvfPqIndexParams {
* flag to `true` if you prefer to use as little GPU memory for the database as possible.
*/
  bool conservative_memory_allocation;
+
+  /**
+   * The max number of data points to use per PQ code during PQ codebook training. Using more data
+   * points per PQ code may increase the quality of the PQ codebook but may also increase the build
+   * time. The parameter is applied to both PQ codebook generation methods, i.e., PER_SUBSPACE and
+   * PER_CLUSTER. In both cases, we will use `pq_book_size * max_train_points_per_pq_code` training
+   * points to train each codebook.
+   */
+  uint32_t max_train_points_per_pq_code;
};

typedef struct cuvsIvfPqIndexParams* cuvsIvfPqIndexParams_t;
9 changes: 9 additions & 0 deletions cpp/include/cuvs/neighbors/ivf_pq.hpp
@@ -107,6 +107,15 @@ struct index_params : cuvs::neighbors::index_params {
*/
bool add_data_on_build = true;

+  /**
+   * The max number of data points to use per PQ code during PQ codebook training. Using more data
+   * points per PQ code may increase the quality of the PQ codebook but may also increase the build
+   * time. The parameter is applied to both PQ codebook generation methods, i.e., PER_SUBSPACE and
+   * PER_CLUSTER. In both cases, we will use `pq_book_size * max_train_points_per_pq_code` training
+   * points to train each codebook.
+   */
+  uint32_t max_train_points_per_pq_code = 256;
+
/**
* Creates index_params based on shape of the input dataset.
* Usage example:
163 changes: 52 additions & 111 deletions cpp/src/neighbors/ivf_pq/ivf_pq_build.cuh
@@ -44,6 +44,7 @@
#include <raft/linalg/unary_op.cuh>
#include <raft/matrix/gather.cuh>
#include <raft/matrix/linewise_op.cuh>
+#include <raft/matrix/sample_rows.cuh>
#include <raft/random/rng.cuh>
#include <raft/stats/histogram.cuh>
#include <raft/util/cuda_utils.cuh>
@@ -69,51 +70,6 @@ using namespace cuvs::spatial::knn::detail;  // NOLINT

using internal_extents_t = int64_t; // The default mdspan extent type used internally.

-template <uint32_t BlockDim, typename T, typename S>
-__launch_bounds__(BlockDim) static __global__ void copy_warped_kernel(
-  T* out, uint32_t ld_out, const S* in, uint32_t ld_in, uint32_t n_cols, size_t n_rows)
-{
-  using warp    = raft::Pow2<raft::WarpSize>;
-  size_t row_ix = warp::div(size_t(threadIdx.x) + size_t(BlockDim) * size_t(blockIdx.x));
-  uint32_t i    = warp::mod(threadIdx.x);
-  if (row_ix >= n_rows) return;
-  out += row_ix * ld_out;
-  in += row_ix * ld_in;
-  auto f = utils::mapping<T>{};
-  for (uint32_t col_ix = i; col_ix < n_cols; col_ix += warp::Value) {
-    auto x = f(in[col_ix]);
-    __syncwarp();
-    out[col_ix] = x;
-  }
-}
-
-/**
- * raft::copy the data one warp-per-row:
- *
- * 1. load the data per-warp
- * 2. apply the `utils::mapping<T>{}`
- * 3. sync within warp
- * 4. store the data.
- *
- * Assuming sizeof(T) >= sizeof(S) and the data is properly aligned (see the usage in `build`), this
- * allows to re-structure the data within rows in-place.
- */
-template <typename T, typename S>
-void copy_warped(T* out,
-                 uint32_t ld_out,
-                 const S* in,
-                 uint32_t ld_in,
-                 uint32_t n_cols,
-                 size_t n_rows,
-                 rmm::cuda_stream_view stream)
-{
-  constexpr uint32_t kBlockDim = 128;
-  dim3 threads(kBlockDim, 1, 1);
-  dim3 blocks(raft::div_rounding_up_safe<size_t>(n_rows, kBlockDim / raft::WarpSize), 1, 1);
-  copy_warped_kernel<kBlockDim, T, S>
-    <<<blocks, threads, 0, stream>>>(out, ld_out, in, ld_in, n_cols, n_rows);
-}
-
/**
* @brief Compute residual vectors from the source dataset given by selected indices.
*
@@ -358,14 +314,19 @@ void train_per_subset(raft::resources const& handle,
size_t n_rows,
const float* trainset, // [n_rows, dim]
const uint32_t* labels, // [n_rows]
-                      uint32_t kmeans_n_iters)
+                      uint32_t kmeans_n_iters,
+                      uint32_t max_train_points_per_pq_code)
{
auto stream = raft::resource::get_cuda_stream(handle);
auto device_memory = raft::resource::get_workspace_resource(handle);

rmm::device_uvector<float> pq_centers_tmp(index.pq_centers().size(), stream, device_memory);
-  rmm::device_uvector<float> sub_trainset(n_rows * size_t(index.pq_len()), stream, device_memory);
-  rmm::device_uvector<uint32_t> sub_labels(n_rows, stream, device_memory);
+  // Subsample the training set for codebook generation, based on max_train_points_per_pq_code.
+  size_t big_enough = max_train_points_per_pq_code * size_t(index.pq_book_size());
+  auto pq_n_rows    = uint32_t(std::min(big_enough, n_rows));
+  rmm::device_uvector<float> sub_trainset(
+    pq_n_rows * size_t(index.pq_len()), stream, device_memory);
+  rmm::device_uvector<uint32_t> sub_labels(pq_n_rows, stream, device_memory);

rmm::device_uvector<uint32_t> pq_cluster_sizes(index.pq_book_size(), stream, device_memory);

@@ -376,7 +337,7 @@
// Get the rotated cluster centers for each training vector.
// This will be subtracted from the input vectors afterwards.
utils::copy_selected<float, float, size_t, uint32_t>(
-      n_rows,
+      pq_n_rows,
index.pq_len(),
index.centers_rot().data_handle() + index.pq_len() * j,
labels,
@@ -392,7 +353,7 @@
true,
false,
index.pq_len(),
-      n_rows,
+      pq_n_rows,
index.dim(),
&alpha,
index.rotation_matrix().data_handle() + index.dim() * index.pq_len() * j,
@@ -406,13 +367,13 @@

// train PQ codebook for this subspace
auto sub_trainset_view = raft::make_device_matrix_view<const float, internal_extents_t>(
-      sub_trainset.data(), n_rows, index.pq_len());
+      sub_trainset.data(), pq_n_rows, index.pq_len());
auto centers_tmp_view = raft::make_device_matrix_view<float, internal_extents_t>(
pq_centers_tmp.data() + index.pq_book_size() * index.pq_len() * j,
index.pq_book_size(),
index.pq_len());
auto sub_labels_view =
-      raft::make_device_vector_view<uint32_t, internal_extents_t>(sub_labels.data(), n_rows);
+      raft::make_device_vector_view<uint32_t, internal_extents_t>(sub_labels.data(), pq_n_rows);
auto cluster_sizes_view = raft::make_device_vector_view<uint32_t, internal_extents_t>(
pq_cluster_sizes.data(), index.pq_book_size());
cuvs::cluster::kmeans::balanced_params kmeans_params;
@@ -435,7 +396,8 @@ void train_per_cluster(raft::resources const& handle,
size_t n_rows,
const float* trainset, // [n_rows, dim]
const uint32_t* labels, // [n_rows]
-                       uint32_t kmeans_n_iters)
+                       uint32_t kmeans_n_iters,
+                       uint32_t max_train_points_per_pq_code)
{
auto stream = raft::resource::get_cuda_stream(handle);
auto device_memory = raft::resource::get_workspace_resource(handle);
@@ -485,9 +447,11 @@
indices + cluster_offsets[l],
device_memory);

-    // limit the cluster size to bound the training time.
+    // Limit the cluster size to bound the training time, based on max_train_points_per_pq_code.
+    // If pq_book_size is less than pq_dim, use max_train_points_per_pq_code per pq_dim instead,
+    // as we interpret the data as pq_len-dimensional.
-    size_t big_enough = 256ul * std::max<size_t>(index.pq_book_size(), index.pq_dim());
+    size_t big_enough =
+      max_train_points_per_pq_code * std::max<size_t>(index.pq_book_size(), index.pq_dim());
size_t available_rows = size_t(cluster_size) * size_t(index.pq_dim());
auto pq_n_rows = uint32_t(std::min(big_enough, available_rows));
// train PQ codebook for this cluster
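
Note: the PER_CLUSTER variant caps the training set per cluster rather than globally; the bound uses max(pq_book_size, pq_dim), and the rows available are the cluster's vectors reinterpreted as pq_len-dimensional sub-vectors. A sketch with assumed values:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
      std::uint32_t max_train_points_per_pq_code = 256;    // the new parameter (default)
      std::size_t pq_book_size                   = 256;    // 2^pq_bits, assuming pq_bits = 8
      std::size_t pq_dim                         = 32;     // hypothetical number of PQ subspaces
      std::size_t cluster_size                   = 50000;  // hypothetical rows in one cluster

      std::size_t big_enough     = max_train_points_per_pq_code * std::max(pq_book_size, pq_dim);
      std::size_t available_rows = cluster_size * pq_dim;  // sub-vectors usable for training
      std::uint32_t pq_n_rows    = std::uint32_t(std::min(big_enough, available_rows));
      std::printf("this cluster's codebook trains on %u of %zu rows\n", pq_n_rows, available_rows);
      return 0;
    }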
@@ -1730,6 +1694,7 @@ auto build(raft::resources const& handle,
utils::memzero(index.inds_ptrs().data_handle(), index.inds_ptrs().size(), stream);

{
+    raft::random::RngState random_state{137};
auto trainset_ratio = std::max<size_t>(
1,
size_t(n_rows) / std::max<size_t>(params.kmeans_trainset_fraction * n_rows, index.n_lists()));
@@ -1749,9 +1714,11 @@

// Besides just sampling, we transform the input dataset into floats to make it easier
// to use gemm operations from cublas.
-    rmm::device_uvector<float> trainset(0, stream, big_memory_resource);
+    auto trainset = raft::make_device_mdarray<float>(
+      handle, big_memory_resource, raft::make_extents<int64_t>(0, 0));
try {
-      trainset.resize(n_rows_train * index.dim(), stream);
+      trainset = raft::make_device_mdarray<float>(
+        handle, big_memory_resource, raft::make_extents<int64_t>(n_rows_train, dim));
} catch (raft::logic_error& e) {
RAFT_LOG_ERROR(
"Insufficient memory for kmeans training set allocation. Please decrease "
@@ -1760,54 +1727,19 @@
}
-    // TODO: a proper sampling
if constexpr (std::is_same_v<T, float>) {
-      RAFT_CUDA_TRY(cudaMemcpy2DAsync(trainset.data(),
-                                      sizeof(T) * index.dim(),
-                                      dataset.data_handle(),
-                                      sizeof(T) * index.dim() * trainset_ratio,
-                                      sizeof(T) * index.dim(),
-                                      n_rows_train,
-                                      cudaMemcpyDefault,
-                                      stream));
+      raft::matrix::sample_rows<T, int64_t>(handle, random_state, dataset, trainset.view());
} else {
size_t dim = index.dim();
-      cudaPointerAttributes dataset_attr;
-      RAFT_CUDA_TRY(cudaPointerGetAttributes(&dataset_attr, dataset.data_handle()));
-      if (dataset_attr.devicePointer != nullptr) {
-        // data is available on device: just run the kernel to raft::copy and map the data
-        auto p = reinterpret_cast<T*>(dataset_attr.devicePointer);
-        auto trainset_view =
-          raft::make_device_vector_view<float, IdxT>(trainset.data(), dim * n_rows_train);
-        raft::linalg::map_offset(
-          handle, trainset_view, [p, trainset_ratio, dim] __device__(size_t i) {
-            auto col = i % dim;
-            return utils::mapping<float>{}(p[(i - col) * size_t(trainset_ratio) + col]);
-          });
-      } else {
-        // data is not available: first raft::copy, then map inplace
-        auto trainset_tmp = reinterpret_cast<T*>(reinterpret_cast<uint8_t*>(trainset.data()) +
-                                                 (sizeof(float) - sizeof(T)) * index.dim());
-        // We raft::copy the data in strides, one row at a time, and place the smaller rows of type
-        // T at the end of float rows.
-        RAFT_CUDA_TRY(cudaMemcpy2DAsync(trainset_tmp,
-                                        sizeof(float) * index.dim(),
-                                        dataset.data_handle(),
-                                        sizeof(T) * index.dim() * trainset_ratio,
-                                        sizeof(T) * index.dim(),
-                                        n_rows_train,
-                                        cudaMemcpyDefault,
-                                        stream));
-        // Transform the input `{T -> float}`, one row per warp.
-        // The threads in each warp raft::copy the data synchronously; this and the layout of the
-        // data (content is aligned to the end of the rows) together allow doing the transform
-        // in-place.
-        copy_warped(trainset.data(),
-                    index.dim(),
-                    trainset_tmp,
-                    index.dim() * sizeof(float) / sizeof(T),
-                    index.dim(),
-                    n_rows_train,
-                    stream);
-      }
+      // TODO(tfeher): Enable codebook generation with any type T, and then remove trainset_tmp.
+      auto trainset_tmp = raft::make_device_mdarray<T>(
+        handle, big_memory_resource, raft::make_extents<int64_t>(n_rows_train, dim));
+
+      raft::matrix::sample_rows<T, int64_t>(handle, random_state, dataset, trainset_tmp.view());
+
+      raft::linalg::unaryOp(trainset.data_handle(),
+                            trainset_tmp.data_handle(),
+                            trainset.size(),
+                            utils::mapping<float>{},
+                            raft::resource::get_cuda_stream(handle));
}

// NB: here cluster_centers is used as if it is [n_clusters, data_dim] not [n_clusters,
@@ -1817,9 +1749,8 @@
auto cluster_centers = cluster_centers_buf.data();

// Train balanced hierarchical kmeans clustering
-    auto trainset_const_view = raft::make_device_matrix_view<const float, internal_extents_t>(
-      trainset.data(), n_rows_train, index.dim());
-    auto centers_view = raft::make_device_matrix_view<float, internal_extents_t>(
+    auto trainset_const_view = raft::make_const_mdspan(trainset.view());
+    auto centers_view        = raft::make_device_matrix_view<float, internal_extents_t>(
cluster_centers, index.n_lists(), index.dim());
cuvs::cluster::kmeans::balanced_params kmeans_params;
kmeans_params.n_iters = params.kmeans_n_iters;
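
Note: both branches above now funnel through raft::matrix::sample_rows from rapidsai/raft#2155. A standalone sketch of that call, assuming the mdspan overload used in this diff; n_rows_train would be derived from kmeans_trainset_fraction as in build().

    #include <cstdint>
    #include <raft/core/device_mdarray.hpp>
    #include <raft/core/device_mdspan.hpp>
    #include <raft/core/resources.hpp>
    #include <raft/matrix/sample_rows.cuh>
    #include <raft/random/rng_state.hpp>

    // Sketch: draw n_rows_train rows of `dataset` at random into a new training matrix.
    void subsample_trainset(raft::resources const& handle,
                            raft::device_matrix_view<const float, int64_t> dataset,
                            int64_t n_rows_train)
    {
      raft::random::RngState random_state{137};  // fixed seed, as in this commit
      auto trainset = raft::make_device_mdarray<float>(
        handle, raft::make_extents<int64_t>(n_rows_train, dataset.extent(1)));
      // The rapidsai/raft#2155 implementation avoids materializing large temporary
      // buffers, which is what makes enabling subsampling viable here.
      raft::matrix::sample_rows<float, int64_t>(handle, random_state, dataset, trainset.view());
    }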
@@ -1848,12 +1779,22 @@
// Train PQ codebooks
switch (index.codebook_kind()) {
case codebook_gen::PER_SUBSPACE:
-        train_per_subset(
-          handle, index, n_rows_train, trainset.data(), labels.data(), params.kmeans_n_iters);
+        train_per_subset(handle,
+                         index,
+                         n_rows_train,
+                         trainset.data_handle(),
+                         labels.data(),
+                         params.kmeans_n_iters,
+                         params.max_train_points_per_pq_code);
break;
case codebook_gen::PER_CLUSTER:
-        train_per_cluster(
-          handle, index, n_rows_train, trainset.data(), labels.data(), params.kmeans_n_iters);
+        train_per_cluster(handle,
+                          index,
+                          n_rows_train,
+                          trainset.data_handle(),
+                          labels.data(),
+                          params.kmeans_n_iters,
+                          params.max_train_points_per_pq_code);
break;
default: RAFT_FAIL("Unreachable code");
}
4 changes: 3 additions & 1 deletion cpp/src/neighbors/ivf_pq_c.cpp
@@ -48,6 +48,7 @@ void* _build(cuvsResources_t res, cuvsIvfPqIndexParams params, DLManagedTensor*
static_cast<cuvs::neighbors::ivf_pq::codebook_gen>((int)params.codebook_kind);
build_params.force_random_rotation = params.force_random_rotation;
build_params.conservative_memory_allocation = params.conservative_memory_allocation;
+  build_params.max_train_points_per_pq_code = params.max_train_points_per_pq_code;

auto dataset = dataset_tensor->dl_tensor;
auto dim = dataset.shape[0];
@@ -214,7 +215,8 @@ extern "C" cuvsError_t cuvsIvfPqIndexParamsCreate(cuvsIvfPqIndexParams_t* params
.pq_dim = 0,
.codebook_kind = codebook_gen::PER_SUBSPACE,
.force_random_rotation = false,
-      .conservative_memory_allocation = false};
+      .conservative_memory_allocation = false,
+      .max_train_points_per_pq_code   = 256};
});
}
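
Note: a hedged sketch of reaching the new field through the C API. The cuvsIvfPqIndexParamsDestroy call and the 512 override are assumptions for illustration; error handling is omitted.

    #include <cuvs/neighbors/ivf_pq.h>

    void configure_params(void)
    {
      cuvsIvfPqIndexParams_t params;
      cuvsIvfPqIndexParamsCreate(&params);         // defaults now include max_train_points_per_pq_code = 256
      params->max_train_points_per_pq_code = 512;  // hypothetical: trade build time for codebook quality
      /* ... pass params to the IVF-PQ build entry point, then clean up: */
      cuvsIvfPqIndexParamsDestroy(params);
    }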

1 change: 1 addition & 0 deletions python/cuvs/cuvs/neighbors/ivf_pq/ivf_pq.pxd
@@ -50,6 +50,7 @@ cdef extern from "cuvs/neighbors/ivf_pq.h" nogil:
codebook_gen codebook_kind
bool force_random_rotation
bool conservative_memory_allocation
+        uint32_t max_train_points_per_pq_code

ctypedef cuvsIvfPqIndexParams* cuvsIvfPqIndexParams_t

24 changes: 23 additions & 1 deletion python/cuvs/cuvs/neighbors/ivf_pq/ivf_pq.pyx
@@ -97,11 +97,27 @@ cdef class IndexParams:
default, if `dim == rot_dim`, the rotation transform is
initialized with the identity matrix. When
`force_random_rotation == True`, a random orthogonal transform
matrix is generated regardless of the values of `dim` and `pq_dim`.
add_data_on_build : bool, default = True
After training the coarse and fine quantizers, we will populate
the index with the dataset if add_data_on_build == True, otherwise
the index is left empty, and the extend method can be used
to add new vectors to the index.
+    conservative_memory_allocation : bool, default = False
+        By default, the algorithm allocates more space than necessary for
+        individual clusters (`list_data`). This amortizes the cost of
+        memory allocation and reduces the number of data copies during
+        repeated calls to `extend` (extending the database).
+        To disable this behavior and use as little GPU memory for the
+        database as possible, set this flag to `True`.
+    max_train_points_per_pq_code : int, default = 256
+        The max number of data points to use per PQ code during PQ codebook
+        training. Using more data points per PQ code may increase the
+        quality of the PQ codebook but may also increase the build time. The
+        parameter is applied to both PQ codebook generation methods, i.e.,
+        PER_SUBSPACE and PER_CLUSTER. In both cases, we will use
+        pq_book_size * max_train_points_per_pq_code training points to
+        train each codebook.
"""

cdef cuvsIvfPqIndexParams* params
@@ -124,7 +140,8 @@
codebook_kind="subspace",
force_random_rotation=False,
add_data_on_build=True,
-                  conservative_memory_allocation=False):
+                  conservative_memory_allocation=False,
+                  max_train_points_per_pq_code=256):
self.params.n_lists = n_lists
self._metric = metric
self.params.metric = <cuvsDistanceType>DISTANCE_TYPES[metric]
@@ -143,6 +160,8 @@
self.params.add_data_on_build = add_data_on_build
self.params.conservative_memory_allocation = \
conservative_memory_allocation
+        self.params.max_train_points_per_pq_code = \
+            max_train_points_per_pq_code

@property
def metric(self):
@@ -192,6 +211,9 @@
def conservative_memory_allocation(self):
return self.params.conservative_memory_allocation

+    @property
+    def max_train_points_per_pq_code(self):
+        return self.params.max_train_points_per_pq_code

cdef class Index:
"""