Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Subsampling for IVF-PQ codebook generation #2052

Merged
merged 25 commits into from
Jan 25, 2024
Merged
Show file tree
Hide file tree
Changes from 22 commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
00d1ece
Add subsample support for PQ codebook generation. More benchmark needed.
abc99lr Dec 8, 2023
7332a27
Add knob to control the amount of PQ codebook training subsampling.
abc99lr Jan 6, 2024
3153f0e
Fix if-statement dependency issue in parse build parameter.
abc99lr Jan 13, 2024
a03f0af
DOC v24.04 Updates [skip ci]
raydouglass Jan 18, 2024
9165d89
Merge pull request #2100 from rapidsai/branch-24.02
GPUtester Jan 19, 2024
c3b30f3
Merge pull request #2101 from rapidsai/branch-24.02
GPUtester Jan 19, 2024
3790c2a
Merge pull request #2103 from rapidsai/branch-24.02
GPUtester Jan 19, 2024
5464ae7
Merge pull request #2112 from rapidsai/branch-24.02
GPUtester Jan 23, 2024
e9ba740
Merge pull request #2114 from rapidsai/branch-24.02
GPUtester Jan 23, 2024
38d154e
Merge pull request #2115 from rapidsai/branch-24.02
GPUtester Jan 23, 2024
20b0869
Merge branch 'branch-24.02' of https://github.com/rapidsai/raft into …
Jan 23, 2024
c90cdfa
Merge pull request #2116 from rapidsai/branch-24.02
GPUtester Jan 23, 2024
aea3d44
Merge remote-tracking branch 'upstream/branch-24.04' into subsampling…
Jan 23, 2024
fdd4ad2
Revert "Allow topk larger than 1024 in CAGRA (#2097)"
Jan 23, 2024
f8bc4ff
Revert "Revert "Allow topk larger than 1024 in CAGRA (#2097)""
Jan 23, 2024
2856bff
Revert "Merge remote-tracking branch 'upstream/branch-24.04' into sub…
Jan 23, 2024
49112dd
Revert "Merge pull request #2116 from rapidsai/branch-24.02"
Jan 23, 2024
a70a745
Change to max point based subsampling.
Jan 24, 2024
f480c13
Add max_train_points_per_pq_code row in benchmark tuning guide.
abc99lr Jan 24, 2024
c2b2715
Address comments.
abc99lr Jan 24, 2024
9395be8
Merge remote-tracking branch 'upstream/branch-24.02' into subsampling…
abc99lr Jan 24, 2024
aa1a3e5
Run format checker.
abc99lr Jan 24, 2024
cc88715
Fix formatting issue.
abc99lr Jan 24, 2024
67205f9
More format changes.
abc99lr Jan 25, 2024
4352489
Merge remote-tracking branch 'upstream/branch-24.02' into subsampling…
abc99lr Jan 25, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions cpp/bench/ann/src/raft/raft_ann_bench_param_parser.h
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,9 @@ void parse_build_param(const nlohmann::json& conf,
"', should be either 'cluster' or 'subspace'");
}
}
if (conf.contains("max_train_points_per_pq_code")) {
param.max_train_points_per_pq_code = conf.at("max_train_points_per_pq_code");
}
}

template <typename T, typename IdxT>
Expand Down
24 changes: 16 additions & 8 deletions cpp/include/raft/neighbors/detail/ivf_pq_build.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -353,14 +353,18 @@ void train_per_subset(raft::resources const& handle,
const float* trainset, // [n_rows, dim]
const uint32_t* labels, // [n_rows]
uint32_t kmeans_n_iters,
uint32_t max_train_points_per_pq_code,
rmm::mr::device_memory_resource* managed_memory)
{
auto stream = resource::get_cuda_stream(handle);
auto device_memory = resource::get_workspace_resource(handle);

rmm::device_uvector<float> pq_centers_tmp(index.pq_centers().size(), stream, device_memory);
rmm::device_uvector<float> sub_trainset(n_rows * size_t(index.pq_len()), stream, device_memory);
rmm::device_uvector<uint32_t> sub_labels(n_rows, stream, device_memory);
// Subsampling the train set for codebook generation based on max_train_points_per_pq_code.
size_t big_enough = max_train_points_per_pq_code * size_t(index.pq_book_size());
auto pq_n_rows = uint32_t(std::min(big_enough, n_rows));
rmm::device_uvector<float> sub_trainset(pq_n_rows * size_t(index.pq_len()), stream, device_memory);
rmm::device_uvector<uint32_t> sub_labels(pq_n_rows, stream, device_memory);

rmm::device_uvector<uint32_t> pq_cluster_sizes(index.pq_book_size(), stream, device_memory);

Expand All @@ -371,7 +375,7 @@ void train_per_subset(raft::resources const& handle,
// Get the rotated cluster centers for each training vector.
// This will be subtracted from the input vectors afterwards.
utils::copy_selected<float, float, size_t, uint32_t>(
n_rows,
pq_n_rows,
index.pq_len(),
index.centers_rot().data_handle() + index.pq_len() * j,
labels,
Expand All @@ -387,7 +391,7 @@ void train_per_subset(raft::resources const& handle,
true,
false,
index.pq_len(),
n_rows,
pq_n_rows,
index.dim(),
&alpha,
index.rotation_matrix().data_handle() + index.dim() * index.pq_len() * j,
Expand All @@ -401,12 +405,12 @@ void train_per_subset(raft::resources const& handle,

// train PQ codebook for this subspace
auto sub_trainset_view =
raft::make_device_matrix_view<const float, IdxT>(sub_trainset.data(), n_rows, index.pq_len());
raft::make_device_matrix_view<const float, IdxT>(sub_trainset.data(), pq_n_rows, index.pq_len());
auto centers_tmp_view = raft::make_device_matrix_view<float, IdxT>(
pq_centers_tmp.data() + index.pq_book_size() * index.pq_len() * j,
index.pq_book_size(),
index.pq_len());
auto sub_labels_view = raft::make_device_vector_view<uint32_t, IdxT>(sub_labels.data(), n_rows);
auto sub_labels_view = raft::make_device_vector_view<uint32_t, IdxT>(sub_labels.data(), pq_n_rows);
auto cluster_sizes_view =
raft::make_device_vector_view<uint32_t, IdxT>(pq_cluster_sizes.data(), index.pq_book_size());
raft::cluster::kmeans_balanced_params kmeans_params;
Expand All @@ -430,6 +434,7 @@ void train_per_cluster(raft::resources const& handle,
const float* trainset, // [n_rows, dim]
const uint32_t* labels, // [n_rows]
uint32_t kmeans_n_iters,
uint32_t max_train_points_per_pq_code,
rmm::mr::device_memory_resource* managed_memory)
{
auto stream = resource::get_cuda_stream(handle);
Expand Down Expand Up @@ -477,9 +482,10 @@ void train_per_cluster(raft::resources const& handle,
indices + cluster_offsets[l],
device_memory);

// limit the cluster size to bound the training time.
// limit the cluster size to bound the training time based on max_train_points_per_pq_code
// If pq_book_size is less than pq_dim, use max_train_points_per_pq_code per pq_dim instead
// [sic] we interpret the data as pq_len-dimensional
size_t big_enough = 256ul * std::max<size_t>(index.pq_book_size(), index.pq_dim());
size_t big_enough = max_train_points_per_pq_code * std::max<size_t>(index.pq_book_size(), index.pq_dim());
size_t available_rows = size_t(cluster_size) * size_t(index.pq_dim());
auto pq_n_rows = uint32_t(std::min(big_enough, available_rows));
// train PQ codebook for this cluster
Expand Down Expand Up @@ -1788,6 +1794,7 @@ auto build(raft::resources const& handle,
trainset.data_handle(),
labels.data(),
params.kmeans_n_iters,
params.max_train_points_per_pq_code,
&managed_mr);
break;
case codebook_gen::PER_CLUSTER:
abc99lr marked this conversation as resolved.
Show resolved Hide resolved
Expand All @@ -1797,6 +1804,7 @@ auto build(raft::resources const& handle,
trainset.data_handle(),
labels.data(),
params.kmeans_n_iters,
params.max_train_points_per_pq_code,
&managed_mr);
break;
default: RAFT_FAIL("Unreachable code");
Expand Down
8 changes: 8 additions & 0 deletions cpp/include/raft/neighbors/ivf_pq_types.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,14 @@ struct index_params : ann::index_params {
* flag to `true` if you prefer to use as little GPU memory for the database as possible.
*/
bool conservative_memory_allocation = false;
/**
* The max number of data points to use per PQ code during PQ codebook training. Using more data
 * points per PQ code may increase the quality of the PQ codebook but may also increase the build time.
* The parameter is applied to both PQ codebook generation methods, i.e., PER_SUBSPACE and
* PER_CLUSTER. In both cases, we will use `pq_book_size * max_train_points_per_pq_code` training
* points to train each codebook.
*/
uint32_t max_train_points_per_pq_code = 256;
};

struct search_params : ann::search_params {
Expand Down
1 change: 1 addition & 0 deletions docs/source/ann_benchmarks_param_tuning.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ IVF-pq is an inverted-file index, which partitions the vectors into a series of
| `pq_bits` | `build` | N | Positive Integer. [4-8] | 8 | Bit length of the vector element after quantization. |
| `codebook_kind` | `build` | N | ["cluster", "subspace"] | "subspace" | Type of codebook. See the [API docs](https://docs.rapids.ai/api/raft/nightly/cpp_api/neighbors_ivf_pq/#_CPPv412codebook_gen) for more detail |
| `dataset_memory_type` | `build` | N | ["device", "host", "mmap"] | "host" | What memory type should the dataset reside? |
| `max_train_points_per_pq_code` | `build` | N | Positive Integer >=1 | 256 | Max number of data points per PQ code used for PQ codebook creation. Depending on the input dataset size, the actual number of data points used may be lower than what the user specifies. |
| `query_memory_type` | `search` | N | ["device", "host", "mmap"] | "device" | What memory type should the queries reside? |
| `nprobe` | `search` | Y | Positive Integer >0 | | The closest number of clusters to search for each query vector. Larger values will improve recall but will search more points in the index. |
| `internalDistanceDtype` | `search` | N | [`float`, `half`] | `half` | The precision to use for the distance computations. Lower precision can increase performance at the cost of accuracy. |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@ cdef extern from "raft/neighbors/ivf_pq_types.hpp" \
codebook_gen codebook_kind
bool force_random_rotation
bool conservative_memory_allocation
uint32_t max_train_points_per_pq_code

cdef cppclass index[IdxT](ann_index):
index(const device_resources& handle,
Expand Down
16 changes: 15 additions & 1 deletion python/pylibraft/pylibraft/neighbors/ivf_pq/ivf_pq.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,14 @@ cdef class IndexParams:
repeated calls to `extend` (extending the database).
To disable this behavior and use as little GPU memory for the
database as possible, set this flat to `True`.
max_train_points_per_pq_code : int, default = 256
The max number of data points to use per PQ code during PQ codebook
training. Using more data points per PQ code may increase the
quality of the PQ codebook but may also increase the build time. The
parameter is applied to both PQ codebook generation methods, i.e.,
PER_SUBSPACE and PER_CLUSTER. In both cases, we will use
pq_book_size * max_train_points_per_pq_code training points to
train each codebook.
"""
def __init__(self, *,
n_lists=1024,
Expand All @@ -167,7 +175,8 @@ cdef class IndexParams:
codebook_kind="subspace",
force_random_rotation=False,
add_data_on_build=True,
conservative_memory_allocation=False):
conservative_memory_allocation=False,
max_train_points_per_pq_code=256):
self.params.n_lists = n_lists
self.params.metric = _get_metric(metric)
self.params.metric_arg = 0
Expand All @@ -185,6 +194,8 @@ cdef class IndexParams:
self.params.add_data_on_build = add_data_on_build
self.params.conservative_memory_allocation = \
conservative_memory_allocation
self.params.max_train_points_per_pq_code = \
max_train_points_per_pq_code

@property
def n_lists(self):
Expand Down Expand Up @@ -226,6 +237,9 @@ cdef class IndexParams:
def conservative_memory_allocation(self):
return self.params.conservative_memory_allocation

@property
def max_train_points_per_pq_code(self):
return self.params.max_train_points_per_pq_code

cdef class Index:
# We store a pointer to the index because it does not have a trivial
Expand Down