[FEA] Codepacking for IVF-flat #1632

Merged
34 commits merged Aug 1, 2023
Changes from 6 commits

Commits
cc9cbd3
Unpack list data kernel
tarang-jain Jul 1, 2023
28484ef
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 1, 2023
e39ee56
update packing and unpacking functions
tarang-jain Jul 5, 2023
68bf927
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 5, 2023
78d6380
Update codepacker
tarang-jain Jul 14, 2023
49a8834
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 14, 2023
897338e
refactor codepacker (does not build)
tarang-jain Jul 17, 2023
c1d80f5
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 17, 2023
2a2ee51
Undo deletions
tarang-jain Jul 17, 2023
834dd2c
undo yaml changes
tarang-jain Jul 17, 2023
6013429
style
tarang-jain Jul 17, 2023
ab6345a
Update tests, correct make_list_extents
tarang-jain Jul 18, 2023
ed80d1a
More changes
tarang-jain Jul 19, 2023
cdff9e1
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 19, 2023
7412272
debugging
tarang-jain Jul 20, 2023
700ea82
Working build
tarang-jain Jul 21, 2023
27451c6
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 21, 2023
9d742ef
rename codepacking api
tarang-jain Jul 21, 2023
d1ef8a1
Updated gtest
tarang-jain Jul 27, 2023
e187147
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 27, 2023
4f233a6
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 27, 2023
4ee99e3
updates
tarang-jain Jul 27, 2023
22f4f80
update testing
tarang-jain Jul 28, 2023
9f4e22c
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 28, 2023
c95d1e0
updates
tarang-jain Jul 28, 2023
da78c66
Update testing, pow2
tarang-jain Jul 31, 2023
5cc6dc9
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 31, 2023
15db0c6
remove unnecessary changes
tarang-jain Jul 31, 2023
154dc6d
Delete log.txt
tarang-jain Jul 31, 2023
47d6421
updates
tarang-jain Jul 31, 2023
0f1d106
Merge branch 'faiss-ivf' of https://github.com/tarang-jain/raft into …
tarang-jain Jul 31, 2023
e2e1308
More cleanup
tarang-jain Jul 31, 2023
3f470c8
Merge branch 'branch-23.08' of https://github.com/rapidsai/raft into …
tarang-jain Jul 31, 2023
41a49b2
style
tarang-jain Jul 31, 2023
130 changes: 130 additions & 0 deletions cpp/include/raft/neighbors/detail/ivf_flat_build.cuh
@@ -31,6 +31,7 @@
#include <raft/neighbors/ivf_list_types.hpp>
#include <raft/spatial/knn/detail/ann_utils.cuh>
#include <raft/stats/histogram.cuh>
#include <raft/util/fast_int_div.cuh>
#include <raft/util/pow2_utils.cuh>

#include <rmm/cuda_stream_view.hpp>
@@ -416,4 +417,133 @@ inline void fill_refinement_index(raft::resources const& handle,
refinement_index->veclen());
RAFT_CUDA_TRY(cudaPeekAtLastError());
}



// template <typename T, typename IdxT, uint32_t BlockSize>
// __launch_bounds__(BlockSize) __global__ void unpack_list_data_kernel_float32(
// T* out_codes,
// T* in_list_data,
// uint32_t n_rows,
// uint32_t dim,
// uint32_t veclen)
// {
// const IdxT i = IdxT(blockDim.x) * IdxT(blockIdx.x) + threadIdx.x;
// if (i >= n_rows * dim) { return; }

// auto col = i % kIndexGroupSize * veclen;
// auto row = i / (kIndexGroupSize * veclen);

// auto vec =

// // The data is written in interleaved groups of `index::kGroupSize` vectors
// using interleaved_group = Pow2<kIndexGroupSize>;
// auto group_offset = interleaved_group::roundDown(row);
// auto ingroup_id = interleaved_group::mod(row) * veclen;

// // The value of 4 was chosen because for float_32 dtype, calculate_veclen returns 4
// auto within_group_offset = Pow2<4>::quot(col);

// // Interleave dimensions of the source vector while recording it.
// // NB: such `veclen` is selected, that `dim % veclen == 0`
// out_codes[] = in_list_data[i];
// }

// /**
// * Pack interleaved flat codes from an existing packed non-interleaved list by the given row offset.
// *
// * @param[out] codes flat codes, [n_rows, dim]
// * @param[in] list_data the packed ivf::list data.
// * @param[in] row_offset how many rows in the list to skip.
// * @param[in] stream
// */
// template<typename T, typename IdxT>
// inline void unpack_list_data_float32(
// raft::resources const& handle,
// device_matrix_view<T, uint32_t, row_major> codes,
// device_mdspan<T, typename list_spec<uint32_t, T, IdxT>::list_extents, row_major> list_data,
// uint32_t row_offset)
// {
// auto stream = raft::resource::get_cuda_stream(handle);
// auto n_rows = codes.extent(0);
// if (n_rows == 0) { return; }

// auto dim = codes.extent(1);

// n_rows -= row_offset;
// constexpr uint32_t kBlockSize = 256;
// dim3 blocks(div_rounding_up_safe<uint32_t>(n_rows, kBlockSize), 1, 1);
// dim3 threads(kBlockSize, 1, 1);
// auto kernel = pack_list_data_kernel_float32<T, IdxT, kBlockSize>;
// kernel<<<blocks, threads, 0, stream>>>(codes.data_handle(),
// list_data.data_handle(),
// n_rows,
// dim,
// 4);
// RAFT_CUDA_TRY(cudaPeekAtLastError());
// }



// template <typename T, typename IdxT, uint32_t BlockSize>
// __launch_bounds__(BlockSize) __global__ void pack_list_data_kernel_float32(
// T* list_data,
// T* codes,
// uint32_t n_rows,
// uint32_t dim,
// uint32_t veclen)
// {
// const IdxT i = IdxT(blockDim.x) * IdxT(blockIdx.x) + threadIdx.x;
// if (i >= n_rows * dim) { return; }

// auto col = i % dim;
// auto row = i / n_rows;

// // The data is written in interleaved groups of `index::kGroupSize` vectors
// using interleaved_group = Pow2<kIndexGroupSize>;
// auto group_offset = interleaved_group::roundDown(row);
// auto ingroup_id = interleaved_group::mod(row) * veclen;

// // The value of 4 was chosen because for float_32 dtype, calculate_veclen returns 4
// auto within_group_offset = Pow2<4>::quot(col);

// // Interleave dimensions of the source vector while recording it.
// // NB: such `veclen` is selected, that `dim % veclen == 0`
// list_data[group_offset * dim + within_group_offset * kIndexGroupSize * veclen + ingroup_id + col % veclen] = codes[i];
// }

// /**
// * Pack interleaved flat codes from an existing packed non-interleaved list by the given row offset.
// *
// * @param[out] codes flat codes, [n_rows, dim]
// * @param[in] list_data the packed ivf::list data.
// * @param[in] row_offset how many rows in the list to skip.
// * @param[in] stream
// */
// template<typename T, typename IdxT>
// inline void pack_list_data_float32(
// raft::resources const& handle,
// device_mdspan<T, typename list_spec<uint32_t, T, IdxT>::list_extents, row_major> list_data,
// device_matrix_view<const T, uint32_t, row_major> codes,
// uint32_t row_offset)
// {
// auto stream = raft::resource::get_cuda_stream(handle);
// auto n_rows = codes.extent(0);
// if (n_rows == 0) { return; }

// auto dim = codes.extent(1);

// n_rows -= row_offset;
// constexpr uint32_t kBlockSize = 256;
// dim3 blocks(div_rounding_up_safe<uint32_t>(n_rows, kBlockSize), 1, 1);
// dim3 threads(kBlockSize, 1, 1);
// auto kernel = pack_list_data_kernel_float32<T, IdxT, kBlockSize>;
// kernel<<<blocks, threads, 0, stream>>>(codes.data_handle(),
// list_data.data_handle(),
// n_rows,
// dim,
// 4);
// RAFT_CUDA_TRY(cudaPeekAtLastError());
// }

} // namespace raft::neighbors::ivf_flat::detail
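
The two commented-out float32-specific kernels above were an intermediate step; the functionality they were converging on now lives in the codepacker helpers. For reference, a minimal sketch of how a generic pack kernel could be layered on top of that helper -- the kernel name, the use of the helpers namespace from this detail header, and the launch configuration are illustrative assumptions, not part of the diff:

// Hypothetical sketch (not in the diff): one thread packs one source row into the
// interleaved list block via the __host__ __device__ codepacker helper.
template <typename T, uint32_t BlockSize>
__launch_bounds__(BlockSize) __global__ void pack_list_data_sketch(
  T* list_data,         // interleaved destination block for one list
  const T* codes,       // flat source codes, [n_rows, dim], row-major
  uint32_t n_rows,
  uint32_t dim,
  uint32_t veclen,
  uint32_t row_offset)  // first destination record within the list
{
  const uint32_t row = BlockSize * blockIdx.x + threadIdx.x;
  if (row >= n_rows) { return; }
  raft::neighbors::ivf_flat::helpers::codepacker::pack_1_interleaved(
    codes + size_t(row) * dim, list_data, dim, veclen, row_offset + row);
}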
103 changes: 103 additions & 0 deletions cpp/include/raft/neighbors/ivf_flat_helpers.cuh
@@ -0,0 +1,103 @@
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/neighbors/detail/ivf_flat_build.cuh>
#include <raft/neighbors/ivf_flat_types.hpp>

#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>

// #include <omp.h>
cjnolet marked this conversation as resolved.

namespace raft::neighbors::ivf_flat::helpers {
/**
 * @defgroup ivf_flat_helpers Helper functions for manipulating IVF Flat Index
* @{
*/

namespace codepacker {
/**
 * Write one flat code into a block at the given offset. The offset indicates the id of the record in the list. This function interleaves the code; the interleaved codes are intended to be copied over to the IVF list on device later.
* NB: no memory allocation happens here; the block must fit the record (offset + 1).
*
* @tparam T
*
* @param[in] flat_code input flat code
* @param[out] block block of memory to write interleaved codes to
* @param[in] dim dimension of the flat code
* @param[in] veclen size of interleaved data chunks
* @param[in] offset how many records to skip before writing the data into the list
*/
template <typename T>
__host__ __device__ void pack_1_interleaved(
Contributor:
Do we have a reason to add the _interleaved suffix? Faiss has a function called (un)pack_1. Alternatively, IVF-PQ has functions (un)pack that can take one or many vectors at once. We shall aim for consistency with one of these.

Contributor Author:
Alright, (un)pack_1 sounds like the better naming convention to avoid confusion. I was also told by @cjnolet that the current IVF-PQ CodePacker needs some refactoring, and the names of the IVF-PQ functions can then be changed to (un)pack_1.

Contributor:
The IVF-PQ functions can unpack multiple vectors at a time; I would not name them (un)pack_1.

Contributor Author:
The way to think of ivf_flat_codepacker.hpp is that the codepacker is a public API and the smallest atomic unit (packing and unpacking one single row). It can now be used in host and device functions alike -- for packing and unpacking multiple records, either through a kernel or through a CPU for loop.

Contributor Author:
The helper pack_full_list is just an example of using the CodePacker for packing a full list; a host-side sketch along these lines appears after this file's diff.

const T* flat_code,
T* block,
uint32_t dim,
uint32_t veclen,
uint32_t offset) {
// The data is written in interleaved groups of `index::kGroupSize` vectors
using interleaved_group = Pow2<kIndexGroupSize>;

// Interleave dimensions of the source vector while recording it.
// NB: such `veclen` is selected, that `dim % veclen == 0`
auto group_offset = interleaved_group::roundDown(offset);
auto ingroup_id = interleaved_group::mod(offset) * veclen;

for (uint32_t l = 0; l < dim; l += veclen) {
for (uint32_t j = 0; j < veclen; j++) {
block[group_offset * dim + l * kIndexGroupSize + ingroup_id + j] = flat_code[l + j];
}
}
}

/**
 * Unpack 1 record of a single list (cluster) in the index to fetch the flat code. The offset indicates the id of the record. This function fetches one flat code from the interleaved block.
*
* @tparam T
*
* @param[in] block interleaved block. The block can be thought of as the whole inverted list in interleaved format.
* @param[out] flat_code output flat code
* @param[in] dim dimension of the flat code
* @param[in] veclen size of interleaved data chunks
* @param[in] offset fetch the flat code by the given offset
*/
template <typename T>
__host__ __device__ void unpack_1_interleaved(
const T* block,
T* flat_code,
uint32_t dim,
uint32_t veclen,
uint32_t offset) {

// The data is written in interleaved groups of `index::kGroupSize` vectors
using interleaved_group = Pow2<kIndexGroupSize>;

// NB: such `veclen` is selected, that `dim % veclen == 0`
auto group_offset = interleaved_group::roundDown(offset);
auto ingroup_id = interleaved_group::mod(offset) * veclen;

for (uint32_t l = 0; l < dim; l += veclen) {
for (uint32_t j = 0; j < veclen; j++) {
flat_code[l + j] = block[group_offset * dim + l * kIndexGroupSize + ingroup_id + j];
}
}
}
} // namespace codepacker
/** @} */
} // namespace raft::neighbors::ivf_flat::helpers
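
As the review thread above notes, the codepacker is the smallest atomic unit and the same routine can drive either a kernel or a plain CPU loop. A minimal host-side sketch of that idea, assuming block was allocated to hold Pow2<kIndexGroupSize>::roundUp(n_rows) * dim elements; the function and variable names are illustrative, not part of the diff:

// Illustrative host-side round trip (not part of the diff).
// Assumes dim % veclen == 0 and that block is large enough for the
// interleaved layout (a kIndexGroupSize-aligned number of records).
#include <cstddef>
#include <cstdint>
#include <vector>
#include <raft/neighbors/ivf_flat_helpers.cuh>

template <typename T>
void pack_then_unpack_one(const T* flat_codes,  // [n_rows, dim], row-major
                          T* block,             // interleaved list storage (host)
                          uint32_t n_rows,
                          uint32_t dim,
                          uint32_t veclen)
{
  namespace cp = raft::neighbors::ivf_flat::helpers::codepacker;
  // Pack every record, one row at a time, with the atomic helper.
  for (uint32_t row = 0; row < n_rows; ++row) {
    cp::pack_1_interleaved(flat_codes + size_t(row) * dim, block, dim, veclen, row);
  }
  // Fetch the first record back; out should match the first input row.
  std::vector<T> out(dim);
  cp::unpack_1_interleaved(block, out.data(), dim, veclen, /*offset=*/0);
}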
90 changes: 90 additions & 0 deletions cpp/include/raft/spatial/knn/detail/ann_quantized.cuh
@@ -18,9 +18,14 @@

#include "../ann_common.h"
#include "../ivf_flat.cuh"
#include <cstring>
#include <raft/core/resource/cuda_stream.hpp>

#include "processing.cuh"
#include "raft/core/host_mdarray.hpp"
#include "raft/neighbors/ivf_flat_types.hpp"
#include "raft/neighbors/ivf_flat_helpers.cuh"
#include "raft/util/pow2_utils.cuh"
#include <raft/core/operators.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
@@ -73,6 +78,91 @@ void approx_knn_build_index(raft::resources const& handle,
auto new_params = from_legacy_index_params(*ivf_ft_pams, metric, metricArg);
index->ivf_flat<T, int64_t>() = std::make_unique<const ivf_flat::index<T, int64_t>>(
ivf_flat::build(handle, new_params, index_array, int64_t(n), D));

// raft::resource::sync_stream(handle);
cjnolet marked this conversation as resolved.

// auto old_list = index->ivf_flat<T, int64_t>()->lists()[0];
// uint32_t n_rows = old_list->size.load();
// uint32_t roundup = Pow2<raft::neighbors::ivf_flat::kIndexGroupSize>::roundUp(n_rows);

// RAFT_LOG_INFO("roundup %d, n_rows %d", roundup, n_rows);

// if (n_rows == 0) { return; }

// auto dim = index->ivf_flat<T, int64_t>()->dim();
// auto veclen = index -> ivf_flat<T, int64_t>()->veclen();
// RAFT_LOG_INFO("roundup %d, n_rows %d, veclen %d, dim %d", roundup, n_rows, veclen, dim);
// auto codes = make_host_matrix<T>(roundup, dim);
// auto block = make_host_matrix<T>(roundup, dim);

// T* firstArray;
// cudaMemcpy(&firstArray, index->ivf_flat<T, int64_t>()->data_ptrs().data_handle(), sizeof(float*), cudaMemcpyDeviceToHost); // Copy the pointer to the first array from device to host

// raft::print_device_vector("codes_gpu", firstArray, 1, std::cout);
// raft::update_host(codes.data_handle(), firstArray, (size_t)(roundup * dim), stream);
// raft::resource::sync_stream(handle);
// raft::neighbors::ivf_flat::helpers::pack_host_interleaved(
// codes.data_handle(),
// block.data_handle(),
// n_rows,
// dim,
// veclen);

// RAFT_LOG_INFO("veclen %d", veclen);
// raft::print_host_vector("codes", codes.data_handle(), roundup * dim, std::cout);
// raft::print_host_vector("block", block.data_handle(), roundup * dim, std::cout);
// // auto indices = make_device_vector<IdxT>(handle_, n_rows);
// copy(indices.data_handle(), old_list->indices.data_handle(), n_rows, stream_);

// ivf_flat::helpers::pack_list_data(handle_, *index, codes.view(), label, 0);
// ivf_pq::helpers::erase_list(handle_, index, label);
// ivf_pq::helpers::extend_list_with_codes<IdxT>(
// handle_, index, codes.view(), indices.view(), label);

// auto& new_list = index->lists()[label];
// ASSERT_NE(old_list.get(), new_list.get())
// << "The old list should have been shared and retained after ivf_pq index has erased the "
// "corresponding cluster.";
// auto list_data_size = (n_rows / ivf_pq::kIndexGroupSize) * new_list->data.extent(1) *
// new_list->data.extent(2) * new_list->data.extent(3);

// ASSERT_TRUE(old_list->data.size() >= list_data_size);
// ASSERT_TRUE(new_list->data.size() >= list_data_size);
// ASSERT_TRUE(devArrMatch(old_list->data.data_handle(),
// new_list->data.data_handle(),
// list_data_size,
// Compare<uint8_t>{}));

// // Pack a few vectors back to the list.
// int row_offset = 9;
// int n_vec = 3;
// ASSERT_TRUE(row_offset + n_vec < n_rows);
// size_t offset = row_offset * index->pq_dim();
// auto codes_to_pack = make_device_matrix_view<const uint8_t, uint32_t>(
// codes.data_handle() + offset, n_vec, index->pq_dim());
// ivf_pq::helpers::pack_list_data(handle_, index, codes_to_pack, label, row_offset);
// ASSERT_TRUE(devArrMatch(old_list->data.data_handle(),
// new_list->data.data_handle(),
// list_data_size,
// Compare<uint8_t>{}));

// Another test with the API that take list_data directly
// auto list_data = index->lists()[label]->data.view();
// uint32_t n_take = 4;
// ASSERT_TRUE(row_offset + n_take < n_rows);
// auto codes2 = raft::make_device_matrix<uint8_t>(handle_, n_take, index->pq_dim());
// ivf_pq::helpers::codepacker::unpack(
// handle_, list_data, index->pq_bits(), row_offset, codes2.view());

// // Write it back
// ivf_pq::helpers::codepacker::pack(
// handle_, make_const_mdspan(codes2.view()), index->pq_bits(), row_offset, list_data);
// ASSERT_TRUE(devArrMatch(old_list->data.data_handle(),
// new_list->data.data_handle(),
// list_data_size,
// Compare<uint8_t>{}));
// }

} else if (ivf_pq_pams) {
neighbors::ivf_pq::index_params params;
params.metric = metric;
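
The long disabled block in this file's hunk is debugging residue from validating the new codepacker against a freshly built IVF-flat index. Its gist, as a hedged sketch (handle, stream, index, and T are assumed from the surrounding function; the block itself stays commented out in the actual diff):

// Illustrative sketch (not part of the diff): copy the first list's interleaved
// data to the host and recover one flat code with the codepacker.
auto list        = index->ivf_flat<T, int64_t>()->lists()[0];
uint32_t n_rows  = list->size.load();
uint32_t dim     = index->ivf_flat<T, int64_t>()->dim();
uint32_t veclen  = index->ivf_flat<T, int64_t>()->veclen();
uint32_t roundup = Pow2<raft::neighbors::ivf_flat::kIndexGroupSize>::roundUp(n_rows);

// data_ptrs() holds device pointers to each list's data; fetch list 0's pointer.
T* list_data_dev = nullptr;
cudaMemcpy(&list_data_dev,
           index->ivf_flat<T, int64_t>()->data_ptrs().data_handle(),
           sizeof(T*),
           cudaMemcpyDeviceToHost);

// Copy the interleaved block to the host and unpack the first record.
auto block = raft::make_host_matrix<T>(roundup, dim);
raft::update_host(block.data_handle(), list_data_dev, size_t(roundup) * dim, stream);
raft::resource::sync_stream(handle);

std::vector<T> flat_code(dim);
raft::neighbors::ivf_flat::helpers::codepacker::unpack_1_interleaved(
  block.data_handle(), flat_code.data(), dim, veclen, /*offset=*/0);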