Factor out a non-generic part of RawIterRange
Markus Westerlind committed Jan 21, 2021
1 parent 681fc6a commit adb8127
Showing 1 changed file with 66 additions and 28 deletions.
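The change splits `RawIterRange<T>` in two: the fields that do not depend on `T` (`current_group`, `next_ctrl`, `end`, plus a new `index` counter) move into a non-generic `RawIterRangeInner` that yields bucket indices, while the generic wrapper keeps only the `data: Bucket<T>` pointer and maps those indices to buckets. Because the inner type is not generic, the group-scanning loop only needs to be monomorphized once rather than once per element type. Below is a minimal sketch of the same pattern, not hashbrown code; `Cursor` and `Items` are hypothetical illustration types that stand in for `RawIterRangeInner` and `RawIterRange<T>`.

/// Non-generic part: scans a bitmask of occupied slots and yields indices.
/// It is compiled once, no matter how many element types use it.
#[derive(Clone, Copy)]
struct Cursor {
    mask: u64,    // one bit per occupied slot
    index: usize, // base index of the current chunk
}

impl Iterator for Cursor {
    type Item = usize;

    fn next(&mut self) -> Option<usize> {
        if self.mask == 0 {
            return None;
        }
        let bit = self.mask.trailing_zeros() as usize;
        self.mask &= self.mask - 1; // clear the lowest set bit
        Some(self.index + bit)
    }
}

/// Generic part: only turns indices into references to `T`.
struct Items<'a, T> {
    data: &'a [T],
    inner: Cursor,
}

impl<'a, T> Iterator for Items<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<&'a T> {
        // All scanning logic lives in the non-generic `Cursor`; only this
        // thin index-to-element mapping is instantiated per element type.
        let i = self.inner.next()?;
        self.data.get(i)
    }
}

fn main() {
    let data = ["a", "b", "c", "d"];
    let items = Items {
        data: &data,
        inner: Cursor { mask: 0b1010, index: 0 },
    };
    assert_eq!(items.collect::<Vec<_>>(), vec![&"b", &"d"]);
}
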
94 changes: 66 additions & 28 deletions src/raw/mod.rs
@@ -1658,19 +1658,27 @@ impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track an item count.
pub(crate) struct RawIterRange<T> {
// Pointer to the buckets for the current group.
data: Bucket<T>,

inner: RawIterRangeInner,
}

#[derive(Clone, Copy)]
pub(crate) struct RawIterRangeInner {
// Mask of full buckets in the current group. Bits are cleared from this
// mask as each element is processed.
current_group: BitMask,

// Pointer to the buckets for the current group.
data: Bucket<T>,

// Pointer to the next group of control bytes,
// Must be aligned to the group size.
next_ctrl: *const u8,

// Pointer one past the last control byte of this range.
end: *const u8,

// Index to the buckets for the current group.
index: usize,
}

impl<T> RawIterRange<T> {
@@ -1679,19 +1687,9 @@ impl<T> RawIterRange<T> {
/// The control byte address must be aligned to the group size.
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
debug_assert_ne!(len, 0);
debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
let end = ctrl.add(len);

// Load the first group and advance ctrl to point to the next group
let current_group = Group::load_aligned(ctrl).match_full();
let next_ctrl = ctrl.add(Group::WIDTH);

Self {
current_group,
data,
next_ctrl,
end,
inner: RawIterRangeInner::new(ctrl, len),
}
}

@@ -1703,15 +1701,15 @@ impl<T> RawIterRange<T> {
#[cfg(feature = "rayon")]
pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
unsafe {
if self.end <= self.next_ctrl {
if self.inner.end <= self.inner.next_ctrl {
// Nothing to split if the group that we are currently processing
// is the last one.
(self, None)
} else {
// len is the remaining number of elements after the group that
// we are currently processing. It must be a multiple of the
// group size (small tables are caught by the check above).
let len = offset_from(self.end, self.next_ctrl);
let len = offset_from(self.inner.end, self.inner.next_ctrl);
debug_assert_eq!(len % Group::WIDTH, 0);

// Split the remaining elements into two halves, but round the
@@ -1723,23 +1721,46 @@ impl<T> RawIterRange<T> {
let mid = (len / 2) & !(Group::WIDTH - 1);

let tail = Self::new(
self.next_ctrl.add(mid),
self.inner.next_ctrl.add(mid),
self.data.next_n(Group::WIDTH).next_n(mid),
len - mid,
);
debug_assert_eq!(
self.data.next_n(Group::WIDTH).next_n(mid).ptr,
tail.data.ptr
);
debug_assert_eq!(self.end, tail.end);
self.end = self.next_ctrl.add(mid);
debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
debug_assert_eq!(self.inner.end, tail.inner.end);
self.inner.end = self.inner.next_ctrl.add(mid);
debug_assert_eq!(self.inner.end.add(Group::WIDTH), tail.inner.next_ctrl);
(self, Some(tail))
}
}
}
}

impl RawIterRangeInner {
/// Returns a `RawIterRangeInner` covering a subset of a table.
///
/// The control byte address must be aligned to the group size.
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn new(ctrl: *const u8, len: usize) -> Self {
debug_assert_ne!(len, 0);
debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
let end = ctrl.add(len);

// Load the first group and advance ctrl to point to the next group
let current_group = Group::load_aligned(ctrl).match_full();
let next_ctrl = ctrl.add(Group::WIDTH);

Self {
current_group,
next_ctrl,
end,
index: 0,
}
}
}

// We make raw iterators unconditionally Send and Sync, and let the PhantomData
// in the actual iterator implementations determine the real Send/Sync bounds.
unsafe impl<T> Send for RawIterRange<T> {}
@@ -1750,9 +1771,7 @@ impl<T> Clone for RawIterRange<T> {
fn clone(&self) -> Self {
Self {
data: self.data.clone(),
next_ctrl: self.next_ctrl,
current_group: self.current_group,
end: self.end,
inner: self.inner.clone(),
}
}
}
@@ -1762,11 +1781,32 @@ impl<T> Iterator for RawIterRange<T> {

#[cfg_attr(feature = "inline-more", inline)]
fn next(&mut self) -> Option<Bucket<T>> {
unsafe {
match self.inner.next() {
Some(index) => Some(self.data.next_n(index)),
None => None,
}
}
}

#[cfg_attr(feature = "inline-more", inline)]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}

impl<T> FusedIterator for RawIterRange<T> {}

impl Iterator for RawIterRangeInner {
type Item = usize;

#[cfg_attr(feature = "inline-more", inline)]
fn next(&mut self) -> Option<Self::Item> {
unsafe {
loop {
if let Some(index) = self.current_group.lowest_set_bit() {
if let Some(group_index) = self.current_group.lowest_set_bit() {
self.current_group = self.current_group.remove_lowest_bit();
return Some(self.data.next_n(index));
return Some(self.index + group_index);
}

if self.next_ctrl >= self.end {
@@ -1779,7 +1819,7 @@ impl<T> Iterator for RawIterRange<T> {
// EMPTY. On larger tables self.end is guaranteed to be aligned
// to the group size (since tables are power-of-two sized).
self.current_group = Group::load_aligned(self.next_ctrl).match_full();
self.data = self.data.next_n(Group::WIDTH);
self.index += Group::WIDTH;
self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
}
}
@@ -1795,8 +1835,6 @@ impl<T> Iterator for RawIterRange<T> {
}
}

impl<T> FusedIterator for RawIterRange<T> {}

/// Iterator which returns a raw pointer to every full bucket in the table.
///
/// For maximum flexibility this iterator is not bound by a lifetime, but you
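One detail of `split` worth spelling out: `(len / 2) & !(Group::WIDTH - 1)` rounds the halfway point down to a multiple of the group width, so both halves stay group-aligned. Below is a standalone sketch of that arithmetic, assuming a hypothetical width of 16 (the SSE2 group size); `group_aligned_mid` is an illustration, not a hashbrown function.

// Standalone illustration of the rounding used by `split`; WIDTH stands in
// for Group::WIDTH and must be a power of two.
const WIDTH: usize = 16;

fn group_aligned_mid(len: usize) -> usize {
    // Clearing the low bits of len / 2 rounds it down to a group boundary.
    (len / 2) & !(WIDTH - 1)
}

fn main() {
    // len = 48: half is 24, which rounds down to 16, so the head keeps one
    // group (16 control bytes) and the tail gets the remaining 32.
    assert_eq!(group_aligned_mid(48), 16);
    // len = 96: half is 48, already a multiple of 16, so the split is 48/48.
    assert_eq!(group_aligned_mid(96), 48);
}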
