From 794d557f67045145fddb607e4b26fcb6d84c71de Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 14:28:53 -0700 Subject: [PATCH 01/38] Deny clippy::all lint group --- src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lib.rs b/src/lib.rs index ebe148fe..81138dc2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,6 @@ #![allow(clippy::drop_non_drop)] #![deny( + clippy::all, clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, From bf47dbc1b56445a3a183191fca38b559688207f7 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 14:29:41 -0700 Subject: [PATCH 02/38] clippy::drop_non_drop These drop calls seem not to be necessary. --- src/multimap_table.rs | 1 - src/tree_store/btree.rs | 2 -- src/tree_store/btree_base.rs | 3 --- src/tree_store/btree_mutator.rs | 3 --- 4 files changed, 9 deletions(-) diff --git a/src/multimap_table.rs b/src/multimap_table.rs index 83bdd6cc..81fc6da4 100644 --- a/src/multimap_table.rs +++ b/src/multimap_table.rs @@ -313,7 +313,6 @@ pub(crate) fn finalize_tree_and_subtree_checksums( } } } - drop(accessor); // TODO: maybe there's a better abstraction, so that we don't need to call into this low-level method? 
let mut mutator = LeafMutator::new( &mut leaf_page, diff --git a/src/tree_store/btree.rs b/src/tree_store/btree.rs index 7182afae..3da044b9 100644 --- a/src/tree_store/btree.rs +++ b/src/tree_store/btree.rs @@ -192,13 +192,11 @@ impl UntypedBtreeMut { } } - drop(accessor); let mut mutator = BranchMutator::new(&mut page); for (child_index, child_page, child_checksum) in new_children.into_iter().flatten() { mutator.write_child_page(child_index, child_page, child_checksum); } - drop(mutator); branch_checksum(&page, self.key_width) } diff --git a/src/tree_store/btree_base.rs b/src/tree_store/btree_base.rs index 0bd73d94..2540a764 100644 --- a/src/tree_store/btree_base.rs +++ b/src/tree_store/btree_base.rs @@ -871,7 +871,6 @@ impl<'b> LeafMutator<'b> { .value_range(i) .map(|(start, end)| end - start) .unwrap_or_default(); - drop(accessor); let value_delta = if overwrite { isize::try_from(value.len()).unwrap() - isize::try_from(existing_value_len).unwrap() @@ -989,7 +988,6 @@ impl<'b> LeafMutator<'b> { let value_start = accessor.value_start(i).unwrap(); let value_end = accessor.value_end(i).unwrap(); let last_value_end = accessor.value_end(accessor.num_pairs() - 1).unwrap(); - drop(accessor); // Update all the pointers let key_ptr_size = if self.fixed_key_size.is_none() { @@ -1086,7 +1084,6 @@ impl<'b> LeafMutator<'b> { self.fixed_value_size, ); let num_pairs = accessor.num_pairs(); - drop(accessor); let mut offset = 4 + size_of::() * i; if self.fixed_key_size.is_none() { offset += size_of::() * num_pairs; diff --git a/src/tree_store/btree_mutator.rs b/src/tree_store/btree_mutator.rs index 4cf14ace..7c50335c 100644 --- a/src/tree_store/btree_mutator.rs +++ b/src/tree_store/btree_mutator.rs @@ -226,7 +226,6 @@ impl<'a, 'b, K: Key, V: Value> MutateHelper<'a, 'b, K, V> { let new_page_accessor = LeafAccessor::new(new_page.memory(), K::fixed_width(), V::fixed_width()); let offset = new_page_accessor.offset_of_first_value(); - drop(new_page_accessor); let guard = 
AccessGuardMut::new(new_page, offset, value.len()); return if position == 0 { Ok(InsertionResult { @@ -280,7 +279,6 @@ impl<'a, 'b, K: Key, V: Value> MutateHelper<'a, 'b, K, V> { let new_page_accessor = LeafAccessor::new(page_mut.memory(), K::fixed_width(), V::fixed_width()); let offset = new_page_accessor.offset_of_value(position).unwrap(); - drop(new_page_accessor); let guard = AccessGuardMut::new(page_mut, offset, value.len()); return Ok(InsertionResult { new_root: page_number, @@ -567,7 +565,6 @@ impl<'a, 'b, K: Key, V: Value> MutateHelper<'a, 'b, K, V> { Subtree(new_page.get_page_number(), DEFERRED) }; let (start, end) = accessor.value_range(position).unwrap(); - drop(accessor); let guard = if uncommitted && self.modify_uncommitted { let page_number = page.get_page_number(); let arc = page.to_arc(); From d72dd11ed65caba75e117b0fe7a7624d8a5213f4 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:03:36 -0700 Subject: [PATCH 03/38] clippy::cast_lossless Casts, i.e., the `as` syntax, should generally be avoided where possible. They truncate by default, which is dangerous if it isn't what you want. This lint complains about `as` when the cast is lossless, which means you can use `T::from` or `.into()`. I tried to use `.into()` where possible due to type inference, because it's slightly nicer, and avoids having to name the types involved. 
--- src/lib.rs | 1 + src/tree_store/page_store/base.rs | 20 +++---- src/tree_store/page_store/buddy_allocator.rs | 2 +- src/tree_store/page_store/layout.rs | 22 ++++---- src/tree_store/page_store/page_manager.rs | 25 +++++---- src/tree_store/page_store/region.rs | 20 +++---- src/tree_store/page_store/xxh3.rs | 58 +++++++++++--------- 7 files changed, 78 insertions(+), 70 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 81138dc2..3b592667 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,7 @@ #![allow(clippy::drop_non_drop)] #![deny( clippy::all, + clippy::cast_lossless, clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, diff --git a/src/tree_store/page_store/base.rs b/src/tree_store/page_store/base.rs index 6f6cda69..eb52d06c 100644 --- a/src/tree_store/page_store/base.rs +++ b/src/tree_store/page_store/base.rs @@ -35,8 +35,8 @@ impl Ord for PageNumber { match self.region.cmp(&other.region) { Ordering::Less => Ordering::Less, Ordering::Equal => { - let self_order0 = self.page_index * 2u32.pow(self.page_order as u32); - let other_order0 = other.page_index * 2u32.pow(other.page_order as u32); + let self_order0 = self.page_index * 2u32.pow(self.page_order.into()); + let other_order0 = other.page_index * 2u32.pow(other.page_order.into()); assert!( self_order0 != other_order0 || self.page_order == other.page_order, "{self:?} overlaps {other:?}, but is not equal" @@ -72,9 +72,9 @@ impl PageNumber { } pub(crate) fn to_le_bytes(self) -> [u8; 8] { - let mut temp = (0x000F_FFFF & self.page_index) as u64; - temp |= (0x000F_FFFF & self.region as u64) << 20; - temp |= (0b0001_1111 & self.page_order as u64) << 59; + let mut temp = 0x000F_FFFF & u64::from(self.page_index); + temp |= (0x000F_FFFF & u64::from(self.region)) << 20; + temp |= (0b0001_1111 & u64::from(self.page_order)) << 59; temp.to_le_bytes() } @@ -99,8 +99,8 @@ impl PageNumber { if self.region > other.region { return false; } - let self_order0 = self.page_index * 
2u32.pow(self.page_order as u32); - let other_order0 = other.page_index * 2u32.pow(other.page_order as u32); + let self_order0 = self.page_index * 2u32.pow(self.page_order.into()); + let other_order0 = other.page_index * 2u32.pow(other.page_order.into()); assert_ne!(self_order0, other_order0, "{self:?} overlaps {other:?}"); self_order0 < other_order0 } @@ -145,9 +145,9 @@ impl PageNumber { page_size: u32, ) -> Range { let regional_start = - region_pages_start + (self.page_index as u64) * self.page_size_bytes(page_size); + region_pages_start + u64::from(self.page_index) * self.page_size_bytes(page_size); debug_assert!(regional_start < region_size); - let region_base = (self.region as u64) * region_size; + let region_base = u64::from(self.region) * region_size; let start = data_section_offset + region_base + regional_start; let end = start + self.page_size_bytes(page_size); start..end @@ -155,7 +155,7 @@ impl PageNumber { pub(crate) fn page_size_bytes(&self, page_size: u32) -> u64 { let pages = 1u64 << self.page_order; - pages * (page_size as u64) + pages * u64::from(page_size) } } diff --git a/src/tree_store/page_store/buddy_allocator.rs b/src/tree_store/page_store/buddy_allocator.rs index 8c967e29..3e209dce 100644 --- a/src/tree_store/page_store/buddy_allocator.rs +++ b/src/tree_store/page_store/buddy_allocator.rs @@ -618,7 +618,7 @@ mod test { // Check that serialized size is as expected for a full region let max_region_pages = 1024 * 1024; let allocator = BuddyAllocator::new(max_region_pages, max_region_pages); - let max_region_pages = max_region_pages as u64; + let max_region_pages = u64::from(max_region_pages); // 2x because that's the integral of 1/2^x to account for all the 21 orders let allocated_state_bits = 2 * max_region_pages; // + u32 * 21 because of the length field diff --git a/src/tree_store/page_store/layout.rs b/src/tree_store/page_store/layout.rs index 7c8b2a16..68840216 100644 --- a/src/tree_store/page_store/layout.rs +++ 
b/src/tree_store/page_store/layout.rs @@ -34,10 +34,10 @@ impl RegionLayout { page_capacity: u32, page_size: u32, ) -> RegionLayout { - assert!(desired_usable_bytes <= page_capacity as u64 * page_size as u64); + assert!(desired_usable_bytes <= u64::from(page_capacity) * u64::from(page_size)); let header_pages = RegionHeader::header_pages_expensive(page_size, page_capacity); let num_pages = - round_up_to_multiple_of(desired_usable_bytes, page_size.into()) / page_size as u64; + round_up_to_multiple_of(desired_usable_bytes, page_size.into()) / u64::from(page_size); Self { num_pages: num_pages.try_into().unwrap(), @@ -57,7 +57,7 @@ impl RegionLayout { } pub(super) fn data_section(&self) -> Range { - let header_bytes = self.header_pages as u64 * self.page_size as u64; + let header_bytes = u64::from(self.header_pages) * u64::from(self.page_size); header_bytes..(header_bytes + self.usable_bytes()) } @@ -74,11 +74,11 @@ impl RegionLayout { } pub(super) fn len(&self) -> u64 { - (self.header_pages as u64) * (self.page_size as u64) + self.usable_bytes() + u64::from(self.header_pages) * u64::from(self.page_size) + self.usable_bytes() } pub(super) fn usable_bytes(&self) -> u64 { - self.page_size as u64 * self.num_pages as u64 + u64::from(self.page_size) * u64::from(self.num_pages) } } @@ -128,9 +128,9 @@ impl DatabaseLayout { region_max_data_pages_u32: u32, page_size_u32: u32, ) -> Self { - let page_size = page_size_u32 as u64; - let region_header_pages = region_header_pages_u32 as u64; - let region_max_data_pages = region_max_data_pages_u32 as u64; + let page_size = u64::from(page_size_u32); + let region_header_pages = u64::from(region_header_pages_u32); + let region_max_data_pages = u64::from(region_max_data_pages_u32); // Super-header let mut remaining = file_len - page_size; let full_region_size = (region_header_pages + region_max_data_pages) * page_size; @@ -231,13 +231,13 @@ impl DatabaseLayout { .as_ref() .map(RegionLayout::usable_bytes) .unwrap_or_default(); - 
(self.num_full_regions as u64) * self.full_region_layout.usable_bytes() + trailing + u64::from(self.num_full_regions) * self.full_region_layout.usable_bytes() + trailing } pub(super) fn region_base_address(&self, region: u32) -> u64 { assert!(region < self.num_regions()); - (self.full_region_layout.page_size() as u64) - + (region as u64) * self.full_region_layout.len() + u64::from(self.full_region_layout.page_size()) + + u64::from(region) * self.full_region_layout.len() } pub(super) fn region_layout(&self, region: u32) -> RegionLayout { diff --git a/src/tree_store/page_store/page_manager.rs b/src/tree_store/page_store/page_manager.rs index efacc619..cfeb8518 100644 --- a/src/tree_store/page_store/page_manager.rs +++ b/src/tree_store/page_store/page_manager.rs @@ -114,7 +114,10 @@ impl TransactionalMemory { assert!(page_size.is_power_of_two() && page_size >= DB_HEADER_SIZE); let region_size = requested_region_size.unwrap_or(MAX_USABLE_REGION_SPACE); - let region_size = min(region_size, (MAX_PAGE_INDEX as u64 + 1) * page_size as u64); + let region_size = min( + region_size, + (u64::from(MAX_PAGE_INDEX) + 1) * page_size as u64, + ); assert!(region_size.is_power_of_two()); let storage = PagedCachedFile::new( @@ -143,7 +146,7 @@ impl TransactionalMemory { // Make sure that there is enough room to allocate the region tracker into a page let size: u64 = max( MIN_DESIRED_USABLE_BYTES, - page_size as u64 * MIN_USABLE_PAGES as u64, + page_size as u64 * u64::from(MIN_USABLE_PAGES), ); let tracker_space = (page_size * ((region_tracker_required_bytes + page_size - 1) / page_size)) as u64; @@ -615,7 +618,7 @@ impl TransactionalMemory { .free(page_number.page_index, page_number.page_order); let address = page_number.address_range( - self.page_size as u64, + self.page_size.into(), self.region_size, self.region_header_with_padding_size, self.page_size, @@ -640,7 +643,7 @@ impl TransactionalMemory { hint: PageHint, ) -> Result { let range = page_number.address_range( - 
self.page_size as u64, + self.page_size.into(), self.region_size, self.region_header_with_padding_size, self.page_size, @@ -683,7 +686,7 @@ impl TransactionalMemory { } let address_range = page_number.address_range( - self.page_size as u64, + self.page_size.into(), self.region_size, self.region_header_with_padding_size, self.page_size, @@ -773,7 +776,7 @@ impl TransactionalMemory { .mark_free(page.page_order, region_index); let address_range = page.address_range( - self.page_size as u64, + self.page_size.into(), self.region_size, self.region_header_with_padding_size, self.page_size, @@ -835,7 +838,7 @@ impl TransactionalMemory { .insert(page_number); let address_range = page_number.address_range( - self.page_size as u64, + self.page_size.into(), self.region_size, self.region_header_with_padding_size, self.page_size, @@ -925,9 +928,9 @@ impl TransactionalMemory { fn grow(&self, state: &mut InMemoryState, required_order_allocation: u8) -> Result<()> { let layout = state.header.layout(); let required_growth = - 2u64.pow(required_order_allocation.into()) * state.header.page_size() as u64; - let max_region_size = (state.header.layout().full_region_layout().num_pages() as u64) - * (state.header.page_size() as u64); + 2u64.pow(required_order_allocation.into()) * u64::from(state.header.page_size()); + let max_region_size = u64::from(state.header.layout().full_region_layout().num_pages()) + * u64::from(state.header.page_size()); let next_desired_size = if layout.num_full_regions() > 0 { if let Some(trailing) = layout.trailing_region_layout() { if 2 * required_growth < max_region_size - trailing.usable_bytes() { @@ -979,7 +982,7 @@ impl TransactionalMemory { let state = self.state.lock().unwrap(); let mut count = 0u64; for i in 0..state.header.layout().num_regions() { - count += state.get_region(i).count_allocated_pages() as u64; + count += u64::from(state.get_region(i).count_allocated_pages()); } Ok(count) diff --git a/src/tree_store/page_store/region.rs 
b/src/tree_store/page_store/region.rs index bfeb20df..ef9bc505 100644 --- a/src/tree_store/page_store/region.rs +++ b/src/tree_store/page_store/region.rs @@ -175,13 +175,13 @@ impl Allocators { let page_size = header.page_size(); let region_header_size = header.layout().full_region_layout().get_header_pages() * page_size; - let region_size = header.layout().full_region_layout().num_pages() as u64 - * page_size as u64 - + region_header_size as u64; + let region_size = u64::from(header.layout().full_region_layout().num_pages()) + * u64::from(page_size) + + u64::from(region_header_size); let range = header.region_tracker().address_range( - page_size as u64, + page_size.into(), region_size, - region_header_size as u64, + region_header_size.into(), page_size, ); let len: usize = (range.end - range.start).try_into().unwrap(); @@ -215,12 +215,12 @@ impl Allocators { ) -> Result { let page_size = layout.full_region_layout().page_size(); let region_header_size = - (layout.full_region_layout().get_header_pages() * page_size) as u64; - let region_size = - layout.full_region_layout().num_pages() as u64 * page_size as u64 + region_header_size; + u64::from(layout.full_region_layout().get_header_pages() * page_size); + let region_size = u64::from(layout.full_region_layout().num_pages()) * u64::from(page_size) + + region_header_size; let mut region_tracker_mem = { let range = region_tracker_page.address_range( - page_size as u64, + page_size.into(), region_size, region_header_size, page_size, @@ -325,7 +325,7 @@ pub(crate) struct RegionHeader {} impl RegionHeader { pub(crate) fn header_pages_expensive(page_size: u32, pages_per_region: u32) -> u32 { - let page_size = page_size as u64; + let page_size = u64::from(page_size); // TODO: this is kind of expensive. 
Maybe it should be cached let allocator = BuddyAllocator::new(pages_per_region, pages_per_region); let result = 8u64 + allocator.to_vec().len() as u64; diff --git a/src/tree_store/page_store/xxh3.rs b/src/tree_store/page_store/xxh3.rs index c812babe..a98b54cf 100644 --- a/src/tree_store/page_store/xxh3.rs +++ b/src/tree_store/page_store/xxh3.rs @@ -462,14 +462,14 @@ fn hash64_0(secret: &[u8], seed: u64) -> u64 { } fn hash64_1to3(data: &[u8], secret: &[u8], seed: u64) -> u64 { - let x1 = data[0] as u32; - let x2 = data[data.len() >> 1] as u32; - let x3 = (*data.last().unwrap()) as u32; + let x1 = u32::from(data[0]); + let x2 = u32::from(data[data.len() >> 1]); + let x3 = u32::from(*data.last().unwrap()); #[allow(clippy::cast_possible_truncation)] let x4 = data.len() as u32; - let combined = ((x1 << 16) | (x2 << 24) | x3 | (x4 << 8)) as u64; - let mut result = (get_u32(secret, 0) ^ get_u32(secret, 1)) as u64; + let combined = u64::from((x1 << 16) | (x2 << 24) | x3 | (x4 << 8)); + let mut result = u64::from(get_u32(secret, 0) ^ get_u32(secret, 1)); result = result.wrapping_add(seed); result ^= combined; xxh64_avalanche(result) @@ -479,8 +479,8 @@ fn hash64_4to8(data: &[u8], secret: &[u8], mut seed: u64) -> u64 { #[allow(clippy::cast_possible_truncation)] let truncate_seed = seed as u32; seed ^= u64::from(truncate_seed.swap_bytes()) << 32; - let x1 = get_u32(data, 0) as u64; - let x2 = get_u32(&data[data.len() - 4..], 0) as u64; + let x1 = u64::from(get_u32(data, 0)); + let x2 = u64::from(get_u32(&data[data.len() - 4..], 0)); let x = x2 | (x1 << 32); let s = (get_u64(secret, 1) ^ get_u64(secret, 2)).wrapping_sub(seed); rrmxmx(x ^ s, data.len() as u64) @@ -595,24 +595,24 @@ fn hash64_large_generic( } fn hash128_0(secret: &[u8], seed: u64) -> u128 { - let high = (hash64_0(&secret[3 * 8..], seed) as u128) << 64; - let low = hash64_0(&secret[8..], seed) as u128; + let high = u128::from(hash64_0(&secret[3 * 8..], seed)) << 64; + let low = u128::from(hash64_0(&secret[8..], 
seed)); high | low } fn hash128_1to3(data: &[u8], secret: &[u8], seed: u64) -> u128 { - let x1 = data[0] as u32; - let x2 = data[data.len() >> 1] as u32; - let x3 = (*data.last().unwrap()) as u32; + let x1 = u32::from(data[0]); + let x2 = u32::from(data[data.len() >> 1]); + let x3 = u32::from(*data.last().unwrap()); #[allow(clippy::cast_possible_truncation)] let x4 = data.len() as u32; let combined_low = (x1 << 16) | (x2 << 24) | x3 | (x4 << 8); let combined_high: u64 = combined_low.swap_bytes().rotate_left(13).into(); - let s_low = ((get_u32(secret, 0) ^ get_u32(secret, 1)) as u64).wrapping_add(seed); - let s_high = ((get_u32(secret, 2) ^ get_u32(secret, 3)) as u64).wrapping_sub(seed); - let high = (xxh64_avalanche(combined_high ^ s_high) as u128) << 64; - let low = xxh64_avalanche(combined_low as u64 ^ s_low) as u128; + let s_low = u64::from(get_u32(secret, 0) ^ get_u32(secret, 1)).wrapping_add(seed); + let s_high = u64::from(get_u32(secret, 2) ^ get_u32(secret, 3)).wrapping_sub(seed); + let high = (u128::from(xxh64_avalanche(combined_high ^ s_high))) << 64; + let low = u128::from(xxh64_avalanche(u64::from(combined_low) ^ s_low)); high | low } @@ -620,13 +620,17 @@ fn hash128_4to8(data: &[u8], secret: &[u8], mut seed: u64) -> u128 { #[allow(clippy::cast_possible_truncation)] let truncate_seed = seed as u32; seed ^= u64::from(truncate_seed.swap_bytes()) << 32; - let x_low = get_u32(data, 0) as u64; - let x_high = u32::from_le_bytes(data[data.len() - 4..].try_into().unwrap()) as u64; + let x_low = u64::from(get_u32(data, 0)); + let x_high = u64::from(u32::from_le_bytes( + data[data.len() - 4..].try_into().unwrap(), + )); let x = x_low | (x_high << 32); let s = (get_u64(secret, 2) ^ get_u64(secret, 3)).wrapping_add(seed); - let mut y = (x ^ s) as u128; - y = y.wrapping_mul(PRIME64[0].wrapping_add((data.len() << 2) as u64) as u128); + let mut y = u128::from(x ^ s); + y = y.wrapping_mul(u128::from( + PRIME64[0].wrapping_add((data.len() << 2) as u64), + )); 
#[allow(clippy::cast_possible_truncation)] let mut r_low = y as u64; @@ -638,7 +642,7 @@ fn hash128_4to8(data: &[u8], secret: &[u8], mut seed: u64) -> u128 { r_low = xorshift(r_low, 28); r_high = xxh3_avalanche(r_high); - (r_high as u128) << 64 | r_low as u128 + u128::from(r_high) << 64 | u128::from(r_low) } fn hash128_9to16(data: &[u8], secret: &[u8], seed: u64) -> u128 { @@ -649,7 +653,7 @@ fn hash128_9to16(data: &[u8], secret: &[u8], seed: u64) -> u128 { let mixed = x_low ^ x_high ^ s_low; let x_high = x_high ^ s_high; - let result = (mixed as u128).wrapping_mul(PRIME64[0] as u128); + let result = u128::from(mixed).wrapping_mul(u128::from(PRIME64[0])); #[allow(clippy::cast_possible_truncation)] let mut r_low = result as u64; let mut r_high = (result >> 64) as u64; @@ -658,7 +662,7 @@ fn hash128_9to16(data: &[u8], secret: &[u8], seed: u64) -> u128 { r_high = r_high.wrapping_add((x_high & 0xFFFF_FFFF).wrapping_mul(PRIME32[1] - 1)); r_low ^= r_high.swap_bytes(); - let result2 = (r_low as u128).wrapping_mul(PRIME64[1] as u128); + let result2 = u128::from(r_low).wrapping_mul(u128::from(PRIME64[1])); #[allow(clippy::cast_possible_truncation)] let mut r2_low = result2 as u64; let mut r2_high = (result2 >> 64) as u64; @@ -666,7 +670,7 @@ fn hash128_9to16(data: &[u8], secret: &[u8], seed: u64) -> u128 { r2_low = xxh3_avalanche(r2_low); r2_high = xxh3_avalanche(r2_high); - (r2_high as u128) << 64 | r2_low as u128 + u128::from(r2_high) << 64 | u128::from(r2_low) } fn hash128_0to16(data: &[u8], secret: &[u8], seed: u64) -> u128 { @@ -702,7 +706,7 @@ fn hash128_17to128(data: &[u8], secret: &[u8], seed: u64) -> u128 { r_low = xxh3_avalanche(r_low); r_high = 0u64.wrapping_sub(xxh3_avalanche(r_high)); - (r_high as u128) << 64 | r_low as u128 + u128::from(r_high) << 64 | u128::from(r_low) } fn hash128_129to240(data: &[u8], secret: &[u8], seed: u64) -> u128 { @@ -746,7 +750,7 @@ fn hash128_129to240(data: &[u8], secret: &[u8], seed: u64) -> u128 { r_low = xxh3_avalanche(r_low); 
r_high = 0u64.wrapping_sub(xxh3_avalanche(r_high)); - (r_high as u128) << 64 | r_low as u128 + u128::from(r_high) << 64 | u128::from(r_low) } fn hash128_0to240(data: &[u8], secret: &[u8], seed: u64) -> u128 { @@ -804,7 +808,7 @@ fn hash128_large_generic( !(PRIME64[1].wrapping_mul(data.len() as u64)), ); - (high as u128) << 64 | low as u128 + u128::from(high) << 64 | u128::from(low) } #[cfg(test)] From 32bca328289fc0f419c38147d3e42a5d26c41568 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:06:04 -0700 Subject: [PATCH 04/38] clippy::checked_conversions This avoids an assert, because on the next line we do the conversion with an unwrap, which has the same effect. --- src/complex_types.rs | 1 - src/lib.rs | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/complex_types.rs b/src/complex_types.rs index 47521b3d..6513339e 100644 --- a/src/complex_types.rs +++ b/src/complex_types.rs @@ -9,7 +9,6 @@ fn encode_varint_len(len: usize, output: &mut Vec) { output.push(254); output.extend_from_slice(&u16_len.to_le_bytes()) } else { - assert!(len <= u32::MAX as usize); let u32_len: u32 = len.try_into().unwrap(); output.push(255); output.extend_from_slice(&u32_len.to_le_bytes()) diff --git a/src/lib.rs b/src/lib.rs index 3b592667..8f96d06e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,6 +2,7 @@ #![deny( clippy::all, clippy::cast_lossless, + clippy::checked_conversions, clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, From 8956d787f24e5282d0805d03460cc8606e091c26 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:07:00 -0700 Subject: [PATCH 05/38] clippy::cloned_instead_of_copied Copied should generally be used where possible, because all types which implement copy implement clone, but not all types which implement clone implement copy, so using `.copied()` gives the additional information that the copy is cheap. 
--- src/lib.rs | 1 + src/transaction_tracker.rs | 2 +- src/tree_store/page_store/bitmap.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 8f96d06e..a5e07c79 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,7 @@ clippy::all, clippy::cast_lossless, clippy::checked_conversions, + clippy::cloned_instead_of_copied, clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, diff --git a/src/transaction_tracker.rs b/src/transaction_tracker.rs index 3b4d3b10..421acce1 100644 --- a/src/transaction_tracker.rs +++ b/src/transaction_tracker.rs @@ -243,6 +243,6 @@ impl TransactionTracker { .live_read_transactions .keys() .next() - .cloned() + .copied() } } diff --git a/src/tree_store/page_store/bitmap.rs b/src/tree_store/page_store/bitmap.rs index 26c52e3f..a11d0bd8 100644 --- a/src/tree_store/page_store/bitmap.rs +++ b/src/tree_store/page_store/bitmap.rs @@ -533,7 +533,7 @@ mod test { } else { assert_eq!(allocated.len(), num_pages as usize); } - } else if let Some(to_free) = allocated.iter().choose(&mut rng).cloned() { + } else if let Some(to_free) = allocated.iter().choose(&mut rng).copied() { allocator.clear(to_free); allocated.remove(&to_free); } From e73ff5d32fb3ab131442ed1e662863228485d124 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:08:15 -0700 Subject: [PATCH 06/38] clippy::default_trait_access This one is just kind of annoying, forcing you to write `T::default()` instead of `Default::default()`, so just ignore it. 
--- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index a5e07c79..352edcd9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,4 @@ -#![allow(clippy::drop_non_drop)] +#![allow(clippy::drop_non_drop, clippy::default_trait_access)] #![deny( clippy::all, clippy::cast_lossless, From d8dc12f2aa9a1cd8855c917f36e39af796863972 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:10:11 -0700 Subject: [PATCH 07/38] clippy::doc_markdown Clippy wants items in markdown documentation to be inside backticks. Why not, since it makes them formatted nicely. --- src/db.rs | 4 ++-- src/lib.rs | 1 + src/multimap_table.rs | 2 +- src/table.rs | 2 +- src/tree_store/btree.rs | 2 +- src/tree_store/page_store/buddy_allocator.rs | 4 ++-- src/types.rs | 6 +++--- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/db.rs b/src/db.rs index face0bab..79d427b4 100644 --- a/src/db.rs +++ b/src/db.rs @@ -804,7 +804,7 @@ impl RepairSession { self.aborted } - /// Abort the repair process. The coorresponding call to [Builder::open] or [Builder::create] will return an error + /// Abort the repair process. The coorresponding call to [`Builder::open`] or [`Builder::create`] will return an error pub fn abort(&mut self) { self.aborted = true; } @@ -852,7 +852,7 @@ impl Builder { /// Set a callback which will be invoked periodically in the event that the database file needs /// to be repaired. /// - /// The [RepairSession] argument can be used to control the repair process. + /// The [`RepairSession`] argument can be used to control the repair process. /// /// If the database file needs repair, the callback will be invoked at least once. /// There is no upper limit on the number of times it may be called. 
diff --git a/src/lib.rs b/src/lib.rs index 352edcd9..5fd69acb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,7 @@ #![allow(clippy::drop_non_drop, clippy::default_trait_access)] #![deny( clippy::all, + clippy::doc_markdown, clippy::cast_lossless, clippy::checked_conversions, clippy::cloned_instead_of_copied, diff --git a/src/multimap_table.rs b/src/multimap_table.rs index 81fc6da4..3f5f83df 100644 --- a/src/multimap_table.rs +++ b/src/multimap_table.rs @@ -524,7 +524,7 @@ impl Into for DynamicCollectionType { /// (when type = 2) root (8 bytes): sub tree root page number /// (when type = 2) checksum (16 bytes): sub tree checksum /// -/// NOTE: Even though the [PhantomData] is zero-sized, the inner data DST must be placed last. +/// NOTE: Even though the [`PhantomData`] is zero-sized, the inner data DST must be placed last. /// See [Exotically Sized Types](https://doc.rust-lang.org/nomicon/exotic-sizes.html#dynamically-sized-types-dsts) /// section of the Rustonomicon for more details. 
#[repr(transparent)] diff --git a/src/table.rs b/src/table.rs index 346e7b0b..8f1e99ac 100644 --- a/src/table.rs +++ b/src/table.rs @@ -223,7 +223,7 @@ impl<'txn, K: Key + 'static, V: MutInPlaceValue + 'static> Table<'txn, K, V> { /// /// If key is already present it is replaced /// - /// The returned reference will have length equal to value_length + /// The returned reference will have length equal to `value_length` pub fn insert_reserve<'a>( &mut self, key: impl Borrow>, diff --git a/src/tree_store/btree.rs b/src/tree_store/btree.rs index 3da044b9..0b9229dd 100644 --- a/src/tree_store/btree.rs +++ b/src/tree_store/btree.rs @@ -551,7 +551,7 @@ impl BtreeMut<'_, K, V> { impl<'a, K: Key + 'a, V: MutInPlaceValue + 'a> BtreeMut<'a, K, V> { /// Reserve space to insert a key-value pair - /// The returned reference will have length equal to value_length + /// The returned reference will have length equal to `value_length` // Return type has the same lifetime as &self, because the tree must not be modified until the mutable guard is dropped pub(crate) fn insert_reserve( &mut self, diff --git a/src/tree_store/page_store/buddy_allocator.rs b/src/tree_store/page_store/buddy_allocator.rs index 3e209dce..b0983bc7 100644 --- a/src/tree_store/page_store/buddy_allocator.rs +++ b/src/tree_store/page_store/buddy_allocator.rs @@ -463,7 +463,7 @@ impl BuddyAllocator { } } - /// data must have been initialized by Self::init_new(), and page_number must be free + /// data must have been initialized by `Self::init_new()`, and `page_number` must be free pub(crate) fn record_alloc(&mut self, page_number: u32, order: u8) { assert!(order <= self.max_order); // Only record the allocation for the actual page @@ -492,7 +492,7 @@ impl BuddyAllocator { } } - /// data must have been initialized by Self::init_new() + /// data must have been initialized by `Self::init_new()` pub(crate) fn free(&mut self, page_number: u32, order: u8) { debug_assert!(self.get_order_free_mut(order).get(page_number)); 
debug_assert!( diff --git a/src/types.rs b/src/types.rs index 640322dc..22d09f44 100644 --- a/src/types.rs +++ b/src/types.rs @@ -72,7 +72,7 @@ impl TypeName { } pub trait Value: Debug { - /// SelfType<'a> must be the same type as Self with all lifetimes replaced with 'a + /// `SelfType<'a>` must be the same type as Self with all lifetimes replaced with 'a type SelfType<'a>: Debug + 'a where Self: 'a; @@ -101,9 +101,9 @@ pub trait Value: Debug { } /// Implementing this trait indicates that the type can be mutated in-place as a &mut [u8]. -/// This enables the .insert_reserve() method on Table +/// This enables the `.insert_reserve()` method on Table pub trait MutInPlaceValue: Value { - /// The base type such that &mut [u8] can be safely transmuted to &mut BaseRefType + /// The base type such that &mut [u8] can be safely transmuted to `&mut BaseRefType` type BaseRefType: Debug + ?Sized; // Initialize `data` to a valid value. This method will be called (at some point, not necessarily immediately) From bba2cd5ec5cb83a9b7231026a0a8791a4ee7f322 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:11:26 -0700 Subject: [PATCH 08/38] Ignore benchmark and test temporary files --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index ddb7228e..94e46036 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,6 @@ Cargo.lock # Profiling perf.data* flamegraph.svg + +# benchmark and test temporary files +/.tmp* From 40f8cb5b707feeb52e092d8de1b681d3e9fe667d Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:12:50 -0700 Subject: [PATCH 09/38] clippy::explicit_iter_loop `for x in foo.iter()` is equivalent to `for x in &foo`, which is shorter and more idiomatic --- src/lib.rs | 1 + src/multimap_table.rs | 2 +- src/tree_store/page_store/bitmap.rs | 6 +++--- src/tree_store/page_store/buddy_allocator.rs | 10 +++++----- src/tree_store/page_store/region.rs | 4 ++-- src/tree_store/page_store/savepoint.rs | 4 ++-- 
6 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 5fd69acb..d182fb12 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,6 +2,7 @@ #![deny( clippy::all, clippy::doc_markdown, + clippy::explicit_iter_loop, clippy::cast_lossless, clippy::checked_conversions, clippy::cloned_instead_of_copied, diff --git a/src/multimap_table.rs b/src/multimap_table.rs index 3f5f83df..75fa1c5c 100644 --- a/src/multimap_table.rs +++ b/src/multimap_table.rs @@ -850,7 +850,7 @@ impl<'a, V: Key + 'static> Drop for MultimapValue<'a, V> { drop(mem::take(&mut self.inner)); if !self.free_on_drop.is_empty() { let mut freed_pages = self.freed_pages.as_ref().unwrap().lock().unwrap(); - for page in self.free_on_drop.iter() { + for page in &self.free_on_drop { if !self.mem.as_ref().unwrap().free_if_uncommitted(*page) { freed_pages.push(*page); } diff --git a/src/tree_store/page_store/bitmap.rs b/src/tree_store/page_store/bitmap.rs index a11d0bd8..2b0de819 100644 --- a/src/tree_store/page_store/bitmap.rs +++ b/src/tree_store/page_store/bitmap.rs @@ -71,14 +71,14 @@ impl BtreeBitmap { let vecs: Vec> = self.heights.iter().map(|x| x.to_vec()).collect(); let mut data_offset = END_OFFSETS + self.heights.len() * size_of::(); let end_metadata = data_offset; - for serialized in vecs.iter() { + for serialized in &vecs { data_offset += serialized.len(); let offset_u32: u32 = data_offset.try_into().unwrap(); result.extend(offset_u32.to_le_bytes()); } assert_eq!(end_metadata, result.len()); - for serialized in vecs.iter() { + for serialized in &vecs { result.extend(serialized); } @@ -303,7 +303,7 @@ impl U64GroupedBitmap { pub fn to_vec(&self) -> Vec { let mut result = vec![]; result.extend(self.len.to_le_bytes()); - for x in self.data.iter() { + for x in &self.data { result.extend(x.to_le_bytes()); } result diff --git a/src/tree_store/page_store/buddy_allocator.rs b/src/tree_store/page_store/buddy_allocator.rs index b0983bc7..e1b05114 100644 --- 
a/src/tree_store/page_store/buddy_allocator.rs +++ b/src/tree_store/page_store/buddy_allocator.rs @@ -87,21 +87,21 @@ impl BuddyAllocator { let mut data_offset = result.len() + (self.max_order as usize + 1) * 2 * size_of::(); let end_metadata = data_offset; - for order in self.free.iter() { + for order in &self.free { data_offset += order.to_vec().len(); let offset_u32: u32 = data_offset.try_into().unwrap(); result.extend(offset_u32.to_le_bytes()); } - for order in self.allocated.iter() { + for order in &self.allocated { data_offset += order.to_vec().len(); let offset_u32: u32 = data_offset.try_into().unwrap(); result.extend(offset_u32.to_le_bytes()); } assert_eq!(end_metadata, result.len()); - for order in self.free.iter() { + for order in &self.free { result.extend(&order.to_vec()); } - for order in self.allocated.iter() { + for order in &self.allocated { result.extend(&order.to_vec()); } @@ -280,7 +280,7 @@ impl BuddyAllocator { } let mut check_result = HashSet::new(); - for page in allocated_check.iter() { + for page in &allocated_check { check_result.extend(page.to_order0()); } assert_eq!(free_check, check_result); diff --git a/src/tree_store/page_store/region.rs b/src/tree_store/page_store/region.rs index ef9bc505..44b88e43 100644 --- a/src/tree_store/page_store/region.rs +++ b/src/tree_store/page_store/region.rs @@ -41,7 +41,7 @@ impl RegionTracker { let allocator_len: u32 = self.order_trackers[0].to_vec().len().try_into().unwrap(); result.extend(orders.to_le_bytes()); result.extend(allocator_len.to_le_bytes()); - for order in self.order_trackers.iter() { + for order in &self.order_trackers { result.extend(&order.to_vec()); } result @@ -165,7 +165,7 @@ impl Allocators { // Ignore the region tracker because it is an optimistic cache, and so may not match // between repairs of the allocators let mut result = 0; - for allocator in self.region_allocators.iter() { + for allocator in &self.region_allocators { result ^= xxh3_checksum(&allocator.to_vec()); } result 
diff --git a/src/tree_store/page_store/savepoint.rs b/src/tree_store/page_store/savepoint.rs index 72a4b2e9..1690b146 100644 --- a/src/tree_store/page_store/savepoint.rs +++ b/src/tree_store/page_store/savepoint.rs @@ -138,7 +138,7 @@ impl<'a> SerializedSavepoint<'a> { .unwrap() .to_le_bytes(), ); - for region in savepoint.regional_allocators.iter() { + for region in &savepoint.regional_allocators { assert_eq!(savepoint.regional_allocators[0].len(), region.len()); } result.extend( @@ -147,7 +147,7 @@ impl<'a> SerializedSavepoint<'a> { .to_le_bytes(), ); - for region in savepoint.regional_allocators.iter() { + for region in &savepoint.regional_allocators { result.extend(region); } Self::Owned(result) From 6d1e4ddd9f42e788e2fb81a3ef59055b49cb406b Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:14:40 -0700 Subject: [PATCH 10/38] Allow clippy::if_not_else MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This complains if you do `if !x { … } else { … }`, on the basis that you could avoid the `!` and just switch the bodies of the if and else. I like this one, but it's subjective and creates a lot of code churn, so I allowed it. If you like it, think about removing it from the allow block. --- src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index d182fb12..23f6c263 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,8 @@ -#![allow(clippy::drop_non_drop, clippy::default_trait_access)] +#![allow( + clippy::drop_non_drop, + clippy::default_trait_access, + clippy::if_not_else +)] #![deny( clippy::all, clippy::doc_markdown, From 7b6f0c1bf5a5353b0e176897d2d72c736827dac5 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:16:56 -0700 Subject: [PATCH 11/38] Allow clippy::inline_always Clippy doesn't like this and suggests always profiling first, but I have no idea if these instances are correct or not, so allow them. 
--- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 23f6c263..d028e459 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,8 @@ #![allow( clippy::drop_non_drop, clippy::default_trait_access, - clippy::if_not_else + clippy::if_not_else, + clippy::inline_always )] #![deny( clippy::all, From 350bff584af0d2596e9b424dc01b6b84db445694 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:18:22 -0700 Subject: [PATCH 12/38] Allow clippy::iter_not_returning_iterator Clippy complains if the return type from a function called `iter` does not implement `Iterator`. This is an API break or a new implementation of `Iterator`, so just allow it. --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index d028e459..b0416e76 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,7 +2,8 @@ clippy::drop_non_drop, clippy::default_trait_access, clippy::if_not_else, - clippy::inline_always + clippy::inline_always, + clippy::iter_not_returning_iterator )] #![deny( clippy::all, From 0ac4c459a3b7f1b779def5672bde028817dc6f58 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:21:02 -0700 Subject: [PATCH 13/38] Allow clippy::let_underscore_drop Clippy doesn't like if you use `let _ = x` when `x` implements `Drop`, since it's not necessarily clear that `_` will invoke `Drop`. (Unlike an `_`-prefixed identifier, which will not drop.) This is kind of subjective, so allow it. 
--- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index b0416e76..b356e27e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,7 +3,8 @@ clippy::default_trait_access, clippy::if_not_else, clippy::inline_always, - clippy::iter_not_returning_iterator + clippy::iter_not_returning_iterator, + clippy::let_underscore_drop )] #![deny( clippy::all, From 184710d1f0723169ce5c1cdd5425fc7a6a9b8ba7 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:23:07 -0700 Subject: [PATCH 14/38] Deny clippy::map_unwrap_or Clippy prefers `map_or` to `map + unwrap_or`. Why not. --- src/lib.rs | 3 ++- src/transactions.rs | 3 +-- src/tree_store/btree.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index b356e27e..162afee6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -17,7 +17,8 @@ clippy::cast_possible_wrap, clippy::cast_precision_loss, clippy::cast_sign_loss, - clippy::disallowed_methods + clippy::disallowed_methods, + clippy::map_unwrap_or )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/transactions.rs b/src/transactions.rs index 6f59dada..17c97d88 100644 --- a/src/transactions.rs +++ b/src/transactions.rs @@ -1065,8 +1065,7 @@ impl WriteTransaction { let free_until_transaction = self .transaction_tracker .oldest_live_read_transaction() - .map(|x| x.next()) - .unwrap_or(self.transaction_id); + .map_or(self.transaction_id, |x| x.next()); let user_root = self .tables diff --git a/src/tree_store/btree.rs b/src/tree_store/btree.rs index 0b9229dd..991bfad6 100644 --- a/src/tree_store/btree.rs +++ b/src/tree_store/btree.rs @@ -611,7 +611,7 @@ impl RawBtree { } pub(crate) fn len(&self) -> Result { - Ok(self.root.map(|x| x.length).unwrap_or(0)) + Ok(self.root.map_or(0, |x| x.length)) } pub(crate) fn verify_checksum(&self) -> Result { @@ -809,7 +809,7 @@ impl Btree { } pub(crate) fn len(&self) -> Result 
{ - Ok(self.root.map(|x| x.length).unwrap_or(0)) + Ok(self.root.map_or(0, |x| x.length)) } pub(crate) fn stats(&self) -> Result { From 8c89ad21ac19cf2e022d6efc0fdea4ab2bb97eb5 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:23:55 -0700 Subject: [PATCH 15/38] Allow clippy::missing_errors_doc and clippy::missing_panics_doc Clippy wants you to write docs about panics and errors. Probably a good idea, but who has time for that. Allow it. --- src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 162afee6..3426bf34 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,7 +4,9 @@ clippy::if_not_else, clippy::inline_always, clippy::iter_not_returning_iterator, - clippy::let_underscore_drop + clippy::let_underscore_drop, + clippy::missing_errors_doc, + clippy::missing_panics_doc )] #![deny( clippy::all, From 5133f2ce58c328c2804d3bcf38e22b22de55944a Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:25:00 -0700 Subject: [PATCH 16/38] Deny clippy::match_wildcard_for_single_variants Replace a `_` match for the single variant it matches. Seems reasonable. --- src/lib.rs | 3 ++- src/tree_store/btree_iters.rs | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 3426bf34..300b3a9b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,7 +20,8 @@ clippy::cast_precision_loss, clippy::cast_sign_loss, clippy::disallowed_methods, - clippy::map_unwrap_or + clippy::map_unwrap_or, + clippy::match_wildcard_for_single_variants )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/tree_store/btree_iters.rs b/src/tree_store/btree_iters.rs index 3675de98..5a15bf97 100644 --- a/src/tree_store/btree_iters.rs +++ b/src/tree_store/btree_iters.rs @@ -138,7 +138,7 @@ impl RangeIterState { .entry_ranges(*entry)?; Some(EntryGuard::new(page.clone(), key, value)) } - _ => None, + Internal { .. 
} => None, } } } From 1b00918df70e0ab80b6aa572177b5224feceae66 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:26:05 -0700 Subject: [PATCH 17/38] Allow clippy::module_name_repetitions Clippy doesn't like repetition of module names inside types. Who cares. Allow it. --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 300b3a9b..9a0517ca 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,7 +6,8 @@ clippy::iter_not_returning_iterator, clippy::let_underscore_drop, clippy::missing_errors_doc, - clippy::missing_panics_doc + clippy::missing_panics_doc, + clippy::module_name_repetitions )] #![deny( clippy::all, From 7b38788012cf67f70bbad3ffbb08b5bbdd91ce4b Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:26:55 -0700 Subject: [PATCH 18/38] Allow clippy::needless_pass_by_value Clippy complains if you pass a type by value when it isn't consumed, so it could be passed by reference. I think this should be evaluated on a case-by-case basis, depending on ergonomics, so allow it. --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 9a0517ca..b71cb371 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,7 +7,8 @@ clippy::let_underscore_drop, clippy::missing_errors_doc, clippy::missing_panics_doc, - clippy::module_name_repetitions + clippy::module_name_repetitions, + clippy::needless_pass_by_value )] #![deny( clippy::all, From 336cfe17456be000c1b1918792d33f704f7fc094 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:28:26 -0700 Subject: [PATCH 19/38] Allow clippy::option_option `Option<Option<T>>` is clearly an insane type, but I dare not touch it, because I have no idea what it's supposed to mean, so allow it.
--- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index b71cb371..0e7032fb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,7 +8,8 @@ clippy::missing_errors_doc, clippy::missing_panics_doc, clippy::module_name_repetitions, - clippy::needless_pass_by_value + clippy::needless_pass_by_value, + clippy::option_option )] #![deny( clippy::all, From 94e04f557a82968b8109565c927d45affe7525f6 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:29:29 -0700 Subject: [PATCH 20/38] Deny clippy::range_plus_one Using `a..=b` is clearer and shorter than `a..(b + 1)`. --- src/lib.rs | 3 ++- src/multimap_table.rs | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 0e7032fb..2df92a8d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -24,7 +24,8 @@ clippy::cast_sign_loss, clippy::disallowed_methods, clippy::map_unwrap_or, - clippy::match_wildcard_for_single_variants + clippy::match_wildcard_for_single_variants, + clippy::range_plus_one )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/multimap_table.rs b/src/multimap_table.rs index 75fa1c5c..a2a42ca7 100644 --- a/src/multimap_table.rs +++ b/src/multimap_table.rs @@ -590,7 +590,7 @@ impl DynamicCollection { fn as_subtree(&self) -> BtreeHeader { assert!(matches!(self.collection_type(), SubtreeV2)); BtreeHeader::from_le_bytes( - self.data[1..(1 + BtreeHeader::serialized_size())] + self.data[1..=BtreeHeader::serialized_size()] .try_into() .unwrap(), ) @@ -726,7 +726,7 @@ impl UntypedDynamicCollection { fn as_subtree(&self) -> BtreeHeader { assert!(matches!(self.collection_type(), SubtreeV2)); BtreeHeader::from_le_bytes( - self.data[1..(1 + BtreeHeader::serialized_size())] + self.data[1..=BtreeHeader::serialized_size()] .try_into() .unwrap(), ) From ada26a65672faa0239a266c395ce22324fd718ce Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 
Nov 2024 15:32:40 -0700 Subject: [PATCH 21/38] Deny clippy::type_repetition_in_bounds In these `where` clauses, `'b` outlives `'a`, so it's enough to declare that `Self: 'b`. --- src/complex_types.rs | 1 - src/lib.rs | 3 ++- src/multimap_table.rs | 1 - src/transaction_tracker.rs | 1 - src/tree_store/page_store/savepoint.rs | 1 - src/tree_store/table_tree.rs | 2 -- src/tree_store/table_tree_base.rs | 1 - src/types.rs | 10 ---------- 8 files changed, 2 insertions(+), 18 deletions(-) diff --git a/src/complex_types.rs b/src/complex_types.rs index 6513339e..fb265abe 100644 --- a/src/complex_types.rs +++ b/src/complex_types.rs @@ -66,7 +66,6 @@ impl Value for Vec { fn as_bytes<'a, 'b: 'a>(value: &'a Vec>) -> Vec where - Self: 'a, Self: 'b, { let mut result = if let Some(width) = T::fixed_width() { diff --git a/src/lib.rs b/src/lib.rs index 2df92a8d..5cc1b5de 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -25,7 +25,8 @@ clippy::disallowed_methods, clippy::map_unwrap_or, clippy::match_wildcard_for_single_variants, - clippy::range_plus_one + clippy::range_plus_one, + clippy::type_repetition_in_bounds )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/multimap_table.rs b/src/multimap_table.rs index a2a42ca7..d70b58e1 100644 --- a/src/multimap_table.rs +++ b/src/multimap_table.rs @@ -562,7 +562,6 @@ impl Value for &DynamicCollection { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a [u8] where - Self: 'a, Self: 'b, { &value.data diff --git a/src/transaction_tracker.rs b/src/transaction_tracker.rs index 421acce1..9eb00b47 100644 --- a/src/transaction_tracker.rs +++ b/src/transaction_tracker.rs @@ -65,7 +65,6 @@ impl Value for SavepointId { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> where - Self: 'a, Self: 'b, { value.0.to_le_bytes() diff --git a/src/tree_store/page_store/savepoint.rs b/src/tree_store/page_store/savepoint.rs index 1690b146..f42b848a 100644 
--- a/src/tree_store/page_store/savepoint.rs +++ b/src/tree_store/page_store/savepoint.rs @@ -276,7 +276,6 @@ impl<'data> Value for SerializedSavepoint<'data> { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> where - Self: 'a, Self: 'b, { value.data() diff --git a/src/tree_store/table_tree.rs b/src/tree_store/table_tree.rs index b13a8610..45ed34a2 100644 --- a/src/tree_store/table_tree.rs +++ b/src/tree_store/table_tree.rs @@ -51,7 +51,6 @@ impl Value for FreedTableKey { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> [u8; 2 * size_of::()] where - Self: 'a, Self: 'b, { let mut result = [0u8; 2 * size_of::()]; @@ -147,7 +146,6 @@ impl Value for FreedPageList<'_> { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'b [u8] where - Self: 'a, Self: 'b, { value.data diff --git a/src/tree_store/table_tree_base.rs b/src/tree_store/table_tree_base.rs index 19bb13c3..0a8de2e0 100644 --- a/src/tree_store/table_tree_base.rs +++ b/src/tree_store/table_tree_base.rs @@ -446,7 +446,6 @@ impl Value for InternalTableDefinition { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Vec where - Self: 'a, Self: 'b, { let mut result = vec![value.get_type().into()]; diff --git a/src/types.rs b/src/types.rs index 22d09f44..d90fe699 100644 --- a/src/types.rs +++ b/src/types.rs @@ -93,7 +93,6 @@ pub trait Value: Debug { /// Serialize the value to a slice fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a> where - Self: 'a, Self: 'b; /// Globally unique identifier for this type @@ -152,7 +151,6 @@ impl Value for () { fn as_bytes<'a, 'b: 'a>(_: &'a Self::SelfType<'b>) -> &'a [u8] where - Self: 'a, Self: 'b, { &[] @@ -194,7 +192,6 @@ impl Value for bool { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a [u8] where - Self: 'a, Self: 'b, { match value { @@ -241,7 +238,6 @@ impl Value for Option { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Vec where - Self: 'a, Self: 'b, { let mut result = 
vec![0]; @@ -299,7 +295,6 @@ impl Value for &[u8] { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a [u8] where - Self: 'a, Self: 'b, { value @@ -337,7 +332,6 @@ impl Value for &[u8; N] { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a [u8; N] where - Self: 'a, Self: 'b, { value @@ -390,7 +384,6 @@ impl Value for [T; N] { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Vec where - Self: 'a, Self: 'b, { if let Some(fixed) = T::fixed_width() { @@ -470,7 +463,6 @@ impl Value for &str { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a str where - Self: 'a, Self: 'b, { value @@ -510,7 +502,6 @@ impl Value for String { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a str where - Self: 'a, Self: 'b, { value.as_str() @@ -546,7 +537,6 @@ impl Value for char { fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> [u8; 3] where - Self: 'a, Self: 'b, { let bytes = u32::from(*value).to_le_bytes(); From 4afb26d85eb732186ef7361edb38dadefc81686d Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:34:57 -0700 Subject: [PATCH 22/38] Deny clippy::uninlined_format_args This is nice, since it reminds you where you can inline things in format strings. --- src/error.rs | 2 +- src/lib.rs | 3 ++- src/table.rs | 2 +- src/tree_store/btree_base.rs | 8 ++++---- src/tree_store/page_store/bitmap.rs | 2 +- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/error.rs b/src/error.rs index b9e6ce43..b533ae1f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -109,7 +109,7 @@ impl TableError { | TableError::TypeDefinitionChanged { .. 
} | TableError::TableDoesNotExist(_) | TableError::TableAlreadyOpen(_, _) => { - StorageError::Corrupted(format!("{}: {}", msg, &self)) + StorageError::Corrupted(format!("{msg}: {self}")) } TableError::Storage(storage) => storage, } diff --git a/src/lib.rs b/src/lib.rs index 5cc1b5de..783c2107 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -26,7 +26,8 @@ clippy::map_unwrap_or, clippy::match_wildcard_for_single_variants, clippy::range_plus_one, - clippy::type_repetition_in_bounds + clippy::type_repetition_in_bounds, + clippy::uninlined_format_args )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/table.rs b/src/table.rs index 8f1e99ac..428df95b 100644 --- a/src/table.rs +++ b/src/table.rs @@ -304,7 +304,7 @@ fn debug_helper( first: Result, AccessGuard)>>, last: Result, AccessGuard)>>, ) -> std::fmt::Result { - write!(f, "Table [ name: \"{}\", ", name)?; + write!(f, "Table [ name: \"{name}\", ")?; if let Ok(len) = len { if len == 0 { write!(f, "No entries")?; diff --git a/src/tree_store/btree_base.rs b/src/tree_store/btree_base.rs index 2540a764..56876a02 100644 --- a/src/tree_store/btree_base.rs +++ b/src/tree_store/btree_base.rs @@ -300,9 +300,9 @@ impl<'a> LeafAccessor<'a> { pub(super) fn print_node(&self, include_value: bool) { let mut i = 0; while let Some(entry) = self.entry(i) { - eprint!(" key_{}={:?}", i, K::from_bytes(entry.key())); + eprint!(" key_{i}={:?}", K::from_bytes(entry.key())); if include_value { - eprint!(" value_{}={:?}", i, V::from_bytes(entry.value())); + eprint!(" value_{i}={:?}", V::from_bytes(entry.value())); } i += 1; } @@ -1129,8 +1129,8 @@ impl<'a: 'b, 'b, T: Page + 'a> BranchAccessor<'a, 'b, T> { for i in 0..(self.count_children() - 1) { if let Some(child) = self.child_page(i + 1) { let key = self.key(i).unwrap(); - eprint!(" key_{}={:?}", i, K::from_bytes(key)); - eprint!(" child_{}={:?}", i + 1, child); + eprint!(" key_{i}={:?}", K::from_bytes(key)); + 
eprint!(" child_{}={child:?}", i + 1); } } eprint!("]"); diff --git a/src/tree_store/page_store/bitmap.rs b/src/tree_store/page_store/bitmap.rs index 2b0de819..058395ab 100644 --- a/src/tree_store/page_store/bitmap.rs +++ b/src/tree_store/page_store/bitmap.rs @@ -403,7 +403,7 @@ impl U64GroupedBitmap { } pub fn clear(&mut self, bit: u32) { - assert!(bit < self.len, "{} must be less than {}", bit, self.len); + assert!(bit < self.len, "{bit} must be less than {}", self.len); let (index, bit_index) = self.data_index_of(bit); self.data[index] &= !Self::select_mask(bit_index); } From b29613c3f67dbc89115f17b21c116c28d92a5f68 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:35:47 -0700 Subject: [PATCH 23/38] Allow clippy::unnecessary_wraps This should probably be denied, but in some cases, a function returns `Result` or `Option` for API compatibility reasons, and I didn't want to wade into that. --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 783c2107..8986c635 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,7 +9,8 @@ clippy::missing_panics_doc, clippy::module_name_repetitions, clippy::needless_pass_by_value, - clippy::option_option + clippy::option_option, + clippy::unnecessary_wraps )] #![deny( clippy::all, From 39f27743400ece2f9e5fd8cc066ff15670a68e74 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:38:08 -0700 Subject: [PATCH 24/38] Deny clippy::semicolon_if_nothing_returned You can omit a semicolon after a `()` expression, but seems fine to deny it for consistent formatting. 
Add an exception for this lunacy: fn from_bytes<'a>(_data: &'a [u8]) -> () where Self: 'a, { () } --- src/complex_types.rs | 4 ++-- src/lib.rs | 3 ++- src/transactions.rs | 8 ++++---- src/tree_store/btree_base.rs | 2 +- src/tree_store/page_store/page_manager.rs | 2 +- src/tree_store/table_tree_base.rs | 4 ++-- src/types.rs | 2 +- 7 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/complex_types.rs b/src/complex_types.rs index fb265abe..258937f1 100644 --- a/src/complex_types.rs +++ b/src/complex_types.rs @@ -7,11 +7,11 @@ fn encode_varint_len(len: usize, output: &mut Vec) { } else if len <= u16::MAX.into() { let u16_len: u16 = len.try_into().unwrap(); output.push(254); - output.extend_from_slice(&u16_len.to_le_bytes()) + output.extend_from_slice(&u16_len.to_le_bytes()); } else { let u32_len: u32 = len.try_into().unwrap(); output.push(255); - output.extend_from_slice(&u32_len.to_le_bytes()) + output.extend_from_slice(&u32_len.to_le_bytes()); } } diff --git a/src/lib.rs b/src/lib.rs index 8986c635..34e426ee 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,7 +28,8 @@ clippy::match_wildcard_for_single_variants, clippy::range_plus_one, clippy::type_repetition_in_bounds, - clippy::uninlined_format_args + clippy::uninlined_format_args, + clippy::semicolon_if_nothing_returned )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/transactions.rs b/src/transactions.rs index 17c97d88..c990d852 100644 --- a/src/transactions.rs +++ b/src/transactions.rs @@ -533,7 +533,7 @@ impl WriteTransaction { println!("Tables"); for p in table_pages { all_allocated.remove(&p); - println!("{p:?}") + println!("{p:?}"); } let system_table_allocators = self @@ -550,7 +550,7 @@ impl WriteTransaction { println!("System tables"); for p in system_table_pages { all_allocated.remove(&p); - println!("{p:?}") + println!("{p:?}"); } println!("Free table"); @@ -558,7 +558,7 @@ impl WriteTransaction { for p in 
freed_iter { let p = p.unwrap(); all_allocated.remove(&p); - println!("{p:?}") + println!("{p:?}"); } } println!("Pending free (i.e. in freed table)"); @@ -574,7 +574,7 @@ impl WriteTransaction { for i in 0..value.len() { let p = value.get(i); all_allocated.remove(&p); - println!("{p:?}") + println!("{p:?}"); } } if !all_allocated.is_empty() { diff --git a/src/tree_store/btree_base.rs b/src/tree_store/btree_base.rs index 56876a02..9d60a08b 100644 --- a/src/tree_store/btree_base.rs +++ b/src/tree_store/btree_base.rs @@ -510,7 +510,7 @@ impl<'a, 'b> LeafBuilder<'a, 'b> { pub(super) fn push(&mut self, key: &'a [u8], value: &'a [u8]) { self.total_key_bytes += key.len(); self.total_value_bytes += value.len(); - self.pairs.push((key, value)) + self.pairs.push((key, value)); } pub(super) fn push_all_except( diff --git a/src/tree_store/page_store/page_manager.rs b/src/tree_store/page_store/page_manager.rs index cfeb8518..1496c861 100644 --- a/src/tree_store/page_store/page_manager.rs +++ b/src/tree_store/page_store/page_manager.rs @@ -281,7 +281,7 @@ impl TransactionalMemory { } pub(crate) fn clear_read_cache(&self) { - self.storage.invalidate_cache_all() + self.storage.invalidate_cache_all(); } pub(crate) fn clear_cache_and_reload(&mut self) -> Result { diff --git a/src/tree_store/table_tree_base.rs b/src/tree_store/table_tree_base.rs index 0a8de2e0..d6d643af 100644 --- a/src/tree_store/table_tree_base.rs +++ b/src/tree_store/table_tree_base.rs @@ -462,14 +462,14 @@ impl Value for InternalTableDefinition { result.extend_from_slice(&u32::try_from(fixed).unwrap().to_le_bytes()); } else { result.push(0); - result.extend_from_slice(&[0; size_of::()]) + result.extend_from_slice(&[0; size_of::()]); } if let Some(fixed) = value.private_get_fixed_value_size() { result.push(1); result.extend_from_slice(&u32::try_from(fixed).unwrap().to_le_bytes()); } else { result.push(0); - result.extend_from_slice(&[0; size_of::()]) + result.extend_from_slice(&[0; size_of::()]); } 
result.extend_from_slice( &u32::try_from(value.private_get_key_alignment()) diff --git a/src/types.rs b/src/types.rs index d90fe699..ab244a9b 100644 --- a/src/types.rs +++ b/src/types.rs @@ -141,7 +141,7 @@ impl Value for () { Some(0) } - #[allow(clippy::unused_unit)] + #[allow(clippy::unused_unit, clippy::semicolon_if_nothing_returned)] fn from_bytes<'a>(_data: &'a [u8]) -> () where Self: 'a, From 9457f4f38e0e4318296abaec6efd290832b588f7 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:39:52 -0700 Subject: [PATCH 25/38] Allow clippy::too_many_lines You're not the boss of me. --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 34e426ee..50b2db2a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,7 +10,8 @@ clippy::module_name_repetitions, clippy::needless_pass_by_value, clippy::option_option, - clippy::unnecessary_wraps + clippy::unnecessary_wraps, + clippy::too_many_lines )] #![deny( clippy::all, From 29cbee07d2a367265ed2b3616985be2cad0e2d3c Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:40:26 -0700 Subject: [PATCH 26/38] Allow clippy::similar_names This is just silly. I'll make similar names if I want to. --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 50b2db2a..c7096053 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,7 +11,8 @@ clippy::needless_pass_by_value, clippy::option_option, clippy::unnecessary_wraps, - clippy::too_many_lines + clippy::too_many_lines, + clippy::similar_names )] #![deny( clippy::all, From 39de8d7bbd51bde6154ed0f944d828d2547d2f1a Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:40:59 -0700 Subject: [PATCH 27/38] Allow clippy::wildcard_imports More nanny-state bullshit. 
--- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index c7096053..8c361246 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -12,7 +12,8 @@ clippy::option_option, clippy::unnecessary_wraps, clippy::too_many_lines, - clippy::similar_names + clippy::similar_names, + clippy::wildcard_imports )] #![deny( clippy::all, From 97c5eceb30f14ff33f69484f3b9f1610761aa855 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:41:55 -0700 Subject: [PATCH 28/38] Allow clippy::unreadable_literal I do actually think that most of these could use some `_` separators, the decimal literals should be in groups of three, and the hex should be maybe every 4 bytes, or something like that, but this is subjective. --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 8c361246..66d293e3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,7 +13,8 @@ clippy::unnecessary_wraps, clippy::too_many_lines, clippy::similar_names, - clippy::wildcard_imports + clippy::wildcard_imports, + clippy::unreadable_literal )] #![deny( clippy::all, From 7ddfcae051e57b8b7060f21d9ae68597b59fd7de Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:43:43 -0700 Subject: [PATCH 29/38] Deny clippy::redundant_else These seem reasonable to remove. These else blocks are after an if block which either returns or breaks the loop, so these get rid of some nesting. 
--- src/db.rs | 4 ++-- src/lib.rs | 3 ++- src/tree_store/page_store/cached_file.rs | 6 +++--- src/tree_store/page_store/page_manager.rs | 9 ++++----- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/db.rs b/src/db.rs index 79d427b4..1ff1ffde 100644 --- a/src/db.rs +++ b/src/db.rs @@ -453,9 +453,9 @@ impl Database { if !progress { break; - } else { - compacted = true; } + + compacted = true; } Ok(compacted) diff --git a/src/lib.rs b/src/lib.rs index 66d293e3..e258ceb2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -33,7 +33,8 @@ clippy::range_plus_one, clippy::type_repetition_in_bounds, clippy::uninlined_format_args, - clippy::semicolon_if_nothing_returned + clippy::semicolon_if_nothing_returned, + clippy::redundant_else )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/tree_store/page_store/cached_file.rs b/src/tree_store/page_store/cached_file.rs index 0999a3b0..8fc13092 100644 --- a/src/tree_store/page_store/cached_file.rs +++ b/src/tree_store/page_store/cached_file.rs @@ -92,10 +92,10 @@ impl LRUWriteCache { if let Some((k, v)) = self.cache.pop_lowest_priority() { if let Some(v_inner) = v { return Some((k, v_inner)); - } else { - // Value is borrowed by take_value(). We can't evict it, so put it back. - self.cache.insert(k, v); } + + // Value is borrowed by take_value(). We can't evict it, so put it back. 
+ self.cache.insert(k, v); } else { break; } diff --git a/src/tree_store/page_store/page_manager.rs b/src/tree_store/page_store/page_manager.rs index 1496c861..f4bcbf79 100644 --- a/src/tree_store/page_store/page_manager.rs +++ b/src/tree_store/page_store/page_manager.rs @@ -892,12 +892,11 @@ impl TransactionalMemory { page, required_order, ))); - } else { - // Mark the region, if it's full - state - .get_region_tracker_mut() - .mark_full(required_order, candidate_region); } + // Mark the region, if it's full + state + .get_region_tracker_mut() + .mark_full(required_order, candidate_region); } } From e807a45886d798ee3547047c3977376a8a88184e Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:45:35 -0700 Subject: [PATCH 30/38] Deny clippy::unused_self It seems clearer to avoid taking `&self` if it isn't used, and these aren't public APIs, so it's backwards compatible. --- src/lib.rs | 3 ++- src/tree_store/page_store/bitmap.rs | 10 +++++----- src/tree_store/page_store/page_manager.rs | 10 ++++------ 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index e258ceb2..34414d2a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -34,7 +34,8 @@ clippy::type_repetition_in_bounds, clippy::uninlined_format_args, clippy::semicolon_if_nothing_returned, - clippy::redundant_else + clippy::redundant_else, + clippy::unused_self )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/tree_store/page_store/bitmap.rs b/src/tree_store/page_store/bitmap.rs index 058395ab..ec13ddc5 100644 --- a/src/tree_store/page_store/bitmap.rs +++ b/src/tree_store/page_store/bitmap.rs @@ -327,7 +327,7 @@ impl U64GroupedBitmap { Self { len, data } } - fn data_index_of(&self, bit: u32) -> (usize, usize) { + fn data_index_of(bit: u32) -> (usize, usize) { ((bit as usize) / 64, (bit as usize) % 64) } @@ -363,7 +363,7 @@ impl U64GroupedBitmap { fn first_unset(&self, start_bit: u32, 
end_bit: u32) -> Option { assert_eq!(end_bit, (start_bit - start_bit % 64) + 64); - let (index, bit) = self.data_index_of(start_bit); + let (index, bit) = Self::data_index_of(start_bit); let mask = (1 << bit) - 1; let group = self.data[index]; let group = group | mask; @@ -386,7 +386,7 @@ impl U64GroupedBitmap { pub fn get(&self, bit: u32) -> bool { assert!(bit < self.len); - let (index, bit_index) = self.data_index_of(bit); + let (index, bit_index) = Self::data_index_of(bit); let group = self.data[index]; group & U64GroupedBitmap::select_mask(bit_index) != 0 } @@ -394,7 +394,7 @@ impl U64GroupedBitmap { // Returns true iff the bit's group is all set pub fn set(&mut self, bit: u32) -> bool { assert!(bit < self.len); - let (index, bit_index) = self.data_index_of(bit); + let (index, bit_index) = Self::data_index_of(bit); let mut group = self.data[index]; group |= Self::select_mask(bit_index); self.data[index] = group; @@ -404,7 +404,7 @@ impl U64GroupedBitmap { pub fn clear(&mut self, bit: u32) { assert!(bit < self.len, "{bit} must be less than {}", self.len); - let (index, bit_index) = self.data_index_of(bit); + let (index, bit_index) = Self::data_index_of(bit); self.data[index] &= !Self::select_mask(bit_index); } } diff --git a/src/tree_store/page_store/page_manager.rs b/src/tree_store/page_store/page_manager.rs index f4bcbf79..0d9943a3 100644 --- a/src/tree_store/page_store/page_manager.rs +++ b/src/tree_store/page_store/page_manager.rs @@ -504,7 +504,7 @@ impl TransactionalMemory { let mut state = self.state.lock().unwrap(); // Trim surplus file space, before finalizing the commit let shrunk = if allow_trim { - self.try_shrink(&mut state)? + Self::try_shrink(&mut state)? } else { false }; @@ -810,13 +810,12 @@ impl TransactionalMemory { let mut state = self.state.lock().unwrap(); let page_number = if let Some(page_number) = - self.allocate_helper_retry(&mut state, required_order, lowest)? + Self::allocate_helper_retry(&mut state, required_order, lowest)? 
{ page_number } else { self.grow(&mut state, required_order)?; - self.allocate_helper_retry(&mut state, required_order, lowest)? - .unwrap() + Self::allocate_helper_retry(&mut state, required_order, lowest)?.unwrap() }; #[cfg(debug_assertions)] @@ -868,7 +867,6 @@ impl TransactionalMemory { } fn allocate_helper_retry( - &self, state: &mut InMemoryState, required_order: u8, lowest: bool, @@ -900,7 +898,7 @@ impl TransactionalMemory { } } - fn try_shrink(&self, state: &mut InMemoryState) -> Result { + fn try_shrink(state: &mut InMemoryState) -> Result { let layout = state.header.layout(); let last_region_index = layout.num_regions() - 1; let last_allocator = state.get_region(last_region_index); From 9165919bca6e9e8b536ac44892f52fc59dd7064e Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:46:38 -0700 Subject: [PATCH 31/38] Allow clippy::must_use_candidate This is a reasonable lint, but it's subjective and would be a larger change, so I was too lazy to do it. --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 34414d2a..9d54756c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,7 +14,8 @@ clippy::too_many_lines, clippy::similar_names, clippy::wildcard_imports, - clippy::unreadable_literal + clippy::unreadable_literal, + clippy::must_use_candidate )] #![deny( clippy::all, From a8f6294a3ef9e913595fc66575f1179548cdeeef Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:48:17 -0700 Subject: [PATCH 32/38] Deny clippy::match_same_arms Match arms with duplicate captures and bodies can be deduplicated. Seems reasonable to me. 
--- src/lib.rs | 3 ++- src/tree_store/btree_iters.rs | 3 +-- src/tree_store/table_tree_base.rs | 37 ++++++++++++++----------------- 3 files changed, 20 insertions(+), 23 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 9d54756c..dbef0b80 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -36,7 +36,8 @@ clippy::uninlined_format_args, clippy::semicolon_if_nothing_returned, clippy::redundant_else, - clippy::unused_self + clippy::unused_self, + clippy::match_same_arms )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/tree_store/btree_iters.rs b/src/tree_store/btree_iters.rs index 5a15bf97..788c8564 100644 --- a/src/tree_store/btree_iters.rs +++ b/src/tree_store/btree_iters.rs @@ -33,8 +33,7 @@ pub enum RangeIterState { impl RangeIterState { fn page_number(&self) -> PageNumber { match self { - Leaf { page, .. } => page.get_page_number(), - Internal { page, .. } => page.get_page_number(), + Leaf { page, .. } | Internal { page, .. } => page.get_page_number(), } } diff --git a/src/tree_store/table_tree_base.rs b/src/tree_store/table_tree_base.rs index d6d643af..b3f3dbd1 100644 --- a/src/tree_store/table_tree_base.rs +++ b/src/tree_store/table_tree_base.rs @@ -106,11 +106,8 @@ impl InternalTableDefinition { table_root, table_length, .. - } => { - *table_root = root; - *table_length = length; } - InternalTableDefinition::Multimap { + | InternalTableDefinition::Multimap { table_root, table_length, .. @@ -261,22 +258,22 @@ impl InternalTableDefinition { fn private_get_root(&self) -> Option { match self { - InternalTableDefinition::Normal { table_root, .. } => *table_root, - InternalTableDefinition::Multimap { table_root, .. } => *table_root, + InternalTableDefinition::Normal { table_root, .. } + | InternalTableDefinition::Multimap { table_root, .. } => *table_root, } } pub(crate) fn get_length(&self) -> u64 { match self { - InternalTableDefinition::Normal { table_length, .. 
} => *table_length, - InternalTableDefinition::Multimap { table_length, .. } => *table_length, + InternalTableDefinition::Normal { table_length, .. } + | InternalTableDefinition::Multimap { table_length, .. } => *table_length, } } fn private_get_fixed_key_size(&self) -> Option { match self { - InternalTableDefinition::Normal { fixed_key_size, .. } => *fixed_key_size, - InternalTableDefinition::Multimap { fixed_key_size, .. } => *fixed_key_size, + InternalTableDefinition::Normal { fixed_key_size, .. } + | InternalTableDefinition::Multimap { fixed_key_size, .. } => *fixed_key_size, } } @@ -284,8 +281,8 @@ impl InternalTableDefinition { match self { InternalTableDefinition::Normal { fixed_value_size, .. - } => *fixed_value_size, - InternalTableDefinition::Multimap { + } + | InternalTableDefinition::Multimap { fixed_value_size, .. } => *fixed_value_size, } @@ -293,8 +290,8 @@ impl InternalTableDefinition { fn private_get_key_alignment(&self) -> usize { match self { - InternalTableDefinition::Normal { key_alignment, .. } => *key_alignment, - InternalTableDefinition::Multimap { key_alignment, .. } => *key_alignment, + InternalTableDefinition::Normal { key_alignment, .. } + | InternalTableDefinition::Multimap { key_alignment, .. } => *key_alignment, } } @@ -302,8 +299,8 @@ impl InternalTableDefinition { match self { InternalTableDefinition::Normal { value_alignment, .. - } => *value_alignment, - InternalTableDefinition::Multimap { + } + | InternalTableDefinition::Multimap { value_alignment, .. } => *value_alignment, } @@ -318,15 +315,15 @@ impl InternalTableDefinition { fn private_key_type(&self) -> TypeName { match self { - InternalTableDefinition::Normal { key_type, .. } => key_type.clone(), - InternalTableDefinition::Multimap { key_type, .. } => key_type.clone(), + InternalTableDefinition::Normal { key_type, .. } + | InternalTableDefinition::Multimap { key_type, .. 
} => key_type.clone(), } } fn private_value_type(&self) -> TypeName { match self { - InternalTableDefinition::Normal { value_type, .. } => value_type.clone(), - InternalTableDefinition::Multimap { value_type, .. } => value_type.clone(), + InternalTableDefinition::Normal { value_type, .. } + | InternalTableDefinition::Multimap { value_type, .. } => value_type.clone(), } } } From be7584792f86ec316dbb253c79c072c82f1bff09 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:50:44 -0700 Subject: [PATCH 33/38] Deny clippy::trivially_copy_pass_by_ref This seems reasonable, small copy values should be passed by value. --- src/lib.rs | 3 ++- src/transaction_tracker.rs | 8 ++++---- src/tree_store/page_store/cached_file.rs | 24 ++++++++++++------------ src/tree_store/page_store/lru_cache.rs | 12 ++++++------ 4 files changed, 24 insertions(+), 23 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index dbef0b80..c6baf54c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -37,7 +37,8 @@ clippy::semicolon_if_nothing_returned, clippy::redundant_else, clippy::unused_self, - clippy::match_same_arms + clippy::match_same_arms, + clippy::trivially_copy_pass_by_ref )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/transaction_tracker.rs b/src/transaction_tracker.rs index 9eb00b47..40285e43 100644 --- a/src/transaction_tracker.rs +++ b/src/transaction_tracker.rs @@ -16,11 +16,11 @@ impl TransactionId { Self(value) } - pub(crate) fn raw_id(&self) -> u64 { + pub(crate) fn raw_id(self) -> u64 { self.0 } - pub(crate) fn next(&self) -> TransactionId { + pub(crate) fn next(self) -> TransactionId { TransactionId(self.0 + 1) } @@ -30,7 +30,7 @@ impl TransactionId { next } - pub(crate) fn parent(&self) -> Option { + pub(crate) fn parent(self) -> Option { if self.0 == 0 { None } else { @@ -43,7 +43,7 @@ impl TransactionId { pub(crate) struct SavepointId(pub u64); impl SavepointId { - pub(crate) fn next(&self) 
-> SavepointId { + pub(crate) fn next(self) -> SavepointId { SavepointId(self.0 + 1) } } diff --git a/src/tree_store/page_store/cached_file.rs b/src/tree_store/page_store/cached_file.rs index 8fc13092..61e03cc9 100644 --- a/src/tree_store/page_store/cached_file.rs +++ b/src/tree_store/page_store/cached_file.rs @@ -29,7 +29,7 @@ impl Drop for WritablePage { self.buffer .lock() .unwrap() - .return_value(&self.offset, self.data.clone()); + .return_value(self.offset, self.data.clone()); } } @@ -63,11 +63,11 @@ impl LRUWriteCache { assert!(self.cache.insert(key, Some(value)).is_none()); } - fn get(&self, key: &u64) -> Option<&Arc<[u8]>> { + fn get(&self, key: u64) -> Option<&Arc<[u8]>> { self.cache.get(key).map(|x| x.as_ref().unwrap()) } - fn remove(&mut self, key: &u64) -> Option> { + fn remove(&mut self, key: u64) -> Option> { if let Some(value) = self.cache.remove(key) { assert!(value.is_some()); return value; @@ -75,11 +75,11 @@ impl LRUWriteCache { None } - fn return_value(&mut self, key: &u64, value: Arc<[u8]>) { + fn return_value(&mut self, key: u64, value: Arc<[u8]>) { assert!(self.cache.get_mut(key).unwrap().replace(value).is_none()); } - fn take_value(&mut self, key: &u64) -> Option> { + fn take_value(&mut self, key: u64) -> Option> { if let Some(value) = self.cache.get_mut(key) { let result = value.take().unwrap(); return Some(result); @@ -296,7 +296,7 @@ impl PagedCachedFile { if !matches!(hint, PageHint::Clean) { let lock = self.write_buffer.lock().unwrap(); - if let Some(cached) = lock.get(&offset) { + if let Some(cached) = lock.get(offset) { #[cfg(feature = "cache_metrics")] self.reads_hits.fetch_add(1, Ordering::Release); debug_assert_eq!(cached.len(), len); @@ -307,7 +307,7 @@ impl PagedCachedFile { let cache_slot: usize = (offset % Self::lock_stripes()).try_into().unwrap(); { let read_lock = self.read_cache[cache_slot].read().unwrap(); - if let Some(cached) = read_lock.get(&offset) { + if let Some(cached) = read_lock.get(offset) { #[cfg(feature = 
"cache_metrics")] self.reads_hits.fetch_add(1, Ordering::Release); debug_assert_eq!(cached.len(), len); @@ -345,7 +345,7 @@ impl PagedCachedFile { // Discard pending writes to the given range pub(super) fn cancel_pending_write(&self, offset: u64, _len: usize) { assert_eq!(0, offset % self.page_size); - if let Some(removed) = self.write_buffer.lock().unwrap().remove(&offset) { + if let Some(removed) = self.write_buffer.lock().unwrap().remove(offset) { self.write_buffer_bytes .fetch_sub(removed.len(), Ordering::Release); } @@ -357,7 +357,7 @@ impl PagedCachedFile { pub(super) fn invalidate_cache(&self, offset: u64, len: usize) { let cache_slot: usize = (offset % Self::lock_stripes()).try_into().unwrap(); let mut lock = self.read_cache[cache_slot].write().unwrap(); - if let Some(removed) = lock.remove(&offset) { + if let Some(removed) = lock.remove(offset) { assert_eq!(len, removed.len()); self.read_cache_bytes .fetch_sub(removed.len(), Ordering::AcqRel); @@ -384,7 +384,7 @@ impl PagedCachedFile { let cache_slot: usize = (offset % Self::lock_stripes()).try_into().unwrap(); let existing = { let mut lock = self.read_cache[cache_slot].write().unwrap(); - if let Some(removed) = lock.remove(&offset) { + if let Some(removed) = lock.remove(offset) { assert_eq!( len, removed.len(), @@ -399,7 +399,7 @@ impl PagedCachedFile { } }; - let data = if let Some(removed) = lock.take_value(&offset) { + let data = if let Some(removed) = lock.take_value(offset) { removed } else { let previous = self.write_buffer_bytes.fetch_add(len, Ordering::AcqRel); @@ -429,7 +429,7 @@ impl PagedCachedFile { self.read_direct(offset, len)?.into() }; lock.insert(offset, result); - lock.take_value(&offset).unwrap() + lock.take_value(offset).unwrap() }; Ok(WritablePage { buffer: self.write_buffer.clone(), diff --git a/src/tree_store/page_store/lru_cache.rs b/src/tree_store/page_store/lru_cache.rs index 5df77e12..d26d9006 100644 --- a/src/tree_store/page_store/lru_cache.rs +++ 
b/src/tree_store/page_store/lru_cache.rs @@ -31,8 +31,8 @@ impl LRUCache { result } - pub(crate) fn remove(&mut self, key: &u64) -> Option { - if let Some((value, _)) = self.cache.remove(key) { + pub(crate) fn remove(&mut self, key: u64) -> Option { + if let Some((value, _)) = self.cache.remove(&key) { if self.lru_queue.len() > 2 * self.cache.len() { // Cycle two elements of the LRU queue to ensure it doesn't grow without bound for _ in 0..2 { @@ -50,8 +50,8 @@ impl LRUCache { } } - pub(crate) fn get(&self, key: &u64) -> Option<&T> { - if let Some((value, second_chance)) = self.cache.get(key) { + pub(crate) fn get(&self, key: u64) -> Option<&T> { + if let Some((value, second_chance)) = self.cache.get(&key) { second_chance.store(true, Ordering::Release); Some(value) } else { @@ -59,8 +59,8 @@ impl LRUCache { } } - pub(crate) fn get_mut(&mut self, key: &u64) -> Option<&mut T> { - if let Some((value, second_chance)) = self.cache.get_mut(key) { + pub(crate) fn get_mut(&mut self, key: u64) -> Option<&mut T> { + if let Some((value, second_chance)) = self.cache.get_mut(&key) { second_chance.store(true, Ordering::Release); Some(value) } else { From b3299f57c81004d006014fc70ba4f9e77adfd3e9 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:52:49 -0700 Subject: [PATCH 34/38] Allow clippy::redundant_closure_for_method_calls I think this is actually a good one, but fixing it was annoying, so allow it. 
--- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index c6baf54c..30dfeb20 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -15,7 +15,8 @@ clippy::similar_names, clippy::wildcard_imports, clippy::unreadable_literal, - clippy::must_use_candidate + clippy::must_use_candidate, + clippy::redundant_closure_for_method_calls )] #![deny( clippy::all, From 185c2626dbabb533b42334cb346747fea6c40724 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 15:55:37 -0700 Subject: [PATCH 35/38] Deny clippy::transmute_ptr_to_ptr Clippy doesn't like mem::transmute for transmuting pointers to pointers. I think the logic is that `mem::transmute` will transmute absolutely any type into any other type, as long as they're the same size. A pointer cast is much more limited, so it seems reasonable to use it instead. --- src/lib.rs | 3 ++- src/multimap_table.rs | 4 ++-- src/tree_store/table_tree.rs | 3 +-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 30dfeb20..2a6804cb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -39,7 +39,8 @@ clippy::redundant_else, clippy::unused_self, clippy::match_same_arms, - clippy::trivially_copy_pass_by_ref + clippy::trivially_copy_pass_by_ref, + clippy::transmute_ptr_to_ptr )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] diff --git a/src/multimap_table.rs b/src/multimap_table.rs index d70b58e1..69886761 100644 --- a/src/multimap_table.rs +++ b/src/multimap_table.rs @@ -574,7 +574,7 @@ impl Value for &DynamicCollection { impl DynamicCollection { fn new(data: &[u8]) -> &Self { - unsafe { mem::transmute(data) } + unsafe { &*(data as *const [u8] as *const DynamicCollection) } } fn collection_type(&self) -> DynamicCollectionType { @@ -700,7 +700,7 @@ impl UntypedDynamicCollection { } fn new(data: &[u8]) -> &Self { - unsafe { mem::transmute(data) } + unsafe { &*(data as *const [u8] as *const 
UntypedDynamicCollection) } } fn make_subtree_data(header: BtreeHeader) -> Vec { diff --git a/src/tree_store/table_tree.rs b/src/tree_store/table_tree.rs index 45ed34a2..ad3990ab 100644 --- a/src/tree_store/table_tree.rs +++ b/src/tree_store/table_tree.rs @@ -14,7 +14,6 @@ use crate::types::{Key, MutInPlaceValue, TypeName, Value}; use crate::{DatabaseStats, Result}; use std::cmp::max; use std::collections::{BTreeMap, HashMap}; -use std::mem; use std::mem::size_of; use std::ops::RangeFull; use std::sync::{Arc, Mutex}; @@ -166,7 +165,7 @@ impl MutInPlaceValue for FreedPageList<'_> { } fn from_bytes_mut(data: &mut [u8]) -> &mut Self::BaseRefType { - unsafe { mem::transmute(data) } + unsafe { &mut *(data as *mut [u8] as *mut FreedPageListMut) } } } From 5c13a9397931597488d868be9388e27df793a251 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Fri, 1 Nov 2024 16:01:42 -0700 Subject: [PATCH 36/38] Sort and clean up allow and deny blocks --- src/lib.rs | 36 ++++++------------------------------ 1 file changed, 6 insertions(+), 30 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 2a6804cb..a5f5dcab 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,5 @@ +#![deny(clippy::all, clippy::pedantic, clippy::disallowed_methods)] #![allow( - clippy::drop_non_drop, clippy::default_trait_access, clippy::if_not_else, clippy::inline_always, @@ -8,39 +8,15 @@ clippy::missing_errors_doc, clippy::missing_panics_doc, clippy::module_name_repetitions, + clippy::must_use_candidate, clippy::needless_pass_by_value, clippy::option_option, - clippy::unnecessary_wraps, - clippy::too_many_lines, + clippy::redundant_closure_for_method_calls, clippy::similar_names, - clippy::wildcard_imports, + clippy::too_many_lines, + clippy::unnecessary_wraps, clippy::unreadable_literal, - clippy::must_use_candidate, - clippy::redundant_closure_for_method_calls -)] -#![deny( - clippy::all, - clippy::doc_markdown, - clippy::explicit_iter_loop, - clippy::cast_lossless, - clippy::checked_conversions, - 
clippy::cloned_instead_of_copied, - clippy::cast_possible_truncation, - clippy::cast_possible_wrap, - clippy::cast_precision_loss, - clippy::cast_sign_loss, - clippy::disallowed_methods, - clippy::map_unwrap_or, - clippy::match_wildcard_for_single_variants, - clippy::range_plus_one, - clippy::type_repetition_in_bounds, - clippy::uninlined_format_args, - clippy::semicolon_if_nothing_returned, - clippy::redundant_else, - clippy::unused_self, - clippy::match_same_arms, - clippy::trivially_copy_pass_by_ref, - clippy::transmute_ptr_to_ptr + clippy::wildcard_imports )] // TODO remove this once wasi no longer requires nightly #![cfg_attr(target_os = "wasi", feature(wasi_ext))] From d2f12e8ac2dc8d9695025dd9a102f6c9c02f05e3 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Sun, 3 Nov 2024 10:24:55 -0800 Subject: [PATCH 37/38] Use pointer::cast --- src/tree_store/page_store/xxh3.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/tree_store/page_store/xxh3.rs b/src/tree_store/page_store/xxh3.rs index a98b54cf..173e3e94 100644 --- a/src/tree_store/page_store/xxh3.rs +++ b/src/tree_store/page_store/xxh3.rs @@ -182,7 +182,7 @@ unsafe fn scramble_accumulators_avx2( let shifted = _mm256_srli_epi64::<47>(a); let b = _mm256_xor_si256(a, shifted); - let s = _mm256_loadu_si256((secret_ptr as *const __m256i).add(i)); + let s = _mm256_loadu_si256((secret_ptr.cast::<__m256i>()).add(i)); let c = _mm256_xor_si256(b, s); let c_high = _mm256_shuffle_epi32::<49>(c); @@ -190,7 +190,7 @@ unsafe fn scramble_accumulators_avx2( let high = _mm256_mul_epu32(c_high, simd_prime); let high = _mm256_slli_epi64::<32>(high); let result = _mm256_add_epi64(low, high); - _mm256_storeu_si256((accumulators_ptr as *mut __m256i).add(i), result); + _mm256_storeu_si256((accumulators_ptr.cast::<__m256i>()).add(i), result); } } @@ -308,9 +308,9 @@ unsafe fn gen_secret_avx2(seed: u64) -> [u8; DEFAULT_SECRET.len()] { let output_ptr = output.as_mut_ptr(); let secret_ptr = 
DEFAULT_SECRET.as_ptr(); for i in 0..6 { - let s = _mm256_loadu_si256((secret_ptr as *const __m256i).add(i)); + let s = _mm256_loadu_si256((secret_ptr.cast::<__m256i>()).add(i)); let x = _mm256_add_epi64(s, simd_seed); - _mm256_storeu_si256((output_ptr as *mut __m256i).add(i), x); + _mm256_storeu_si256((output_ptr.cast::<__m256i>()).add(i), x); } output @@ -361,8 +361,8 @@ unsafe fn accumulate_stripe_avx2(accumulators: &mut [u64; 8], data: &[u8], secre assert!(data.len() >= STRIPE_LENGTH); assert!(secret.len() >= STRIPE_LENGTH); for i in 0..(STRIPE_LENGTH / 32) { - let x = _mm256_loadu_si256((data_ptr as *const __m256i).add(i)); - let s = _mm256_loadu_si256((secret_ptr as *const __m256i).add(i)); + let x = _mm256_loadu_si256((data_ptr.cast::<__m256i>()).add(i)); + let s = _mm256_loadu_si256((secret_ptr.cast::<__m256i>()).add(i)); let z = _mm256_xor_si256(x, s); let z_low = _mm256_shuffle_epi32::<49>(z); @@ -373,7 +373,7 @@ unsafe fn accumulate_stripe_avx2(accumulators: &mut [u64; 8], data: &[u8], secre let result = _mm256_loadu_si256((accumulator_ptr as *const __m256i).add(i)); let result = _mm256_add_epi64(result, shuffled); let result = _mm256_add_epi64(result, product); - _mm256_storeu_si256((accumulator_ptr as *mut __m256i).add(i), result); + _mm256_storeu_si256((accumulator_ptr.cast::<__m256i>()).add(i), result); } } @@ -384,7 +384,7 @@ fn accumulate_stripe_generic(accumulators: &mut [u64; 8], data: &[u8], secret: & let y = x ^ get_u64(&secret[i * 8..], 0); accumulators[i ^ 1] = accumulators[i ^ 1].wrapping_add(x); let z = (y & 0xFFFF_FFFF) * (y >> 32); - accumulators[i] = accumulators[i].wrapping_add(z) + accumulators[i] = accumulators[i].wrapping_add(z); } } From e78e8f2176ac5b47ea7eb1243576d6a18b1cb6d9 Mon Sep 17 00:00:00 2001 From: Casey Rodarmor Date: Sun, 3 Nov 2024 10:40:54 -0800 Subject: [PATCH 38/38] Revert changes to xxh3 and allow lints --- src/tree_store/page_store/mod.rs | 2 +- src/tree_store/page_store/xxh3.rs | 74 +++++++++++++++---------------- 
2 files changed, 36 insertions(+), 40 deletions(-) diff --git a/src/tree_store/page_store/mod.rs b/src/tree_store/page_store/mod.rs index 4d42547c..143c2da7 100644 --- a/src/tree_store/page_store/mod.rs +++ b/src/tree_store/page_store/mod.rs @@ -10,7 +10,7 @@ mod lru_cache; mod page_manager; mod region; mod savepoint; -#[allow(dead_code)] +#[allow(clippy::pedantic, dead_code)] mod xxh3; pub(crate) use base::{Page, PageHint, PageNumber, MAX_PAIR_LENGTH, MAX_VALUE_LENGTH}; diff --git a/src/tree_store/page_store/xxh3.rs b/src/tree_store/page_store/xxh3.rs index 173e3e94..c812babe 100644 --- a/src/tree_store/page_store/xxh3.rs +++ b/src/tree_store/page_store/xxh3.rs @@ -182,7 +182,7 @@ unsafe fn scramble_accumulators_avx2( let shifted = _mm256_srli_epi64::<47>(a); let b = _mm256_xor_si256(a, shifted); - let s = _mm256_loadu_si256((secret_ptr.cast::<__m256i>()).add(i)); + let s = _mm256_loadu_si256((secret_ptr as *const __m256i).add(i)); let c = _mm256_xor_si256(b, s); let c_high = _mm256_shuffle_epi32::<49>(c); @@ -190,7 +190,7 @@ unsafe fn scramble_accumulators_avx2( let high = _mm256_mul_epu32(c_high, simd_prime); let high = _mm256_slli_epi64::<32>(high); let result = _mm256_add_epi64(low, high); - _mm256_storeu_si256((accumulators_ptr.cast::<__m256i>()).add(i), result); + _mm256_storeu_si256((accumulators_ptr as *mut __m256i).add(i), result); } } @@ -308,9 +308,9 @@ unsafe fn gen_secret_avx2(seed: u64) -> [u8; DEFAULT_SECRET.len()] { let output_ptr = output.as_mut_ptr(); let secret_ptr = DEFAULT_SECRET.as_ptr(); for i in 0..6 { - let s = _mm256_loadu_si256((secret_ptr.cast::<__m256i>()).add(i)); + let s = _mm256_loadu_si256((secret_ptr as *const __m256i).add(i)); let x = _mm256_add_epi64(s, simd_seed); - _mm256_storeu_si256((output_ptr.cast::<__m256i>()).add(i), x); + _mm256_storeu_si256((output_ptr as *mut __m256i).add(i), x); } output @@ -361,8 +361,8 @@ unsafe fn accumulate_stripe_avx2(accumulators: &mut [u64; 8], data: &[u8], secre assert!(data.len() >= 
STRIPE_LENGTH); assert!(secret.len() >= STRIPE_LENGTH); for i in 0..(STRIPE_LENGTH / 32) { - let x = _mm256_loadu_si256((data_ptr.cast::<__m256i>()).add(i)); - let s = _mm256_loadu_si256((secret_ptr.cast::<__m256i>()).add(i)); + let x = _mm256_loadu_si256((data_ptr as *const __m256i).add(i)); + let s = _mm256_loadu_si256((secret_ptr as *const __m256i).add(i)); let z = _mm256_xor_si256(x, s); let z_low = _mm256_shuffle_epi32::<49>(z); @@ -373,7 +373,7 @@ unsafe fn accumulate_stripe_avx2(accumulators: &mut [u64; 8], data: &[u8], secre let result = _mm256_loadu_si256((accumulator_ptr as *const __m256i).add(i)); let result = _mm256_add_epi64(result, shuffled); let result = _mm256_add_epi64(result, product); - _mm256_storeu_si256((accumulator_ptr.cast::<__m256i>()).add(i), result); + _mm256_storeu_si256((accumulator_ptr as *mut __m256i).add(i), result); } } @@ -384,7 +384,7 @@ fn accumulate_stripe_generic(accumulators: &mut [u64; 8], data: &[u8], secret: & let y = x ^ get_u64(&secret[i * 8..], 0); accumulators[i ^ 1] = accumulators[i ^ 1].wrapping_add(x); let z = (y & 0xFFFF_FFFF) * (y >> 32); - accumulators[i] = accumulators[i].wrapping_add(z); + accumulators[i] = accumulators[i].wrapping_add(z) } } @@ -462,14 +462,14 @@ fn hash64_0(secret: &[u8], seed: u64) -> u64 { } fn hash64_1to3(data: &[u8], secret: &[u8], seed: u64) -> u64 { - let x1 = u32::from(data[0]); - let x2 = u32::from(data[data.len() >> 1]); - let x3 = u32::from(*data.last().unwrap()); + let x1 = data[0] as u32; + let x2 = data[data.len() >> 1] as u32; + let x3 = (*data.last().unwrap()) as u32; #[allow(clippy::cast_possible_truncation)] let x4 = data.len() as u32; - let combined = u64::from((x1 << 16) | (x2 << 24) | x3 | (x4 << 8)); - let mut result = u64::from(get_u32(secret, 0) ^ get_u32(secret, 1)); + let combined = ((x1 << 16) | (x2 << 24) | x3 | (x4 << 8)) as u64; + let mut result = (get_u32(secret, 0) ^ get_u32(secret, 1)) as u64; result = result.wrapping_add(seed); result ^= combined; 
xxh64_avalanche(result) @@ -479,8 +479,8 @@ fn hash64_4to8(data: &[u8], secret: &[u8], mut seed: u64) -> u64 { #[allow(clippy::cast_possible_truncation)] let truncate_seed = seed as u32; seed ^= u64::from(truncate_seed.swap_bytes()) << 32; - let x1 = u64::from(get_u32(data, 0)); - let x2 = u64::from(get_u32(&data[data.len() - 4..], 0)); + let x1 = get_u32(data, 0) as u64; + let x2 = get_u32(&data[data.len() - 4..], 0) as u64; let x = x2 | (x1 << 32); let s = (get_u64(secret, 1) ^ get_u64(secret, 2)).wrapping_sub(seed); rrmxmx(x ^ s, data.len() as u64) @@ -595,24 +595,24 @@ fn hash64_large_generic( } fn hash128_0(secret: &[u8], seed: u64) -> u128 { - let high = u128::from(hash64_0(&secret[3 * 8..], seed)) << 64; - let low = u128::from(hash64_0(&secret[8..], seed)); + let high = (hash64_0(&secret[3 * 8..], seed) as u128) << 64; + let low = hash64_0(&secret[8..], seed) as u128; high | low } fn hash128_1to3(data: &[u8], secret: &[u8], seed: u64) -> u128 { - let x1 = u32::from(data[0]); - let x2 = u32::from(data[data.len() >> 1]); - let x3 = u32::from(*data.last().unwrap()); + let x1 = data[0] as u32; + let x2 = data[data.len() >> 1] as u32; + let x3 = (*data.last().unwrap()) as u32; #[allow(clippy::cast_possible_truncation)] let x4 = data.len() as u32; let combined_low = (x1 << 16) | (x2 << 24) | x3 | (x4 << 8); let combined_high: u64 = combined_low.swap_bytes().rotate_left(13).into(); - let s_low = u64::from(get_u32(secret, 0) ^ get_u32(secret, 1)).wrapping_add(seed); - let s_high = u64::from(get_u32(secret, 2) ^ get_u32(secret, 3)).wrapping_sub(seed); - let high = (u128::from(xxh64_avalanche(combined_high ^ s_high))) << 64; - let low = u128::from(xxh64_avalanche(u64::from(combined_low) ^ s_low)); + let s_low = ((get_u32(secret, 0) ^ get_u32(secret, 1)) as u64).wrapping_add(seed); + let s_high = ((get_u32(secret, 2) ^ get_u32(secret, 3)) as u64).wrapping_sub(seed); + let high = (xxh64_avalanche(combined_high ^ s_high) as u128) << 64; + let low = 
xxh64_avalanche(combined_low as u64 ^ s_low) as u128; high | low } @@ -620,17 +620,13 @@ fn hash128_4to8(data: &[u8], secret: &[u8], mut seed: u64) -> u128 { #[allow(clippy::cast_possible_truncation)] let truncate_seed = seed as u32; seed ^= u64::from(truncate_seed.swap_bytes()) << 32; - let x_low = u64::from(get_u32(data, 0)); - let x_high = u64::from(u32::from_le_bytes( - data[data.len() - 4..].try_into().unwrap(), - )); + let x_low = get_u32(data, 0) as u64; + let x_high = u32::from_le_bytes(data[data.len() - 4..].try_into().unwrap()) as u64; let x = x_low | (x_high << 32); let s = (get_u64(secret, 2) ^ get_u64(secret, 3)).wrapping_add(seed); - let mut y = u128::from(x ^ s); - y = y.wrapping_mul(u128::from( - PRIME64[0].wrapping_add((data.len() << 2) as u64), - )); + let mut y = (x ^ s) as u128; + y = y.wrapping_mul(PRIME64[0].wrapping_add((data.len() << 2) as u64) as u128); #[allow(clippy::cast_possible_truncation)] let mut r_low = y as u64; @@ -642,7 +638,7 @@ fn hash128_4to8(data: &[u8], secret: &[u8], mut seed: u64) -> u128 { r_low = xorshift(r_low, 28); r_high = xxh3_avalanche(r_high); - u128::from(r_high) << 64 | u128::from(r_low) + (r_high as u128) << 64 | r_low as u128 } fn hash128_9to16(data: &[u8], secret: &[u8], seed: u64) -> u128 { @@ -653,7 +649,7 @@ fn hash128_9to16(data: &[u8], secret: &[u8], seed: u64) -> u128 { let mixed = x_low ^ x_high ^ s_low; let x_high = x_high ^ s_high; - let result = u128::from(mixed).wrapping_mul(u128::from(PRIME64[0])); + let result = (mixed as u128).wrapping_mul(PRIME64[0] as u128); #[allow(clippy::cast_possible_truncation)] let mut r_low = result as u64; let mut r_high = (result >> 64) as u64; @@ -662,7 +658,7 @@ fn hash128_9to16(data: &[u8], secret: &[u8], seed: u64) -> u128 { r_high = r_high.wrapping_add((x_high & 0xFFFF_FFFF).wrapping_mul(PRIME32[1] - 1)); r_low ^= r_high.swap_bytes(); - let result2 = u128::from(r_low).wrapping_mul(u128::from(PRIME64[1])); + let result2 = (r_low as u128).wrapping_mul(PRIME64[1] as 
u128); #[allow(clippy::cast_possible_truncation)] let mut r2_low = result2 as u64; let mut r2_high = (result2 >> 64) as u64; @@ -670,7 +666,7 @@ fn hash128_9to16(data: &[u8], secret: &[u8], seed: u64) -> u128 { r2_low = xxh3_avalanche(r2_low); r2_high = xxh3_avalanche(r2_high); - u128::from(r2_high) << 64 | u128::from(r2_low) + (r2_high as u128) << 64 | r2_low as u128 } fn hash128_0to16(data: &[u8], secret: &[u8], seed: u64) -> u128 { @@ -706,7 +702,7 @@ fn hash128_17to128(data: &[u8], secret: &[u8], seed: u64) -> u128 { r_low = xxh3_avalanche(r_low); r_high = 0u64.wrapping_sub(xxh3_avalanche(r_high)); - u128::from(r_high) << 64 | u128::from(r_low) + (r_high as u128) << 64 | r_low as u128 } fn hash128_129to240(data: &[u8], secret: &[u8], seed: u64) -> u128 { @@ -750,7 +746,7 @@ fn hash128_129to240(data: &[u8], secret: &[u8], seed: u64) -> u128 { r_low = xxh3_avalanche(r_low); r_high = 0u64.wrapping_sub(xxh3_avalanche(r_high)); - u128::from(r_high) << 64 | u128::from(r_low) + (r_high as u128) << 64 | r_low as u128 } fn hash128_0to240(data: &[u8], secret: &[u8], seed: u64) -> u128 { @@ -808,7 +804,7 @@ fn hash128_large_generic( !(PRIME64[1].wrapping_mul(data.len() as u64)), ); - u128::from(high) << 64 | u128::from(low) + (high as u128) << 64 | low as u128 } #[cfg(test)]