diff --git a/.gitignore b/.gitignore
index ddb7228e..94e46036 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,3 +20,6 @@ Cargo.lock
 # Profiling
 perf.data*
 flamegraph.svg
+
+# benchmark and test temporary files
+/.tmp*
diff --git a/src/complex_types.rs b/src/complex_types.rs
index 47521b3d..258937f1 100644
--- a/src/complex_types.rs
+++ b/src/complex_types.rs
@@ -7,12 +7,11 @@ fn encode_varint_len(len: usize, output: &mut Vec<u8>) {
     } else if len <= u16::MAX.into() {
         let u16_len: u16 = len.try_into().unwrap();
         output.push(254);
-        output.extend_from_slice(&u16_len.to_le_bytes())
+        output.extend_from_slice(&u16_len.to_le_bytes());
     } else {
-        assert!(len <= u32::MAX as usize);
         let u32_len: u32 = len.try_into().unwrap();
         output.push(255);
-        output.extend_from_slice(&u32_len.to_le_bytes())
+        output.extend_from_slice(&u32_len.to_le_bytes());
     }
 }
@@ -67,7 +66,6 @@ impl<T: Value> Value for Vec<T> {
     fn as_bytes<'a, 'b: 'a>(value: &'a Vec<T::SelfType<'b>>) -> Vec<u8>
     where
-        Self: 'a,
         Self: 'b,
     {
         let mut result = if let Some(width) = T::fixed_width() {
diff --git a/src/db.rs b/src/db.rs
index face0bab..1ff1ffde 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -453,9 +453,9 @@ impl Database {
             if !progress {
                 break;
-            } else {
-                compacted = true;
             }
+
+            compacted = true;
         }

         Ok(compacted)
@@ -804,7 +804,7 @@ impl RepairSession {
         self.aborted
     }

-    /// Abort the repair process. The coorresponding call to [Builder::open] or [Builder::create] will return an error
+    /// Abort the repair process. The corresponding call to [`Builder::open`] or [`Builder::create`] will return an error
     pub fn abort(&mut self) {
         self.aborted = true;
     }
@@ -852,7 +852,7 @@ impl Builder {
     /// Set a callback which will be invoked periodically in the event that the database file needs
     /// to be repaired.
     ///
-    /// The [RepairSession] argument can be used to control the repair process.
+    /// The [`RepairSession`] argument can be used to control the repair process.
     ///
     /// If the database file needs repair, the callback will be invoked at least once.
     /// There is no upper limit on the number of times it may be called.
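The `RepairSession` hunks above are the hook for controlling recovery. A minimal sketch of how a caller might use it — the 60-second budget and path are hypothetical, `RepairSession::abort` and `Builder::create`/`Builder::open` are the APIs documented in the diff, and `Builder::new`/`set_repair_callback` are assumed from redb's public builder API:

```rust
use std::time::{Duration, Instant};

// Sketch: abort a repair that runs past a deadline. Once abort() is called,
// the corresponding Builder::create (or Builder::open) returns an error.
fn open_with_repair_budget(path: &str) -> Result<redb::Database, redb::DatabaseError> {
    let start = Instant::now();
    let mut builder = redb::Builder::new();
    builder.set_repair_callback(move |session| {
        if start.elapsed() > Duration::from_secs(60) {
            session.abort();
        }
    });
    builder.create(path)
}
```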
diff --git a/src/error.rs b/src/error.rs
index b9e6ce43..b533ae1f 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -109,7 +109,7 @@ impl TableError {
             | TableError::TypeDefinitionChanged { .. }
             | TableError::TableDoesNotExist(_)
             | TableError::TableAlreadyOpen(_, _) => {
-                StorageError::Corrupted(format!("{}: {}", msg, &self))
+                StorageError::Corrupted(format!("{msg}: {self}"))
             }
             TableError::Storage(storage) => storage,
         }
diff --git a/src/lib.rs b/src/lib.rs
index ebe148fe..a5f5dcab 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,10 +1,22 @@
-#![allow(clippy::drop_non_drop)]
-#![deny(
-    clippy::cast_possible_truncation,
-    clippy::cast_possible_wrap,
-    clippy::cast_precision_loss,
-    clippy::cast_sign_loss,
-    clippy::disallowed_methods
+#![deny(clippy::all, clippy::pedantic, clippy::disallowed_methods)]
+#![allow(
+    clippy::default_trait_access,
+    clippy::if_not_else,
+    clippy::inline_always,
+    clippy::iter_not_returning_iterator,
+    clippy::let_underscore_drop,
+    clippy::missing_errors_doc,
+    clippy::missing_panics_doc,
+    clippy::module_name_repetitions,
+    clippy::must_use_candidate,
+    clippy::needless_pass_by_value,
+    clippy::option_option,
+    clippy::redundant_closure_for_method_calls,
+    clippy::similar_names,
+    clippy::too_many_lines,
+    clippy::unnecessary_wraps,
+    clippy::unreadable_literal,
+    clippy::wildcard_imports
 )]
 // TODO remove this once wasi no longer requires nightly
 #![cfg_attr(target_os = "wasi", feature(wasi_ext))]
diff --git a/src/multimap_table.rs b/src/multimap_table.rs
index 83bdd6cc..69886761 100644
--- a/src/multimap_table.rs
+++ b/src/multimap_table.rs
@@ -313,7 +313,6 @@ pub(crate) fn finalize_tree_and_subtree_checksums(
                 }
             }
         }
-        drop(accessor);
         // TODO: maybe there's a better abstraction, so that we don't need to call into this low-level method?
         let mut mutator = LeafMutator::new(
             &mut leaf_page,
@@ -525,7 +524,7 @@ impl Into<u8> for DynamicCollectionType {
 /// (when type = 2) root (8 bytes): sub tree root page number
 /// (when type = 2) checksum (16 bytes): sub tree checksum
 ///
-/// NOTE: Even though the [PhantomData] is zero-sized, the inner data DST must be placed last.
+/// NOTE: Even though the [`PhantomData`] is zero-sized, the inner data DST must be placed last.
 /// See [Exotically Sized Types](https://doc.rust-lang.org/nomicon/exotic-sizes.html#dynamically-sized-types-dsts)
 /// section of the Rustonomicon for more details.
 #[repr(transparent)]
@@ -563,7 +562,6 @@ impl<V: Key> Value for &DynamicCollection<V> {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a [u8]
     where
-        Self: 'a,
         Self: 'b,
     {
         &value.data
@@ -576,7 +574,7 @@ impl<V: Key> DynamicCollection<V> {
     fn new(data: &[u8]) -> &Self {
-        unsafe { mem::transmute(data) }
+        unsafe { &*(data as *const [u8] as *const DynamicCollection<V>) }
     }

     fn collection_type(&self) -> DynamicCollectionType {
@@ -591,7 +589,7 @@ impl<V: Key> DynamicCollection<V> {
     fn as_subtree(&self) -> BtreeHeader {
         assert!(matches!(self.collection_type(), SubtreeV2));
         BtreeHeader::from_le_bytes(
-            self.data[1..(1 + BtreeHeader::serialized_size())]
+            self.data[1..=BtreeHeader::serialized_size()]
                 .try_into()
                 .unwrap(),
         )
@@ -702,7 +700,7 @@ impl UntypedDynamicCollection {
     }

     fn new(data: &[u8]) -> &Self {
-        unsafe { mem::transmute(data) }
+        unsafe { &*(data as *const [u8] as *const UntypedDynamicCollection) }
     }

     fn make_subtree_data(header: BtreeHeader) -> Vec<u8> {
@@ -727,7 +725,7 @@ impl UntypedDynamicCollection {
     fn as_subtree(&self) -> BtreeHeader {
         assert!(matches!(self.collection_type(), SubtreeV2));
         BtreeHeader::from_le_bytes(
-            self.data[1..(1 + BtreeHeader::serialized_size())]
+            self.data[1..=BtreeHeader::serialized_size()]
                 .try_into()
                 .unwrap(),
         )
@@ -851,7 +849,7 @@ impl<'a, V: Key + 'static> Drop for MultimapValue<'a, V> {
         drop(mem::take(&mut self.inner));
         if !self.free_on_drop.is_empty() {
             let mut freed_pages = self.freed_pages.as_ref().unwrap().lock().unwrap();
-            for page in self.free_on_drop.iter() {
+            for page in &self.free_on_drop {
                 if !self.mem.as_ref().unwrap().free_if_uncommitted(*page) {
                     freed_pages.push(*page);
                 }
diff --git a/src/table.rs b/src/table.rs
index 346e7b0b..428df95b 100644
--- a/src/table.rs
+++ b/src/table.rs
@@ -223,7 +223,7 @@ impl<'txn, K: Key + 'static, V: MutInPlaceValue + 'static> Table<'txn, K, V> {
     ///
     /// If key is already present it is replaced
     ///
-    /// The returned reference will have length equal to value_length
+    /// The returned reference will have length equal to `value_length`
     pub fn insert_reserve<'a>(
         &mut self,
         key: impl Borrow<K::SelfType<'a>>,
@@ -304,7 +304,7 @@ fn debug_helper<K: Key, V: Value>(
     first: Result<Option<(AccessGuard<K>, AccessGuard<V>)>>,
     last: Result<Option<(AccessGuard<K>, AccessGuard<V>)>>,
 ) -> std::fmt::Result {
-    write!(f, "Table [ name: \"{}\", ", name)?;
+    write!(f, "Table [ name: \"{name}\", ")?;
     if let Ok(len) = len {
         if len == 0 {
             write!(f, "No entries")?;
diff --git a/src/transaction_tracker.rs b/src/transaction_tracker.rs
index 3b4d3b10..40285e43 100644
--- a/src/transaction_tracker.rs
+++ b/src/transaction_tracker.rs
@@ -16,11 +16,11 @@ impl TransactionId {
         Self(value)
     }

-    pub(crate) fn raw_id(&self) -> u64 {
+    pub(crate) fn raw_id(self) -> u64 {
         self.0
     }

-    pub(crate) fn next(&self) -> TransactionId {
+    pub(crate) fn next(self) -> TransactionId {
         TransactionId(self.0 + 1)
     }

@@ -30,7 +30,7 @@ impl TransactionId {
         next
     }

-    pub(crate) fn parent(&self) -> Option<TransactionId> {
+    pub(crate) fn parent(self) -> Option<TransactionId> {
         if self.0 == 0 {
             None
         } else {
@@ -43,7 +43,7 @@ impl TransactionId {
 pub(crate) struct SavepointId(pub u64);

 impl SavepointId {
-    pub(crate) fn next(&self) -> SavepointId {
+    pub(crate) fn next(self) -> SavepointId {
         SavepointId(self.0 + 1)
     }
 }
@@ -65,7 +65,6 @@ impl Value for SavepointId {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a>
     where
-        Self: 'a,
         Self: 'b,
     {
         value.0.to_le_bytes()
@@ -243,6 +242,6 @@ impl TransactionTracker {
             .live_read_transactions
             .keys()
             .next()
-            .cloned()
+            .copied()
     }
 }
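The `TransactionId`/`SavepointId` hunks above change methods on `Copy` newtypes from `&self` to by-value `self`, the style clippy::pedantic's `trivially_copy_pass_by_ref` lint pushes toward. A self-contained sketch of the pattern, using an illustrative `Id` type rather than redb's:

```rust
// For a Copy type no bigger than a machine word, taking `self` by value is
// at least as cheap as `&self`, and the caller keeps full use of the value.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Id(u64);

impl Id {
    fn raw_id(self) -> u64 {
        self.0
    }

    fn next(self) -> Id {
        Id(self.0 + 1)
    }
}

fn main() {
    let a = Id(7);
    let b = a.next(); // `a` remains usable: it was copied, not moved
    assert_eq!((a.raw_id(), b.raw_id()), (7, 8));
}
```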
diff --git a/src/transactions.rs b/src/transactions.rs
index 6f59dada..c990d852 100644
--- a/src/transactions.rs
+++ b/src/transactions.rs
@@ -533,7 +533,7 @@ impl WriteTransaction {
         println!("Tables");
         for p in table_pages {
             all_allocated.remove(&p);
-            println!("{p:?}")
+            println!("{p:?}");
         }

         let system_table_allocators = self
@@ -550,7 +550,7 @@ impl WriteTransaction {
         println!("System tables");
         for p in system_table_pages {
             all_allocated.remove(&p);
-            println!("{p:?}")
+            println!("{p:?}");
         }

         println!("Free table");
@@ -558,7 +558,7 @@ impl WriteTransaction {
             for p in freed_iter {
                 let p = p.unwrap();
                 all_allocated.remove(&p);
-                println!("{p:?}")
+                println!("{p:?}");
             }
         }
         println!("Pending free (i.e. in freed table)");
@@ -574,7 +574,7 @@ impl WriteTransaction {
             for i in 0..value.len() {
                 let p = value.get(i);
                 all_allocated.remove(&p);
-                println!("{p:?}")
+                println!("{p:?}");
             }
         }
         if !all_allocated.is_empty() {
@@ -1065,8 +1065,7 @@ impl WriteTransaction {
         let free_until_transaction = self
             .transaction_tracker
             .oldest_live_read_transaction()
-            .map(|x| x.next())
-            .unwrap_or(self.transaction_id);
+            .map_or(self.transaction_id, |x| x.next());

         let user_root = self
             .tables
diff --git a/src/tree_store/btree.rs b/src/tree_store/btree.rs
index 7182afae..991bfad6 100644
--- a/src/tree_store/btree.rs
+++ b/src/tree_store/btree.rs
@@ -192,13 +192,11 @@ impl UntypedBtreeMut {
                 }
             }

-            drop(accessor);
             let mut mutator = BranchMutator::new(&mut page);
             for (child_index, child_page, child_checksum) in new_children.into_iter().flatten() {
                 mutator.write_child_page(child_index, child_page, child_checksum);
             }
-            drop(mutator);

             branch_checksum(&page, self.key_width)
         }
@@ -553,7 +551,7 @@ impl<K: Key, V: Value> BtreeMut<'_, K, V> {

 impl<'a, K: Key + 'a, V: MutInPlaceValue + 'a> BtreeMut<'a, K, V> {
     /// Reserve space to insert a key-value pair
-    /// The returned reference will have length equal to value_length
+    /// The returned reference will have length equal to `value_length`
     // Return type has the same lifetime as &self, because the tree must not be modified until the mutable guard is dropped
     pub(crate) fn insert_reserve(
         &mut self,
@@ -613,7 +611,7 @@ impl RawBtree {
     }

     pub(crate) fn len(&self) -> Result<u64> {
-        Ok(self.root.map(|x| x.length).unwrap_or(0))
+        Ok(self.root.map_or(0, |x| x.length))
     }

     pub(crate) fn verify_checksum(&self) -> Result<bool> {
@@ -811,7 +809,7 @@ impl<K: Key, V: Value> Btree<K, V> {
     }

     pub(crate) fn len(&self) -> Result<u64> {
-        Ok(self.root.map(|x| x.length).unwrap_or(0))
+        Ok(self.root.map_or(0, |x| x.length))
     }

     pub(crate) fn stats(&self) -> Result<BtreeStats> {
diff --git a/src/tree_store/btree_base.rs b/src/tree_store/btree_base.rs
index 0bd73d94..9d60a08b 100644
--- a/src/tree_store/btree_base.rs
+++ b/src/tree_store/btree_base.rs
@@ -300,9 +300,9 @@ impl<'a> LeafAccessor<'a> {
     pub(super) fn print_node<K: Key, V: Value>(&self, include_value: bool) {
         let mut i = 0;
         while let Some(entry) = self.entry(i) {
-            eprint!(" key_{}={:?}", i, K::from_bytes(entry.key()));
+            eprint!(" key_{i}={:?}", K::from_bytes(entry.key()));
             if include_value {
-                eprint!(" value_{}={:?}", i, V::from_bytes(entry.value()));
+                eprint!(" value_{i}={:?}", V::from_bytes(entry.value()));
             }
             i += 1;
         }
@@ -510,7 +510,7 @@ impl<'a, 'b> LeafBuilder<'a, 'b> {
     pub(super) fn push(&mut self, key: &'a [u8], value: &'a [u8]) {
         self.total_key_bytes += key.len();
         self.total_value_bytes += value.len();
-        self.pairs.push((key, value))
+        self.pairs.push((key, value));
     }

     pub(super) fn push_all_except(
@@ -871,7 +871,6 @@ impl<'b> LeafMutator<'b> {
             .value_range(i)
             .map(|(start, end)| end - start)
             .unwrap_or_default();
-        drop(accessor);

         let value_delta = if overwrite {
             isize::try_from(value.len()).unwrap() - isize::try_from(existing_value_len).unwrap()
@@ -989,7 +988,6 @@ impl<'b> LeafMutator<'b> {
         let value_start = accessor.value_start(i).unwrap();
         let value_end = accessor.value_end(i).unwrap();
         let last_value_end = accessor.value_end(accessor.num_pairs() - 1).unwrap();
-        drop(accessor);

         // Update all the pointers
         let key_ptr_size = if self.fixed_key_size.is_none() {
@@ -1086,7 +1084,6 @@ impl<'b> LeafMutator<'b> {
             self.fixed_value_size,
         );
         let num_pairs = accessor.num_pairs();
-        drop(accessor);
         let mut offset = 4 + size_of::<u32>() * i;
         if self.fixed_key_size.is_none() {
             offset += size_of::<u32>() * num_pairs;
@@ -1132,8 +1129,8 @@ impl<'a: 'b, 'b, T: Page + 'a> BranchAccessor<'a, 'b, T> {
         for i in 0..(self.count_children() - 1) {
             if let Some(child) = self.child_page(i + 1) {
                 let key = self.key(i).unwrap();
-                eprint!(" key_{}={:?}", i, K::from_bytes(key));
-                eprint!(" child_{}={:?}", i + 1, child);
+                eprint!(" key_{i}={:?}", K::from_bytes(key));
+                eprint!(" child_{}={child:?}", i + 1);
             }
         }
         eprint!("]");
diff --git a/src/tree_store/btree_iters.rs b/src/tree_store/btree_iters.rs
index 3675de98..788c8564 100644
--- a/src/tree_store/btree_iters.rs
+++ b/src/tree_store/btree_iters.rs
@@ -33,8 +33,7 @@ pub enum RangeIterState {
 impl RangeIterState {
     fn page_number(&self) -> PageNumber {
         match self {
-            Leaf { page, .. } => page.get_page_number(),
-            Internal { page, .. } => page.get_page_number(),
+            Leaf { page, .. } | Internal { page, .. } => page.get_page_number(),
         }
     }
@@ -138,7 +137,7 @@ impl RangeIterState {
                     .entry_ranges(*entry)?;
                 Some(EntryGuard::new(page.clone(), key, value))
             }
-            _ => None,
+            Internal { .. } => None,
         }
     }
 }
diff --git a/src/tree_store/btree_mutator.rs b/src/tree_store/btree_mutator.rs
index 4cf14ace..7c50335c 100644
--- a/src/tree_store/btree_mutator.rs
+++ b/src/tree_store/btree_mutator.rs
@@ -226,7 +226,6 @@ impl<'a, 'b, K: Key, V: Value> MutateHelper<'a, 'b, K, V> {
                 let new_page_accessor =
                     LeafAccessor::new(new_page.memory(), K::fixed_width(), V::fixed_width());
                 let offset = new_page_accessor.offset_of_first_value();
-                drop(new_page_accessor);
                 let guard = AccessGuardMut::new(new_page, offset, value.len());
                 return if position == 0 {
                     Ok(InsertionResult {
@@ -280,7 +279,6 @@ impl<'a, 'b, K: Key, V: Value> MutateHelper<'a, 'b, K, V> {
                     let new_page_accessor =
                         LeafAccessor::new(page_mut.memory(), K::fixed_width(), V::fixed_width());
                     let offset = new_page_accessor.offset_of_value(position).unwrap();
-                    drop(new_page_accessor);
                     let guard = AccessGuardMut::new(page_mut, offset, value.len());
                     return Ok(InsertionResult {
                         new_root: page_number,
@@ -567,7 +565,6 @@ impl<'a, 'b, K: Key, V: Value> MutateHelper<'a, 'b, K, V> {
                 Subtree(new_page.get_page_number(), DEFERRED)
             };
             let (start, end) = accessor.value_range(position).unwrap();
-            drop(accessor);
             let guard = if uncommitted && self.modify_uncommitted {
                 let page_number = page.get_page_number();
                 let arc = page.to_arc();
diff --git a/src/tree_store/page_store/base.rs b/src/tree_store/page_store/base.rs
index 6f6cda69..eb52d06c 100644
--- a/src/tree_store/page_store/base.rs
+++ b/src/tree_store/page_store/base.rs
@@ -35,8 +35,8 @@ impl Ord for PageNumber {
         match self.region.cmp(&other.region) {
             Ordering::Less => Ordering::Less,
             Ordering::Equal => {
-                let self_order0 = self.page_index * 2u32.pow(self.page_order as u32);
-                let other_order0 = other.page_index * 2u32.pow(other.page_order as u32);
+                let self_order0 = self.page_index * 2u32.pow(self.page_order.into());
+                let other_order0 = other.page_index * 2u32.pow(other.page_order.into());
                 assert!(
                     self_order0 != other_order0 || self.page_order == other.page_order,
                     "{self:?} overlaps {other:?}, but is not equal"
                 );
@@ -72,9 +72,9 @@ impl PageNumber {
     }

     pub(crate) fn to_le_bytes(self) -> [u8; 8] {
-        let mut temp = (0x000F_FFFF & self.page_index) as u64;
-        temp |= (0x000F_FFFF & self.region as u64) << 20;
-        temp |= (0b0001_1111 & self.page_order as u64) << 59;
+        let mut temp = 0x000F_FFFF & u64::from(self.page_index);
+        temp |= (0x000F_FFFF & u64::from(self.region)) << 20;
+        temp |= (0b0001_1111 & u64::from(self.page_order)) << 59;
         temp.to_le_bytes()
     }
@@ -99,8 +99,8 @@ impl PageNumber {
         if self.region > other.region {
             return false;
         }
-        let self_order0 = self.page_index * 2u32.pow(self.page_order as u32);
-        let other_order0 = other.page_index * 2u32.pow(other.page_order as u32);
+        let self_order0 = self.page_index * 2u32.pow(self.page_order.into());
+        let other_order0 = other.page_index * 2u32.pow(other.page_order.into());
         assert_ne!(self_order0, other_order0, "{self:?} overlaps {other:?}");
         self_order0 < other_order0
     }
@@ -145,9 +145,9 @@ impl PageNumber {
         page_size: u32,
     ) -> Range<u64> {
         let regional_start =
-            region_pages_start + (self.page_index as u64) * self.page_size_bytes(page_size);
+            region_pages_start + u64::from(self.page_index) * self.page_size_bytes(page_size);
         debug_assert!(regional_start < region_size);
-        let region_base = (self.region as u64) * region_size;
+        let region_base = u64::from(self.region) * region_size;
         let start = data_section_offset + region_base + regional_start;
         let end = start + self.page_size_bytes(page_size);
         start..end
@@ -155,7 +155,7 @@ impl PageNumber {

     pub(crate) fn page_size_bytes(&self, page_size: u32) -> u64 {
         let pages = 1u64 << self.page_order;
-        pages * (page_size as u64)
+        pages * u64::from(page_size)
     }
 }
diff --git a/src/tree_store/page_store/bitmap.rs b/src/tree_store/page_store/bitmap.rs
index 26c52e3f..ec13ddc5 100644
--- a/src/tree_store/page_store/bitmap.rs
+++ b/src/tree_store/page_store/bitmap.rs
@@ -71,14 +71,14 @@ impl BtreeBitmap {
         let vecs: Vec<Vec<u8>> = self.heights.iter().map(|x| x.to_vec()).collect();
         let mut data_offset = END_OFFSETS + self.heights.len() * size_of::<u32>();
         let end_metadata = data_offset;
-        for serialized in vecs.iter() {
+        for serialized in &vecs {
             data_offset += serialized.len();
             let offset_u32: u32 = data_offset.try_into().unwrap();
             result.extend(offset_u32.to_le_bytes());
         }
         assert_eq!(end_metadata, result.len());
-        for serialized in vecs.iter() {
+        for serialized in &vecs {
             result.extend(serialized);
         }
@@ -303,7 +303,7 @@ impl U64GroupedBitmap {
     pub fn to_vec(&self) -> Vec<u8> {
         let mut result = vec![];
         result.extend(self.len.to_le_bytes());
-        for x in self.data.iter() {
+        for x in &self.data {
             result.extend(x.to_le_bytes());
         }
         result
@@ -327,7 +327,7 @@ impl U64GroupedBitmap {
         Self { len, data }
     }

-    fn data_index_of(&self, bit: u32) -> (usize, usize) {
+    fn data_index_of(bit: u32) -> (usize, usize) {
         ((bit as usize) / 64, (bit as usize) % 64)
     }
@@ -363,7 +363,7 @@ impl U64GroupedBitmap {
     fn first_unset(&self, start_bit: u32, end_bit: u32) -> Option<u32> {
         assert_eq!(end_bit, (start_bit - start_bit % 64) + 64);

-        let (index, bit) = self.data_index_of(start_bit);
+        let (index, bit) = Self::data_index_of(start_bit);
         let mask = (1 << bit) - 1;
         let group = self.data[index];
         let group = group | mask;
@@ -386,7 +386,7 @@ impl U64GroupedBitmap {

     pub fn get(&self, bit: u32) -> bool {
         assert!(bit < self.len);
-        let (index, bit_index) = self.data_index_of(bit);
+        let (index, bit_index) = Self::data_index_of(bit);
         let group = self.data[index];
         group & U64GroupedBitmap::select_mask(bit_index) != 0
     }
@@ -394,7 +394,7 @@ impl U64GroupedBitmap {
     // Returns true iff the bit's group is all set
     pub fn set(&mut self, bit: u32) -> bool {
         assert!(bit < self.len);
-        let (index, bit_index) = self.data_index_of(bit);
+        let (index, bit_index) = Self::data_index_of(bit);
         let mut group = self.data[index];
         group |= Self::select_mask(bit_index);
         self.data[index] = group;
@@ -403,8 +403,8 @@ impl U64GroupedBitmap {
     }

     pub fn clear(&mut self, bit: u32) {
-        assert!(bit < self.len, "{} must be less than {}", bit, self.len);
-        let (index, bit_index) = self.data_index_of(bit);
+        assert!(bit < self.len, "{bit} must be less than {}", self.len);
+        let (index, bit_index) = Self::data_index_of(bit);
         self.data[index] &= !Self::select_mask(bit_index);
     }
 }
@@ -533,7 +533,7 @@ mod test {
             } else {
                 assert_eq!(allocated.len(), num_pages as usize);
             }
-        } else if let Some(to_free) = allocated.iter().choose(&mut rng).cloned() {
+        } else if let Some(to_free) = allocated.iter().choose(&mut rng).copied() {
             allocator.clear(to_free);
             allocated.remove(&to_free);
         }
diff --git a/src/tree_store/page_store/buddy_allocator.rs b/src/tree_store/page_store/buddy_allocator.rs
index 8c967e29..e1b05114 100644
--- a/src/tree_store/page_store/buddy_allocator.rs
+++ b/src/tree_store/page_store/buddy_allocator.rs
@@ -87,21 +87,21 @@ impl BuddyAllocator {
         let mut data_offset = result.len() + (self.max_order as usize + 1) * 2 * size_of::<u32>();
         let end_metadata = data_offset;
-        for order in self.free.iter() {
+        for order in &self.free {
             data_offset += order.to_vec().len();
             let offset_u32: u32 = data_offset.try_into().unwrap();
             result.extend(offset_u32.to_le_bytes());
         }
-        for order in self.allocated.iter() {
+        for order in &self.allocated {
             data_offset += order.to_vec().len();
             let offset_u32: u32 = data_offset.try_into().unwrap();
             result.extend(offset_u32.to_le_bytes());
         }
         assert_eq!(end_metadata, result.len());
-        for order in self.free.iter() {
+        for order in &self.free {
             result.extend(&order.to_vec());
         }
-        for order in self.allocated.iter() {
+        for order in &self.allocated {
             result.extend(&order.to_vec());
         }
@@ -280,7 +280,7 @@ impl BuddyAllocator {
         }

         let mut check_result = HashSet::new();
-        for page in allocated_check.iter() {
+        for page in &allocated_check {
             check_result.extend(page.to_order0());
         }
         assert_eq!(free_check, check_result);
@@ -463,7 +463,7 @@ impl BuddyAllocator {
         }
     }

-    /// data must have been initialized by Self::init_new(), and page_number must be free
+    /// data must have been initialized by `Self::init_new()`, and `page_number` must be free
     pub(crate) fn record_alloc(&mut self, page_number: u32, order: u8) {
         assert!(order <= self.max_order);
         // Only record the allocation for the actual page
@@ -492,7 +492,7 @@ impl BuddyAllocator {
         }
     }

-    /// data must have been initialized by Self::init_new()
+    /// data must have been initialized by `Self::init_new()`
     pub(crate) fn free(&mut self, page_number: u32, order: u8) {
         debug_assert!(self.get_order_free_mut(order).get(page_number));
         debug_assert!(
@@ -618,7 +618,7 @@ mod test {
         // Check that serialized size is as expected for a full region
         let max_region_pages = 1024 * 1024;
         let allocator = BuddyAllocator::new(max_region_pages, max_region_pages);
-        let max_region_pages = max_region_pages as u64;
+        let max_region_pages = u64::from(max_region_pages);
         // 2x because that's the integral of 1/2^x to account for all the 21 orders
         let allocated_state_bits = 2 * max_region_pages;
         // + u32 * 21 because of the length field
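The allocator hunks above all lean on one identity: an order-`o` allocation spans `2^o` order-0 pages, so `page_index * 2u32.pow(o)` is its first order-0 page — this is what the `PageNumber` ordering and overlap checks compare. A standalone sketch of that arithmetic; the buddy-index XOR at the end is a general buddy-allocator property, not code from this diff:

```rust
// Number of order-0 pages covered by one allocation of the given order.
fn pages_in_order(order: u8) -> u64 {
    1u64 << order
}

// First order-0 page covered by `page_index` at `order`.
fn order0_start(page_index: u32, order: u8) -> u32 {
    page_index * 2u32.pow(order.into())
}

fn main() {
    // Page 3 at order 2 covers order-0 pages 12..16 ...
    assert_eq!(pages_in_order(2), 4);
    assert_eq!(order0_start(3, 2), 12);
    // ... and its buddy at the same order (index 3 ^ 1 = 2) covers 8..12.
    assert_eq!(order0_start(3 ^ 1, 2), 8);
}
```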
diff --git a/src/tree_store/page_store/cached_file.rs b/src/tree_store/page_store/cached_file.rs
index 0999a3b0..61e03cc9 100644
--- a/src/tree_store/page_store/cached_file.rs
+++ b/src/tree_store/page_store/cached_file.rs
@@ -29,7 +29,7 @@ impl Drop for WritablePage {
         self.buffer
             .lock()
             .unwrap()
-            .return_value(&self.offset, self.data.clone());
+            .return_value(self.offset, self.data.clone());
     }
 }
@@ -63,11 +63,11 @@ impl LRUWriteCache {
         assert!(self.cache.insert(key, Some(value)).is_none());
     }

-    fn get(&self, key: &u64) -> Option<&Arc<[u8]>> {
+    fn get(&self, key: u64) -> Option<&Arc<[u8]>> {
         self.cache.get(key).map(|x| x.as_ref().unwrap())
     }

-    fn remove(&mut self, key: &u64) -> Option<Arc<[u8]>> {
+    fn remove(&mut self, key: u64) -> Option<Arc<[u8]>> {
         if let Some(value) = self.cache.remove(key) {
             assert!(value.is_some());
             return value;
@@ -75,11 +75,11 @@ impl LRUWriteCache {
         None
     }

-    fn return_value(&mut self, key: &u64, value: Arc<[u8]>) {
+    fn return_value(&mut self, key: u64, value: Arc<[u8]>) {
         assert!(self.cache.get_mut(key).unwrap().replace(value).is_none());
     }

-    fn take_value(&mut self, key: &u64) -> Option<Arc<[u8]>> {
+    fn take_value(&mut self, key: u64) -> Option<Arc<[u8]>> {
         if let Some(value) = self.cache.get_mut(key) {
             let result = value.take().unwrap();
             return Some(result);
@@ -92,10 +92,10 @@ impl LRUWriteCache {
             if let Some((k, v)) = self.cache.pop_lowest_priority() {
                 if let Some(v_inner) = v {
                     return Some((k, v_inner));
-                } else {
-                    // Value is borrowed by take_value(). We can't evict it, so put it back.
-                    self.cache.insert(k, v);
                 }
+
+                // Value is borrowed by take_value(). We can't evict it, so put it back.
+                self.cache.insert(k, v);
             } else {
                 break;
             }
@@ -296,7 +296,7 @@ impl PagedCachedFile {

         if !matches!(hint, PageHint::Clean) {
             let lock = self.write_buffer.lock().unwrap();
-            if let Some(cached) = lock.get(&offset) {
+            if let Some(cached) = lock.get(offset) {
                 #[cfg(feature = "cache_metrics")]
                 self.reads_hits.fetch_add(1, Ordering::Release);
                 debug_assert_eq!(cached.len(), len);
@@ -307,7 +307,7 @@ impl PagedCachedFile {
         let cache_slot: usize = (offset % Self::lock_stripes()).try_into().unwrap();
         {
             let read_lock = self.read_cache[cache_slot].read().unwrap();
-            if let Some(cached) = read_lock.get(&offset) {
+            if let Some(cached) = read_lock.get(offset) {
                 #[cfg(feature = "cache_metrics")]
                 self.reads_hits.fetch_add(1, Ordering::Release);
                 debug_assert_eq!(cached.len(), len);
@@ -345,7 +345,7 @@ impl PagedCachedFile {
     // Discard pending writes to the given range
     pub(super) fn cancel_pending_write(&self, offset: u64, _len: usize) {
         assert_eq!(0, offset % self.page_size);
-        if let Some(removed) = self.write_buffer.lock().unwrap().remove(&offset) {
+        if let Some(removed) = self.write_buffer.lock().unwrap().remove(offset) {
             self.write_buffer_bytes
                 .fetch_sub(removed.len(), Ordering::Release);
         }
@@ -357,7 +357,7 @@ impl PagedCachedFile {
     pub(super) fn invalidate_cache(&self, offset: u64, len: usize) {
         let cache_slot: usize = (offset % Self::lock_stripes()).try_into().unwrap();
         let mut lock = self.read_cache[cache_slot].write().unwrap();
-        if let Some(removed) = lock.remove(&offset) {
+        if let Some(removed) = lock.remove(offset) {
             assert_eq!(len, removed.len());
             self.read_cache_bytes
                 .fetch_sub(removed.len(), Ordering::AcqRel);
@@ -384,7 +384,7 @@ impl PagedCachedFile {
         let cache_slot: usize = (offset % Self::lock_stripes()).try_into().unwrap();
         let existing = {
             let mut lock = self.read_cache[cache_slot].write().unwrap();
-            if let Some(removed) = lock.remove(&offset) {
+            if let Some(removed) = lock.remove(offset) {
                 assert_eq!(
                     len,
                     removed.len(),
@@ -399,7 +399,7 @@ impl PagedCachedFile {
             }
         };

-        let data = if let Some(removed) = lock.take_value(&offset) {
+        let data = if let Some(removed) = lock.take_value(offset) {
             removed
         } else {
             let previous = self.write_buffer_bytes.fetch_add(len, Ordering::AcqRel);
@@ -429,7 +429,7 @@ impl PagedCachedFile {
                 self.read_direct(offset, len)?.into()
             };
             lock.insert(offset, result);
-            lock.take_value(&offset).unwrap()
+            lock.take_value(offset).unwrap()
         };
         Ok(WritablePage {
             buffer: self.write_buffer.clone(),
diff --git a/src/tree_store/page_store/layout.rs b/src/tree_store/page_store/layout.rs
index 7c8b2a16..68840216 100644
--- a/src/tree_store/page_store/layout.rs
+++ b/src/tree_store/page_store/layout.rs
@@ -34,10 +34,10 @@ impl RegionLayout {
         page_capacity: u32,
         page_size: u32,
     ) -> RegionLayout {
-        assert!(desired_usable_bytes <= page_capacity as u64 * page_size as u64);
+        assert!(desired_usable_bytes <= u64::from(page_capacity) * u64::from(page_size));
         let header_pages = RegionHeader::header_pages_expensive(page_size, page_capacity);
         let num_pages =
-            round_up_to_multiple_of(desired_usable_bytes, page_size.into()) / page_size as u64;
+            round_up_to_multiple_of(desired_usable_bytes, page_size.into()) / u64::from(page_size);

         Self {
             num_pages: num_pages.try_into().unwrap(),
@@ -57,7 +57,7 @@ impl RegionLayout {
     }

     pub(super) fn data_section(&self) -> Range<u64> {
-        let header_bytes = self.header_pages as u64 * self.page_size as u64;
+        let header_bytes = u64::from(self.header_pages) * u64::from(self.page_size);
         header_bytes..(header_bytes + self.usable_bytes())
     }
@@ -74,11 +74,11 @@ impl RegionLayout {
     }

     pub(super) fn len(&self) -> u64 {
-        (self.header_pages as u64) * (self.page_size as u64) + self.usable_bytes()
+        u64::from(self.header_pages) * u64::from(self.page_size) + self.usable_bytes()
     }

     pub(super) fn usable_bytes(&self) -> u64 {
-        self.page_size as u64 * self.num_pages as u64
+        u64::from(self.page_size) * u64::from(self.num_pages)
     }
 }
@@ -128,9 +128,9 @@ impl DatabaseLayout {
         region_max_data_pages_u32: u32,
         page_size_u32: u32,
     ) -> Self {
-        let page_size = page_size_u32 as u64;
-        let region_header_pages = region_header_pages_u32 as u64;
-        let region_max_data_pages = region_max_data_pages_u32 as u64;
+        let page_size = u64::from(page_size_u32);
+        let region_header_pages = u64::from(region_header_pages_u32);
+        let region_max_data_pages = u64::from(region_max_data_pages_u32);
         // Super-header
         let mut remaining = file_len - page_size;
         let full_region_size = (region_header_pages + region_max_data_pages) * page_size;
@@ -231,13 +231,13 @@ impl DatabaseLayout {
             .as_ref()
             .map(RegionLayout::usable_bytes)
             .unwrap_or_default();
-        (self.num_full_regions as u64) * self.full_region_layout.usable_bytes() + trailing
+        u64::from(self.num_full_regions) * self.full_region_layout.usable_bytes() + trailing
     }

     pub(super) fn region_base_address(&self, region: u32) -> u64 {
         assert!(region < self.num_regions());
-        (self.full_region_layout.page_size() as u64)
-            + (region as u64) * self.full_region_layout.len()
+        u64::from(self.full_region_layout.page_size())
+            + u64::from(region) * self.full_region_layout.len()
     }

     pub(super) fn region_layout(&self, region: u32) -> RegionLayout {
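Nearly every `layout.rs` hunk above makes the same substitution: widening `u32 -> u64` casts move from `as` to `u64::from`. A short sketch of what that style buys — the function here is illustrative, not from redb:

```rust
// `as` compiles for any numeric pair and silently truncates when lossy;
// `u64::from` only exists for conversions that cannot lose data, so the
// compiler re-checks the intent if a field's type ever changes.
fn region_bytes(page_size: u32, num_pages: u32) -> u64 {
    u64::from(page_size) * u64::from(num_pages)
}

fn main() {
    assert_eq!(region_bytes(4096, 3), 12_288);
}
```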
diff --git a/src/tree_store/page_store/lru_cache.rs b/src/tree_store/page_store/lru_cache.rs
index 5df77e12..d26d9006 100644
--- a/src/tree_store/page_store/lru_cache.rs
+++ b/src/tree_store/page_store/lru_cache.rs
@@ -31,8 +31,8 @@ impl<T> LRUCache<T> {
         result
     }

-    pub(crate) fn remove(&mut self, key: &u64) -> Option<T> {
-        if let Some((value, _)) = self.cache.remove(key) {
+    pub(crate) fn remove(&mut self, key: u64) -> Option<T> {
+        if let Some((value, _)) = self.cache.remove(&key) {
             if self.lru_queue.len() > 2 * self.cache.len() {
                 // Cycle two elements of the LRU queue to ensure it doesn't grow without bound
                 for _ in 0..2 {
@@ -50,8 +50,8 @@ impl<T> LRUCache<T> {
         }
     }

-    pub(crate) fn get(&self, key: &u64) -> Option<&T> {
-        if let Some((value, second_chance)) = self.cache.get(key) {
+    pub(crate) fn get(&self, key: u64) -> Option<&T> {
+        if let Some((value, second_chance)) = self.cache.get(&key) {
             second_chance.store(true, Ordering::Release);
             Some(value)
         } else {
@@ -59,8 +59,8 @@ impl<T> LRUCache<T> {
         }
     }

-    pub(crate) fn get_mut(&mut self, key: &u64) -> Option<&mut T> {
-        if let Some((value, second_chance)) = self.cache.get_mut(key) {
+    pub(crate) fn get_mut(&mut self, key: u64) -> Option<&mut T> {
+        if let Some((value, second_chance)) = self.cache.get_mut(&key) {
             second_chance.store(true, Ordering::Release);
             Some(value)
         } else {
diff --git a/src/tree_store/page_store/mod.rs b/src/tree_store/page_store/mod.rs
index 4d42547c..143c2da7 100644
--- a/src/tree_store/page_store/mod.rs
+++ b/src/tree_store/page_store/mod.rs
@@ -10,7 +10,7 @@ mod lru_cache;
 mod page_manager;
 mod region;
 mod savepoint;
-#[allow(dead_code)]
+#[allow(clippy::pedantic, dead_code)]
 mod xxh3;

 pub(crate) use base::{Page, PageHint, PageNumber, MAX_PAIR_LENGTH, MAX_VALUE_LENGTH};
diff --git a/src/tree_store/page_store/page_manager.rs b/src/tree_store/page_store/page_manager.rs
index efacc619..0d9943a3 100644
--- a/src/tree_store/page_store/page_manager.rs
+++ b/src/tree_store/page_store/page_manager.rs
@@ -114,7 +114,10 @@ impl TransactionalMemory {
         assert!(page_size.is_power_of_two() && page_size >= DB_HEADER_SIZE);

         let region_size = requested_region_size.unwrap_or(MAX_USABLE_REGION_SPACE);
-        let region_size = min(region_size, (MAX_PAGE_INDEX as u64 + 1) * page_size as u64);
+        let region_size = min(
+            region_size,
+            (u64::from(MAX_PAGE_INDEX) + 1) * page_size as u64,
+        );
         assert!(region_size.is_power_of_two());

         let storage = PagedCachedFile::new(
@@ -143,7 +146,7 @@ impl TransactionalMemory {
         // Make sure that there is enough room to allocate the region tracker into a page
         let size: u64 = max(
             MIN_DESIRED_USABLE_BYTES,
-            page_size as u64 * MIN_USABLE_PAGES as u64,
+            page_size as u64 * u64::from(MIN_USABLE_PAGES),
         );
         let tracker_space =
             (page_size * ((region_tracker_required_bytes + page_size - 1) / page_size)) as u64;
@@ -278,7 +281,7 @@ impl TransactionalMemory {
     }

     pub(crate) fn clear_read_cache(&self) {
-        self.storage.invalidate_cache_all()
+        self.storage.invalidate_cache_all();
     }

     pub(crate) fn clear_cache_and_reload(&mut self) -> Result<bool> {
@@ -501,7 +504,7 @@ impl TransactionalMemory {
         let mut state = self.state.lock().unwrap();
         // Trim surplus file space, before finalizing the commit
         let shrunk = if allow_trim {
-            self.try_shrink(&mut state)?
+            Self::try_shrink(&mut state)?
         } else {
             false
         };
@@ -615,7 +618,7 @@ impl TransactionalMemory {
             .free(page_number.page_index, page_number.page_order);

         let address = page_number.address_range(
-            self.page_size as u64,
+            self.page_size.into(),
             self.region_size,
             self.region_header_with_padding_size,
             self.page_size,
@@ -640,7 +643,7 @@ impl TransactionalMemory {
         hint: PageHint,
     ) -> Result<PageImpl> {
         let range = page_number.address_range(
-            self.page_size as u64,
+            self.page_size.into(),
             self.region_size,
             self.region_header_with_padding_size,
             self.page_size,
@@ -683,7 +686,7 @@ impl TransactionalMemory {
         }

         let address_range = page_number.address_range(
-            self.page_size as u64,
+            self.page_size.into(),
             self.region_size,
             self.region_header_with_padding_size,
             self.page_size,
@@ -773,7 +776,7 @@ impl TransactionalMemory {
             .mark_free(page.page_order, region_index);

         let address_range = page.address_range(
-            self.page_size as u64,
+            self.page_size.into(),
             self.region_size,
             self.region_header_with_padding_size,
             self.page_size,
@@ -807,13 +810,12 @@ impl TransactionalMemory {
         let mut state = self.state.lock().unwrap();

         let page_number = if let Some(page_number) =
-            self.allocate_helper_retry(&mut state, required_order, lowest)?
+            Self::allocate_helper_retry(&mut state, required_order, lowest)?
         {
             page_number
         } else {
             self.grow(&mut state, required_order)?;
-            self.allocate_helper_retry(&mut state, required_order, lowest)?
-                .unwrap()
+            Self::allocate_helper_retry(&mut state, required_order, lowest)?.unwrap()
         };

         #[cfg(debug_assertions)]
@@ -835,7 +837,7 @@ impl TransactionalMemory {
             .insert(page_number);

         let address_range = page_number.address_range(
-            self.page_size as u64,
+            self.page_size.into(),
             self.region_size,
             self.region_header_with_padding_size,
             self.page_size,
@@ -865,7 +867,6 @@ impl TransactionalMemory {
     }

     fn allocate_helper_retry(
-        &self,
         state: &mut InMemoryState,
         required_order: u8,
         lowest: bool,
     ) -> Result<Option<PageNumber>> {
@@ -889,16 +890,15 @@ impl TransactionalMemory {
                     page,
                     required_order,
                 )));
-            } else {
-                // Mark the region, if it's full
-                state
-                    .get_region_tracker_mut()
-                    .mark_full(required_order, candidate_region);
             }
+
+            // Mark the region, if it's full
+            state
+                .get_region_tracker_mut()
+                .mark_full(required_order, candidate_region);
         }
     }

-    fn try_shrink(&self, state: &mut InMemoryState) -> Result<bool> {
+    fn try_shrink(state: &mut InMemoryState) -> Result<bool> {
         let layout = state.header.layout();
         let last_region_index = layout.num_regions() - 1;
         let last_allocator = state.get_region(last_region_index);
@@ -925,9 +925,9 @@ impl TransactionalMemory {
     fn grow(&self, state: &mut InMemoryState, required_order_allocation: u8) -> Result<()> {
         let layout = state.header.layout();
         let required_growth =
-            2u64.pow(required_order_allocation.into()) * state.header.page_size() as u64;
-        let max_region_size = (state.header.layout().full_region_layout().num_pages() as u64)
-            * (state.header.page_size() as u64);
+            2u64.pow(required_order_allocation.into()) * u64::from(state.header.page_size());
+        let max_region_size = u64::from(state.header.layout().full_region_layout().num_pages())
+            * u64::from(state.header.page_size());
         let next_desired_size = if layout.num_full_regions() > 0 {
             if let Some(trailing) = layout.trailing_region_layout() {
                 if 2 * required_growth < max_region_size - trailing.usable_bytes() {
@@ -979,7 +979,7 @@ impl TransactionalMemory {
         let state = self.state.lock().unwrap();
         let mut count = 0u64;
         for i in 0..state.header.layout().num_regions() {
-            count += state.get_region(i).count_allocated_pages() as u64;
+            count += u64::from(state.get_region(i).count_allocated_pages());
         }

         Ok(count)
diff --git a/src/tree_store/page_store/region.rs b/src/tree_store/page_store/region.rs
index bfeb20df..44b88e43 100644
--- a/src/tree_store/page_store/region.rs
+++ b/src/tree_store/page_store/region.rs
@@ -41,7 +41,7 @@ impl RegionTracker {
         let allocator_len: u32 = self.order_trackers[0].to_vec().len().try_into().unwrap();
         result.extend(orders.to_le_bytes());
         result.extend(allocator_len.to_le_bytes());
-        for order in self.order_trackers.iter() {
+        for order in &self.order_trackers {
             result.extend(&order.to_vec());
         }
         result
@@ -165,7 +165,7 @@ impl Allocators {
         // Ignore the region tracker because it is an optimistic cache, and so may not match
         // between repairs of the allocators
         let mut result = 0;
-        for allocator in self.region_allocators.iter() {
+        for allocator in &self.region_allocators {
             result ^= xxh3_checksum(&allocator.to_vec());
         }
         result
@@ -175,13 +175,13 @@ impl Allocators {
         let page_size = header.page_size();
         let region_header_size =
             header.layout().full_region_layout().get_header_pages() * page_size;
-        let region_size = header.layout().full_region_layout().num_pages() as u64
-            * page_size as u64
-            + region_header_size as u64;
+        let region_size = u64::from(header.layout().full_region_layout().num_pages())
+            * u64::from(page_size)
+            + u64::from(region_header_size);
         let range = header.region_tracker().address_range(
-            page_size as u64,
+            page_size.into(),
             region_size,
-            region_header_size as u64,
+            region_header_size.into(),
             page_size,
         );
         let len: usize = (range.end - range.start).try_into().unwrap();
@@ -215,12 +215,12 @@ impl Allocators {
     ) -> Result {
         let page_size = layout.full_region_layout().page_size();
         let region_header_size =
-            (layout.full_region_layout().get_header_pages() * page_size) as u64;
-        let region_size =
-            layout.full_region_layout().num_pages() as u64 * page_size as u64 + region_header_size;
+            u64::from(layout.full_region_layout().get_header_pages() * page_size);
+        let region_size = u64::from(layout.full_region_layout().num_pages()) * u64::from(page_size)
+            + region_header_size;
         let mut region_tracker_mem = {
             let range = region_tracker_page.address_range(
-                page_size as u64,
+                page_size.into(),
                 region_size,
                 region_header_size,
                 page_size,
@@ -325,7 +325,7 @@ pub(crate) struct RegionHeader {}

 impl RegionHeader {
     pub(crate) fn header_pages_expensive(page_size: u32, pages_per_region: u32) -> u32 {
-        let page_size = page_size as u64;
+        let page_size = u64::from(page_size);
         // TODO: this is kind of expensive. Maybe it should be cached
         let allocator = BuddyAllocator::new(pages_per_region, pages_per_region);
         let result = 8u64 + allocator.to_vec().len() as u64;
diff --git a/src/tree_store/page_store/savepoint.rs b/src/tree_store/page_store/savepoint.rs
index 72a4b2e9..f42b848a 100644
--- a/src/tree_store/page_store/savepoint.rs
+++ b/src/tree_store/page_store/savepoint.rs
@@ -138,7 +138,7 @@ impl<'a> SerializedSavepoint<'a> {
                 .unwrap()
                 .to_le_bytes(),
         );
-        for region in savepoint.regional_allocators.iter() {
+        for region in &savepoint.regional_allocators {
             assert_eq!(savepoint.regional_allocators[0].len(), region.len());
         }
         result.extend(
@@ -147,7 +147,7 @@ impl<'a> SerializedSavepoint<'a> {
                 .to_le_bytes(),
         );

-        for region in savepoint.regional_allocators.iter() {
+        for region in &savepoint.regional_allocators {
             result.extend(region);
         }
         Self::Owned(result)
@@ -276,7 +276,6 @@ impl<'data> Value for SerializedSavepoint<'data> {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a>
     where
-        Self: 'a,
         Self: 'b,
     {
         value.data()
diff --git a/src/tree_store/table_tree.rs b/src/tree_store/table_tree.rs
index b13a8610..ad3990ab 100644
--- a/src/tree_store/table_tree.rs
+++ b/src/tree_store/table_tree.rs
@@ -14,7 +14,6 @@ use crate::types::{Key, MutInPlaceValue, TypeName, Value};
 use crate::{DatabaseStats, Result};
 use std::cmp::max;
 use std::collections::{BTreeMap, HashMap};
-use std::mem;
 use std::mem::size_of;
 use std::ops::RangeFull;
 use std::sync::{Arc, Mutex};
@@ -51,7 +50,6 @@ impl Value for FreedTableKey {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> [u8; 2 * size_of::<u64>()]
     where
-        Self: 'a,
         Self: 'b,
     {
         let mut result = [0u8; 2 * size_of::<u64>()];
@@ -147,7 +145,6 @@ impl Value for FreedPageList<'_> {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'b [u8]
     where
-        Self: 'a,
         Self: 'b,
     {
         value.data
@@ -168,7 +165,7 @@ impl MutInPlaceValue for FreedPageList<'_> {
     }

     fn from_bytes_mut(data: &mut [u8]) -> &mut Self::BaseRefType {
-        unsafe { mem::transmute(data) }
+        unsafe { &mut *(data as *mut [u8] as *mut FreedPageListMut) }
     }
 }
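`from_bytes_mut` above, like the `DynamicCollection` constructors earlier, swaps `mem::transmute` for an explicit pointer cast. A self-contained sketch of the pattern with an illustrative wrapper type (not redb's):

```rust
// Casting &[u8] to a #[repr(transparent)] DST wrapper. The pointer cast
// keeps the fat-pointer metadata (the slice length) and, unlike transmute,
// names both the source and destination types explicitly.
#[repr(transparent)]
#[derive(Debug)]
struct Wrapper {
    data: [u8],
}

impl Wrapper {
    fn new(data: &[u8]) -> &Wrapper {
        // SAFETY: Wrapper is #[repr(transparent)] over [u8], so layout and
        // pointer metadata match exactly.
        unsafe { &*(data as *const [u8] as *const Wrapper) }
    }
}

fn main() {
    let bytes = [1u8, 2, 3];
    let w = Wrapper::new(&bytes);
    assert_eq!(w.data.len(), 3);
}
```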
diff --git a/src/tree_store/table_tree_base.rs b/src/tree_store/table_tree_base.rs
index 19bb13c3..b3f3dbd1 100644
--- a/src/tree_store/table_tree_base.rs
+++ b/src/tree_store/table_tree_base.rs
@@ -106,11 +106,8 @@ impl InternalTableDefinition {
             table_root,
             table_length,
             ..
-        } => {
-            *table_root = root;
-            *table_length = length;
         }
-        InternalTableDefinition::Multimap {
+        | InternalTableDefinition::Multimap {
             table_root,
             table_length,
             ..
@@ -261,22 +258,22 @@ impl InternalTableDefinition {

     fn private_get_root(&self) -> Option<BtreeHeader> {
         match self {
-            InternalTableDefinition::Normal { table_root, .. } => *table_root,
-            InternalTableDefinition::Multimap { table_root, .. } => *table_root,
+            InternalTableDefinition::Normal { table_root, .. }
+            | InternalTableDefinition::Multimap { table_root, .. } => *table_root,
         }
     }

     pub(crate) fn get_length(&self) -> u64 {
         match self {
-            InternalTableDefinition::Normal { table_length, .. } => *table_length,
-            InternalTableDefinition::Multimap { table_length, .. } => *table_length,
+            InternalTableDefinition::Normal { table_length, .. }
+            | InternalTableDefinition::Multimap { table_length, .. } => *table_length,
         }
     }

     fn private_get_fixed_key_size(&self) -> Option<usize> {
         match self {
-            InternalTableDefinition::Normal { fixed_key_size, .. } => *fixed_key_size,
-            InternalTableDefinition::Multimap { fixed_key_size, .. } => *fixed_key_size,
+            InternalTableDefinition::Normal { fixed_key_size, .. }
+            | InternalTableDefinition::Multimap { fixed_key_size, .. } => *fixed_key_size,
         }
     }
@@ -284,8 +281,8 @@ impl InternalTableDefinition {
         match self {
             InternalTableDefinition::Normal {
                 fixed_value_size, ..
-            } => *fixed_value_size,
-            InternalTableDefinition::Multimap {
+            }
+            | InternalTableDefinition::Multimap {
                 fixed_value_size, ..
             } => *fixed_value_size,
         }
@@ -293,8 +290,8 @@ impl InternalTableDefinition {

     fn private_get_key_alignment(&self) -> usize {
         match self {
-            InternalTableDefinition::Normal { key_alignment, .. } => *key_alignment,
-            InternalTableDefinition::Multimap { key_alignment, .. } => *key_alignment,
+            InternalTableDefinition::Normal { key_alignment, .. }
+            | InternalTableDefinition::Multimap { key_alignment, .. } => *key_alignment,
         }
     }
@@ -302,8 +299,8 @@ impl InternalTableDefinition {
         match self {
             InternalTableDefinition::Normal {
                 value_alignment, ..
-            } => *value_alignment,
-            InternalTableDefinition::Multimap {
+            }
+            | InternalTableDefinition::Multimap {
                 value_alignment, ..
             } => *value_alignment,
         }
@@ -318,15 +315,15 @@ impl InternalTableDefinition {

     fn private_key_type(&self) -> TypeName {
         match self {
-            InternalTableDefinition::Normal { key_type, .. } => key_type.clone(),
-            InternalTableDefinition::Multimap { key_type, .. } => key_type.clone(),
+            InternalTableDefinition::Normal { key_type, .. }
+            | InternalTableDefinition::Multimap { key_type, .. } => key_type.clone(),
         }
     }

     fn private_value_type(&self) -> TypeName {
         match self {
-            InternalTableDefinition::Normal { value_type, .. } => value_type.clone(),
-            InternalTableDefinition::Multimap { value_type, .. } => value_type.clone(),
+            InternalTableDefinition::Normal { value_type, .. }
+            | InternalTableDefinition::Multimap { value_type, .. } => value_type.clone(),
         }
     }
 }
@@ -446,7 +443,6 @@ impl Value for InternalTableDefinition {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Vec<u8>
     where
-        Self: 'a,
         Self: 'b,
     {
         let mut result = vec![value.get_type().into()];
@@ -463,14 +459,14 @@ impl Value for InternalTableDefinition {
             result.extend_from_slice(&u32::try_from(fixed).unwrap().to_le_bytes());
         } else {
             result.push(0);
-            result.extend_from_slice(&[0; size_of::<u32>()])
+            result.extend_from_slice(&[0; size_of::<u32>()]);
         }
         if let Some(fixed) = value.private_get_fixed_value_size() {
             result.push(1);
             result.extend_from_slice(&u32::try_from(fixed).unwrap().to_le_bytes());
         } else {
             result.push(0);
-            result.extend_from_slice(&[0; size_of::<u32>()])
+            result.extend_from_slice(&[0; size_of::<u32>()]);
         }
         result.extend_from_slice(
             &u32::try_from(value.private_get_key_alignment())
diff --git a/src/types.rs b/src/types.rs
index 640322dc..ab244a9b 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -72,7 +72,7 @@ impl TypeName {
     }

 pub trait Value: Debug {
-    /// SelfType<'a> must be the same type as Self with all lifetimes replaced with 'a
+    /// `SelfType<'a>` must be the same type as Self with all lifetimes replaced with 'a
     type SelfType<'a>: Debug + 'a
     where
         Self: 'a;
@@ -93,7 +93,6 @@ pub trait Value: Debug {
     /// Serialize the value to a slice
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Self::AsBytes<'a>
     where
-        Self: 'a,
         Self: 'b;

     /// Globally unique identifier for this type
@@ -101,9 +100,9 @@ pub trait Value: Debug {
 }

 /// Implementing this trait indicates that the type can be mutated in-place as a &mut [u8].
-/// This enables the .insert_reserve() method on Table
+/// This enables the `.insert_reserve()` method on Table
 pub trait MutInPlaceValue: Value {
-    /// The base type such that &mut [u8] can be safely transmuted to &mut BaseRefType
+    /// The base type such that &mut [u8] can be safely transmuted to `&mut BaseRefType`
     type BaseRefType: Debug + ?Sized;

     // Initialize `data` to a valid value. This method will be called (at some point, not necessarily immediately)
@@ -142,7 +141,7 @@ impl Value for () {
         Some(0)
     }

-    #[allow(clippy::unused_unit)]
+    #[allow(clippy::unused_unit, clippy::semicolon_if_nothing_returned)]
     fn from_bytes<'a>(_data: &'a [u8]) -> ()
     where
         Self: 'a,
@@ -152,7 +151,6 @@ impl Value for () {

     fn as_bytes<'a, 'b: 'a>(_: &'a Self::SelfType<'b>) -> &'a [u8]
     where
-        Self: 'a,
         Self: 'b,
     {
         &[]
@@ -194,7 +192,6 @@ impl Value for bool {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a [u8]
     where
-        Self: 'a,
         Self: 'b,
     {
         match value {
@@ -241,7 +238,6 @@ impl<T: Value> Value for Option<T> {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Vec<u8>
     where
-        Self: 'a,
         Self: 'b,
     {
         let mut result = vec![0];
@@ -299,7 +295,6 @@ impl Value for &[u8] {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a [u8]
     where
-        Self: 'a,
         Self: 'b,
     {
         value
@@ -337,7 +332,6 @@ impl<const N: usize> Value for &[u8; N] {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a [u8; N]
     where
-        Self: 'a,
         Self: 'b,
     {
         value
@@ -390,7 +384,6 @@ impl<T: Value, const N: usize> Value for [T; N] {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> Vec<u8>
     where
-        Self: 'a,
         Self: 'b,
     {
         if let Some(fixed) = T::fixed_width() {
@@ -470,7 +463,6 @@ impl Value for &str {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a str
     where
-        Self: 'a,
         Self: 'b,
     {
         value
@@ -510,7 +502,6 @@ impl Value for String {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> &'a str
     where
-        Self: 'a,
         Self: 'b,
     {
         value.as_str()
@@ -546,7 +537,6 @@ impl Value for char {
     fn as_bytes<'a, 'b: 'a>(value: &'a Self::SelfType<'b>) -> [u8; 3]
     where
-        Self: 'a,
         Self: 'b,
     {
         let bytes = u32::from(*value).to_le_bytes();
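For reference, the `Value` implementations above all follow the same shape once the redundant `Self: 'a` bound is gone. A minimal sketch of a fixed-width implementation against this trait — the `Millimeters` type is hypothetical:

```rust
use redb::{TypeName, Value};
use std::fmt::Debug;

#[derive(Debug)]
struct Millimeters(u32);

impl Value for Millimeters {
    type SelfType<'a> = Millimeters
    where
        Self: 'a;
    type AsBytes<'a> = [u8; 4]
    where
        Self: 'a;

    fn fixed_width() -> Option<usize> {
        Some(4)
    }

    fn from_bytes<'a>(data: &'a [u8]) -> Millimeters
    where
        Self: 'a,
    {
        Millimeters(u32::from_le_bytes(data.try_into().unwrap()))
    }

    // Only the `Self: 'b` bound remains after this diff.
    fn as_bytes<'a, 'b: 'a>(value: &'a Millimeters) -> [u8; 4]
    where
        Self: 'b,
    {
        value.0.to_le_bytes()
    }

    fn type_name() -> TypeName {
        TypeName::new("example::Millimeters")
    }
}
```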