diff --git a/Cargo.toml b/Cargo.toml
index 1f415f6893..6497e79b97 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,7 +11,7 @@ repository = "https://github.com/quickwit-oss/tantivy"
 readme = "README.md"
 keywords = ["search", "information", "retrieval"]
 edition = "2021"
-rust-version = "1.62"
+rust-version = "1.63"
 exclude = ["benches/*.json", "benches/*.txt"]
 
 [dependencies]
diff --git a/columnar/src/column_index/merge/shuffled.rs b/columnar/src/column_index/merge/shuffled.rs
index 6acf199ff5..f93b896354 100644
--- a/columnar/src/column_index/merge/shuffled.rs
+++ b/columnar/src/column_index/merge/shuffled.rs
@@ -140,7 +140,7 @@ mod tests {
     #[test]
     fn test_merge_column_index_optional_shuffle() {
         let optional_index: ColumnIndex = OptionalIndex::for_test(2, &[0]).into();
-        let column_indexes = vec![optional_index, ColumnIndex::Full];
+        let column_indexes = [optional_index, ColumnIndex::Full];
         let row_addrs = vec![
             RowAddr {
                 segment_ord: 0u32,
diff --git a/columnar/src/column_values/mod.rs b/columnar/src/column_values/mod.rs
index 8a88a8fa43..ef5de51543 100644
--- a/columnar/src/column_values/mod.rs
+++ b/columnar/src/column_values/mod.rs
@@ -75,7 +75,7 @@ pub trait ColumnValues: Send + Sync + DowncastSync {
         let out_and_idx_chunks = output
             .chunks_exact_mut(4)
             .into_remainder()
-            .into_iter()
+            .iter_mut()
             .zip(indexes.chunks_exact(4).remainder());
         for (out, idx) in out_and_idx_chunks {
             *out = self.get_val(*idx);
@@ -102,7 +102,7 @@ pub trait ColumnValues: Send + Sync + DowncastSync {
         let out_and_idx_chunks = output
             .chunks_exact_mut(4)
             .into_remainder()
-            .into_iter()
+            .iter_mut()
             .zip(indexes.chunks_exact(4).remainder());
         for (out, idx) in out_and_idx_chunks {
             *out = Some(self.get_val(*idx));
diff --git a/columnar/src/column_values/u128_based/compact_space/mod.rs b/columnar/src/column_values/u128_based/compact_space/mod.rs
index c05705ad71..f246c7b0cc 100644
--- a/columnar/src/column_values/u128_based/compact_space/mod.rs
+++ b/columnar/src/column_values/u128_based/compact_space/mod.rs
@@ -148,7 +148,7 @@ impl CompactSpace {
             .binary_search_by_key(&compact, |range_mapping| range_mapping.compact_start)
             // Correctness: Overflow. The first range starts at compact space 0, the error from
             // binary search can never be 0
-            .map_or_else(|e| e - 1, |v| v);
+            .unwrap_or_else(|e| e - 1);
         let range_mapping = &self.ranges_mapping[pos];
         let diff = compact - range_mapping.compact_start;
diff --git a/common/src/serialize.rs b/common/src/serialize.rs
index 69b94090fd..181d61e54c 100644
--- a/common/src/serialize.rs
+++ b/common/src/serialize.rs
@@ -290,8 +290,7 @@ impl<'a> BinarySerializable for Cow<'a, [u8]> {
 #[cfg(test)]
 pub mod test {
-    use super::{VInt, *};
-    use crate::serialize::BinarySerializable;
+    use super::*;
 
     pub fn fixed_size_test() {
         let mut buffer = Vec::new();
         O::default().serialize(&mut buffer).unwrap();
diff --git a/src/aggregation/bucket/histogram/histogram.rs b/src/aggregation/bucket/histogram/histogram.rs
index c5ed340ffc..26853c4af0 100644
--- a/src/aggregation/bucket/histogram/histogram.rs
+++ b/src/aggregation/bucket/histogram/histogram.rs
@@ -1,7 +1,5 @@
 use std::cmp::Ordering;
 
-use columnar::ColumnType;
-use itertools::Itertools;
 use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};
 use tantivy_bitpacker::minmax;
@@ -17,7 +15,7 @@ use crate::aggregation::intermediate_agg_result::{
     IntermediateHistogramBucketEntry,
 };
 use crate::aggregation::segment_agg_result::{
-    build_segment_agg_collector, AggregationLimits, SegmentAggregationCollector,
+    build_segment_agg_collector, SegmentAggregationCollector,
 };
 use crate::aggregation::*;
 use crate::TantivyError;
diff --git a/src/aggregation/bucket/range.rs b/src/aggregation/bucket/range.rs
index bf4a865f7d..2e29d97ae2 100644
--- a/src/aggregation/bucket/range.rs
+++ b/src/aggregation/bucket/range.rs
@@ -1,7 +1,6 @@
 use std::fmt::Debug;
 use std::ops::Range;
 
-use columnar::{ColumnType, MonotonicallyMappableToU64};
 use rustc_hash::FxHashMap;
 use serde::{Deserialize, Serialize};
@@ -450,7 +449,6 @@ pub(crate) fn range_to_key(range: &Range, field_type: &ColumnType) -> crate
 #[cfg(test)]
 mod tests {
-    use columnar::MonotonicallyMappableToU64;
     use serde_json::Value;
 
     use super::*;
@@ -459,7 +457,6 @@ mod tests {
         exec_request, exec_request_with_query, get_test_index_2_segments,
         get_test_index_with_num_docs,
     };
-    use crate::aggregation::AggregationLimits;
 
     pub fn get_collector_from_ranges(
         ranges: Vec,
diff --git a/src/aggregation/metric/percentiles.rs b/src/aggregation/metric/percentiles.rs
index ef9e126c51..4a6bac3f0c 100644
--- a/src/aggregation/metric/percentiles.rs
+++ b/src/aggregation/metric/percentiles.rs
@@ -1,6 +1,5 @@
 use std::fmt::Debug;
 
-use columnar::ColumnType;
 use serde::{Deserialize, Serialize};
 
 use super::*;
diff --git a/src/aggregation/metric/stats.rs b/src/aggregation/metric/stats.rs
index ea490e57f9..bd0c522691 100644
--- a/src/aggregation/metric/stats.rs
+++ b/src/aggregation/metric/stats.rs
@@ -1,4 +1,3 @@
-use columnar::ColumnType;
 use serde::{Deserialize, Serialize};
 
 use super::*;
diff --git a/src/aggregation/mod.rs b/src/aggregation/mod.rs
index 4e4b37f4f9..fbb2925dd1 100644
--- a/src/aggregation/mod.rs
+++ b/src/aggregation/mod.rs
@@ -417,7 +417,6 @@ mod tests {
     use time::OffsetDateTime;
 
     use super::agg_req::Aggregations;
-    use super::segment_agg_result::AggregationLimits;
     use super::*;
     use crate::indexer::NoMergePolicy;
     use crate::query::{AllQuery, TermQuery};
diff --git a/src/collector/histogram_collector.rs b/src/collector/histogram_collector.rs
index d5ca1b44f7..51105e7b1c 100644
--- a/src/collector/histogram_collector.rs
+++ b/src/collector/histogram_collector.rs
@@ -160,7 +160,7 @@ mod tests {
     use super::{add_vecs, HistogramCollector, HistogramComputer};
     use crate::schema::{Schema, FAST};
     use crate::time::{Date, Month};
-    use crate::{doc, query, DateTime, Index};
+    use crate::{query, DateTime, Index};
 
     #[test]
     fn test_add_histograms_simple() {
diff --git a/src/collector/tests.rs b/src/collector/tests.rs
index ff8a64abd0..7af7c6d8ce 100644
--- a/src/collector/tests.rs
+++ b/src/collector/tests.rs
@@ -1,15 +1,11 @@
 use columnar::{BytesColumn, Column};
 
 use super::*;
-use crate::collector::{Count, FilterCollector, TopDocs};
-use crate::index::SegmentReader;
 use crate::query::{AllQuery, QueryParser};
 use crate::schema::{Schema, FAST, TEXT};
 use crate::time::format_description::well_known::Rfc3339;
 use crate::time::OffsetDateTime;
-use crate::{
-    doc, DateTime, DocAddress, DocId, Index, Score, Searcher, SegmentOrdinal, TantivyDocument,
-};
+use crate::{DateTime, DocAddress, Index, Searcher, TantivyDocument};
 
 pub const TEST_COLLECTOR_WITH_SCORE: TestCollector = TestCollector {
     compute_score: true,
diff --git a/src/directory/composite_file.rs b/src/directory/composite_file.rs
index d33b67b95f..11d8929f10 100644
--- a/src/directory/composite_file.rs
+++ b/src/directory/composite_file.rs
@@ -1,6 +1,5 @@
 use std::collections::HashMap;
 use std::io::{self, Read, Write};
-use std::iter::ExactSizeIterator;
 use std::ops::Range;
 
 use common::{BinarySerializable, CountingWriter, HasLen, VInt};
diff --git a/src/directory/directory.rs b/src/directory/directory.rs
index 570307aeb6..19df314d97 100644
--- a/src/directory/directory.rs
+++ b/src/directory/directory.rs
@@ -1,5 +1,4 @@
 use std::io::Write;
-use std::marker::{Send, Sync};
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use std::time::Duration;
@@ -40,6 +39,7 @@ impl RetryPolicy {
 /// The `DirectoryLock` is an object that represents a file lock.
 ///
 /// It is associated with a lock file, that gets deleted on `Drop.`
+#[allow(dead_code)]
 pub struct DirectoryLock(Box);
 
 struct DirectoryLockGuard {
diff --git a/src/directory/tests.rs b/src/directory/tests.rs
index 2ef1868c2c..a2c8473ceb 100644
--- a/src/directory/tests.rs
+++ b/src/directory/tests.rs
@@ -1,6 +1,6 @@
 use std::io::Write;
 use std::mem;
-use std::path::{Path, PathBuf};
+use std::path::Path;
 use std::sync::atomic::Ordering::SeqCst;
 use std::sync::atomic::{AtomicBool, AtomicUsize};
 use std::sync::Arc;
diff --git a/src/directory/watch_event_router.rs b/src/directory/watch_event_router.rs
index d47fc2d22f..28fd83c468 100644
--- a/src/directory/watch_event_router.rs
+++ b/src/directory/watch_event_router.rs
@@ -32,6 +32,7 @@ pub struct WatchCallbackList {
 /// file change is detected.
 #[must_use = "This `WatchHandle` controls the lifetime of the watch and should therefore be used."]
 #[derive(Clone)]
+#[allow(dead_code)]
 pub struct WatchHandle(Arc);
 
 impl WatchHandle {
diff --git a/src/fastfield/mod.rs b/src/fastfield/mod.rs
index 44b01ad78a..e0689650b7 100644
--- a/src/fastfield/mod.rs
+++ b/src/fastfield/mod.rs
@@ -79,7 +79,7 @@ mod tests {
     use std::ops::{Range, RangeInclusive};
     use std::path::Path;
 
-    use columnar::{Column, MonotonicallyMappableToU64, StrColumn};
+    use columnar::StrColumn;
     use common::{ByteCount, HasLen, TerminatingWrite};
     use once_cell::sync::Lazy;
     use rand::prelude::SliceRandom;
diff --git a/src/index/segment_id.rs b/src/index/segment_id.rs
index 5e2cf1b324..e66aa95a94 100644
--- a/src/index/segment_id.rs
+++ b/src/index/segment_id.rs
@@ -1,4 +1,4 @@
-use std::cmp::{Ord, Ordering};
+use std::cmp::Ordering;
 use std::error::Error;
 use std::fmt;
 use std::str::FromStr;
diff --git a/src/index/segment_reader.rs b/src/index/segment_reader.rs
index e6b098b84c..186d01358a 100644
--- a/src/index/segment_reader.rs
+++ b/src/index/segment_reader.rs
@@ -516,8 +516,8 @@ impl fmt::Debug for SegmentReader {
 mod test {
     use super::*;
     use crate::index::Index;
-    use crate::schema::{Schema, SchemaBuilder, Term, STORED, TEXT};
-    use crate::{DocId, IndexWriter};
+    use crate::schema::{SchemaBuilder, Term, STORED, TEXT};
+    use crate::IndexWriter;
 
     #[test]
     fn test_merge_field_meta_data_same() {
diff --git a/src/indexer/flat_map_with_buffer.rs b/src/indexer/flat_map_with_buffer.rs
index 88b509cdbe..9f2a1924ef 100644
--- a/src/indexer/flat_map_with_buffer.rs
+++ b/src/indexer/flat_map_with_buffer.rs
@@ -22,6 +22,7 @@ where
     }
 }
 
+#[allow(dead_code)]
 pub trait FlatMapWithBufferIter: Iterator {
     /// Function similar to `flat_map`, but allows reusing a shared `Vec`.
    fn flat_map_with_buffer(self, fill_buffer: F) -> FlatMapWithBuffer
diff --git a/src/indexer/log_merge_policy.rs b/src/indexer/log_merge_policy.rs
index b35b489c5e..726deb578a 100644
--- a/src/indexer/log_merge_policy.rs
+++ b/src/indexer/log_merge_policy.rs
@@ -145,7 +145,6 @@ mod tests {
     use super::*;
     use crate::index::SegmentMetaInventory;
-    use crate::indexer::merge_policy::MergePolicy;
     use crate::schema::INDEXED;
     use crate::{schema, SegmentId};
diff --git a/src/indexer/merge_policy.rs b/src/indexer/merge_policy.rs
index d1f4dd6a66..4215caaac8 100644
--- a/src/indexer/merge_policy.rs
+++ b/src/indexer/merge_policy.rs
@@ -39,7 +39,6 @@ impl MergePolicy for NoMergePolicy {
 pub mod tests {
 
     use super::*;
-    use crate::index::{SegmentId, SegmentMeta};
 
     /// `MergePolicy` useful for test purposes.
     ///
diff --git a/src/indexer/merger.rs b/src/indexer/merger.rs
index f203d8c581..4cc4557136 100644
--- a/src/indexer/merger.rs
+++ b/src/indexer/merger.rs
@@ -576,7 +576,7 @@ impl IndexMerger {
             //
             // Overall the reliable way to know if we have actual frequencies loaded or not
             // is to check whether the actual decoded array is empty or not.
-            if has_term_freq != !postings.block_cursor.freqs().is_empty() {
+            if has_term_freq == postings.block_cursor.freqs().is_empty() {
                 return Err(DataCorruption::comment_only(
                     "Term freqs are inconsistent across segments",
                 )
diff --git a/src/postings/compression/mod.rs b/src/postings/compression/mod.rs
index f8a8a3193e..3928be51ba 100644
--- a/src/postings/compression/mod.rs
+++ b/src/postings/compression/mod.rs
@@ -14,7 +14,6 @@ pub fn compressed_block_size(num_bits: u8) -> usize {
 pub struct BlockEncoder {
     bitpacker: BitPacker4x,
     pub output: [u8; COMPRESSED_BLOCK_MAX_SIZE],
-    pub output_len: usize,
 }
 
 impl Default for BlockEncoder {
@@ -28,7 +27,6 @@ impl BlockEncoder {
         BlockEncoder {
             bitpacker: BitPacker4x::new(),
             output: [0u8; COMPRESSED_BLOCK_MAX_SIZE],
-            output_len: 0,
         }
     }
diff --git a/src/postings/skip.rs b/src/postings/skip.rs
index 1f5eb3577b..fe5a8df882 100644
--- a/src/postings/skip.rs
+++ b/src/postings/skip.rs
@@ -1,5 +1,3 @@
-use std::convert::TryInto;
-
 use crate::directory::OwnedBytes;
 use crate::postings::compression::{compressed_block_size, COMPRESSION_BLOCK_SIZE};
 use crate::query::Bm25Weight;
diff --git a/src/postings/term_info.rs b/src/postings/term_info.rs
index 4f3045d7f9..94e6403048 100644
--- a/src/postings/term_info.rs
+++ b/src/postings/term_info.rs
@@ -1,5 +1,4 @@
 use std::io;
-use std::iter::ExactSizeIterator;
 use std::ops::Range;
 
 use common::{BinarySerializable, FixedSize};
diff --git a/src/query/exist_query.rs b/src/query/exist_query.rs
index fac03293dd..f028ebaa9f 100644
--- a/src/query/exist_query.rs
+++ b/src/query/exist_query.rs
@@ -149,7 +149,7 @@ mod tests {
     use crate::query::exist_query::ExistsQuery;
     use crate::query::{BooleanQuery, RangeQuery};
     use crate::schema::{Facet, FacetOptions, Schema, FAST, INDEXED, STRING, TEXT};
-    use crate::{doc, Index, Searcher};
+    use crate::{Index, Searcher};
 
     #[test]
     fn test_exists_query_simple() -> crate::Result<()> {
diff --git a/src/query/fuzzy_query.rs b/src/query/fuzzy_query.rs
index 7b7379acd8..a2e3f2a6ba 100644
--- a/src/query/fuzzy_query.rs
+++ b/src/query/fuzzy_query.rs
@@ -84,7 +84,7 @@ pub struct FuzzyTermQuery {
     distance: u8,
     /// Should a transposition cost 1 or 2?
     transposition_cost_one: bool,
-    ///
+    /// is a starts with query
     prefix: bool,
 }
diff --git a/src/query/range_query/range_query.rs b/src/query/range_query/range_query.rs
index 056dc23b6f..ac2327c7a5 100644
--- a/src/query/range_query/range_query.rs
+++ b/src/query/range_query/range_query.rs
@@ -477,7 +477,7 @@ mod tests {
     use crate::schema::{
         Field, IntoIpv6Addr, Schema, TantivyDocument, FAST, INDEXED, STORED, TEXT,
     };
-    use crate::{doc, Index, IndexWriter};
+    use crate::{Index, IndexWriter};
 
     #[test]
     fn test_range_query_simple() -> crate::Result<()> {
diff --git a/src/query/term_query/term_query.rs b/src/query/term_query/term_query.rs
index 832d07895b..fed4ca4813 100644
--- a/src/query/term_query/term_query.rs
+++ b/src/query/term_query/term_query.rs
@@ -139,7 +139,7 @@ mod tests {
     use crate::collector::{Count, TopDocs};
     use crate::query::{Query, QueryParser, TermQuery};
     use crate::schema::{IndexRecordOption, IntoIpv6Addr, Schema, INDEXED, STORED};
-    use crate::{doc, Index, IndexWriter, Term};
+    use crate::{Index, IndexWriter, Term};
 
     #[test]
     fn search_ip_test() {
diff --git a/src/query/vec_docset.rs b/src/query/vec_docset.rs
index f4e1b505f6..5c87a71ce7 100644
--- a/src/query/vec_docset.rs
+++ b/src/query/vec_docset.rs
@@ -53,8 +53,7 @@ impl HasLen for VecDocSet {
 pub mod tests {
 
     use super::*;
-    use crate::docset::{DocSet, COLLECT_BLOCK_BUFFER_LEN};
-    use crate::DocId;
+    use crate::docset::COLLECT_BLOCK_BUFFER_LEN;
 
     #[test]
     pub fn test_vec_postings() {
diff --git a/src/schema/document/de.rs b/src/schema/document/de.rs
index 9e56bc1cfb..e38486ab18 100644
--- a/src/schema/document/de.rs
+++ b/src/schema/document/de.rs
@@ -819,7 +819,6 @@ mod tests {
     use crate::schema::document::existing_type_impls::JsonObjectIter;
     use crate::schema::document::se::BinaryValueSerializer;
     use crate::schema::document::{ReferenceValue, ReferenceValueLeaf};
-    use crate::schema::OwnedValue;
 
     fn serialize_value<'a>(value: ReferenceValue<'a, &'a serde_json::Value>) -> Vec {
         let mut writer = Vec::new();
diff --git a/src/schema/document/default_document.rs b/src/schema/document/default_document.rs
index eda44ee8e2..fcf374dfed 100644
--- a/src/schema/document/default_document.rs
+++ b/src/schema/document/default_document.rs
@@ -256,7 +256,6 @@ impl DocParsingError {
 
 #[cfg(test)]
 mod tests {
-    use crate::schema::document::default_document::TantivyDocument;
     use crate::schema::*;
 
     #[test]
diff --git a/src/schema/document/owned_value.rs b/src/schema/document/owned_value.rs
index 3369dd9790..3dc7a1f67d 100644
--- a/src/schema/document/owned_value.rs
+++ b/src/schema/document/owned_value.rs
@@ -443,9 +443,7 @@ impl<'a> Iterator for ObjectMapIter<'a> {
 mod tests {
     use super::*;
     use crate::schema::{BytesOptions, Schema};
-    use crate::time::format_description::well_known::Rfc3339;
-    use crate::time::OffsetDateTime;
-    use crate::{DateTime, Document, TantivyDocument};
+    use crate::{Document, TantivyDocument};
 
     #[test]
     fn test_parse_bytes_doc() {
diff --git a/src/schema/field_entry.rs b/src/schema/field_entry.rs
index 9fa643ca03..8d2f9b2309 100644
--- a/src/schema/field_entry.rs
+++ b/src/schema/field_entry.rs
@@ -136,7 +136,6 @@ impl FieldEntry {
 #[cfg(test)]
 mod tests {
-    use serde_json;
 
     use super::*;
     use crate::schema::{Schema, TextFieldIndexing, TEXT};
diff --git a/src/schema/schema.rs b/src/schema/schema.rs
index 9fec25c053..d3215a37c3 100644
--- a/src/schema/schema.rs
+++ b/src/schema/schema.rs
@@ -6,10 +6,8 @@
 use serde::de::{SeqAccess, Visitor};
 use serde::ser::SerializeSeq;
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 
-use super::ip_options::IpAddrOptions;
 use super::*;
 use crate::json_utils::split_json_path;
-use crate::schema::bytes_options::BytesOptions;
 use crate::TantivyError;
@@ -421,9 +419,7 @@ mod tests {
     use matches::{assert_matches, matches};
     use pretty_assertions::assert_eq;
-    use serde_json;
 
-    use crate::schema::document::Value;
     use crate::schema::field_type::ValueParsingError;
     use crate::schema::schema::DocParsingError::InvalidJson;
     use crate::schema::*;
diff --git a/src/schema/term.rs b/src/schema/term.rs
index 2bfac53e82..63e79922a0 100644
--- a/src/schema/term.rs
+++ b/src/schema/term.rs
@@ -1,4 +1,3 @@
-use std::convert::TryInto;
 use std::hash::{Hash, Hasher};
 use std::net::Ipv6Addr;
 use std::{fmt, str};
diff --git a/src/store/compression_lz4_block.rs b/src/store/compression_lz4_block.rs
index 0464510b8b..08ecd9e4f5 100644
--- a/src/store/compression_lz4_block.rs
+++ b/src/store/compression_lz4_block.rs
@@ -1,4 +1,3 @@
-use core::convert::TryInto;
 use std::io::{self};
 use std::mem;
diff --git a/src/store/compressors.rs b/src/store/compressors.rs
index 89205c99d0..541855aa9a 100644
--- a/src/store/compressors.rs
+++ b/src/store/compressors.rs
@@ -2,12 +2,6 @@ use std::io;
 
 use serde::{Deserialize, Deserializer, Serialize};
 
-pub trait StoreCompressor {
-    fn compress(&self, uncompressed: &[u8], compressed: &mut Vec) -> io::Result<()>;
-    fn decompress(&self, compressed: &[u8], decompressed: &mut Vec) -> io::Result<()>;
-    fn get_compressor_id() -> u8;
-}
-
 /// Compressor can be used on `IndexSettings` to choose
 /// the compressor used to compress the doc store.
 ///
diff --git a/src/store/decompressors.rs b/src/store/decompressors.rs
index 2c3173ae29..4d0319aca8 100644
--- a/src/store/decompressors.rs
+++ b/src/store/decompressors.rs
@@ -4,12 +4,6 @@ use serde::{Deserialize, Serialize};
 
 use super::Compressor;
 
-pub trait StoreCompressor {
-    fn compress(&self, uncompressed: &[u8], compressed: &mut Vec) -> io::Result<()>;
-    fn decompress(&self, compressed: &[u8], decompressed: &mut Vec) -> io::Result<()>;
-    fn get_compressor_id() -> u8;
-}
-
 /// Decompressor is deserialized from the doc store footer, when opening an index.
 #[derive(Clone, Debug, Copy, PartialEq, Eq, Serialize, Deserialize)]
 pub enum Decompressor {
@@ -86,7 +80,6 @@ impl Decompressor {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::store::Compressor;
 
     #[test]
     fn compressor_decompressor_id_test() {
diff --git a/src/store/index/mod.rs b/src/store/index/mod.rs
index 9e657b31b1..13c252e924 100644
--- a/src/store/index/mod.rs
+++ b/src/store/index/mod.rs
@@ -41,7 +41,7 @@ mod tests {
 
     use std::io;
 
-    use proptest::strategy::{BoxedStrategy, Strategy};
+    use proptest::prelude::*;
 
     use super::{SkipIndex, SkipIndexBuilder};
     use crate::directory::OwnedBytes;
@@ -227,8 +227,6 @@ mod tests {
         }
     }
 
-    use proptest::prelude::*;
-
     proptest! {
         #![proptest_config(ProptestConfig::with_cases(20))]
         #[test]
diff --git a/sstable/benches/ord_to_term.rs b/sstable/benches/ord_to_term.rs
index f29b719b34..9285af2e4e 100644
--- a/sstable/benches/ord_to_term.rs
+++ b/sstable/benches/ord_to_term.rs
@@ -3,7 +3,7 @@ use std::sync::Arc;
 use common::file_slice::FileSlice;
 use common::OwnedBytes;
 use criterion::{criterion_group, criterion_main, Criterion};
-use tantivy_sstable::{self, Dictionary, MonotonicU64SSTable};
+use tantivy_sstable::{Dictionary, MonotonicU64SSTable};
 
 fn make_test_sstable(suffix: &str) -> FileSlice {
     let mut builder = Dictionary::::builder(Vec::new()).unwrap();
diff --git a/sstable/benches/stream_bench.rs b/sstable/benches/stream_bench.rs
index 2b29a5e993..d8df433e9a 100644
--- a/sstable/benches/stream_bench.rs
+++ b/sstable/benches/stream_bench.rs
@@ -5,7 +5,7 @@
 use common::file_slice::FileSlice;
 use criterion::{criterion_group, criterion_main, Criterion};
 use rand::rngs::StdRng;
 use rand::{Rng, SeedableRng};
-use tantivy_sstable::{self, Dictionary, MonotonicU64SSTable};
+use tantivy_sstable::{Dictionary, MonotonicU64SSTable};
 
 const CHARSET: &'static [u8] = b"abcdefghij";
diff --git a/stacker/src/expull.rs b/stacker/src/expull.rs
index cbda3b8e94..28a14f2147 100644
--- a/stacker/src/expull.rs
+++ b/stacker/src/expull.rs
@@ -151,7 +151,6 @@ mod tests {
     use common::{read_u32_vint, write_u32_vint};
 
-    use super::super::MemoryArena;
     use super::*;
 
     #[test]