Skip to content

Commit

Permalink
change(deps): bump rocksdb from 0.18.0 to 0.19.0 (#5071)
Browse files · Browse the repository at this point in the history
* cargo upgrade --workspace rocksdb

* Add a deny.toml exception for bindgen 0.59

* Move `valid()` and `status()` methods to raw iterators

* Update some outdated comments

* Panic on iterator failures

We might want to change this if there are any common failure modes.

* allow(clippy::unwrap_in_result) in some methods
  • Loading branch information
teor2345 authored Sep 5, 2022
1 parent 524e9ab commit fc624d0
Show file tree
Hide file tree
Showing 7 changed files with 74 additions and 27 deletions.
31 changes: 25 additions & 6 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions deny.toml
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,10 @@ skip-tree = [

# wait for primitive-types to upgrade
{ name = "proc-macro-crate", version = "=0.1.5" },

# wait for zcash_script to upgrade bindgen
# https://github.com/ZcashFoundation/zcash_script/issues/40
{ name = "bindgen", version = "=0.59.2" },
]

# This section is considered when running `cargo deny check sources`.
Expand Down
2 changes: 1 addition & 1 deletion zebra-state/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ metrics = "0.18.1"
mset = "0.1.0"
regex = "1.6.0"
rlimit = "0.8.3"
rocksdb = { version = "0.18.0", default_features = false, features = ["lz4"] }
rocksdb = { version = "0.19.0", default_features = false, features = ["lz4"] }
serde = { version = "1.0.144", features = ["serde_derive"] }
tempfile = "3.3.0"
thiserror = "1.0.33"
Expand Down
4 changes: 4 additions & 0 deletions zebra-state/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -714,6 +714,7 @@ impl Service<Request> for StateService {
let timer = CodeTimer::start();

// TODO: move this work into the future, like Block and Transaction?
// move disk reads to a blocking thread (#2188)
let rsp = Ok(Response::Depth(self.best_depth(hash)));

// The work is all done, the future just returns the result.
Expand All @@ -734,6 +735,7 @@ impl Service<Request> for StateService {
let timer = CodeTimer::start();

// TODO: move this work into the future, like Block and Transaction?
// move disk reads to a blocking thread (#2188)
let rsp = Ok(Response::Tip(self.best_tip()));

// The work is all done, the future just returns the result.
Expand All @@ -752,6 +754,7 @@ impl Service<Request> for StateService {
let timer = CodeTimer::start();

// TODO: move this work into the future, like Block and Transaction?
// move disk reads to a blocking thread (#2188)
let rsp = Ok(Response::BlockLocator(
self.block_locator().unwrap_or_default(),
));
Expand Down Expand Up @@ -836,6 +839,7 @@ impl Service<Request> for StateService {

let fut = self.pending_utxos.queue(outpoint);

// TODO: move disk reads (in `any_utxo()`) to a blocking thread (#2188)
if let Some(utxo) = self.any_utxo(&outpoint) {
self.pending_utxos.respond(&outpoint, utxo);
}
Expand Down
48 changes: 30 additions & 18 deletions zebra-state/src/service/finalized_state/disk_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -210,10 +210,10 @@ impl ReadDisk for DiskDb {
// Empty column families return invalid forward iterators.
//
// Checking iterator validity does not seem to cause database hangs.
!self
.db
.iterator_cf(cf, rocksdb::IteratorMode::Start)
.valid()
let iterator = self.db.iterator_cf(cf, rocksdb::IteratorMode::Start);
let raw_iterator: rocksdb::DBRawIteratorWithThreadMode<DB> = iterator.into();

!raw_iterator.valid()
}

#[allow(clippy::unwrap_in_result)]
Expand All @@ -228,12 +228,10 @@ impl ReadDisk for DiskDb {
// We use `get_pinned_cf` to avoid taking ownership of the serialized
// value, because we're going to deserialize it anyways, which avoids an
// extra copy
//
// TODO: move disk reads to a blocking thread (#2188)
let value_bytes = self
.db
.get_pinned_cf(cf, key_bytes)
.expect("expected that disk errors would not occur");
.expect("unexpected database failure");

value_bytes.map(V::from_bytes)
}
Expand All @@ -247,14 +245,13 @@ impl ReadDisk for DiskDb {

// We use `get_pinned_cf` to avoid taking ownership of the serialized
// value, because we don't use the value at all. This avoids an extra copy.
//
// TODO: move disk reads to a blocking thread (#2188)
self.db
.get_pinned_cf(cf, key_bytes)
.expect("expected that disk errors would not occur")
.expect("unexpected database failure")
.is_some()
}

#[allow(clippy::unwrap_in_result)]
fn zs_first_key_value<C, K, V>(&self, cf: &C) -> Option<(K, V)>
where
C: rocksdb::AsColumnFamilyRef,
Expand All @@ -264,10 +261,14 @@ impl ReadDisk for DiskDb {
// Reading individual values from iterators does not seem to cause database hangs.
self.db
.iterator_cf(cf, rocksdb::IteratorMode::Start)
.next()
.map(|(key_bytes, value_bytes)| (K::from_bytes(key_bytes), V::from_bytes(value_bytes)))
.next()?
.map(|(key_bytes, value_bytes)| {
Some((K::from_bytes(key_bytes), V::from_bytes(value_bytes)))
})
.expect("unexpected database failure")
}

#[allow(clippy::unwrap_in_result)]
fn zs_last_key_value<C, K, V>(&self, cf: &C) -> Option<(K, V)>
where
C: rocksdb::AsColumnFamilyRef,
Expand All @@ -277,10 +278,14 @@ impl ReadDisk for DiskDb {
// Reading individual values from iterators does not seem to cause database hangs.
self.db
.iterator_cf(cf, rocksdb::IteratorMode::End)
.next()
.map(|(key_bytes, value_bytes)| (K::from_bytes(key_bytes), V::from_bytes(value_bytes)))
.next()?
.map(|(key_bytes, value_bytes)| {
Some((K::from_bytes(key_bytes), V::from_bytes(value_bytes)))
})
.expect("unexpected database failure")
}

#[allow(clippy::unwrap_in_result)]
fn zs_next_key_value_from<C, K, V>(&self, cf: &C, lower_bound: &K) -> Option<(K, V)>
where
C: rocksdb::AsColumnFamilyRef,
Expand All @@ -293,10 +298,14 @@ impl ReadDisk for DiskDb {
// Reading individual values from iterators does not seem to cause database hangs.
self.db
.iterator_cf(cf, from)
.next()
.map(|(key_bytes, value_bytes)| (K::from_bytes(key_bytes), V::from_bytes(value_bytes)))
.next()?
.map(|(key_bytes, value_bytes)| {
Some((K::from_bytes(key_bytes), V::from_bytes(value_bytes)))
})
.expect("unexpected database failure")
}

#[allow(clippy::unwrap_in_result)]
fn zs_prev_key_value_back_from<C, K, V>(&self, cf: &C, upper_bound: &K) -> Option<(K, V)>
where
C: rocksdb::AsColumnFamilyRef,
Expand All @@ -309,8 +318,11 @@ impl ReadDisk for DiskDb {
// Reading individual values from iterators does not seem to cause database hangs.
self.db
.iterator_cf(cf, from)
.next()
.map(|(key_bytes, value_bytes)| (K::from_bytes(key_bytes), V::from_bytes(value_bytes)))
.next()?
.map(|(key_bytes, value_bytes)| {
Some((K::from_bytes(key_bytes), V::from_bytes(value_bytes)))
})
.expect("unexpected database failure")
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,11 @@ use zebra_chain::{
};

use crate::{
service::finalized_state::{disk_db::DiskDb, disk_format::tests::KV, FinalizedState},
service::finalized_state::{
disk_db::{DiskDb, DB},
disk_format::tests::KV,
FinalizedState,
},
Config,
};

Expand Down Expand Up @@ -129,6 +133,7 @@ fn snapshot_raw_rocksdb_column_family_data(db: &DiskDb, original_cf_names: &[Str
// The default raw data serialization is very verbose, so we hex-encode the bytes.
let cf_data: Vec<KV> = cf_iter
.by_ref()
.map(|result| result.expect("unexpected database error"))
.map(|(key, value)| KV::new(key, value))
.collect();

Expand All @@ -144,8 +149,10 @@ fn snapshot_raw_rocksdb_column_family_data(db: &DiskDb, original_cf_names: &[Str
insta::assert_ron_snapshot!(format!("{}_raw_data", cf_name), cf_data);
}

let raw_cf_iter: rocksdb::DBRawIteratorWithThreadMode<DB> = cf_iter.into();

assert_eq!(
cf_iter.status(),
raw_cf_iter.status(),
Ok(()),
"unexpected column family iterator error",
);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -462,6 +462,7 @@ fn snapshot_transparent_address_data(state: &FinalizedState, height: u32) {
.count();

let addresses: Vec<transparent::Address> = addresses
.map(|result| result.expect("unexpected database error"))
.map(|(key, _value)| transparent::Address::from_bytes(key))
.collect();

Expand Down

0 comments on commit fc624d0

Please sign in to comment.