Update dependencies to get rid of the yanked deps #4994

Merged · 10 commits · Dec 15, 2023
256 changes: 199 additions & 57 deletions Cargo.lock

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions Cargo.toml
@@ -120,12 +120,12 @@ fnv = "1"
fs2 = "0.4"
futures = "0.3"
hex = "0.4"
hyper = "0.14"
hyper = "1"
itertools = "0.10"
lazy_static = "1"
libsecp256k1 = "0.7"
log = "0.4"
lru = "0.7"
lru = "0.9"
maplit = "1"
num_cpus = "1"
parking_lot = "0.12"
@@ -143,7 +143,7 @@ rusqlite = { version = "0.28", features = ["bundled"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde_repr = "0.1"
serde_yaml = "0.8"
serde_yaml = "0.9"
sha2 = "0.9"
slog = { version = "2", features = ["max_level_trace", "release_max_level_trace", "nested-values"] }
slog-async = "2"
4 changes: 3 additions & 1 deletion beacon_node/beacon_chain/src/beacon_proposer_cache.rs
@@ -14,13 +14,15 @@ use lru::LruCache;
use smallvec::SmallVec;
use state_processing::state_advance::partial_state_advance;
use std::cmp::Ordering;
use std::num::NonZeroUsize;
use types::non_zero_usize::new_non_zero_usize;
use types::{
BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot,
Unsigned,
};

/// The number of sets of proposer indices that should be cached.
const CACHE_SIZE: usize = 16;
const CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16);

/// This value is fairly unimportant, it's used to avoid heap allocations. The result of it being
/// incorrect is non-substantial from a consensus perspective (and probably also from a
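The `types::non_zero_usize::new_non_zero_usize` helper that this PR uses for every cache-size constant is not shown in the diff. A plausible sketch of such a const constructor (an assumption about its shape, not the actual Lighthouse implementation) looks like this:

use std::num::NonZeroUsize;

// Hypothetical stand-in for `types::non_zero_usize::new_non_zero_usize`: a const
// constructor that fails at compile time if given zero, so cache-size constants can
// stay `const` even though `NonZeroUsize::new` returns an `Option`.
pub const fn new_non_zero_usize(x: usize) -> NonZeroUsize {
    match NonZeroUsize::new(x) {
        Some(n) => n,
        None => panic!("expected a non-zero usize"),
    }
}

const CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16);

fn main() {
    assert_eq!(CACHE_SIZE.get(), 16);
}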
7 changes: 5 additions & 2 deletions beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -17,6 +17,7 @@ use slog::{debug, error, Logger};
use slot_clock::SlotClock;
use std::fmt;
use std::fmt::Debug;
use std::num::NonZeroUsize;
use std::sync::Arc;
use task_executor::TaskExecutor;
use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments};
@@ -32,15 +33,17 @@ mod processing_cache;
mod state_lru_cache;

pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory};
use types::non_zero_usize::new_non_zero_usize;

/// The LRU Cache stores `PendingComponents` which can store up to
/// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So
/// the maximum size of a `PendingComponents` is ~ 0.787536 MB. Setting this
/// to 1024 means the maximum size of the cache is ~ 0.8 GB. But the cache
/// will target a size of less than 75% of capacity.
pub const OVERFLOW_LRU_CAPACITY: usize = 1024;
pub const OVERFLOW_LRU_CAPACITY: NonZeroUsize = new_non_zero_usize(1024);
/// Until tree-states is implemented, we can't store very many states in memory :(
pub const STATE_LRU_CAPACITY: usize = 2;
pub const STATE_LRU_CAPACITY_NON_ZERO: NonZeroUsize = new_non_zero_usize(2);
pub const STATE_LRU_CAPACITY: usize = STATE_LRU_CAPACITY_NON_ZERO.get();

/// This includes a cache for any blocks or blobs that have been received over gossip or RPC
/// and are awaiting more components before they can be imported. Additionally the
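A back-of-envelope check of the capacity comment in this file, using only the figures the comment itself quotes (a sketch, not a measurement):

// Figures taken from the comment: 6 blobs per block, ~0.131256 MB per BlobSidecar.
fn main() {
    let blob_mb = 0.131256_f64;
    let max_blobs_per_block = 6.0_f64;
    let pending_components_mb = blob_mb * max_blobs_per_block; // ~0.787536 MB per entry
    let cache_mb = pending_components_mb * 1024.0; // ~806 MB, i.e. ~0.8 GB at full capacity
    let target_mb = cache_mb * 0.75; // maintenance targets less than 75% of capacity
    println!("{pending_components_mb:.6} MB/entry, ~{cache_mb:.0} MB max, ~{target_mb:.0} MB target");
}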
@@ -42,6 +42,7 @@ use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use ssz_types::{FixedVector, VariableList};
use std::num::NonZeroUsize;
use std::{collections::HashSet, sync::Arc};
use types::blob_sidecar::BlobIdentifier;
use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256};
@@ -288,7 +289,7 @@ struct Critical<T: BeaconChainTypes> {
}

impl<T: BeaconChainTypes> Critical<T> {
pub fn new(capacity: usize) -> Self {
pub fn new(capacity: NonZeroUsize) -> Self {
Self {
in_memory: LruCache::new(capacity),
store_keys: HashSet::new(),
@@ -329,7 +330,7 @@ impl<T: BeaconChainTypes> Critical<T> {
pending_components: PendingComponents<T::EthSpec>,
overflow_store: &OverflowStore<T>,
) -> Result<(), AvailabilityCheckError> {
if self.in_memory.len() == self.in_memory.cap() {
if self.in_memory.len() == self.in_memory.cap().get() {
// cache will overflow, must write lru entry to disk
if let Some((lru_key, lru_value)) = self.in_memory.pop_lru() {
overflow_store.persist_pending_components(lru_key, lru_value)?;
@@ -377,12 +378,12 @@ pub struct OverflowLRUCache<T: BeaconChainTypes> {
/// Mutex to guard maintenance methods which move data between disk and memory
maintenance_lock: Mutex<()>,
/// The capacity of the LRU cache
capacity: usize,
capacity: NonZeroUsize,
}

impl<T: BeaconChainTypes> OverflowLRUCache<T> {
pub fn new(
capacity: usize,
capacity: NonZeroUsize,
beacon_store: BeaconStore<T>,
spec: ChainSpec,
) -> Result<Self, AvailabilityCheckError> {
@@ -514,7 +515,7 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
/// maintain the cache
pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> {
// ensure memory usage is below threshold
let threshold = self.capacity * 3 / 4;
let threshold = self.capacity.get() * 3 / 4;
self.maintain_threshold(threshold, cutoff_epoch)?;
// clean up any keys on the disk that shouldn't be there
self.prune_disk(cutoff_epoch)?;
@@ -753,6 +754,7 @@ mod test {
use std::ops::AddAssign;
use store::{HotColdDB, ItemStore, LevelDB, StoreConfig};
use tempfile::{tempdir, TempDir};
use types::non_zero_usize::new_non_zero_usize;
use types::{ChainSpec, ExecPayload, MinimalEthSpec};

const LOW_VALIDATOR_COUNT: usize = 32;
@@ -974,8 +976,9 @@
let harness = get_deneb_chain(log.clone(), &chain_db_path).await;
let spec = harness.spec.clone();
let test_store = harness.chain.store.clone();
let capacity_non_zero = new_non_zero_usize(capacity);
let cache = Arc::new(
OverflowLRUCache::<T>::new(capacity, test_store, spec.clone())
OverflowLRUCache::<T>::new(capacity_non_zero, test_store, spec.clone())
.expect("should create cache"),
);
(harness, cache, chain_db_path)
@@ -1477,7 +1480,7 @@

// create a new cache with the same store
let recovered_cache = OverflowLRUCache::<T>::new(
capacity,
new_non_zero_usize(capacity),
harness.chain.store.clone(),
harness.chain.spec.clone(),
)
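The mechanical edits in this file follow from the lru 0.7 to 0.9 API: `LruCache::new` now takes a `NonZeroUsize`, and `cap()` returns one, hence the added `.get()` calls. A minimal sketch of the pattern, with illustrative key and value types rather than the real cache entries:

use lru::LruCache;
use std::num::NonZeroUsize;

fn main() {
    // lru 0.9: the capacity is a NonZeroUsize rather than a plain usize.
    let capacity = NonZeroUsize::new(1024).expect("capacity must be non-zero");
    let mut cache: LruCache<u64, String> = LruCache::new(capacity);

    cache.put(1, "pending".to_string());

    // `cap()` also returns NonZeroUsize, so comparisons against `len()` need `.get()`,
    // mirroring the eviction check in `put_pending_components` above.
    if cache.len() == cache.cap().get() {
        // Evict the least-recently-used entry before inserting the new one.
        let _evicted = cache.pop_lru();
    }
}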
@@ -1,7 +1,7 @@
use crate::block_verification_types::AsBlock;
use crate::{
block_verification_types::BlockImportData,
data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY},
data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY_NON_ZERO},
eth1_finalization_cache::Eth1FinalizationData,
AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome,
};
@@ -61,7 +61,7 @@ pub struct StateLRUCache<T: BeaconChainTypes> {
impl<T: BeaconChainTypes> StateLRUCache<T> {
pub fn new(store: BeaconStore<T>, spec: ChainSpec) -> Self {
Self {
states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY)),
states: RwLock::new(LruCache::new(STATE_LRU_CAPACITY_NON_ZERO)),
store,
spec,
}
8 changes: 5 additions & 3 deletions beacon_node/beacon_chain/src/pre_finalization_cache.rs
@@ -3,11 +3,13 @@ use itertools::process_results;
use lru::LruCache;
use parking_lot::Mutex;
use slog::debug;
use std::num::NonZeroUsize;
use std::time::Duration;
use types::non_zero_usize::new_non_zero_usize;
use types::Hash256;

const BLOCK_ROOT_CACHE_LIMIT: usize = 512;
const LOOKUP_LIMIT: usize = 8;
const BLOCK_ROOT_CACHE_LIMIT: NonZeroUsize = new_non_zero_usize(512);
const LOOKUP_LIMIT: NonZeroUsize = new_non_zero_usize(8);
const METRICS_TIMEOUT: Duration = Duration::from_millis(100);

/// Cache for rejecting attestations to blocks from before finalization.
@@ -78,7 +80,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

// 3. Check the network with a single block lookup.
cache.in_progress_lookups.put(block_root, ());
if cache.in_progress_lookups.len() == LOOKUP_LIMIT {
if cache.in_progress_lookups.len() == LOOKUP_LIMIT.get() {
// NOTE: we expect this to occur sometimes if a lot of blocks that we look up fail to be
// imported for reasons other than being pre-finalization. The cache will eventually
// self-repair in this case by replacing old entries with new ones until all the failed
4 changes: 3 additions & 1 deletion beacon_node/execution_layer/src/engines.rs
@@ -8,17 +8,19 @@ use crate::HttpJsonRpc;
use lru::LruCache;
use slog::{debug, error, info, warn, Logger};
use std::future::Future;
use std::num::NonZeroUsize;
use std::sync::Arc;
use std::time::Duration;
use task_executor::TaskExecutor;
use tokio::sync::{watch, Mutex, RwLock};
use tokio_stream::wrappers::WatchStream;
use types::non_zero_usize::new_non_zero_usize;
use types::ExecutionBlockHash;

/// The number of payload IDs that will be stored for each `Engine`.
///
/// Since the size of each value is small (~800 bytes) a large number is used for safety.
const PAYLOAD_ID_LRU_CACHE_SIZE: usize = 512;
const PAYLOAD_ID_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(512);
const CACHED_ENGINE_CAPABILITIES_AGE_LIMIT: Duration = Duration::from_secs(900); // 15 minutes

/// Stores the remembered state of a engine.
4 changes: 3 additions & 1 deletion beacon_node/execution_layer/src/lib.rs
@@ -29,6 +29,7 @@ use std::collections::HashMap;
use std::fmt;
use std::future::Future;
use std::io::Write;
use std::num::NonZeroUsize;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
@@ -42,6 +43,7 @@ use tokio_stream::wrappers::WatchStream;
use tree_hash::TreeHash;
use types::beacon_block_body::KzgCommitments;
use types::builder_bid::BuilderBid;
use types::non_zero_usize::new_non_zero_usize;
use types::payload::BlockProductionVersion;
use types::{
AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock,
@@ -68,7 +70,7 @@ pub const DEFAULT_JWT_FILE: &str = "jwt.hex";

/// Each time the `ExecutionLayer` retrieves a block from an execution node, it stores that block
/// in an LRU cache to avoid redundant lookups. This is the size of that cache.
const EXECUTION_BLOCKS_LRU_CACHE_SIZE: usize = 128;
const EXECUTION_BLOCKS_LRU_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(128);

/// A fee recipient address for use during block production. Only used as a very last resort if
/// there is no address provided by the user.
4 changes: 3 additions & 1 deletion beacon_node/execution_layer/src/payload_cache.rs
@@ -1,10 +1,12 @@
use eth2::types::FullPayloadContents;
use lru::LruCache;
use parking_lot::Mutex;
use std::num::NonZeroUsize;
use tree_hash::TreeHash;
use types::non_zero_usize::new_non_zero_usize;
use types::{EthSpec, Hash256};

pub const DEFAULT_PAYLOAD_CACHE_SIZE: usize = 10;
pub const DEFAULT_PAYLOAD_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(10);

/// A cache mapping execution payloads by tree hash roots.
pub struct PayloadCache<T: EthSpec> {
14 changes: 5 additions & 9 deletions beacon_node/http_api/src/block_rewards.rs
@@ -3,13 +3,13 @@ use eth2::lighthouse::{BlockReward, BlockRewardsQuery};
use lru::LruCache;
use slog::{debug, warn, Logger};
use state_processing::BlockReplayer;
use std::num::NonZeroUsize;
use std::sync::Arc;
use types::beacon_block::BlindedBeaconBlock;
use warp_utils::reject::{
beacon_chain_error, beacon_state_error, custom_bad_request, custom_server_error,
};
use types::non_zero_usize::new_non_zero_usize;
use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request};

const STATE_CACHE_SIZE: usize = 2;
const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2);

/// Fetch block rewards for blocks from the canonical chain.
pub fn get_block_rewards<T: BeaconChainTypes>(
@@ -164,11 +164,7 @@ pub fn compute_block_rewards<T: BeaconChainTypes>(
.build_all_committee_caches(&chain.spec)
.map_err(beacon_state_error)?;

state_cache
.get_or_insert((parent_root, block.slot()), || state)
.ok_or_else(|| {
custom_server_error("LRU cache insert should always succeed".into())
})?
state_cache.get_or_insert((parent_root, block.slot()), || state)
};

// Compute block reward.
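The dropped `ok_or_else`/`custom_server_error` branch appears to reflect another lru API change: with a `NonZeroUsize` capacity a zero-sized cache can no longer exist, so `get_or_insert` returns the cached value directly instead of an `Option`. A small illustrative sketch with hypothetical key and value types:

use lru::LruCache;
use std::num::NonZeroUsize;

fn main() {
    let mut cache: LruCache<u64, Vec<u8>> =
        LruCache::new(NonZeroUsize::new(2).expect("capacity must be non-zero"));

    // lru 0.9: `get_or_insert` returns `&V` directly, so the old
    // "LRU cache insert should always succeed" error path is simply gone.
    let value = cache.get_or_insert(42, || vec![0u8; 32]);
    assert_eq!(value.len(), 32);
}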
6 changes: 5 additions & 1 deletion beacon_node/lighthouse_network/src/discovery/mod.rs
@@ -35,6 +35,7 @@ pub use libp2p::{
use lru::LruCache;
use slog::{crit, debug, error, info, trace, warn};
use ssz::Encode;
use std::num::NonZeroUsize;
use std::{
collections::{HashMap, VecDeque},
net::{IpAddr, SocketAddr},
@@ -49,6 +50,7 @@ use types::{EnrForkId, EthSpec};

mod subnet_predicate;
pub use subnet_predicate::subnet_predicate;
use types::non_zero_usize::new_non_zero_usize;

/// Local ENR storage filename.
pub const ENR_FILENAME: &str = "enr.dat";
@@ -70,6 +72,8 @@ const MAX_SUBNETS_IN_QUERY: usize = 3;
pub const FIND_NODE_QUERY_CLOSEST_PEERS: usize = 16;
/// The threshold for updating `min_ttl` on a connected peer.
const DURATION_DIFFERENCE: Duration = Duration::from_millis(1);
/// The capacity of the Discovery ENR cache.
const ENR_CACHE_CAPACITY: NonZeroUsize = new_non_zero_usize(50);

/// A query has completed. This result contains a mapping of discovered peer IDs to the `min_ttl`
/// of the peer if it is specified.
@@ -318,7 +322,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
};

Ok(Self {
cached_enrs: LruCache::new(50),
cached_enrs: LruCache::new(ENR_CACHE_CAPACITY),
network_globals,
find_peer_active: false,
queued_queries: VecDeque::with_capacity(10),
4 changes: 3 additions & 1 deletion beacon_node/network/src/network_beacon_processor/mod.rs
@@ -23,6 +23,7 @@ use parking_lot::Mutex;
use slog::{crit, debug, error, trace, Logger};
use slot_clock::{ManualSlotClock, SlotClock};
use std::collections::HashSet;
use std::num::NonZeroUsize;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
@@ -35,6 +36,7 @@ use types::*;

pub use sync_methods::ChainSegmentProcessId;
use types::blob_sidecar::FixedBlobSidecarList;
use types::non_zero_usize::new_non_zero_usize;

pub type Error<T> = TrySendError<BeaconWorkEvent<T>>;

@@ -44,7 +46,7 @@
mod tests;

pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1;
pub const DELAYED_PEER_CACHE_SIZE: usize = 16;
pub const DELAYED_PEER_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16);

/// Defines if and where we will store the SSZ files of invalid blocks.
#[derive(Clone)]
10 changes: 6 additions & 4 deletions beacon_node/store/src/config.rs
@@ -2,12 +2,14 @@ use crate::{DBColumn, Error, StoreItem};
use serde::{Deserialize, Serialize};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::num::NonZeroUsize;
use types::non_zero_usize::new_non_zero_usize;
use types::{EthSpec, MinimalEthSpec};

pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048;
pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192;
pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5;
pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: usize = 1;
pub const DEFAULT_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(5);
pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(1);
pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1;
pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0;

@@ -19,9 +21,9 @@ pub struct StoreConfig {
/// Flag indicating whether the `slots_per_restore_point` was set explicitly by the user.
pub slots_per_restore_point_set_explicitly: bool,
/// Maximum number of blocks to store in the in-memory block cache.
pub block_cache_size: usize,
pub block_cache_size: NonZeroUsize,
/// Maximum number of states from freezer database to store in the in-memory state cache.
pub historic_state_cache_size: usize,
pub historic_state_cache_size: NonZeroUsize,
/// Whether to compact the database on initialization.
pub compact_on_init: bool,
/// Whether to compact the database during database pruning.
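Switching the `StoreConfig` cache sizes to `NonZeroUsize` should be transparent to serde-based (de)serialization, since serde supports the `NonZero*` integer types natively and rejects an explicit zero at parse time. A minimal sketch, not the real `StoreConfig`, using serde_json purely for illustration:

use serde::{Deserialize, Serialize};
use std::num::NonZeroUsize;

#[derive(Serialize, Deserialize, Debug)]
struct CacheSizes {
    block_cache_size: NonZeroUsize,
    historic_state_cache_size: NonZeroUsize,
}

fn main() {
    // NonZeroUsize serializes as a plain number...
    let sizes: CacheSizes =
        serde_json::from_str(r#"{"block_cache_size":5,"historic_state_cache_size":1}"#).unwrap();
    println!("{sizes:?}");

    // ...and an explicit zero is rejected during deserialization.
    let zero = serde_json::from_str::<CacheSizes>(
        r#"{"block_cache_size":0,"historic_state_cache_size":1}"#,
    );
    assert!(zero.is_err());
}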
3 changes: 2 additions & 1 deletion beacon_node/store/src/hot_cold_store.rs
@@ -35,6 +35,7 @@ use state_processing::{
use std::cmp::min;
use std::convert::TryInto;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
@@ -85,7 +86,7 @@ struct BlockCache<E: EthSpec> {
}

impl<E: EthSpec> BlockCache<E> {
pub fn new(size: usize) -> Self {
pub fn new(size: NonZeroUsize) -> Self {
Self {
block_cache: LruCache::new(size),
blob_cache: LruCache::new(size),
3 changes: 2 additions & 1 deletion common/account_utils/src/validator_definitions.rs
@@ -367,7 +367,8 @@ impl ValidatorDefinitions {
pub fn save<P: AsRef<Path>>(&self, validators_dir: P) -> Result<(), Error> {
let config_path = validators_dir.as_ref().join(CONFIG_FILENAME);
let temp_path = validators_dir.as_ref().join(CONFIG_TEMP_FILENAME);
let bytes = serde_yaml::to_vec(self).map_err(Error::UnableToEncodeFile)?;
let mut bytes = vec![];
serde_yaml::to_writer(&mut bytes, self).map_err(Error::UnableToEncodeFile)?;

write_file_via_temporary(&config_path, &temp_path, &bytes)
.map_err(Error::UnableToWriteFile)?;
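The validator-definitions change is the serde_yaml 0.8 to 0.9 migration: `serde_yaml::to_vec` was removed, and serializing into an in-memory `Vec<u8>` via `to_writer`, as done here, is one of the usual replacements (`to_string` is the other). A brief sketch with a hypothetical struct:

use serde::Serialize;

#[derive(Serialize)]
struct Example {
    name: String,
    enabled: bool,
}

fn to_yaml_bytes(value: &Example) -> Result<Vec<u8>, serde_yaml::Error> {
    // serde_yaml 0.9 has no `to_vec`; write into an in-memory buffer instead.
    let mut bytes = Vec::new();
    serde_yaml::to_writer(&mut bytes, value)?;
    Ok(bytes)
}

fn main() {
    let example = Example {
        name: "validator".to_string(),
        enabled: true,
    };
    let yaml = to_yaml_bytes(&example).expect("serialization should succeed");
    println!("{}", String::from_utf8_lossy(&yaml));
}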