Skip to content

Commit

Permalink
feat(api): Make acceptable values cache lag configurable (#3028)
Browse files Browse the repository at this point in the history
## What ❔

Allows configuring the acceptable values cache lag (measured in the number
of L2 blocks). Increases the default value from 5 to 20 blocks.

## Why ❔

Currently, acceptable lag is hard-coded and is arguably too small at
times. It can lead to the values cache getting reset during Postgres
usage surges (e.g., when sealing an L1 batch).

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog
entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor
lint`.
  • Loading branch information
slowli authored Oct 16, 2024
1 parent 331fe87 commit 6747529
Show file tree
Hide file tree
Showing 11 changed files with 157 additions and 99 deletions.
1 change: 1 addition & 0 deletions core/bin/external_node/src/node_builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -378,6 +378,7 @@ impl ExternalNodeBuilder {
factory_deps_cache_size: self.config.optional.factory_deps_cache_size() as u64,
initial_writes_cache_size: self.config.optional.initial_writes_cache_size() as u64,
latest_values_cache_size: self.config.optional.latest_values_cache_size() as u64,
latest_values_max_block_lag: 20, // reasonable default
};
let max_vm_concurrency = self.config.optional.vm_concurrency_limit;
let tx_sender_layer = TxSenderLayer::new(
Expand Down
1 change: 1 addition & 0 deletions core/bin/zksync_server/src/node_builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -305,6 +305,7 @@ impl MainNodeBuilder {
factory_deps_cache_size: rpc_config.factory_deps_cache_size() as u64,
initial_writes_cache_size: rpc_config.initial_writes_cache_size() as u64,
latest_values_cache_size: rpc_config.latest_values_cache_size() as u64,
latest_values_max_block_lag: rpc_config.latest_values_max_block_lag(),
};

// On main node we always use master pool sink.
Expand Down
34 changes: 22 additions & 12 deletions core/lib/config/src/configs/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -189,6 +189,10 @@ pub struct Web3JsonRpcConfig {
/// Latest values cache size in MiBs. The default value is 128 MiB. If set to 0, the latest
/// values cache will be disabled.
pub latest_values_cache_size_mb: Option<usize>,
/// Maximum lag in the number of blocks for the latest values cache after which the cache is reset. Greater values
/// lead to increased cache update latency, i.e., fewer storage queries being processed by the cache. OTOH, smaller values
/// can lead to spurious resets when Postgres lags for whatever reason (e.g., when sealing L1 batches).
pub latest_values_max_block_lag: Option<NonZeroU32>,
/// Limit for fee history block range.
pub fee_history_limit: Option<u64>,
/// Maximum number of requests in a single batch JSON RPC request. Default is 500.
Expand Down Expand Up @@ -243,20 +247,21 @@ impl Web3JsonRpcConfig {
estimate_gas_acceptable_overestimation: 1000,
estimate_gas_optimize_search: false,
max_tx_size: 1000000,
vm_execution_cache_misses_limit: Default::default(),
vm_concurrency_limit: Default::default(),
factory_deps_cache_size_mb: Default::default(),
initial_writes_cache_size_mb: Default::default(),
latest_values_cache_size_mb: Default::default(),
fee_history_limit: Default::default(),
max_batch_request_size: Default::default(),
max_response_body_size_mb: Default::default(),
vm_execution_cache_misses_limit: None,
vm_concurrency_limit: None,
factory_deps_cache_size_mb: None,
initial_writes_cache_size_mb: None,
latest_values_cache_size_mb: None,
latest_values_max_block_lag: None,
fee_history_limit: None,
max_batch_request_size: None,
max_response_body_size_mb: None,
max_response_body_size_overrides_mb: MaxResponseSizeOverrides::empty(),
websocket_requests_per_minute_limit: Default::default(),
mempool_cache_update_interval: Default::default(),
mempool_cache_size: Default::default(),
websocket_requests_per_minute_limit: None,
mempool_cache_update_interval: None,
mempool_cache_size: None,
tree_api_url: None,
whitelisted_tokens_for_aa: Default::default(),
whitelisted_tokens_for_aa: vec![],
api_namespaces: None,
extended_api_tracing: false,
}
Expand Down Expand Up @@ -308,6 +313,11 @@ impl Web3JsonRpcConfig {
self.latest_values_cache_size_mb.unwrap_or(128) * super::BYTES_IN_MEGABYTE
}

/// Returns the maximum lag in the number of blocks for the latest values cache.
pub fn latest_values_max_block_lag(&self) -> u32 {
self.latest_values_max_block_lag.map_or(20, NonZeroU32::get)
}

/// Returns the limit for the fee history block range; defaults to 1,024 blocks
/// when not configured.
pub fn fee_history_limit(&self) -> u64 {
    // Default used when the config field is unset.
    const DEFAULT_FEE_HISTORY_LIMIT: u64 = 1_024;
    self.fee_history_limit.unwrap_or(DEFAULT_FEE_HISTORY_LIMIT)
}
Expand Down
1 change: 1 addition & 0 deletions core/lib/config/src/testonly.rs
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,7 @@ impl Distribution<configs::api::Web3JsonRpcConfig> for EncodeDist {
factory_deps_cache_size_mb: self.sample(rng),
initial_writes_cache_size_mb: self.sample(rng),
latest_values_cache_size_mb: self.sample(rng),
latest_values_max_block_lag: self.sample(rng),
fee_history_limit: self.sample(rng),
max_batch_request_size: self.sample(rng),
max_response_body_size_mb: self.sample(rng),
Expand Down
2 changes: 2 additions & 0 deletions core/lib/env_config/src/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@ mod tests {
factory_deps_cache_size_mb: Some(128),
initial_writes_cache_size_mb: Some(32),
latest_values_cache_size_mb: Some(256),
latest_values_max_block_lag: Some(NonZeroU32::new(50).unwrap()),
fee_history_limit: Some(100),
max_batch_request_size: Some(200),
max_response_body_size_mb: Some(10),
Expand Down Expand Up @@ -136,6 +137,7 @@ mod tests {
API_WEB3_JSON_RPC_FACTORY_DEPS_CACHE_SIZE_MB=128
API_WEB3_JSON_RPC_INITIAL_WRITES_CACHE_SIZE_MB=32
API_WEB3_JSON_RPC_LATEST_VALUES_CACHE_SIZE_MB=256
API_WEB3_JSON_RPC_LATEST_VALUES_MAX_BLOCK_LAG=50
API_WEB3_JSON_RPC_FEE_HISTORY_LIMIT=100
API_WEB3_JSON_RPC_MAX_BATCH_REQUEST_SIZE=200
API_WEB3_JSON_RPC_WEBSOCKET_REQUESTS_PER_MINUTE_LIMIT=10
Expand Down
8 changes: 7 additions & 1 deletion core/lib/protobuf_config/src/api.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use std::num::NonZeroUsize;
use std::num::{NonZeroU32, NonZeroUsize};

use anyhow::Context as _;
use zksync_config::configs::{api, ApiConfig};
Expand Down Expand Up @@ -113,6 +113,11 @@ impl ProtoRepr for proto::Web3JsonRpc {
.map(|x| x.try_into())
.transpose()
.context("latest_values_cache_size_mb")?,
latest_values_max_block_lag: self
.latest_values_max_block_lag
.map(|x| x.try_into())
.transpose()
.context("latest_values_max_block_lag")?,
fee_history_limit: self.fee_history_limit,
max_batch_request_size: self
.max_batch_request_size
Expand Down Expand Up @@ -183,6 +188,7 @@ impl ProtoRepr for proto::Web3JsonRpc {
latest_values_cache_size_mb: this
.latest_values_cache_size_mb
.map(|x| x.try_into().unwrap()),
latest_values_max_block_lag: this.latest_values_max_block_lag.map(NonZeroU32::get),
fee_history_limit: this.fee_history_limit,
max_batch_request_size: this.max_batch_request_size.map(|x| x.try_into().unwrap()),
max_response_body_size_mb: this
Expand Down
1 change: 1 addition & 0 deletions core/lib/protobuf_config/src/proto/config/api.proto
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ message Web3JsonRpc {
repeated string api_namespaces = 32; // Optional, if empty all namespaces are available
optional bool extended_api_tracing = 33; // optional, default false
optional bool estimate_gas_optimize_search = 34; // optional, default false
optional uint32 latest_values_max_block_lag = 35; // optional

reserved 15; reserved "l1_to_l2_transactions_compatibility_mode";
reserved 11; reserved "request_timeout";
Expand Down
7 changes: 7 additions & 0 deletions core/lib/state/src/cache/lru_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,13 @@ where
Self { name, cache }
}

/// Returns the capacity of this cache in bytes.
///
/// A disabled cache (no inner cache present) reports 0; if the inner cache
/// reports no capacity bound, `u64::MAX` is returned.
pub fn capacity(&self) -> u64 {
    let Some(cache) = &self.cache else {
        // Cache is disabled.
        return 0;
    };
    cache.policy().max_capacity().unwrap_or(u64::MAX)
}

/// Gets an entry and pulls it to the front if it exists.
pub fn get(&self, key: &K) -> Option<V> {
let latency = METRICS.latency[&(self.name, Method::Get)].start();
Expand Down
Loading

0 comments on commit 6747529

Please sign in to comment.