From 0b7cd0b50ead2406915528becad2fac8b7e48f85 Mon Sep 17 00:00:00 2001
From: Alex Ostrovski <slowli@users.noreply.github.com>
Date: Thu, 18 Jan 2024 17:04:36 +0200
Subject: [PATCH] feat(api): Make Web3 API server work with pruned data (#838)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Modifies the Web3 API server so that it works with pruned node data
during snapshot recovery.

## Why ❔

Part of the preparations for the external node (EN) code to support snapshot recovery.

## Checklist

- [x] PR title corresponds to the body of the PR (we generate changelog
entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
- [x] Spellcheck has been run via `cargo spellcheck
--cfg=./spellcheck/era.cfg --code 1`.
---
 .../storage_logs_dedup_migration/src/main.rs  |   3 +-
 ...3fe77e649e9d110043d2ac22005dd61cfcfb9.json |  22 +
 ...e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json |  20 -
 ...1788feea2fbb5682de09120dd109899510820.json |  28 -
 ...bed26c831730944237b74bed98869c83b3ca5.json |  28 +
 core/lib/dal/src/blocks_dal.rs                |  11 +-
 core/lib/dal/src/blocks_web3_dal.rs           | 147 ++--
 core/lib/dal/src/models/storage_block.rs      | 103 +--
 core/lib/dal/src/snapshot_recovery_dal.rs     |   2 +-
 core/lib/dal/src/storage_web3_dal.rs          | 184 ++++-
 core/lib/dal/src/tests/mod.rs                 |   2 +-
 core/lib/dal/src/transactions_web3_dal.rs     | 121 ++-
 core/lib/types/src/api/mod.rs                 |   2 +-
 core/lib/web3_decl/src/error.rs               |   6 +-
 .../src/api_server/execution_sandbox/apply.rs |  75 +-
 .../api_server/execution_sandbox/execute.rs   | 205 ++---
 .../src/api_server/execution_sandbox/mod.rs   |  99 ++-
 .../api_server/execution_sandbox/testonly.rs  |  74 ++
 .../src/api_server/execution_sandbox/tests.rs | 155 ++++
 .../api_server/execution_sandbox/validate.rs  |  13 +-
 .../src/api_server/tx_sender/mod.rs           | 164 ++--
 .../src/api_server/tx_sender/tests.rs         | 139 ++++
 .../api_server/web3/backend_jsonrpsee/mod.rs  |   8 +-
 .../backend_jsonrpsee/namespaces/debug.rs     |   7 +-
 .../web3/backend_jsonrpsee/namespaces/zks.rs  |   8 +-
 .../zksync_core/src/api_server/web3/mod.rs    | 140 ++--
 .../src/api_server/web3/namespaces/debug.rs   | 126 ++-
 .../src/api_server/web3/namespaces/en.rs      |   8 +-
 .../src/api_server/web3/namespaces/eth.rs     | 126 +--
 .../src/api_server/web3/namespaces/zks.rs     | 164 ++--
 .../zksync_core/src/api_server/web3/pubsub.rs |  26 +-
 .../zksync_core/src/api_server/web3/state.rs  | 121 ++-
 .../src/api_server/web3/tests/debug.rs        | 164 ++++
 .../src/api_server/web3/tests/filters.rs      | 261 +++++++
 .../src/api_server/web3/tests/mod.rs          | 731 ++++++++++++------
 .../src/api_server/web3/tests/snapshots.rs    |  30 +-
 .../src/api_server/web3/tests/vm.rs           | 237 ++++++
 .../src/api_server/web3/tests/ws.rs           | 182 ++++-
 .../zksync_core/src/consensus/storage/mod.rs  |   3 +-
 .../lib/zksync_core/src/consensus/testonly.rs |   1 +
 .../src/metadata_calculator/recovery/tests.rs |  88 +--
 .../lib/zksync_core/src/reorg_detector/mod.rs |   7 +-
 .../src/state_keeper/io/mempool.rs            |   3 +-
 .../src/state_keeper/io/tests/mod.rs          |   4 +-
 .../zksync_core/src/sync_layer/external_io.rs |   3 +-
 core/lib/zksync_core/src/sync_layer/tests.rs  |   9 +-
 core/lib/zksync_core/src/utils/testonly.rs    |  84 +-
 prover/Cargo.lock                             | 664 +++++++++++-----
 48 files changed, 3483 insertions(+), 1325 deletions(-)
 create mode 100644 core/lib/dal/.sqlx/query-3191f5ba16af041123ffa941ad63fe77e649e9d110043d2ac22005dd61cfcfb9.json
 delete mode 100644 core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json
 delete mode 100644 core/lib/dal/.sqlx/query-ac673a122962b57b0272df2d82a1788feea2fbb5682de09120dd109899510820.json
 create mode 100644 core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json
 create mode 100644 core/lib/zksync_core/src/api_server/execution_sandbox/testonly.rs
 create mode 100644 core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs
 create mode 100644 core/lib/zksync_core/src/api_server/tx_sender/tests.rs
 create mode 100644 core/lib/zksync_core/src/api_server/web3/tests/debug.rs
 create mode 100644 core/lib/zksync_core/src/api_server/web3/tests/filters.rs
 create mode 100644 core/lib/zksync_core/src/api_server/web3/tests/vm.rs

diff --git a/core/bin/storage_logs_dedup_migration/src/main.rs b/core/bin/storage_logs_dedup_migration/src/main.rs
index 733976b44e1e..179685c40022 100644
--- a/core/bin/storage_logs_dedup_migration/src/main.rs
+++ b/core/bin/storage_logs_dedup_migration/src/main.rs
@@ -57,7 +57,8 @@ async fn main() {
         .blocks_dal()
         .get_sealed_miniblock_number()
         .await
-        .unwrap();
+        .unwrap()
+        .expect("Cannot start migration for Postgres recovered from snapshot");
     println!(
         "Migration started for miniblock range {}..={}",
         opt.start_from_miniblock, sealed_miniblock
diff --git a/core/lib/dal/.sqlx/query-3191f5ba16af041123ffa941ad63fe77e649e9d110043d2ac22005dd61cfcfb9.json b/core/lib/dal/.sqlx/query-3191f5ba16af041123ffa941ad63fe77e649e9d110043d2ac22005dd61cfcfb9.json
new file mode 100644
index 000000000000..4290ba1f1b31
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-3191f5ba16af041123ffa941ad63fe77e649e9d110043d2ac22005dd61cfcfb9.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n            SELECT\n                timestamp\n            FROM\n                miniblocks\n            WHERE\n                (\n                    $1::BIGINT IS NULL\n                    AND l1_batch_number IS NULL\n                )\n                OR (l1_batch_number = $1::BIGINT)\n            ORDER BY\n                number\n            LIMIT\n                1\n            ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "timestamp",
+        "type_info": "Int8"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Int8"
+      ]
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "3191f5ba16af041123ffa941ad63fe77e649e9d110043d2ac22005dd61cfcfb9"
+}
diff --git a/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json b/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json
deleted file mode 100644
index 9b989a9ba251..000000000000
--- a/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n            SELECT\n                MAX(number) AS \"number\"\n            FROM\n                l1_batches\n            ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "number",
-        "type_info": "Int8"
-      }
-    ],
-    "parameters": {
-      "Left": []
-    },
-    "nullable": [
-      null
-    ]
-  },
-  "hash": "38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c"
-}
diff --git a/core/lib/dal/.sqlx/query-ac673a122962b57b0272df2d82a1788feea2fbb5682de09120dd109899510820.json b/core/lib/dal/.sqlx/query-ac673a122962b57b0272df2d82a1788feea2fbb5682de09120dd109899510820.json
deleted file mode 100644
index f96c6945623e..000000000000
--- a/core/lib/dal/.sqlx/query-ac673a122962b57b0272df2d82a1788feea2fbb5682de09120dd109899510820.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n            SELECT\n                (\n                    SELECT\n                        l1_batch_number\n                    FROM\n                        miniblocks\n                    WHERE\n                        number = $1\n                ) AS \"block_batch?\",\n                (\n                    SELECT\n                        MAX(number) + 1\n                    FROM\n                        l1_batches\n                ) AS \"max_batch?\"\n            ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "block_batch?",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 1,
-        "name": "max_batch?",
-        "type_info": "Int8"
-      }
-    ],
-    "parameters": {
-      "Left": [
-        "Int8"
-      ]
-    },
-    "nullable": [
-      null,
-      null
-    ]
-  },
-  "hash": "ac673a122962b57b0272df2d82a1788feea2fbb5682de09120dd109899510820"
-}
diff --git a/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json b/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json
new file mode 100644
index 000000000000..433564c6ae05
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json
@@ -0,0 +1,28 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n            SELECT\n                (\n                    SELECT\n                        l1_batch_number\n                    FROM\n                        miniblocks\n                    WHERE\n                        number = $1\n                ) AS \"block_batch?\",\n                COALESCE(\n                    (\n                        SELECT\n                            MAX(number) + 1\n                        FROM\n                            l1_batches\n                    ),\n                    (\n                        SELECT\n                            MAX(l1_batch_number) + 1\n                        FROM\n                            snapshot_recovery\n                    ),\n                    0\n                ) AS \"pending_batch!\"\n            ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "block_batch?",
+        "type_info": "Int8"
+      },
+      {
+        "ordinal": 1,
+        "name": "pending_batch!",
+        "type_info": "Int8"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Int8"
+      ]
+    },
+    "nullable": [
+      null,
+      null
+    ]
+  },
+  "hash": "c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5"
+}
diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs
index cf9cb03f3d4d..72f7ad926b52 100644
--- a/core/lib/dal/src/blocks_dal.rs
+++ b/core/lib/dal/src/blocks_dal.rs
@@ -61,8 +61,8 @@ impl BlocksDal<'_, '_> {
         Ok(row.number.map(|num| L1BatchNumber(num as u32)))
     }
 
-    pub async fn get_sealed_miniblock_number(&mut self) -> sqlx::Result<MiniblockNumber> {
-        let number: i64 = sqlx::query!(
+    pub async fn get_sealed_miniblock_number(&mut self) -> sqlx::Result<Option<MiniblockNumber>> {
+        let row = sqlx::query!(
             r#"
             SELECT
                 MAX(number) AS "number"
@@ -73,10 +73,9 @@ impl BlocksDal<'_, '_> {
         .instrument("get_sealed_miniblock_number")
         .report_latency()
         .fetch_one(self.storage.conn())
-        .await?
-        .number
-        .unwrap_or(0);
-        Ok(MiniblockNumber(number as u32))
+        .await?;
+
+        Ok(row.number.map(|number| MiniblockNumber(number as u32)))
     }
 
     /// Returns the number of the earliest L1 batch present in the DB, or `None` if there are no L1 batches.
diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs
index 4be51e07240f..9f9a9d964af3 100644
--- a/core/lib/dal/src/blocks_web3_dal.rs
+++ b/core/lib/dal/src/blocks_web3_dal.rs
@@ -17,7 +17,7 @@ use crate::{
     models::{
         storage_block::{
             bind_block_where_sql_params, web3_block_number_to_sql, web3_block_where_sql,
-            StorageBlockDetails, StorageL1BatchDetails,
+            ResolvedL1BatchForMiniblock, StorageBlockDetails, StorageL1BatchDetails,
         },
         storage_transaction::{extract_web3_transaction, web3_transaction_select_sql, CallTrace},
     },
@@ -32,42 +32,6 @@ pub struct BlocksWeb3Dal<'a, 'c> {
 }
 
 impl BlocksWeb3Dal<'_, '_> {
-    pub async fn get_sealed_miniblock_number(&mut self) -> sqlx::Result<MiniblockNumber> {
-        let number = sqlx::query!(
-            r#"
-            SELECT
-                MAX(number) AS "number"
-            FROM
-                miniblocks
-            "#
-        )
-        .instrument("get_sealed_block_number")
-        .report_latency()
-        .fetch_one(self.storage.conn())
-        .await?
-        .number
-        .expect("DAL invocation before genesis");
-        Ok(MiniblockNumber(number as u32))
-    }
-
-    pub async fn get_sealed_l1_batch_number(&mut self) -> sqlx::Result<L1BatchNumber> {
-        let number = sqlx::query!(
-            r#"
-            SELECT
-                MAX(number) AS "number"
-            FROM
-                l1_batches
-            "#
-        )
-        .instrument("get_sealed_block_number")
-        .report_latency()
-        .fetch_one(self.storage.conn())
-        .await?
-        .number
-        .expect("DAL invocation before genesis");
-        Ok(L1BatchNumber(number as u32))
-    }
-
     pub async fn get_block_by_web3_block_id(
         &mut self,
         block_id: api::BlockId,
@@ -258,21 +222,26 @@ impl BlocksWeb3Dal<'_, '_> {
         &mut self,
         block_id: api::BlockId,
     ) -> sqlx::Result<Option<MiniblockNumber>> {
-        let query_string = match block_id {
-            api::BlockId::Hash(_) => "SELECT number FROM miniblocks WHERE hash = $1".to_owned(),
+        let query_string;
+        let query_str = match block_id {
+            api::BlockId::Hash(_) => "SELECT number FROM miniblocks WHERE hash = $1",
             api::BlockId::Number(api::BlockNumber::Number(_)) => {
                 // The reason why instead of returning the `block_number` directly we use query is
-                // to handle numbers of blocks that are not created yet.
-                // the `SELECT number FROM miniblocks WHERE number=block_number` for
-                // non-existing block number will returns zero.
-                "SELECT number FROM miniblocks WHERE number = $1".to_owned()
+                // to handle numbers of blocks that are not created yet or were pruned.
+                // The query below will return NULL for non-existing block numbers.
+                "SELECT number FROM miniblocks WHERE number = $1"
             }
             api::BlockId::Number(api::BlockNumber::Earliest) => {
-                return Ok(Some(MiniblockNumber(0)));
+                // Similarly to `BlockNumber::Number`, we may be missing the earliest block
+                // if the storage was recovered from a snapshot.
+                "SELECT number FROM miniblocks WHERE number = 0"
+            }
+            api::BlockId::Number(block_number) => {
+                query_string = web3_block_number_to_sql(block_number);
+                &query_string
             }
-            api::BlockId::Number(block_number) => web3_block_number_to_sql(block_number),
         };
-        let row = bind_block_where_sql_params(&block_id, sqlx::query(&query_string))
+        let row = bind_block_where_sql_params(&block_id, sqlx::query(query_str))
             .fetch_optional(self.storage.conn())
             .await?;
 
@@ -283,21 +252,13 @@ impl BlocksWeb3Dal<'_, '_> {
     }
 
     /// Returns L1 batch timestamp for either sealed or pending L1 batch.
+    ///
+    /// The correctness of the current implementation depends on the timestamp of an L1 batch always
+    /// being equal to the timestamp of the first miniblock in the batch.
     pub async fn get_expected_l1_batch_timestamp(
         &mut self,
-        l1_batch_number: L1BatchNumber,
+        l1_batch_number: &ResolvedL1BatchForMiniblock,
     ) -> sqlx::Result<Option<u64>> {
-        let first_miniblock_of_batch = if l1_batch_number.0 == 0 {
-            MiniblockNumber(0)
-        } else {
-            match self
-                .get_miniblock_range_of_l1_batch(l1_batch_number - 1)
-                .await?
-            {
-                Some((_, miniblock_number)) => miniblock_number + 1,
-                None => return Ok(None),
-            }
-        };
         let timestamp = sqlx::query!(
             r#"
             SELECT
@@ -305,9 +266,19 @@ impl BlocksWeb3Dal<'_, '_> {
             FROM
                 miniblocks
             WHERE
-                number = $1
+                (
+                    $1::BIGINT IS NULL
+                    AND l1_batch_number IS NULL
+                )
+                OR (l1_batch_number = $1::BIGINT)
+            ORDER BY
+                number
+            LIMIT
+                1
             "#,
-            first_miniblock_of_batch.0 as i64
+            l1_batch_number
+                .miniblock_l1_batch
+                .map(|number| i64::from(number.0))
         )
         .fetch_optional(self.storage.conn())
         .await?
@@ -629,6 +600,7 @@ impl BlocksWeb3Dal<'_, '_> {
 mod tests {
     use zksync_types::{
         block::{MiniblockHasher, MiniblockHeader},
+        snapshots::SnapshotRecoveryStatus,
         MiniblockNumber, ProtocolVersion, ProtocolVersionId,
     };
 
@@ -698,8 +670,18 @@ mod tests {
     async fn resolving_earliest_block_id() {
         let connection_pool = ConnectionPool::test_pool().await;
         let mut conn = connection_pool.access_storage().await.unwrap();
+
+        let miniblock_number = conn
+            .blocks_web3_dal()
+            .resolve_block_id(api::BlockId::Number(api::BlockNumber::Earliest))
+            .await;
+        assert_eq!(miniblock_number.unwrap(), None);
+
+        conn.protocol_versions_dal()
+            .save_protocol_version_with_tx(ProtocolVersion::default())
+            .await;
         conn.blocks_dal()
-            .delete_miniblocks(MiniblockNumber(0))
+            .insert_miniblock(&create_miniblock_header(0))
             .await
             .unwrap();
 
@@ -714,13 +696,23 @@ mod tests {
     async fn resolving_latest_block_id() {
         let connection_pool = ConnectionPool::test_pool().await;
         let mut conn = connection_pool.access_storage().await.unwrap();
-        conn.blocks_dal()
-            .delete_miniblocks(MiniblockNumber(0))
-            .await
-            .unwrap();
         conn.protocol_versions_dal()
             .save_protocol_version_with_tx(ProtocolVersion::default())
             .await;
+
+        let miniblock_number = conn
+            .blocks_web3_dal()
+            .resolve_block_id(api::BlockId::Number(api::BlockNumber::Latest))
+            .await
+            .unwrap();
+        assert_eq!(miniblock_number, None);
+        let miniblock_number = conn
+            .blocks_web3_dal()
+            .resolve_block_id(api::BlockId::Number(api::BlockNumber::Pending))
+            .await
+            .unwrap();
+        assert_eq!(miniblock_number, Some(MiniblockNumber(0)));
+
         conn.blocks_dal()
             .insert_miniblock(&create_miniblock_header(0))
             .await
@@ -766,6 +758,31 @@ mod tests {
         assert_eq!(miniblock_number.unwrap(), Some(MiniblockNumber(1)));
     }
 
+    #[tokio::test]
+    async fn resolving_pending_block_id_for_snapshot_recovery() {
+        let connection_pool = ConnectionPool::test_pool().await;
+        let mut conn = connection_pool.access_storage().await.unwrap();
+        let snapshot_recovery = SnapshotRecoveryStatus {
+            l1_batch_number: L1BatchNumber(23),
+            l1_batch_root_hash: H256::zero(),
+            miniblock_number: MiniblockNumber(42),
+            miniblock_root_hash: H256::zero(),
+            last_finished_chunk_id: None,
+            total_chunk_count: 100,
+        };
+        conn.snapshot_recovery_dal()
+            .set_applied_snapshot_status(&snapshot_recovery)
+            .await
+            .unwrap();
+
+        let miniblock_number = conn
+            .blocks_web3_dal()
+            .resolve_block_id(api::BlockId::Number(api::BlockNumber::Pending))
+            .await
+            .unwrap();
+        assert_eq!(miniblock_number, Some(MiniblockNumber(43)));
+    }
+
     #[tokio::test]
     async fn resolving_block_by_hash() {
         let connection_pool = ConnectionPool::test_pool().await;
diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs
index 2ff98815abf4..1e8e97d5dca1 100644
--- a/core/lib/dal/src/models/storage_block.rs
+++ b/core/lib/dal/src/models/storage_block.rs
@@ -296,27 +296,32 @@ pub fn web3_block_number_to_sql(block_number: api::BlockNumber) -> String {
     match block_number {
         api::BlockNumber::Number(number) => number.to_string(),
         api::BlockNumber::Earliest => 0.to_string(),
-        api::BlockNumber::Pending => {
-            "(SELECT (MAX(number) + 1) as number FROM miniblocks)".to_string()
-        }
+        api::BlockNumber::Pending => "
+            (SELECT COALESCE(
+                (SELECT (MAX(number) + 1) AS number FROM miniblocks),
+                (SELECT (MAX(miniblock_number) + 1) AS number FROM snapshot_recovery),
+                0
+            ) AS number)
+        "
+        .to_string(),
         api::BlockNumber::Latest | api::BlockNumber::Committed => {
-            "(SELECT MAX(number) as number FROM miniblocks)".to_string()
+            "(SELECT MAX(number) AS number FROM miniblocks)".to_string()
         }
         api::BlockNumber::Finalized => "
-                (SELECT COALESCE(
-                    (
-                        SELECT MAX(number) FROM miniblocks
-                        WHERE l1_batch_number = (
-                            SELECT MAX(number) FROM l1_batches
-                            JOIN eth_txs ON
-                                l1_batches.eth_execute_tx_id = eth_txs.id
-                            WHERE
-                                eth_txs.confirmed_eth_tx_history_id IS NOT NULL
-                        )
-                    ),
-                    0
-                ) as number)
-            "
+            (SELECT COALESCE(
+                (
+                    SELECT MAX(number) FROM miniblocks
+                    WHERE l1_batch_number = (
+                        SELECT MAX(number) FROM l1_batches
+                        JOIN eth_txs ON
+                            l1_batches.eth_execute_tx_id = eth_txs.id
+                        WHERE
+                            eth_txs.confirmed_eth_tx_history_id IS NOT NULL
+                    )
+                ),
+                0
+            ) AS number)
+        "
         .to_string(),
     }
 }
@@ -561,65 +566,3 @@ impl ResolvedL1BatchForMiniblock {
         self.miniblock_l1_batch.unwrap_or(self.pending_l1_batch)
     }
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_web3_block_number_to_sql_earliest() {
-        let sql = web3_block_number_to_sql(api::BlockNumber::Earliest);
-        assert_eq!(sql, 0.to_string());
-    }
-
-    #[test]
-    fn test_web3_block_number_to_sql_pending() {
-        let sql = web3_block_number_to_sql(api::BlockNumber::Pending);
-        assert_eq!(
-            sql,
-            "(SELECT (MAX(number) + 1) as number FROM miniblocks)".to_string()
-        );
-    }
-
-    #[test]
-    fn test_web3_block_number_to_sql_latest() {
-        let sql = web3_block_number_to_sql(api::BlockNumber::Latest);
-        assert_eq!(
-            sql,
-            "(SELECT MAX(number) as number FROM miniblocks)".to_string()
-        );
-    }
-
-    #[test]
-    fn test_web3_block_number_to_sql_committed() {
-        let sql = web3_block_number_to_sql(api::BlockNumber::Committed);
-        assert_eq!(
-            sql,
-            "(SELECT MAX(number) as number FROM miniblocks)".to_string()
-        );
-    }
-
-    #[test]
-    fn test_web3_block_number_to_sql_finalized() {
-        let sql = web3_block_number_to_sql(api::BlockNumber::Finalized);
-        assert_eq!(
-            sql,
-            "
-                (SELECT COALESCE(
-                    (
-                        SELECT MAX(number) FROM miniblocks
-                        WHERE l1_batch_number = (
-                            SELECT MAX(number) FROM l1_batches
-                            JOIN eth_txs ON
-                                l1_batches.eth_execute_tx_id = eth_txs.id
-                            WHERE
-                                eth_txs.confirmed_eth_tx_history_id IS NOT NULL
-                        )
-                    ),
-                    0
-                ) as number)
-            "
-            .to_string()
-        );
-    }
-}
diff --git a/core/lib/dal/src/snapshot_recovery_dal.rs b/core/lib/dal/src/snapshot_recovery_dal.rs
index edcf7ccf1986..abf6ceb44069 100644
--- a/core/lib/dal/src/snapshot_recovery_dal.rs
+++ b/core/lib/dal/src/snapshot_recovery_dal.rs
@@ -87,7 +87,7 @@ mod tests {
     use crate::ConnectionPool;
 
     #[tokio::test]
-    async fn resolving_earliest_block_id() {
+    async fn manipulating_snapshot_recovery_table() {
         let connection_pool = ConnectionPool::test_pool().await;
         let mut conn = connection_pool.access_storage().await.unwrap();
         let mut applied_status_dal = conn.snapshot_recovery_dal();
diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs
index c95d4ca73dbb..e5cc4fc8b975 100644
--- a/core/lib/dal/src/storage_web3_dal.rs
+++ b/core/lib/dal/src/storage_web3_dal.rs
@@ -107,12 +107,21 @@ impl StorageWeb3Dal<'_, '_> {
                     WHERE
                         number = $1
                 ) AS "block_batch?",
-                (
-                    SELECT
-                        MAX(number) + 1
-                    FROM
-                        l1_batches
-                ) AS "max_batch?"
+                COALESCE(
+                    (
+                        SELECT
+                            MAX(number) + 1
+                        FROM
+                            l1_batches
+                    ),
+                    (
+                        SELECT
+                            MAX(l1_batch_number) + 1
+                        FROM
+                            snapshot_recovery
+                    ),
+                    0
+                ) AS "pending_batch!"
             "#,
             miniblock_number.0 as i64
         )
@@ -121,7 +130,7 @@ impl StorageWeb3Dal<'_, '_> {
 
         Ok(ResolvedL1BatchForMiniblock {
             miniblock_l1_batch: row.block_batch.map(|n| L1BatchNumber(n as u32)),
-            pending_l1_batch: L1BatchNumber(row.max_batch.unwrap_or(0) as u32),
+            pending_l1_batch: L1BatchNumber(row.pending_batch as u32),
         })
     }
 
@@ -245,3 +254,164 @@ impl StorageWeb3Dal<'_, '_> {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use zksync_types::{
+        block::{BlockGasCount, L1BatchHeader},
+        snapshots::SnapshotRecoveryStatus,
+        ProtocolVersion, ProtocolVersionId,
+    };
+
+    use super::*;
+    use crate::{tests::create_miniblock_header, ConnectionPool};
+
+    #[tokio::test]
+    async fn resolving_l1_batch_number_of_miniblock() {
+        let pool = ConnectionPool::test_pool().await;
+        let mut conn = pool.access_storage().await.unwrap();
+        conn.protocol_versions_dal()
+            .save_protocol_version_with_tx(ProtocolVersion::default())
+            .await;
+        conn.blocks_dal()
+            .insert_miniblock(&create_miniblock_header(0))
+            .await
+            .unwrap();
+        let l1_batch_header = L1BatchHeader::new(
+            L1BatchNumber(0),
+            0,
+            Address::repeat_byte(0x42),
+            Default::default(),
+            ProtocolVersionId::latest(),
+        );
+        conn.blocks_dal()
+            .insert_l1_batch(&l1_batch_header, &[], BlockGasCount::default(), &[], &[], 0)
+            .await
+            .unwrap();
+        conn.blocks_dal()
+            .mark_miniblocks_as_executed_in_l1_batch(L1BatchNumber(0))
+            .await
+            .unwrap();
+
+        let first_miniblock = create_miniblock_header(1);
+        conn.blocks_dal()
+            .insert_miniblock(&first_miniblock)
+            .await
+            .unwrap();
+
+        let resolved = conn
+            .storage_web3_dal()
+            .resolve_l1_batch_number_of_miniblock(MiniblockNumber(0))
+            .await
+            .unwrap();
+        assert_eq!(resolved.miniblock_l1_batch, Some(L1BatchNumber(0)));
+        assert_eq!(resolved.pending_l1_batch, L1BatchNumber(1));
+        assert_eq!(resolved.expected_l1_batch(), L1BatchNumber(0));
+
+        let timestamp = conn
+            .blocks_web3_dal()
+            .get_expected_l1_batch_timestamp(&resolved)
+            .await
+            .unwrap();
+        assert_eq!(timestamp, Some(0));
+
+        for pending_miniblock_number in [1, 2] {
+            let resolved = conn
+                .storage_web3_dal()
+                .resolve_l1_batch_number_of_miniblock(MiniblockNumber(pending_miniblock_number))
+                .await
+                .unwrap();
+            assert_eq!(resolved.miniblock_l1_batch, None);
+            assert_eq!(resolved.pending_l1_batch, L1BatchNumber(1));
+            assert_eq!(resolved.expected_l1_batch(), L1BatchNumber(1));
+
+            let timestamp = conn
+                .blocks_web3_dal()
+                .get_expected_l1_batch_timestamp(&resolved)
+                .await
+                .unwrap();
+            assert_eq!(timestamp, Some(first_miniblock.timestamp));
+        }
+    }
+
+    #[tokio::test]
+    async fn resolving_l1_batch_number_of_miniblock_with_snapshot_recovery() {
+        let pool = ConnectionPool::test_pool().await;
+        let mut conn = pool.access_storage().await.unwrap();
+        conn.protocol_versions_dal()
+            .save_protocol_version_with_tx(ProtocolVersion::default())
+            .await;
+        let snapshot_recovery = SnapshotRecoveryStatus {
+            l1_batch_number: L1BatchNumber(23),
+            l1_batch_root_hash: H256::zero(),
+            miniblock_number: MiniblockNumber(42),
+            miniblock_root_hash: H256::zero(),
+            last_finished_chunk_id: None,
+            total_chunk_count: 100,
+        };
+        conn.snapshot_recovery_dal()
+            .set_applied_snapshot_status(&snapshot_recovery)
+            .await
+            .unwrap();
+
+        let first_miniblock = create_miniblock_header(snapshot_recovery.miniblock_number.0 + 1);
+        conn.blocks_dal()
+            .insert_miniblock(&first_miniblock)
+            .await
+            .unwrap();
+
+        let resolved = conn
+            .storage_web3_dal()
+            .resolve_l1_batch_number_of_miniblock(snapshot_recovery.miniblock_number + 1)
+            .await
+            .unwrap();
+        assert_eq!(resolved.miniblock_l1_batch, None);
+        assert_eq!(
+            resolved.pending_l1_batch,
+            snapshot_recovery.l1_batch_number + 1
+        );
+        assert_eq!(
+            resolved.expected_l1_batch(),
+            snapshot_recovery.l1_batch_number + 1
+        );
+
+        let timestamp = conn
+            .blocks_web3_dal()
+            .get_expected_l1_batch_timestamp(&resolved)
+            .await
+            .unwrap();
+        assert_eq!(timestamp, Some(first_miniblock.timestamp));
+
+        let l1_batch_header = L1BatchHeader::new(
+            snapshot_recovery.l1_batch_number + 1,
+            100,
+            Address::repeat_byte(0x42),
+            Default::default(),
+            ProtocolVersionId::latest(),
+        );
+        conn.blocks_dal()
+            .insert_l1_batch(&l1_batch_header, &[], BlockGasCount::default(), &[], &[], 0)
+            .await
+            .unwrap();
+        conn.blocks_dal()
+            .mark_miniblocks_as_executed_in_l1_batch(l1_batch_header.number)
+            .await
+            .unwrap();
+
+        let resolved = conn
+            .storage_web3_dal()
+            .resolve_l1_batch_number_of_miniblock(snapshot_recovery.miniblock_number + 1)
+            .await
+            .unwrap();
+        assert_eq!(resolved.miniblock_l1_batch, Some(l1_batch_header.number));
+        assert_eq!(resolved.pending_l1_batch, l1_batch_header.number + 1);
+        assert_eq!(resolved.expected_l1_batch(), l1_batch_header.number);
+
+        let timestamp = conn
+            .blocks_web3_dal()
+            .get_expected_l1_batch_timestamp(&resolved)
+            .await
+            .unwrap();
+        assert_eq!(timestamp, Some(first_miniblock.timestamp));
+    }
+}
diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs
index 5830ee14110d..1e4b4c95e4b7 100644
--- a/core/lib/dal/src/tests/mod.rs
+++ b/core/lib/dal/src/tests/mod.rs
@@ -32,7 +32,7 @@ pub(crate) fn create_miniblock_header(number: u32) -> MiniblockHeader {
     let protocol_version = ProtocolVersionId::default();
     MiniblockHeader {
         number,
-        timestamp: 0,
+        timestamp: number.0.into(),
         hash: MiniblockHasher::new(number, 0, H256::zero()).finalize(protocol_version),
         l1_tx_count: 0,
         l2_tx_count: 0,
diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs
index 033a053cad24..abc243dd1a16 100644
--- a/core/lib/dal/src/transactions_web3_dal.rs
+++ b/core/lib/dal/src/transactions_web3_dal.rs
@@ -321,23 +321,12 @@ impl TransactionsWeb3Dal<'_, '_> {
         Ok((hashes, last_loc))
     }
 
+    /// `committed_next_nonce` should equal the nonce for `initiator_address` in the storage.
     pub async fn next_nonce_by_initiator_account(
         &mut self,
         initiator_address: Address,
+        committed_next_nonce: u64,
     ) -> Result<U256, SqlxError> {
-        let latest_block_number = self
-            .storage
-            .blocks_web3_dal()
-            .resolve_block_id(api::BlockId::Number(api::BlockNumber::Latest))
-            .await?
-            .expect("Failed to get `latest` nonce");
-        let latest_nonce = self
-            .storage
-            .storage_web3_dal()
-            .get_address_historical_nonce(initiator_address, latest_block_number)
-            .await?
-            .as_u64();
-
         // Get nonces of non-rejected transactions, starting from the 'latest' nonce.
         // `latest` nonce is used, because it is guaranteed that there are no gaps before it.
         // `(miniblock_number IS NOT NULL OR error IS NULL)` is the condition that filters non-rejected transactions.
@@ -361,7 +350,7 @@ impl TransactionsWeb3Dal<'_, '_> {
                 nonce
             "#,
             initiator_address.as_bytes(),
-            latest_nonce as i64
+            committed_next_nonce as i64
         )
         .fetch_all(self.storage.conn())
         .await?
@@ -370,7 +359,7 @@ impl TransactionsWeb3Dal<'_, '_> {
         .collect();
 
         // Find pending nonce as the first "gap" in nonces.
-        let mut pending_nonce = latest_nonce;
+        let mut pending_nonce = committed_next_nonce;
         for nonce in non_rejected_nonces {
             if pending_nonce == nonce {
                 pending_nonce += 1;
@@ -411,8 +400,10 @@ impl TransactionsWeb3Dal<'_, '_> {
 
 #[cfg(test)]
 mod tests {
+    use std::collections::HashMap;
+
     use zksync_types::{
-        block::MiniblockHasher, fee::TransactionExecutionMetrics, l2::L2Tx, ProtocolVersion,
+        block::MiniblockHasher, fee::TransactionExecutionMetrics, l2::L2Tx, Nonce, ProtocolVersion,
         ProtocolVersionId,
     };
 
@@ -536,4 +527,102 @@ mod tests {
         assert_eq!(raw_txs.len(), 1);
         assert_eq!(raw_txs[0].hash(), tx_hash);
     }
+
+    #[tokio::test]
+    async fn getting_next_nonce_by_initiator_account() {
+        let connection_pool = ConnectionPool::test_pool().await;
+        let mut conn = connection_pool.access_storage().await.unwrap();
+        conn.protocol_versions_dal()
+            .save_protocol_version_with_tx(ProtocolVersion::default())
+            .await;
+
+        let initiator = Address::repeat_byte(1);
+        let next_nonce = conn
+            .transactions_web3_dal()
+            .next_nonce_by_initiator_account(initiator, 0)
+            .await
+            .unwrap();
+        assert_eq!(next_nonce, 0.into());
+
+        let mut tx_by_nonce = HashMap::new();
+        for nonce in [0, 1, 4] {
+            let mut tx = mock_l2_transaction();
+            // Changing transaction fields invalidates the signature, but that's OK for test purposes
+            tx.common_data.nonce = Nonce(nonce);
+            tx.common_data.initiator_address = initiator;
+            tx_by_nonce.insert(nonce, tx.clone());
+            conn.transactions_dal()
+                .insert_transaction_l2(tx, TransactionExecutionMetrics::default())
+                .await;
+        }
+
+        let next_nonce = conn
+            .transactions_web3_dal()
+            .next_nonce_by_initiator_account(initiator, 0)
+            .await
+            .unwrap();
+        assert_eq!(next_nonce, 2.into());
+
+        // Reject the transaction with nonce 1, so that it'd be not taken into account.
+        conn.transactions_dal()
+            .mark_tx_as_rejected(tx_by_nonce[&1].hash(), "oops")
+            .await;
+        let next_nonce = conn
+            .transactions_web3_dal()
+            .next_nonce_by_initiator_account(initiator, 0)
+            .await
+            .unwrap();
+        assert_eq!(next_nonce, 1.into());
+
+        // Include transactions in a miniblock (including the rejected one), so that they are taken into account again.
+        let mut miniblock = create_miniblock_header(1);
+        miniblock.l2_tx_count = 2;
+        conn.blocks_dal()
+            .insert_miniblock(&miniblock)
+            .await
+            .unwrap();
+        let executed_txs = [
+            mock_execution_result(tx_by_nonce[&0].clone()),
+            mock_execution_result(tx_by_nonce[&1].clone()),
+        ];
+        conn.transactions_dal()
+            .mark_txs_as_executed_in_miniblock(miniblock.number, &executed_txs, 1.into())
+            .await;
+
+        let next_nonce = conn
+            .transactions_web3_dal()
+            .next_nonce_by_initiator_account(initiator, 0)
+            .await
+            .unwrap();
+        assert_eq!(next_nonce, 2.into());
+    }
+
+    #[tokio::test]
+    async fn getting_next_nonce_by_initiator_account_after_snapshot_recovery() {
+        // Emulate snapshot recovery: no transactions with past nonces are present in the storage
+        let connection_pool = ConnectionPool::test_pool().await;
+        let mut conn = connection_pool.access_storage().await.unwrap();
+        let initiator = Address::repeat_byte(1);
+        let next_nonce = conn
+            .transactions_web3_dal()
+            .next_nonce_by_initiator_account(initiator, 1)
+            .await
+            .unwrap();
+        assert_eq!(next_nonce, 1.into());
+
+        let mut tx = mock_l2_transaction();
+        // Changing transaction fields invalidates the signature, but that's OK for test purposes
+        tx.common_data.nonce = Nonce(1);
+        tx.common_data.initiator_address = initiator;
+        conn.transactions_dal()
+            .insert_transaction_l2(tx, TransactionExecutionMetrics::default())
+            .await;
+
+        let next_nonce = conn
+            .transactions_web3_dal()
+            .next_nonce_by_initiator_account(initiator, 1)
+            .await
+            .unwrap();
+        assert_eq!(next_nonce, 2.into());
+    }
 }
diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs
index 8051fd3cd068..9f00aee0cf71 100644
--- a/core/lib/types/src/api/mod.rs
+++ b/core/lib/types/src/api/mod.rs
@@ -568,7 +568,7 @@ pub enum DebugCallType {
     Create,
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct DebugCall {
     pub r#type: DebugCallType,
diff --git a/core/lib/web3_decl/src/error.rs b/core/lib/web3_decl/src/error.rs
index 13f88c66c651..f2c77c743c54 100644
--- a/core/lib/web3_decl/src/error.rs
+++ b/core/lib/web3_decl/src/error.rs
@@ -1,12 +1,16 @@
 //! Definition of errors that can occur in the zkSync Web3 API.
 
 use thiserror::Error;
-use zksync_types::api::SerializationTransactionError;
+use zksync_types::{api::SerializationTransactionError, L1BatchNumber, MiniblockNumber};
 
 #[derive(Debug, Error)]
 pub enum Web3Error {
     #[error("Block with such an ID doesn't exist yet")]
     NoBlock,
+    #[error("Block with such an ID is pruned; the first retained block is {0}")]
+    PrunedBlock(MiniblockNumber),
+    #[error("L1 batch with such an ID is pruned; the first retained L1 batch is {0}")]
+    PrunedL1Batch(L1BatchNumber),
     #[error("Request timeout")]
     RequestTimeout,
     #[error("Internal error")]
diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs
index ba40531875da..54d9cde6cf23 100644
--- a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs
+++ b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs
@@ -8,13 +8,14 @@
 
 use std::time::{Duration, Instant};
 
+use anyhow::Context as _;
 use multivm::{
     interface::{L1BatchEnv, L2BlockEnv, SystemEnv, VmInterface},
     utils::adjust_pubdata_price_for_tx,
     vm_latest::{constants::BLOCK_GAS_LIMIT, HistoryDisabled},
     VmInstance,
 };
-use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor};
+use zksync_dal::{ConnectionPool, StorageProcessor};
 use zksync_state::{PostgresStorage, ReadStorage, StorageView, WriteStorage};
 use zksync_system_constants::{
     SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
@@ -34,6 +35,7 @@ use super::{
     vm_metrics::{self, SandboxStage, SANDBOX_METRICS},
     BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit,
 };
+use crate::utils::projected_first_l1_batch;
 
 #[allow(clippy::too_many_arguments)]
 pub(super) fn apply_vm_in_sandbox<T>(
@@ -317,55 +319,46 @@ impl BlockArgs {
     async fn resolve_block_info(
         &self,
         connection: &mut StorageProcessor<'_>,
-    ) -> Result<ResolvedBlockInfo, SqlxError> {
-        let (state_l2_block_number, vm_l1_batch_number, l1_batch_timestamp) =
-            if self.is_pending_miniblock() {
-                let sealed_l1_batch_number = connection
-                    .blocks_web3_dal()
-                    .get_sealed_l1_batch_number()
-                    .await?;
-                let sealed_miniblock_header = connection
-                    .blocks_dal()
-                    .get_last_sealed_miniblock_header()
-                    .await
-                    .unwrap()
-                    .expect("At least one miniblock must exist");
-
-                // Timestamp of the next L1 batch must be greater than the timestamp of the last miniblock.
-                let l1_batch_timestamp =
-                    seconds_since_epoch().max(sealed_miniblock_header.timestamp + 1);
-                (
-                    sealed_miniblock_header.number,
-                    sealed_l1_batch_number + 1,
-                    l1_batch_timestamp,
-                )
-            } else {
-                let l1_batch_number = connection
-                    .storage_web3_dal()
-                    .resolve_l1_batch_number_of_miniblock(self.resolved_block_number)
-                    .await?
-                    .expected_l1_batch();
-                let l1_batch_timestamp = self.l1_batch_timestamp_s.unwrap_or_else(|| {
-                    panic!(
+    ) -> anyhow::Result<ResolvedBlockInfo> {
+        let (state_l2_block_number, vm_l1_batch_number, l1_batch_timestamp);
+
+        if self.is_pending_miniblock() {
+            let sealed_l1_batch_number =
+                connection.blocks_dal().get_sealed_l1_batch_number().await?;
+            let sealed_miniblock_header = connection
+                .blocks_dal()
+                .get_last_sealed_miniblock_header()
+                .await?
+                .context("no miniblocks in storage")?;
+
+            vm_l1_batch_number = match sealed_l1_batch_number {
+                Some(number) => number + 1,
+                None => projected_first_l1_batch(connection).await?,
+            };
+            state_l2_block_number = sealed_miniblock_header.number;
+            // Timestamp of the next L1 batch must be greater than the timestamp of the last miniblock.
+            l1_batch_timestamp = seconds_since_epoch().max(sealed_miniblock_header.timestamp + 1);
+        } else {
+            vm_l1_batch_number = connection
+                .storage_web3_dal()
+                .resolve_l1_batch_number_of_miniblock(self.resolved_block_number)
+                .await?
+                .expected_l1_batch();
+            l1_batch_timestamp = self.l1_batch_timestamp_s.unwrap_or_else(|| {
+                panic!(
                     "L1 batch timestamp is `None`, `block_id`: {:?}, `resolved_block_number`: {}",
                     self.block_id, self.resolved_block_number.0
                 );
-                });
-
-                (
-                    self.resolved_block_number,
-                    l1_batch_number,
-                    l1_batch_timestamp,
-                )
-            };
+            });
+            state_l2_block_number = self.resolved_block_number;
+        };
 
         // Blocks without version specified are considered to be of `Version9`.
         // TODO: remove `unwrap_or` when protocol version ID will be assigned for each block.
         let protocol_version = connection
             .blocks_dal()
             .get_miniblock_protocol_version_id(state_l2_block_number)
-            .await
-            .unwrap()
+            .await?
             .unwrap_or(ProtocolVersionId::Version9);
         Ok(ResolvedBlockInfo {
             state_l2_block_number,
diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs
index cbff25698cf7..80c5fcd979a6 100644
--- a/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs
+++ b/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs
@@ -13,6 +13,8 @@ use zksync_types::{
     PackedEthSignature, Transaction, U256,
 };
 
+#[cfg(test)]
+use super::testonly::MockTransactionExecutor;
 use super::{apply, vm_metrics, ApiTracer, BlockArgs, TxSharedArgs, VmPermit};
 
 #[derive(Debug)]
@@ -73,97 +75,120 @@ impl TxExecutionArgs {
     }
 }
 
-pub(crate) async fn execute_tx_eth_call(
-    vm_permit: VmPermit,
-    shared_args: TxSharedArgs,
-    connection_pool: ConnectionPool,
-    mut tx: L2Tx,
-    block_args: BlockArgs,
-    vm_execution_cache_misses_limit: Option<usize>,
-    custom_tracers: Vec<ApiTracer>,
-) -> VmExecutionResultAndLogs {
-    let enforced_base_fee = tx.common_data.fee.max_fee_per_gas.as_u64();
-    let execution_args =
-        TxExecutionArgs::for_eth_call(enforced_base_fee, vm_execution_cache_misses_limit);
-
-    if tx.common_data.signature.is_empty() {
-        tx.common_data.signature = PackedEthSignature::default().serialize_packed().into();
+/// Executor of transactions.
+#[derive(Debug)]
+pub(crate) enum TransactionExecutor {
+    Real,
+    #[cfg(test)]
+    Mock(MockTransactionExecutor),
+}
+
+impl TransactionExecutor {
+    /// This method assumes that either the block with number `resolved_block_number` is present in the DB,
+    /// or `block_id` is `pending` and the block with number `resolved_block_number - 1` is present in the DB.
+    #[allow(clippy::too_many_arguments)]
+    #[tracing::instrument(skip_all)]
+    pub async fn execute_tx_in_sandbox(
+        &self,
+        vm_permit: VmPermit,
+        shared_args: TxSharedArgs,
+        // If `true`, then the batch's L1/pubdata gas price will be adjusted so that the transaction's gas per pubdata
+        // limit is <= the one in the block. This is often helpful in case we want the transaction validation to work
+        // regardless of the current L1 prices for gas or pubdata.
+        adjust_pubdata_price: bool,
+        execution_args: TxExecutionArgs,
+        connection_pool: ConnectionPool,
+        tx: Transaction,
+        block_args: BlockArgs,
+        custom_tracers: Vec<ApiTracer>,
+    ) -> (VmExecutionResultAndLogs, TransactionExecutionMetrics, bool) {
+        #[cfg(test)]
+        if let Self::Mock(mock_executor) = self {
+            return mock_executor.execute_tx(&tx);
+        }
+
+        let total_factory_deps = tx
+            .execute
+            .factory_deps
+            .as_ref()
+            .map_or(0, |deps| deps.len() as u16);
+
+        let (published_bytecodes, execution_result) = tokio::task::spawn_blocking(move || {
+            let span = span!(Level::DEBUG, "execute_in_sandbox").entered();
+            let result = apply::apply_vm_in_sandbox(
+                vm_permit,
+                shared_args,
+                adjust_pubdata_price,
+                &execution_args,
+                &connection_pool,
+                tx,
+                block_args,
+                |vm, tx| {
+                    let storage_invocation_tracer =
+                        StorageInvocations::new(execution_args.missed_storage_invocation_limit);
+                    let custom_tracers: Vec<_> = custom_tracers
+                        .into_iter()
+                        .map(|tracer| tracer.into_boxed())
+                        .chain(vec![storage_invocation_tracer.into_tracer_pointer()])
+                        .collect();
+                    vm.inspect_transaction_with_bytecode_compression(
+                        custom_tracers.into(),
+                        tx,
+                        true,
+                    )
+                },
+            );
+            span.exit();
+            result
+        })
+        .await
+        .unwrap();
+
+        let tx_execution_metrics =
+            vm_metrics::collect_tx_execution_metrics(total_factory_deps, &execution_result);
+        (
+            execution_result,
+            tx_execution_metrics,
+            published_bytecodes.is_ok(),
+        )
     }
 
-    // Protection against infinite-loop eth_calls and alike:
-    // limiting the amount of gas the call can use.
-    // We can't use `BLOCK_ERGS_LIMIT` here since the VM itself has some overhead.
-    tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into();
-    let (vm_result, _, _) = execute_tx_in_sandbox(
-        vm_permit,
-        shared_args,
-        false,
-        execution_args,
-        connection_pool,
-        tx.into(),
-        block_args,
-        custom_tracers,
-    )
-    .await;
-
-    vm_result
-}
+    #[allow(clippy::too_many_arguments)]
+    pub async fn execute_tx_eth_call(
+        &self,
+        vm_permit: VmPermit,
+        shared_args: TxSharedArgs,
+        connection_pool: ConnectionPool,
+        mut tx: L2Tx,
+        block_args: BlockArgs,
+        vm_execution_cache_misses_limit: Option<usize>,
+        custom_tracers: Vec<ApiTracer>,
+    ) -> VmExecutionResultAndLogs {
+        let enforced_base_fee = tx.common_data.fee.max_fee_per_gas.as_u64();
+        let execution_args =
+            TxExecutionArgs::for_eth_call(enforced_base_fee, vm_execution_cache_misses_limit);
+
+        if tx.common_data.signature.is_empty() {
+            tx.common_data.signature = PackedEthSignature::default().serialize_packed().into();
+        }
+
+        // Protection against infinite-loop eth_calls and alike:
+        // limiting the amount of gas the call can use.
+        // We can't use `BLOCK_ERGS_LIMIT` here since the VM itself has some overhead.
+        tx.common_data.fee.gas_limit = ETH_CALL_GAS_LIMIT.into();
+        let (vm_result, ..) = self
+            .execute_tx_in_sandbox(
+                vm_permit,
+                shared_args,
+                false,
+                execution_args,
+                connection_pool,
+                tx.into(),
+                block_args,
+                custom_tracers,
+            )
+            .await;
 
-/// This method assumes that (block with number `resolved_block_number` is present in DB)
-/// or (`block_id` is `pending` and block with number `resolved_block_number - 1` is present in DB)
-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip_all)]
-pub(crate) async fn execute_tx_in_sandbox(
-    vm_permit: VmPermit,
-    shared_args: TxSharedArgs,
-    // If `true`, then the batch's L1/pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <=
-    // to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the
-    // current L1 prices for gas or pubdata.
-    adjust_pubdata_price: bool,
-    execution_args: TxExecutionArgs,
-    connection_pool: ConnectionPool,
-    tx: Transaction,
-    block_args: BlockArgs,
-    custom_tracers: Vec<ApiTracer>,
-) -> (VmExecutionResultAndLogs, TransactionExecutionMetrics, bool) {
-    let total_factory_deps = tx
-        .execute
-        .factory_deps
-        .as_ref()
-        .map_or(0, |deps| deps.len() as u16);
-
-    let (published_bytecodes, execution_result) = tokio::task::spawn_blocking(move || {
-        let span = span!(Level::DEBUG, "execute_in_sandbox").entered();
-        let result = apply::apply_vm_in_sandbox(
-            vm_permit,
-            shared_args,
-            adjust_pubdata_price,
-            &execution_args,
-            &connection_pool,
-            tx,
-            block_args,
-            |vm, tx| {
-                let storage_invocation_tracer =
-                    StorageInvocations::new(execution_args.missed_storage_invocation_limit);
-                let custom_tracers: Vec<_> = custom_tracers
-                    .into_iter()
-                    .map(|tracer| tracer.into_boxed())
-                    .chain(vec![storage_invocation_tracer.into_tracer_pointer()])
-                    .collect();
-                vm.inspect_transaction_with_bytecode_compression(custom_tracers.into(), tx, true)
-            },
-        );
-        span.exit();
-        result
-    })
-    .await
-    .unwrap();
-
-    let tx_execution_metrics =
-        vm_metrics::collect_tx_execution_metrics(total_factory_deps, &execution_result);
-    (
-        execution_result,
-        tx_execution_metrics,
-        published_bytecodes.is_ok(),
-    )
+        vm_result
+    }
 }
diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs
index 900bb28886f1..95c6793d0662 100644
--- a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs
+++ b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs
@@ -1,16 +1,19 @@
 use std::{sync::Arc, time::Duration};
 
+use anyhow::Context;
 use tokio::runtime::Handle;
 use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor};
 use zksync_state::{PostgresStorage, PostgresStorageCaches, ReadStorage, StorageView};
 use zksync_system_constants::PUBLISH_BYTECODE_OVERHEAD;
-use zksync_types::{api, fee_model::BatchFeeInput, AccountTreeId, L2ChainId, MiniblockNumber};
+use zksync_types::{
+    api, fee_model::BatchFeeInput, AccountTreeId, L1BatchNumber, L2ChainId, MiniblockNumber,
+};
 use zksync_utils::bytecode::{compress_bytecode, hash_bytecode};
 
 use self::vm_metrics::SandboxStage;
 pub(super) use self::{
     error::SandboxExecutionError,
-    execute::{execute_tx_eth_call, execute_tx_in_sandbox, TxExecutionArgs},
+    execute::{TransactionExecutor, TxExecutionArgs},
     tracers::ApiTracer,
     vm_metrics::{SubmitTxStage, SANDBOX_METRICS},
 };
@@ -20,6 +23,10 @@ use super::tx_sender::MultiVMBaseSystemContracts;
 mod apply;
 mod error;
 mod execute;
+#[cfg(test)]
+pub(super) mod testonly;
+#[cfg(test)]
+mod tests;
 mod tracers;
 mod validate;
 mod vm_metrics;
@@ -208,6 +215,62 @@ pub(crate) struct TxSharedArgs {
     pub chain_id: L2ChainId,
 }
 
+/// Information about first L1 batch / miniblock in the node storage.
+#[derive(Debug, Clone, Copy)]
+pub(crate) struct BlockStartInfo {
+    /// Projected number of the first locally available miniblock. This miniblock is **not**
+    /// guaranteed to be present in the storage!
+    pub first_miniblock: MiniblockNumber,
+    /// Projected number of the first locally available L1 batch. This L1 batch is **not**
+    /// guaranteed to be present in the storage!
+    pub first_l1_batch: L1BatchNumber,
+}
+
+impl BlockStartInfo {
+    pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result<Self> {
+        let snapshot_recovery = storage
+            .snapshot_recovery_dal()
+            .get_applied_snapshot_status()
+            .await
+            .context("failed getting snapshot recovery status")?;
+        let snapshot_recovery = snapshot_recovery.as_ref();
+        Ok(Self {
+            first_miniblock: snapshot_recovery
+                .map_or(MiniblockNumber(0), |recovery| recovery.miniblock_number + 1),
+            first_l1_batch: snapshot_recovery
+                .map_or(L1BatchNumber(0), |recovery| recovery.l1_batch_number + 1),
+        })
+    }
+
+    /// Checks whether a block with the specified ID is pruned and returns an error if it is.
+    /// The `Err` variant wraps the first non-pruned miniblock.
+    pub fn ensure_not_pruned_block(&self, block: api::BlockId) -> Result<(), MiniblockNumber> {
+        match block {
+            api::BlockId::Number(api::BlockNumber::Number(number))
+                if number < self.first_miniblock.0.into() =>
+            {
+                Err(self.first_miniblock)
+            }
+            api::BlockId::Number(api::BlockNumber::Earliest)
+                if self.first_miniblock > MiniblockNumber(0) =>
+            {
+                Err(self.first_miniblock)
+            }
+            _ => Ok(()),
+        }
+    }
+}
+
+#[derive(Debug, thiserror::Error)]
+pub(crate) enum BlockArgsError {
+    #[error("Block is pruned; first retained block is {0}")]
+    Pruned(MiniblockNumber),
+    #[error("Block is missing, but can appear in the future")]
+    Missing,
+    #[error("Database error")]
+    Database(#[from] SqlxError),
+}
+
 /// Information about a block provided to VM.
 #[derive(Debug, Clone, Copy)]
 pub(crate) struct BlockArgs {
@@ -230,9 +293,17 @@ impl BlockArgs {
     pub async fn new(
         connection: &mut StorageProcessor<'_>,
         block_id: api::BlockId,
-    ) -> Result<Option<Self>, SqlxError> {
+        start_info: BlockStartInfo,
+    ) -> Result<Self, BlockArgsError> {
+        // We need to check that `block_id` is present in Postgres or can be present in the future
+        // (i.e., it does not refer to a pruned block). If called for a pruned block, the returned value
+        // (specifically, `l1_batch_timestamp_s`) will be nonsensical.
+        start_info
+            .ensure_not_pruned_block(block_id)
+            .map_err(BlockArgsError::Pruned)?;
+
         if block_id == api::BlockId::Number(api::BlockNumber::Pending) {
-            return Ok(Some(BlockArgs::pending(connection).await));
+            return Ok(BlockArgs::pending(connection).await);
         }
 
         let resolved_block_number = connection
@@ -240,27 +311,27 @@ impl BlockArgs {
             .resolve_block_id(block_id)
             .await?;
         let Some(resolved_block_number) = resolved_block_number else {
-            return Ok(None);
+            return Err(BlockArgsError::Missing);
         };
 
         let l1_batch_number = connection
             .storage_web3_dal()
             .resolve_l1_batch_number_of_miniblock(resolved_block_number)
-            .await?
-            .expected_l1_batch();
+            .await?;
         let l1_batch_timestamp_s = connection
             .blocks_web3_dal()
-            .get_expected_l1_batch_timestamp(l1_batch_number)
+            .get_expected_l1_batch_timestamp(&l1_batch_number)
             .await?;
-        assert!(
-            l1_batch_timestamp_s.is_some(),
-            "Missing batch timestamp for non-pending block"
-        );
-        Ok(Some(Self {
+        if l1_batch_timestamp_s.is_none() {
+            // Can happen after snapshot recovery if no miniblocks are persisted yet. In this case,
+            // we cannot proceed; the issue will be resolved shortly.
+            return Err(BlockArgsError::Missing);
+        }
+        Ok(Self {
             block_id,
             resolved_block_number,
             l1_batch_timestamp_s,
-        }))
+        })
     }
 
     pub fn resolved_block_number(&self) -> MiniblockNumber {
diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/testonly.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/testonly.rs
new file mode 100644
index 000000000000..c9780c42e044
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/execution_sandbox/testonly.rs
@@ -0,0 +1,74 @@
+use std::collections::HashMap;
+
+use multivm::{
+    interface::{ExecutionResult, VmExecutionResultAndLogs},
+    tracers::validator::ValidationError,
+};
+use zksync_types::{
+    fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Transaction, H256,
+};
+
+use super::TransactionExecutor;
+
+type MockExecutionOutput = (VmExecutionResultAndLogs, TransactionExecutionMetrics, bool);
+
+#[derive(Debug, Default)]
+pub(crate) struct MockTransactionExecutor {
+    call_responses: HashMap<Vec<u8>, MockExecutionOutput>,
+    tx_responses: HashMap<H256, MockExecutionOutput>,
+}
+
+impl MockTransactionExecutor {
+    pub fn insert_call_response(&mut self, calldata: Vec<u8>, result: ExecutionResult) {
+        let result = VmExecutionResultAndLogs {
+            result,
+            logs: Default::default(),
+            statistics: Default::default(),
+            refunds: Default::default(),
+        };
+        let output = (result, TransactionExecutionMetrics::default(), true);
+        self.call_responses.insert(calldata, output);
+    }
+
+    pub fn insert_tx_response(&mut self, tx_hash: H256, result: ExecutionResult) {
+        let result = VmExecutionResultAndLogs {
+            result,
+            logs: Default::default(),
+            statistics: Default::default(),
+            refunds: Default::default(),
+        };
+        let output = (result, TransactionExecutionMetrics::default(), true);
+        self.tx_responses.insert(tx_hash, output);
+    }
+
+    pub fn validate_tx(&self, tx: &L2Tx) -> Result<(), ValidationError> {
+        self.tx_responses
+            .get(&tx.hash())
+            .unwrap_or_else(|| panic!("Validating unexpected transaction: {tx:?}"));
+        Ok(())
+    }
+
+    pub fn execute_tx(&self, tx: &Transaction) -> MockExecutionOutput {
+        if let ExecuteTransactionCommon::L2(data) = &tx.common_data {
+            if data.input.is_none() {
+                // `Transaction` was obtained from a `CallRequest`
+                return self
+                    .call_responses
+                    .get(tx.execute.calldata())
+                    .unwrap_or_else(|| panic!("Executing unexpected call: {tx:?}"))
+                    .clone();
+            }
+        }
+
+        self.tx_responses
+            .get(&tx.hash())
+            .unwrap_or_else(|| panic!("Executing unexpected transaction: {tx:?}"))
+            .clone()
+    }
+}
+
+impl From<MockTransactionExecutor> for TransactionExecutor {
+    fn from(executor: MockTransactionExecutor) -> Self {
+        Self::Mock(executor)
+    }
+}
diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs
new file mode 100644
index 000000000000..d81b4b940454
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs
@@ -0,0 +1,155 @@
+//! Tests for the VM execution sandbox.
+
+use assert_matches::assert_matches;
+
+use super::*;
+use crate::{
+    genesis::{ensure_genesis_state, GenesisParams},
+    utils::testonly::{create_miniblock, prepare_empty_recovery_snapshot},
+};
+
+#[tokio::test]
+async fn creating_block_args() {
+    let pool = ConnectionPool::test_pool().await;
+    let mut storage = pool.access_storage().await.unwrap();
+    ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock())
+        .await
+        .unwrap();
+    let miniblock = create_miniblock(1);
+    storage
+        .blocks_dal()
+        .insert_miniblock(&miniblock)
+        .await
+        .unwrap();
+
+    let pending_block_args = BlockArgs::pending(&mut storage).await;
+    assert_eq!(
+        pending_block_args.block_id,
+        api::BlockId::Number(api::BlockNumber::Pending)
+    );
+    assert_eq!(pending_block_args.resolved_block_number, MiniblockNumber(2));
+    assert_eq!(pending_block_args.l1_batch_timestamp_s, None);
+
+    let start_info = BlockStartInfo::new(&mut storage).await.unwrap();
+    assert_eq!(start_info.first_miniblock, MiniblockNumber(0));
+    assert_eq!(start_info.first_l1_batch, L1BatchNumber(0));
+
+    let latest_block = api::BlockId::Number(api::BlockNumber::Latest);
+    let latest_block_args = BlockArgs::new(&mut storage, latest_block, start_info)
+        .await
+        .unwrap();
+    assert_eq!(latest_block_args.block_id, latest_block);
+    assert_eq!(latest_block_args.resolved_block_number, MiniblockNumber(1));
+    assert_eq!(
+        latest_block_args.l1_batch_timestamp_s,
+        Some(miniblock.timestamp)
+    );
+
+    let earliest_block = api::BlockId::Number(api::BlockNumber::Earliest);
+    let earliest_block_args = BlockArgs::new(&mut storage, earliest_block, start_info)
+        .await
+        .unwrap();
+    assert_eq!(earliest_block_args.block_id, earliest_block);
+    assert_eq!(
+        earliest_block_args.resolved_block_number,
+        MiniblockNumber(0)
+    );
+    assert_eq!(earliest_block_args.l1_batch_timestamp_s, Some(0));
+
+    let missing_block = api::BlockId::Number(100.into());
+    let err = BlockArgs::new(&mut storage, missing_block, start_info)
+        .await
+        .unwrap_err();
+    assert_matches!(err, BlockArgsError::Missing);
+}
+
+#[tokio::test]
+async fn creating_block_args_after_snapshot_recovery() {
+    let pool = ConnectionPool::test_pool().await;
+    let mut storage = pool.access_storage().await.unwrap();
+    let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await;
+
+    let pending_block_args = BlockArgs::pending(&mut storage).await;
+    assert_eq!(
+        pending_block_args.block_id,
+        api::BlockId::Number(api::BlockNumber::Pending)
+    );
+    assert_eq!(
+        pending_block_args.resolved_block_number,
+        snapshot_recovery.miniblock_number + 1
+    );
+    assert_eq!(pending_block_args.l1_batch_timestamp_s, None);
+
+    let start_info = BlockStartInfo::new(&mut storage).await.unwrap();
+    assert_eq!(
+        start_info.first_miniblock,
+        snapshot_recovery.miniblock_number + 1
+    );
+    assert_eq!(
+        start_info.first_l1_batch,
+        snapshot_recovery.l1_batch_number + 1
+    );
+
+    let latest_block = api::BlockId::Number(api::BlockNumber::Latest);
+    let err = BlockArgs::new(&mut storage, latest_block, start_info)
+        .await
+        .unwrap_err();
+    assert_matches!(err, BlockArgsError::Missing);
+
+    let pruned_blocks = [
+        api::BlockNumber::Earliest,
+        0.into(),
+        snapshot_recovery.miniblock_number.0.into(),
+    ];
+    for pruned_block in pruned_blocks {
+        let pruned_block = api::BlockId::Number(pruned_block);
+        let err = BlockArgs::new(&mut storage, pruned_block, start_info)
+            .await
+            .unwrap_err();
+        assert_matches!(err, BlockArgsError::Pruned(_));
+    }
+
+    let missing_blocks = [
+        api::BlockNumber::from(snapshot_recovery.miniblock_number.0 + 2),
+        100.into(),
+    ];
+    for missing_block in missing_blocks {
+        let missing_block = api::BlockId::Number(missing_block);
+        let err = BlockArgs::new(&mut storage, missing_block, start_info)
+            .await
+            .unwrap_err();
+        assert_matches!(err, BlockArgsError::Missing);
+    }
+
+    let miniblock = create_miniblock(snapshot_recovery.miniblock_number.0 + 1);
+    storage
+        .blocks_dal()
+        .insert_miniblock(&miniblock)
+        .await
+        .unwrap();
+
+    let latest_block_args = BlockArgs::new(&mut storage, latest_block, start_info)
+        .await
+        .unwrap();
+    assert_eq!(latest_block_args.block_id, latest_block);
+    assert_eq!(latest_block_args.resolved_block_number, miniblock.number);
+    assert_eq!(
+        latest_block_args.l1_batch_timestamp_s,
+        Some(miniblock.timestamp)
+    );
+
+    for pruned_block in pruned_blocks {
+        let pruned_block = api::BlockId::Number(pruned_block);
+        let err = BlockArgs::new(&mut storage, pruned_block, start_info)
+            .await
+            .unwrap_err();
+        assert_matches!(err, BlockArgsError::Pruned(_));
+    }
+    for missing_block in missing_blocks {
+        let missing_block = api::BlockId::Number(missing_block);
+        let err = BlockArgs::new(&mut storage, missing_block, start_info)
+            .await
+            .unwrap_err();
+        assert_matches!(err, BlockArgsError::Missing);
+    }
+}
diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs
index f2ad317800b5..419e9804f887 100644
--- a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs
+++ b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs
@@ -14,19 +14,26 @@ use zksync_types::{l2::L2Tx, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_S
 
 use super::{
     apply,
+    execute::TransactionExecutor,
     vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS},
     BlockArgs, TxExecutionArgs, TxSharedArgs, VmPermit,
 };
 
-impl TxSharedArgs {
+impl TransactionExecutor {
     pub(crate) async fn validate_tx_in_sandbox(
-        self,
+        &self,
         connection_pool: ConnectionPool,
         vm_permit: VmPermit,
         tx: L2Tx,
+        shared_args: TxSharedArgs,
         block_args: BlockArgs,
         computational_gas_limit: u32,
     ) -> Result<(), ValidationError> {
+        #[cfg(test)]
+        if let Self::Mock(mock) = self {
+            return mock.validate_tx(&tx);
+        }
+
         let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start();
         let mut connection = connection_pool.access_storage_tagged("api").await.unwrap();
         let validation_params =
@@ -40,7 +47,7 @@ impl TxSharedArgs {
             let span = tracing::debug_span!("validate_in_sandbox").entered();
             let result = apply::apply_vm_in_sandbox(
                 vm_permit,
-                self,
+                shared_args,
                 true,
                 &execution_args,
                 &connection_pool,
diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs
index efefb021bb2f..455ba262d471 100644
--- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs
+++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs
@@ -2,6 +2,7 @@
 
 use std::{cmp, sync::Arc, time::Instant};
 
+use anyhow::Context as _;
 use multivm::{
     interface::VmExecutionResultAndLogs,
     utils::{adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead},
@@ -17,19 +18,19 @@ use zksync_types::{
     get_code_key, get_intrinsic_constants,
     l2::{error::TxCheckError::TxDuplication, L2Tx},
     utils::storage_key_for_eth_balance,
-    AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, Nonce, PackedEthSignature,
-    ProtocolVersionId, Transaction, VmVersion, H160, H256, MAX_GAS_PER_PUBDATA_BYTE,
-    MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, U256,
+    AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, MiniblockNumber, Nonce,
+    PackedEthSignature, ProtocolVersionId, Transaction, VmVersion, H160, H256,
+    MAX_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, U256,
 };
 use zksync_utils::h256_to_u256;
 
 pub(super) use self::{proxy::TxProxy, result::SubmitTxError};
-use super::execution_sandbox::execute_tx_in_sandbox;
 use crate::{
     api_server::{
         execution_sandbox::{
-            execute_tx_eth_call, get_pubdata_for_factory_deps, BlockArgs, SubmitTxStage,
-            TxExecutionArgs, TxSharedArgs, VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS,
+            get_pubdata_for_factory_deps, BlockArgs, BlockStartInfo, SubmitTxStage,
+            TransactionExecutor, TxExecutionArgs, TxSharedArgs, VmConcurrencyLimiter, VmPermit,
+            SANDBOX_METRICS,
         },
         tx_sender::result::ApiCallResult,
     },
@@ -40,6 +41,8 @@ use crate::{
 
 mod proxy;
 mod result;
+#[cfg(test)]
+pub(crate) mod tests;
 
 #[derive(Debug, Clone)]
 pub struct MultiVMBaseSystemContracts {
@@ -190,6 +193,7 @@ impl TxSenderBuilder {
             vm_concurrency_limiter,
             storage_caches,
             sealer,
+            executor: TransactionExecutor::Real,
         }))
     }
 }
@@ -245,6 +249,7 @@ pub struct TxSenderInner {
     storage_caches: PostgresStorageCaches,
     /// Batch sealer used to check whether transaction can be executed by the sequencer.
     sealer: Arc<dyn ConditionalSealer>,
+    pub(super) executor: TransactionExecutor,
 }
 
 #[derive(Clone)]
@@ -265,6 +270,7 @@ impl TxSender {
         self.0.storage_caches.clone()
     }
 
+    // TODO (PLA-725): propagate DB errors instead of panicking
     #[tracing::instrument(skip(self, tx))]
     pub async fn submit_tx(&self, tx: L2Tx) -> Result<L2TxSubmissionResult, SubmitTxError> {
         let stage_latency = SANDBOX_METRICS.submit_tx[&SubmitTxStage::Validate].start();
@@ -284,17 +290,20 @@ impl TxSender {
         let block_args = BlockArgs::pending(&mut connection).await;
         drop(connection);
 
-        let (_, tx_metrics, published_bytecodes) = execute_tx_in_sandbox(
-            vm_permit.clone(),
-            shared_args.clone(),
-            true,
-            TxExecutionArgs::for_validation(&tx),
-            self.0.replica_connection_pool.clone(),
-            tx.clone().into(),
-            block_args,
-            vec![],
-        )
-        .await;
+        let (_, tx_metrics, published_bytecodes) = self
+            .0
+            .executor
+            .execute_tx_in_sandbox(
+                vm_permit.clone(),
+                shared_args.clone(),
+                true,
+                TxExecutionArgs::for_validation(&tx),
+                self.0.replica_connection_pool.clone(),
+                tx.clone().into(),
+                block_args,
+                vec![],
+            )
+            .await;
 
         tracing::info!(
             "Submit tx {:?} with execution metrics {:?}",
@@ -305,11 +314,14 @@ impl TxSender {
 
         let stage_latency = SANDBOX_METRICS.submit_tx[&SubmitTxStage::VerifyExecute].start();
         let computational_gas_limit = self.0.sender_config.validation_computational_gas_limit;
-        let validation_result = shared_args
+        let validation_result = self
+            .0
+            .executor
             .validate_tx_in_sandbox(
                 self.0.replica_connection_pool.clone(),
                 vm_permit,
                 tx.clone(),
+                shared_args,
                 block_args,
                 computational_gas_limit,
             )
@@ -349,7 +361,7 @@ impl TxSender {
 
         let nonce = tx.common_data.nonce.0;
         let hash = tx.hash();
-        let expected_nonce = self.get_expected_nonce(&tx).await;
+        let initiator_account = tx.initiator_account();
         let submission_res_handle = self
             .0
             .master_connection_pool
@@ -365,11 +377,15 @@ impl TxSender {
         APP_METRICS.processed_txs[&TxStage::Mempool(submission_res_handle)].inc();
 
         match submission_res_handle {
-            L2TxSubmissionResult::AlreadyExecuted => Err(SubmitTxError::NonceIsTooLow(
-                expected_nonce.0,
-                expected_nonce.0 + self.0.sender_config.max_nonce_ahead,
-                nonce,
-            )),
+            L2TxSubmissionResult::AlreadyExecuted => {
+                let Nonce(expected_nonce) =
+                    self.get_expected_nonce(initiator_account).await.unwrap();
+                Err(SubmitTxError::NonceIsTooLow(
+                    expected_nonce,
+                    expected_nonce + self.0.sender_config.max_nonce_ahead,
+                    nonce,
+                ))
+            }
             L2TxSubmissionResult::Duplicate => Err(SubmitTxError::IncorrectTx(TxDuplication(hash))),
             _ => {
                 SANDBOX_METRICS.submit_tx[&SubmitTxStage::DbInsert]
@@ -454,19 +470,22 @@ impl TxSender {
     }
 
     async fn validate_account_nonce(&self, tx: &L2Tx) -> Result<(), SubmitTxError> {
-        let expected_nonce = self.get_expected_nonce(tx).await;
+        let Nonce(expected_nonce) = self
+            .get_expected_nonce(tx.initiator_account())
+            .await
+            .unwrap();
 
-        if tx.common_data.nonce.0 < expected_nonce.0 {
+        if tx.common_data.nonce.0 < expected_nonce {
             Err(SubmitTxError::NonceIsTooLow(
-                expected_nonce.0,
-                expected_nonce.0 + self.0.sender_config.max_nonce_ahead,
+                expected_nonce,
+                expected_nonce + self.0.sender_config.max_nonce_ahead,
                 tx.nonce().0,
             ))
         } else {
-            let max_nonce = expected_nonce.0 + self.0.sender_config.max_nonce_ahead;
-            if !(expected_nonce.0..=max_nonce).contains(&tx.common_data.nonce.0) {
+            let max_nonce = expected_nonce + self.0.sender_config.max_nonce_ahead;
+            if !(expected_nonce..=max_nonce).contains(&tx.common_data.nonce.0) {
                 Err(SubmitTxError::NonceIsTooHigh(
-                    expected_nonce.0,
+                    expected_nonce,
                     max_nonce,
                     tx.nonce().0,
                 ))
@@ -476,25 +495,37 @@ impl TxSender {
         }
     }
 
-    async fn get_expected_nonce(&self, tx: &L2Tx) -> Nonce {
-        let mut connection = self
+    async fn get_expected_nonce(&self, initiator_account: Address) -> anyhow::Result<Nonce> {
+        let mut storage = self
             .0
             .replica_connection_pool
             .access_storage_tagged("api")
-            .await
-            .unwrap();
+            .await?;
 
-        let latest_block_number = connection
-            .blocks_web3_dal()
+        let latest_block_number = storage
+            .blocks_dal()
             .get_sealed_miniblock_number()
             .await
-            .unwrap();
-        let nonce = connection
+            .context("failed getting sealed miniblock number")?;
+        let latest_block_number = match latest_block_number {
+            Some(number) => number,
+            None => {
+                // We don't have miniblocks in the storage yet. Use the snapshot miniblock number instead.
+                let start = BlockStartInfo::new(&mut storage).await?;
+                MiniblockNumber(start.first_miniblock.saturating_sub(1))
+            }
+        };
+
+        let nonce = storage
             .storage_web3_dal()
-            .get_address_historical_nonce(tx.initiator_account(), latest_block_number)
+            .get_address_historical_nonce(initiator_account, latest_block_number)
             .await
-            .unwrap();
-        Nonce(nonce.as_u32())
+            .with_context(|| {
+                format!("failed getting nonce for address {initiator_account:?} at miniblock #{latest_block_number}")
+            })?;
+        let nonce = u32::try_from(nonce)
+            .map_err(|err| anyhow::anyhow!("failed converting nonce to u32: {err}"))?;
+        Ok(Nonce(nonce))
     }
 
     async fn validate_enough_balance(&self, tx: &L2Tx) -> Result<(), SubmitTxError> {
@@ -592,17 +623,20 @@ impl TxSender {
         let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit;
         let execution_args =
             TxExecutionArgs::for_gas_estimate(vm_execution_cache_misses_limit, &tx, base_fee);
-        let (exec_result, tx_metrics, _) = execute_tx_in_sandbox(
-            vm_permit,
-            shared_args,
-            true,
-            execution_args,
-            self.0.replica_connection_pool.clone(),
-            tx.clone(),
-            block_args,
-            vec![],
-        )
-        .await;
+        let (exec_result, tx_metrics, _) = self
+            .0
+            .executor
+            .execute_tx_in_sandbox(
+                vm_permit,
+                shared_args,
+                true,
+                execution_args,
+                self.0.replica_connection_pool.clone(),
+                tx.clone(),
+                block_args,
+                vec![],
+            )
+            .await;
 
         (exec_result, tx_metrics)
     }
@@ -849,17 +883,19 @@ impl TxSender {
         let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?;
 
         let vm_execution_cache_misses_limit = self.0.sender_config.vm_execution_cache_misses_limit;
-        execute_tx_eth_call(
-            vm_permit,
-            self.shared_args(),
-            self.0.replica_connection_pool.clone(),
-            tx,
-            block_args,
-            vm_execution_cache_misses_limit,
-            vec![],
-        )
-        .await
-        .into_api_call_result()
+        self.0
+            .executor
+            .execute_tx_eth_call(
+                vm_permit,
+                self.shared_args(),
+                self.0.replica_connection_pool.clone(),
+                tx,
+                block_args,
+                vm_execution_cache_misses_limit,
+                vec![],
+            )
+            .await
+            .into_api_call_result()
     }
 
     pub async fn gas_price(&self) -> u64 {
diff --git a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs
new file mode 100644
index 000000000000..55c6852cd4ab
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs
@@ -0,0 +1,139 @@
+//! Tests for the transaction sender.
+
+use zksync_types::{get_nonce_key, StorageLog};
+
+use super::*;
+use crate::{
+    api_server::execution_sandbox::{testonly::MockTransactionExecutor, VmConcurrencyBarrier},
+    genesis::{ensure_genesis_state, GenesisParams},
+    utils::testonly::{create_miniblock, prepare_recovery_snapshot, MockL1GasPriceProvider},
+};
+
+pub(crate) async fn create_test_tx_sender(
+    pool: ConnectionPool,
+    l2_chain_id: L2ChainId,
+    tx_executor: TransactionExecutor,
+) -> (TxSender, VmConcurrencyBarrier) {
+    let web3_config = Web3JsonRpcConfig::for_tests();
+    let state_keeper_config = StateKeeperConfig::for_tests();
+    let tx_sender_config = TxSenderConfig::new(&state_keeper_config, &web3_config, l2_chain_id);
+
+    let mut storage_caches = PostgresStorageCaches::new(1, 1);
+    let cache_update_task = storage_caches.configure_storage_values_cache(
+        1,
+        pool.clone(),
+        tokio::runtime::Handle::current(),
+    );
+    tokio::task::spawn_blocking(cache_update_task);
+
+    let gas_adjuster = Arc::new(MockL1GasPriceProvider(1));
+    let (mut tx_sender, vm_barrier) = crate::build_tx_sender(
+        &tx_sender_config,
+        &web3_config,
+        &state_keeper_config,
+        pool.clone(),
+        pool,
+        gas_adjuster,
+        storage_caches,
+    )
+    .await;
+
+    Arc::get_mut(&mut tx_sender.0).unwrap().executor = tx_executor;
+    (tx_sender, vm_barrier)
+}
+
+#[tokio::test]
+async fn getting_nonce_for_account() {
+    let l2_chain_id = L2ChainId::default();
+    let test_address = Address::repeat_byte(1);
+    let pool = ConnectionPool::test_pool().await;
+    let mut storage = pool.access_storage().await.unwrap();
+    ensure_genesis_state(&mut storage, l2_chain_id, &GenesisParams::mock())
+        .await
+        .unwrap();
+    // Manually insert a nonce for the address.
+    let nonce_key = get_nonce_key(&test_address);
+    let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(123));
+    storage
+        .storage_logs_dal()
+        .append_storage_logs(MiniblockNumber(0), &[(H256::default(), vec![nonce_log])])
+        .await;
+
+    let tx_executor = MockTransactionExecutor::default().into();
+    let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await;
+
+    let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap();
+    assert_eq!(nonce, Nonce(123));
+
+    // Insert another miniblock with a new nonce log.
+    storage
+        .blocks_dal()
+        .insert_miniblock(&create_miniblock(1))
+        .await
+        .unwrap();
+    let nonce_log = StorageLog {
+        value: H256::from_low_u64_be(321),
+        ..nonce_log
+    };
+    storage
+        .storage_logs_dal()
+        .insert_storage_logs(MiniblockNumber(1), &[(H256::default(), vec![nonce_log])])
+        .await;
+
+    let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap();
+    assert_eq!(nonce, Nonce(321));
+    let missing_address = Address::repeat_byte(0xff);
+    let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap();
+    assert_eq!(nonce, Nonce(0));
+}
+
+#[tokio::test]
+async fn getting_nonce_for_account_after_snapshot_recovery() {
+    const SNAPSHOT_MINIBLOCK_NUMBER: u32 = 42;
+
+    let pool = ConnectionPool::test_pool().await;
+    let mut storage = pool.access_storage().await.unwrap();
+    let test_address = Address::repeat_byte(1);
+    let other_address = Address::repeat_byte(2);
+    let nonce_logs = [
+        StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)),
+        StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)),
+    ];
+    prepare_recovery_snapshot(&mut storage, SNAPSHOT_MINIBLOCK_NUMBER, &nonce_logs).await;
+
+    let l2_chain_id = L2ChainId::default();
+    let tx_executor = MockTransactionExecutor::default().into();
+    let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await;
+
+    let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap();
+    assert_eq!(nonce, Nonce(123));
+    let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap();
+    assert_eq!(nonce, Nonce(25));
+    let missing_address = Address::repeat_byte(0xff);
+    let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap();
+    assert_eq!(nonce, Nonce(0));
+
+    storage
+        .blocks_dal()
+        .insert_miniblock(&create_miniblock(SNAPSHOT_MINIBLOCK_NUMBER + 1))
+        .await
+        .unwrap();
+    let new_nonce_logs = vec![StorageLog::new_write_log(
+        get_nonce_key(&test_address),
+        H256::from_low_u64_be(321),
+    )];
+    storage
+        .storage_logs_dal()
+        .insert_storage_logs(
+            MiniblockNumber(SNAPSHOT_MINIBLOCK_NUMBER + 1),
+            &[(H256::default(), new_nonce_logs)],
+        )
+        .await;
+
+    let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap();
+    assert_eq!(nonce, Nonce(321));
+    let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap();
+    assert_eq!(nonce, Nonce(25));
+    let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap();
+    assert_eq!(nonce, Nonce(0));
+}
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs
index 51f06345da19..c8fbc726e2f0 100644
--- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs
@@ -2,7 +2,7 @@
 //! Consists mostly of boilerplate code implementing the `jsonrpsee` server traits for the corresponding
 //! namespace structures defined in `zksync_core`.
 
-use std::{error::Error, fmt};
+use std::fmt;
 
 use zksync_web3_decl::{
     error::Web3Error,
@@ -14,15 +14,13 @@ use crate::api_server::web3::metrics::API_METRICS;
 pub mod batch_limiter_middleware;
 pub mod namespaces;
 
-pub fn from_std_error(e: impl Error) -> ErrorObjectOwned {
-    ErrorObjectOwned::owned(ErrorCode::InternalError.code(), e.to_string(), Some(()))
-}
-
 pub fn into_jsrpc_error(err: Web3Error) -> ErrorObjectOwned {
     ErrorObjectOwned::owned(
         match err {
             Web3Error::InternalError | Web3Error::NotImplemented => ErrorCode::InternalError.code(),
             Web3Error::NoBlock
+            | Web3Error::PrunedBlock(_)
+            | Web3Error::PrunedL1Batch(_)
             | Web3Error::NoSuchFunction
             | Web3Error::RLPError(_)
             | Web3Error::InvalidTransactionData(_)
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/debug.rs
index 0bd61bbbc3d2..9f1e00a6c80a 100644
--- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/debug.rs
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/debug.rs
@@ -21,6 +21,7 @@ impl DebugNamespaceServer for DebugNamespace {
             .await
             .map_err(into_jsrpc_error)
     }
+
     async fn trace_block_by_hash(
         &self,
         hash: H256,
@@ -30,6 +31,7 @@ impl DebugNamespaceServer for DebugNamespace {
             .await
             .map_err(into_jsrpc_error)
     }
+
     async fn trace_call(
         &self,
         request: CallRequest,
@@ -40,11 +42,14 @@ impl DebugNamespaceServer for DebugNamespace {
             .await
             .map_err(into_jsrpc_error)
     }
+
     async fn trace_transaction(
         &self,
         tx_hash: H256,
         options: Option<TracerConfig>,
     ) -> RpcResult<Option<DebugCall>> {
-        Ok(self.debug_trace_transaction_impl(tx_hash, options).await)
+        self.debug_trace_transaction_impl(tx_hash, options)
+            .await
+            .map_err(into_jsrpc_error)
     }
 }
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs
index e7500968f42e..8ba611b14073 100644
--- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs
@@ -136,7 +136,9 @@ impl ZksNamespaceServer for ZksNamespace {
     }
 
     async fn get_bytecode_by_hash(&self, hash: H256) -> RpcResult<Option<Vec<u8>>> {
-        Ok(self.get_bytecode_by_hash_impl(hash).await)
+        self.get_bytecode_by_hash_impl(hash)
+            .await
+            .map_err(into_jsrpc_error)
     }
 
     async fn get_l1_gas_price(&self) -> RpcResult<U64> {
@@ -151,7 +153,9 @@ impl ZksNamespaceServer for ZksNamespace {
         &self,
         version_id: Option<u16>,
     ) -> RpcResult<Option<ProtocolVersion>> {
-        Ok(self.get_protocol_version_impl(version_id).await)
+        self.get_protocol_version_impl(version_id)
+            .await
+            .map_err(into_jsrpc_error)
     }
 
     async fn get_proof(
diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs
index 06a4b0d545f5..0f88382a1d18 100644
--- a/core/lib/zksync_core/src/api_server/web3/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/mod.rs
@@ -9,11 +9,10 @@ use tokio::{
     task::JoinHandle,
 };
 use tower_http::{cors::CorsLayer, metrics::InFlightRequestsLayer};
-use zksync_dal::{ConnectionPool, StorageProcessor};
+use zksync_dal::ConnectionPool;
 use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck};
-use zksync_types::{api, MiniblockNumber};
+use zksync_types::MiniblockNumber;
 use zksync_web3_decl::{
-    error::Web3Error,
     jsonrpsee::{
         server::{BatchRequestConfig, RpcServiceBuilder, ServerBuilder},
         RpcModule,
@@ -26,7 +25,6 @@ use zksync_web3_decl::{
 };
 
 use self::{
-    backend_jsonrpsee::internal_error,
     metrics::API_METRICS,
     namespaces::{
         DebugNamespace, EnNamespace, EthNamespace, NetNamespace, SnapshotsNamespace, Web3Namespace,
@@ -37,7 +35,9 @@ use self::{
 };
 use crate::{
     api_server::{
-        execution_sandbox::VmConcurrencyBarrier, tree::TreeApiHttpClient, tx_sender::TxSender,
+        execution_sandbox::{BlockStartInfo, VmConcurrencyBarrier},
+        tree::TreeApiHttpClient,
+        tx_sender::TxSender,
         web3::backend_jsonrpsee::batch_limiter_middleware::LimitMiddleware,
     },
     sync_layer::SyncState,
@@ -272,41 +272,45 @@ impl ApiBuilder {
 }
 
 impl FullApiParams {
-    fn build_rpc_state(self) -> RpcState {
-        // Chosen to be significantly smaller than the interval between miniblocks, but larger than
-        // the latency of getting the latest sealed miniblock number from Postgres. If the API server
-        // processes enough requests, information about the latest sealed miniblock will be updated
-        // by reporting block difference metrics, so the actual update lag would be much smaller than this value.
-        const SEALED_MINIBLOCK_UPDATE_INTERVAL: Duration = Duration::from_millis(25);
-
-        let (last_sealed_miniblock, update_task) =
-            SealedMiniblockNumber::new(self.last_miniblock_pool, SEALED_MINIBLOCK_UPDATE_INTERVAL);
-        // The update tasks takes care of its termination, so we don't need to retain its handle.
-        tokio::spawn(update_task);
-
-        RpcState {
+    async fn build_rpc_state(
+        self,
+        last_sealed_miniblock: SealedMiniblockNumber,
+    ) -> anyhow::Result<RpcState> {
+        let mut storage = self
+            .last_miniblock_pool
+            .access_storage_tagged("api")
+            .await?;
+        let start_info = BlockStartInfo::new(&mut storage).await?;
+        drop(storage);
+
+        Ok(RpcState {
             installed_filters: Arc::new(Mutex::new(Filters::new(self.optional.filters_limit))),
             connection_pool: self.pool,
             tx_sender: self.tx_sender,
             sync_state: self.optional.sync_state,
             api_config: self.config,
+            start_info,
             last_sealed_miniblock,
             tree_api: self
                 .optional
                 .tree_api_url
                 .map(|url| TreeApiHttpClient::new(url.as_str())),
-        }
+        })
     }
 
-    async fn build_rpc_module(self, pubsub: Option<EthSubscribe>) -> RpcModule<()> {
+    async fn build_rpc_module(
+        self,
+        pub_sub: Option<EthSubscribe>,
+        last_sealed_miniblock: SealedMiniblockNumber,
+    ) -> anyhow::Result<RpcModule<()>> {
         let namespaces = self.namespaces.clone();
         let zksync_network_id = self.config.l2_chain_id;
-        let rpc_state = self.build_rpc_state();
+        let rpc_state = self.build_rpc_state(last_sealed_miniblock).await?;
 
         // Collect all the methods into a single RPC module.
         let mut rpc = RpcModule::new(());
-        if let Some(pubsub) = pubsub {
-            rpc.merge(pubsub.into_rpc())
+        if let Some(pub_sub) = pub_sub {
+            rpc.merge(pub_sub.into_rpc())
                 .expect("Can't merge eth pubsub namespace");
         }
 
@@ -338,7 +342,7 @@ impl FullApiParams {
             rpc.merge(SnapshotsNamespace::new(rpc_state).into_rpc())
                 .expect("Can't merge snapshots namespace");
         }
-        rpc
+        Ok(rpc)
     }
 
     async fn spawn_server(
@@ -389,30 +393,27 @@ impl FullApiParams {
         self,
         stop_receiver: watch::Receiver<bool>,
     ) -> anyhow::Result<ApiServerHandles> {
+        // Chosen to be significantly smaller than the interval between miniblocks, but larger than
+        // the latency of getting the latest sealed miniblock number from Postgres. If the API server
+        // processes enough requests, information about the latest sealed miniblock will be updated
+        // by reporting block difference metrics, so the actual update lag would be much smaller than this value.
+        const SEALED_MINIBLOCK_UPDATE_INTERVAL: Duration = Duration::from_millis(25);
+
         let transport = self.transport;
         let health_check_name = match transport {
             ApiTransport::Http(_) => "http_api",
             ApiTransport::WebSocket(_) => "ws_api",
         };
         let (health_check, health_updater) = ReactiveHealthCheck::new(health_check_name);
-        let vm_barrier = self.vm_barrier.clone();
-        let batch_request_config = self
-            .optional
-            .batch_request_size_limit
-            .map_or(BatchRequestConfig::Unlimited, |limit| {
-                BatchRequestConfig::Limit(limit as u32)
-            });
-        let response_body_size_limit = self
-            .optional
-            .response_body_size_limit
-            .map_or(u32::MAX, |limit| limit as u32);
 
-        let websocket_requests_per_minute_limit = self.optional.websocket_requests_per_minute_limit;
-        let subscriptions_limit = self.optional.subscriptions_limit;
+        let (last_sealed_miniblock, update_task) = SealedMiniblockNumber::new(
+            self.last_miniblock_pool.clone(),
+            SEALED_MINIBLOCK_UPDATE_INTERVAL,
+            stop_receiver.clone(),
+        );
+        let mut tasks = vec![tokio::spawn(update_task)];
 
-        let mut tasks = vec![];
-        let mut pubsub = None;
-        if matches!(transport, ApiTransport::WebSocket(_))
+        let pub_sub = if matches!(transport, ApiTransport::WebSocket(_))
             && self.namespaces.contains(&Namespace::Pubsub)
         {
             let mut pub_sub = EthSubscribe::new();
@@ -425,23 +426,19 @@ impl FullApiParams {
                 self.polling_interval,
                 stop_receiver.clone(),
             ));
-            pubsub = Some(pub_sub);
-        }
+            Some(pub_sub)
+        } else {
+            None
+        };
 
-        let rpc = self.build_rpc_module(pubsub).await;
         // Start the server in a separate tokio runtime from a dedicated thread.
         let (local_addr_sender, local_addr) = oneshot::channel();
-        let server_task = tokio::spawn(Self::run_jsonrpsee_server(
-            rpc,
-            transport,
+        let server_task = tokio::spawn(self.run_jsonrpsee_server(
             stop_receiver,
+            pub_sub,
+            last_sealed_miniblock,
             local_addr_sender,
             health_updater,
-            vm_barrier,
-            batch_request_config,
-            response_body_size_limit,
-            subscriptions_limit,
-            websocket_requests_per_minute_limit,
         ));
 
         let local_addr = match local_addr.await {
@@ -463,19 +460,33 @@ impl FullApiParams {
         })
     }
 
-    #[allow(clippy::too_many_arguments)]
     async fn run_jsonrpsee_server(
-        rpc: RpcModule<()>,
-        transport: ApiTransport,
+        self,
         mut stop_receiver: watch::Receiver<bool>,
+        pub_sub: Option<EthSubscribe>,
+        last_sealed_miniblock: SealedMiniblockNumber,
         local_addr_sender: oneshot::Sender<SocketAddr>,
         health_updater: HealthUpdater,
-        vm_barrier: VmConcurrencyBarrier,
-        batch_request_config: BatchRequestConfig,
-        response_body_size_limit: u32,
-        subscriptions_limit: Option<usize>,
-        websocket_requests_per_minute_limit: Option<NonZeroU32>,
     ) -> anyhow::Result<()> {
+        let transport = self.transport;
+        let batch_request_config = self
+            .optional
+            .batch_request_size_limit
+            .map_or(BatchRequestConfig::Unlimited, |limit| {
+                BatchRequestConfig::Limit(limit as u32)
+            });
+        let response_body_size_limit = self
+            .optional
+            .response_body_size_limit
+            .map_or(u32::MAX, |limit| limit as u32);
+        let websocket_requests_per_minute_limit = self.optional.websocket_requests_per_minute_limit;
+        let subscriptions_limit = self.optional.subscriptions_limit;
+        let vm_barrier = self.vm_barrier.clone();
+
+        let rpc = self
+            .build_rpc_module(pub_sub, last_sealed_miniblock)
+            .await?;
+
         let (transport_str, is_http, addr) = match transport {
             ApiTransport::Http(addr) => ("HTTP", true, addr),
             ApiTransport::WebSocket(addr) => ("WS", false, addr),
@@ -564,14 +575,3 @@ impl FullApiParams {
         Ok(())
     }
 }
-
-async fn resolve_block(
-    connection: &mut StorageProcessor<'_>,
-    block: api::BlockId,
-    method_name: &'static str,
-) -> Result<MiniblockNumber, Web3Error> {
-    let result = connection.blocks_web3_dal().resolve_block_id(block).await;
-    result
-        .map_err(|err| internal_error(method_name, err))?
-        .ok_or(Web3Error::NoBlock)
-}
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs
index 46640d63648e..d30e3652e293 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs
@@ -2,60 +2,41 @@ use std::sync::Arc;
 
 use multivm::{interface::ExecutionResult, vm_latest::constants::BLOCK_GAS_LIMIT};
 use once_cell::sync::OnceCell;
-use zksync_dal::ConnectionPool;
-use zksync_state::PostgresStorageCaches;
 use zksync_types::{
     api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig},
     fee_model::BatchFeeInput,
     l2::L2Tx,
     transaction_request::CallRequest,
     vm_trace::Call,
-    AccountTreeId, L2ChainId, H256, USED_BOOTLOADER_MEMORY_BYTES,
+    AccountTreeId, H256, USED_BOOTLOADER_MEMORY_BYTES,
 };
 use zksync_web3_decl::error::Web3Error;
 
 use crate::api_server::{
-    execution_sandbox::{
-        execute_tx_eth_call, ApiTracer, BlockArgs, TxSharedArgs, VmConcurrencyLimiter,
-    },
-    tx_sender::ApiContracts,
-    web3::{
-        backend_jsonrpsee::internal_error,
-        metrics::API_METRICS,
-        resolve_block,
-        state::{RpcState, SealedMiniblockNumber},
-    },
+    execution_sandbox::{ApiTracer, TxSharedArgs},
+    tx_sender::{ApiContracts, TxSenderConfig},
+    web3::{backend_jsonrpsee::internal_error, metrics::API_METRICS, state::RpcState},
 };
 
 #[derive(Debug, Clone)]
 pub struct DebugNamespace {
-    connection_pool: ConnectionPool,
-    fair_l2_gas_price: u64,
+    state: RpcState,
     api_contracts: ApiContracts,
-    vm_execution_cache_misses_limit: Option<usize>,
-    vm_concurrency_limiter: Arc<VmConcurrencyLimiter>,
-    storage_caches: PostgresStorageCaches,
-    last_sealed_miniblock: SealedMiniblockNumber,
-    chain_id: L2ChainId,
 }
 
 impl DebugNamespace {
     pub async fn new(state: RpcState) -> Self {
-        let sender_config = &state.tx_sender.0.sender_config;
-
         let api_contracts = ApiContracts::load_from_disk();
         Self {
-            connection_pool: state.connection_pool,
-            fair_l2_gas_price: sender_config.fair_l2_gas_price,
+            state,
             api_contracts,
-            vm_execution_cache_misses_limit: sender_config.vm_execution_cache_misses_limit,
-            vm_concurrency_limiter: state.tx_sender.vm_concurrency_limiter(),
-            storage_caches: state.tx_sender.storage_caches(),
-            last_sealed_miniblock: state.last_sealed_miniblock,
-            chain_id: sender_config.chain_id,
         }
     }
 
+    fn sender_config(&self) -> &TxSenderConfig {
+        &self.state.tx_sender.0.sender_config
+    }
+
     #[tracing::instrument(skip(self))]
     pub async fn debug_trace_block_impl(
         &self,
@@ -69,17 +50,21 @@ impl DebugNamespace {
             .map(|options| options.tracer_config.only_top_call)
             .unwrap_or(false);
         let mut connection = self
+            .state
             .connection_pool
             .access_storage_tagged("api")
             .await
-            .unwrap();
-        let block_number = resolve_block(&mut connection, block_id, METHOD_NAME).await?;
-        let call_trace = connection
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+        let block_number = self
+            .state
+            .resolve_block(&mut connection, block_id, METHOD_NAME)
+            .await?;
+        let call_traces = connection
             .blocks_web3_dal()
-            .get_trace_for_miniblock(block_number)
+            .get_trace_for_miniblock(block_number) // FIXME: confirm whether callers rely on a specific transaction ordering in the returned traces (e.g., by index in block)
             .await
-            .unwrap();
-        let call_trace = call_trace
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+        let call_trace = call_traces
             .into_iter()
             .map(|call_trace| {
                 let mut result: DebugCall = call_trace.into();
@@ -90,7 +75,7 @@ impl DebugNamespace {
             })
             .collect();
 
-        let block_diff = self.last_sealed_miniblock.diff(block_number);
+        let block_diff = self.state.last_sealed_miniblock.diff(block_number);
         method_latency.observe(block_diff);
         Ok(call_trace)
     }
@@ -100,25 +85,26 @@ impl DebugNamespace {
         &self,
         tx_hash: H256,
         options: Option<TracerConfig>,
-    ) -> Option<DebugCall> {
+    ) -> Result<Option<DebugCall>, Web3Error> {
+        const METHOD_NAME: &str = "debug_trace_transaction";
+
         let only_top_call = options
             .map(|options| options.tracer_config.only_top_call)
             .unwrap_or(false);
-        let call_trace = self
+        let mut connection = self
+            .state
             .connection_pool
             .access_storage_tagged("api")
             .await
-            .unwrap()
-            .transactions_dal()
-            .get_call_trace(tx_hash)
-            .await;
-        call_trace.map(|call_trace| {
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+        let call_trace = connection.transactions_dal().get_call_trace(tx_hash).await;
+        Ok(call_trace.map(|call_trace| {
             let mut result: DebugCall = call_trace.into();
             if only_top_call {
                 result.calls = vec![];
             }
             result
-        })
+        }))
     }
 
     #[tracing::instrument(skip(self, request, block_id))]
@@ -137,20 +123,26 @@ impl DebugNamespace {
             .unwrap_or(false);
 
         let mut connection = self
+            .state
             .connection_pool
             .access_storage_tagged("api")
             .await
-            .unwrap();
-        let block_args = BlockArgs::new(&mut connection, block_id)
-            .await
-            .map_err(|err| internal_error("debug_trace_call", err))?
-            .ok_or(Web3Error::NoBlock)?;
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+        let block_args = self
+            .state
+            .resolve_block_args(&mut connection, block_id, METHOD_NAME)
+            .await?;
         drop(connection);
 
         let tx = L2Tx::from_request(request.into(), USED_BOOTLOADER_MEMORY_BYTES)?;
 
         let shared_args = self.shared_args();
-        let vm_permit = self.vm_concurrency_limiter.acquire().await;
+        let vm_permit = self
+            .state
+            .tx_sender
+            .vm_concurrency_limiter()
+            .acquire()
+            .await;
         let vm_permit = vm_permit.ok_or(Web3Error::InternalError)?;
 
         // We don't need properly trace if we only need top call
@@ -161,16 +153,18 @@ impl DebugNamespace {
             vec![ApiTracer::CallTracer(call_tracer_result.clone())]
         };
 
-        let result = execute_tx_eth_call(
-            vm_permit,
-            shared_args,
-            self.connection_pool.clone(),
-            tx.clone(),
-            block_args,
-            self.vm_execution_cache_misses_limit,
-            custom_tracers,
-        )
-        .await;
+        let executor = &self.state.tx_sender.0.executor;
+        let result = executor
+            .execute_tx_eth_call(
+                vm_permit,
+                shared_args,
+                self.state.connection_pool.clone(),
+                tx.clone(),
+                block_args,
+                self.sender_config().vm_execution_cache_misses_limit,
+                custom_tracers,
+            )
+            .await;
 
         let (output, revert_reason) = match result.result {
             ExecutionResult::Success { output, .. } => (output, None),
@@ -198,19 +192,23 @@ impl DebugNamespace {
             trace,
         );
 
-        let block_diff = self.last_sealed_miniblock.diff_with_block_args(&block_args);
+        let block_diff = self
+            .state
+            .last_sealed_miniblock
+            .diff_with_block_args(&block_args);
         method_latency.observe(block_diff);
         Ok(call.into())
     }
 
     fn shared_args(&self) -> TxSharedArgs {
+        let sender_config = self.sender_config();
         TxSharedArgs {
             operator_account: AccountTreeId::default(),
-            fee_input: BatchFeeInput::l1_pegged(100_000, self.fair_l2_gas_price),
+            fee_input: BatchFeeInput::l1_pegged(100_000, sender_config.fair_l2_gas_price),
             base_system_contracts: self.api_contracts.eth_call.clone(),
-            caches: self.storage_caches.clone(),
+            caches: self.state.tx_sender.storage_caches().clone(),
             validation_computational_gas_limit: BLOCK_GAS_LIMIT,
-            chain_id: self.chain_id,
+            chain_id: sender_config.chain_id,
         }
     }
 }
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs
index 8b717e37613a..92781ae8f68a 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs
@@ -7,7 +7,7 @@ use crate::api_server::web3::{backend_jsonrpsee::internal_error, state::RpcState
 /// Main use case for it is the EN synchronization.
 #[derive(Debug)]
 pub struct EnNamespace {
-    pub state: RpcState,
+    state: RpcState,
 }
 
 impl EnNamespace {
@@ -21,12 +21,14 @@ impl EnNamespace {
         block_number: MiniblockNumber,
         include_transactions: bool,
     ) -> Result<Option<SyncBlock>, Web3Error> {
+        const METHOD_NAME: &str = "en_syncL2Block";
+
         let mut storage = self
             .state
             .connection_pool
             .access_storage_tagged("api")
             .await
-            .unwrap();
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
         storage
             .sync_dal()
             .sync_block(
@@ -35,6 +37,6 @@ impl EnNamespace {
                 include_transactions,
             )
             .await
-            .map_err(|err| internal_error("en_syncL2Block", err))
+            .map_err(|err| internal_error(METHOD_NAME, err))
     }
 }
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs
index a2760b869edb..07c61ed5ac1a 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs
@@ -17,15 +17,11 @@ use zksync_web3_decl::{
     types::{Address, Block, Filter, FilterChanges, Log, U64},
 };
 
-use crate::api_server::{
-    execution_sandbox::BlockArgs,
-    web3::{
-        backend_jsonrpsee::internal_error,
-        metrics::{BlockCallObserver, API_METRICS},
-        resolve_block,
-        state::RpcState,
-        TypedFilter,
-    },
+use crate::api_server::web3::{
+    backend_jsonrpsee::internal_error,
+    metrics::{BlockCallObserver, API_METRICS},
+    state::RpcState,
+    TypedFilter,
 };
 
 pub const EVENT_TOPIC_NUMBER_LIMIT: usize = 4;
@@ -46,20 +42,21 @@ impl EthNamespace {
         const METHOD_NAME: &str = "get_block_number";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let block_number = self
+        let mut storage = self
             .state
             .connection_pool
             .access_storage_tagged("api")
             .await
-            .unwrap()
-            .blocks_web3_dal()
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+        let block_number = storage
+            .blocks_dal()
             .get_sealed_miniblock_number()
             .await
-            .map(|n| U64::from(n.0))
-            .map_err(|err| internal_error(METHOD_NAME, err));
+            .map_err(|err| internal_error(METHOD_NAME, err))?
+            .ok_or(Web3Error::NoBlock)?;
 
         method_latency.observe();
-        block_number
+        Ok(block_number.0.into())
     }
 
     #[tracing::instrument(skip(self, request, block_id))]
@@ -77,11 +74,11 @@ impl EthNamespace {
             .connection_pool
             .access_storage_tagged("api")
             .await
-            .unwrap();
-        let block_args = BlockArgs::new(&mut connection, block_id)
-            .await
-            .map_err(|err| internal_error("eth_call", err))?
-            .ok_or(Web3Error::NoBlock)?;
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+        let block_args = self
+            .state
+            .resolve_block_args(&mut connection, block_id, METHOD_NAME)
+            .await?;
         drop(connection);
 
         let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?;
@@ -180,8 +177,11 @@ impl EthNamespace {
             .connection_pool
             .access_storage_tagged("api")
             .await
-            .unwrap();
-        let block_number = resolve_block(&mut connection, block_id, METHOD_NAME).await?;
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+        let block_number = self
+            .state
+            .resolve_block(&mut connection, block_id, METHOD_NAME)
+            .await?;
         let balance = connection
             .storage_web3_dal()
             .standard_token_historical_balance(
@@ -268,12 +268,13 @@ impl EthNamespace {
         };
         let method_latency = API_METRICS.start_block_call(method_name, block_id);
 
+        self.state.start_info.ensure_not_pruned(block_id)?;
         let block = self
             .state
             .connection_pool
             .access_storage_tagged("api")
             .await
-            .unwrap()
+            .map_err(|err| internal_error(method_name, err))?
             .blocks_web3_dal()
             .get_block_by_web3_block_id(
                 block_id,
@@ -300,12 +301,13 @@ impl EthNamespace {
         const METHOD_NAME: &str = "get_block_transaction_count";
 
         let method_latency = API_METRICS.start_block_call(METHOD_NAME, block_id);
+        self.state.start_info.ensure_not_pruned(block_id)?;
         let tx_count = self
             .state
             .connection_pool
             .access_storage_tagged("api")
             .await
-            .unwrap()
+            .map_err(|err| internal_error(METHOD_NAME, err))?
             .blocks_web3_dal()
             .get_block_tx_count(block_id)
             .await
@@ -335,7 +337,10 @@ impl EthNamespace {
             .access_storage_tagged("api")
             .await
             .unwrap();
-        let block_number = resolve_block(&mut connection, block_id, METHOD_NAME).await?;
+        let block_number = self
+            .state
+            .resolve_block(&mut connection, block_id, METHOD_NAME)
+            .await?;
         let contract_code = connection
             .storage_web3_dal()
             .get_contract_code_unchecked(address, block_number)
@@ -369,7 +374,10 @@ impl EthNamespace {
             .access_storage_tagged("api")
             .await
             .unwrap();
-        let block_number = resolve_block(&mut connection, block_id, METHOD_NAME).await?;
+        let block_number = self
+            .state
+            .resolve_block(&mut connection, block_id, METHOD_NAME)
+            .await?;
         let value = connection
             .storage_web3_dal()
             .get_historical_value_unchecked(&storage_key, block_number)
@@ -401,35 +409,34 @@ impl EthNamespace {
             .await
             .unwrap();
 
-        let (full_nonce, block_number) = match block_id {
-            BlockId::Number(BlockNumber::Pending) => {
-                let nonce = connection
-                    .transactions_web3_dal()
-                    .next_nonce_by_initiator_account(address)
-                    .await
-                    .map_err(|err| internal_error(method_name, err));
-                (nonce, None)
-            }
-            _ => {
-                let block_number = resolve_block(&mut connection, block_id, method_name).await?;
-                let nonce = connection
-                    .storage_web3_dal()
-                    .get_address_historical_nonce(address, block_number)
-                    .await
-                    .map_err(|err| internal_error(method_name, err));
-                (nonce, Some(block_number))
-            }
-        };
+        let block_number = self
+            .state
+            .resolve_block(&mut connection, block_id, method_name)
+            .await?;
+        let full_nonce = connection
+            .storage_web3_dal()
+            .get_address_historical_nonce(address, block_number)
+            .await
+            .map_err(|err| internal_error(method_name, err))?;
 
         // TODO (SMA-1612): currently account nonce is returning always, but later we will
         //  return account nonce for account abstraction and deployment nonce for non account abstraction.
         //  Strip off deployer nonce part.
-        let account_nonce = full_nonce.map(|nonce| decompose_full_nonce(nonce).0);
+        let (mut account_nonce, _) = decompose_full_nonce(full_nonce);
+
+        if matches!(block_id, BlockId::Number(BlockNumber::Pending)) {
+            let account_nonce_u64 = u64::try_from(account_nonce)
+                .map_err(|err| internal_error(method_name, anyhow::anyhow!(err)))?;
+            account_nonce = connection
+                .transactions_web3_dal()
+                .next_nonce_by_initiator_account(address, account_nonce_u64)
+                .await
+                .map_err(|err| internal_error(method_name, err))?;
+        }
 
-        let block_diff =
-            block_number.map_or(0, |number| self.state.last_sealed_miniblock.diff(number));
+        let block_diff = self.state.last_sealed_miniblock.diff(block_number);
         method_latency.observe(block_diff);
-        account_nonce
+        Ok(account_nonce)
     }
 
     #[tracing::instrument(skip(self))]
@@ -506,25 +513,30 @@ impl EthNamespace {
         const METHOD_NAME: &str = "new_block_filter";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let mut conn = self
+        let mut storage = self
             .state
             .connection_pool
             .access_storage_tagged("api")
             .await
             .map_err(|err| internal_error(METHOD_NAME, err))?;
-        let last_block_number = conn
-            .blocks_web3_dal()
+        let last_block_number = storage
+            .blocks_dal()
             .get_sealed_miniblock_number()
             .await
             .map_err(|err| internal_error(METHOD_NAME, err))?;
-        drop(conn);
+        let next_block_number = match last_block_number {
+            Some(number) => number + 1,
+            // If we don't have miniblocks in the storage, use the first projected miniblock number as the cursor
+            None => self.state.start_info.first_miniblock,
+        };
+        drop(storage);
 
         let idx = self
             .state
             .installed_filters
             .lock()
             .await
-            .add(TypedFilter::Blocks(last_block_number + 1));
+            .add(TypedFilter::Blocks(next_block_number));
         method_latency.observe();
         Ok(idx)
     }
@@ -684,8 +696,10 @@ impl EthNamespace {
             .access_storage_tagged("api")
             .await
             .unwrap();
-        let newest_miniblock =
-            resolve_block(&mut connection, BlockId::Number(newest_block), METHOD_NAME).await?;
+        let newest_miniblock = self
+            .state
+            .resolve_block(&mut connection, BlockId::Number(newest_block), METHOD_NAME)
+            .await?;
 
         let mut base_fee_per_gas = connection
             .blocks_web3_dal()
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs
index b0c966679b64..b4192a691294 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs
@@ -40,6 +40,17 @@ impl ZksNamespace {
         Self { state }
     }
 
+    async fn access_storage(
+        &self,
+        method_name: &'static str,
+    ) -> Result<StorageProcessor<'_>, Web3Error> {
+        self.state
+            .connection_pool
+            .access_storage_tagged("api")
+            .await
+            .map_err(|err| internal_error(method_name, err))
+    }
+
     #[tracing::instrument(skip(self, request))]
     pub async fn estimate_fee_impl(&self, request: CallRequest) -> Result<Fee, Web3Error> {
         const METHOD_NAME: &str = "estimate_fee";
@@ -140,16 +151,14 @@ impl ZksNamespace {
         const METHOD_NAME: &str = "get_confirmed_tokens";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let tokens = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap()
+        let mut storage = self.access_storage(METHOD_NAME).await?;
+        let tokens = storage
             .tokens_web3_dal()
             .get_well_known_tokens()
             .await
-            .map_err(|err| internal_error(METHOD_NAME, err))?
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+
+        let tokens = tokens
             .into_iter()
             .skip(from as usize)
             .take(limit.into())
@@ -161,7 +170,6 @@ impl ZksNamespace {
                 decimals: token_info.metadata.decimals,
             })
             .collect();
-
         method_latency.observe();
         Ok(tokens)
     }
@@ -179,12 +187,7 @@ impl ZksNamespace {
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
         let token_price_result = {
-            let mut storage = self
-                .state
-                .connection_pool
-                .access_storage_tagged("api")
-                .await
-                .unwrap();
+            let mut storage = self.access_storage(METHOD_NAME).await?;
             storage.tokens_web3_dal().get_token_price(&l2_token).await
         };
 
@@ -210,16 +213,14 @@ impl ZksNamespace {
         const METHOD_NAME: &str = "get_all_balances";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let balances = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap()
+        let mut storage = self.access_storage(METHOD_NAME).await?;
+        let balances = storage
             .accounts_dal()
             .get_balances_for_address(address)
             .await
-            .map_err(|err| internal_error(METHOD_NAME, err))?
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+
+        let balances = balances
             .into_iter()
             .map(|(address, balance)| {
                 if address == L2_ETH_TOKEN_ADDRESS {
@@ -229,7 +230,6 @@ impl ZksNamespace {
                 }
             })
             .collect();
-
         method_latency.observe();
         Ok(balances)
     }
@@ -245,20 +245,15 @@ impl ZksNamespace {
         const METHOD_NAME: &str = "get_l2_to_l1_msg_proof";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let mut storage = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap();
-        let l1_batch_number = match storage
+        self.state.start_info.ensure_not_pruned(block_number)?;
+        let mut storage = self.access_storage(METHOD_NAME).await?;
+        let Some(l1_batch_number) = storage
             .blocks_web3_dal()
             .get_l1_batch_number_of_miniblock(block_number)
             .await
             .map_err(|err| internal_error(METHOD_NAME, err))?
-        {
-            Some(number) => number,
-            None => return Ok(None),
+        else {
+            return Ok(None);
         };
         let (first_miniblock_of_l1_batch, _) = storage
             .blocks_web3_dal()
@@ -374,12 +369,7 @@ impl ZksNamespace {
         const METHOD_NAME: &str = "get_l2_to_l1_msg_proof";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let mut storage = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap();
+        let mut storage = self.access_storage(METHOD_NAME).await?;
         let Some((l1_batch_number, l1_batch_tx_index)) = storage
             .blocks_web3_dal()
             .get_l1_batch_info_for_tx(tx_hash)
@@ -408,20 +398,16 @@ impl ZksNamespace {
         const METHOD_NAME: &str = "get_l1_batch_number";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let l1_batch_number = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap()
-            .blocks_web3_dal()
+        let mut storage = self.access_storage(METHOD_NAME).await?;
+        let l1_batch_number = storage
+            .blocks_dal()
             .get_sealed_l1_batch_number()
             .await
-            .map(|n| U64::from(n.0))
-            .map_err(|err| internal_error(METHOD_NAME, err));
+            .map_err(|err| internal_error(METHOD_NAME, err))?
+            .ok_or(Web3Error::NoBlock)?;
 
         method_latency.observe();
-        l1_batch_number
+        Ok(l1_batch_number.0.into())
     }
 
     #[tracing::instrument(skip(self))]
@@ -432,12 +418,9 @@ impl ZksNamespace {
         const METHOD_NAME: &str = "get_miniblock_range";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let minmax = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap()
+        self.state.start_info.ensure_not_pruned(batch)?;
+        let mut storage = self.access_storage(METHOD_NAME).await?;
+        let minmax = storage
             .blocks_web3_dal()
             .get_miniblock_range_of_l1_batch(batch)
             .await
@@ -456,12 +439,9 @@ impl ZksNamespace {
         const METHOD_NAME: &str = "get_block_details";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let block_details = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap()
+        self.state.start_info.ensure_not_pruned(block_number)?;
+        let mut storage = self.access_storage(METHOD_NAME).await?;
+        let block_details = storage
             .blocks_web3_dal()
             .get_block_details(
                 block_number,
@@ -482,12 +462,9 @@ impl ZksNamespace {
         const METHOD_NAME: &str = "get_raw_block_transactions";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let transactions = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap()
+        self.state.start_info.ensure_not_pruned(block_number)?;
+        let mut storage = self.access_storage(METHOD_NAME).await?;
+        let transactions = storage
             .transactions_web3_dal()
             .get_raw_miniblock_transactions(block_number)
             .await
@@ -505,16 +482,13 @@ impl ZksNamespace {
         const METHOD_NAME: &str = "get_transaction_details";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let mut tx_details = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap()
+        let mut storage = self.access_storage(METHOD_NAME).await?;
+        let mut tx_details = storage
             .transactions_web3_dal()
             .get_transaction_details(hash)
             .await
             .map_err(|err| internal_error(METHOD_NAME, err));
+        drop(storage);
 
         if let Some(proxy) = &self.state.tx_sender.0.proxy {
             // We're running an external node - we should query the main node directly
@@ -540,12 +514,9 @@ impl ZksNamespace {
         const METHOD_NAME: &str = "get_l1_batch";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let l1_batch = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap()
+        self.state.start_info.ensure_not_pruned(batch_number)?;
+        let mut storage = self.access_storage(METHOD_NAME).await?;
+        let l1_batch = storage
             .blocks_web3_dal()
             .get_l1_batch_details(batch_number)
             .await
@@ -556,22 +527,18 @@ impl ZksNamespace {
     }
 
     #[tracing::instrument(skip(self))]
-    pub async fn get_bytecode_by_hash_impl(&self, hash: H256) -> Option<Vec<u8>> {
+    pub async fn get_bytecode_by_hash_impl(
+        &self,
+        hash: H256,
+    ) -> Result<Option<Vec<u8>>, Web3Error> {
         const METHOD_NAME: &str = "get_bytecode_by_hash";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
-        let bytecode = self
-            .state
-            .connection_pool
-            .access_storage_tagged("api")
-            .await
-            .unwrap()
-            .storage_dal()
-            .get_factory_dep(hash)
-            .await;
+        let mut storage = self.access_storage(METHOD_NAME).await?;
+        let bytecode = storage.storage_dal().get_factory_dep(hash).await;
 
         method_latency.observe();
-        bytecode
+        Ok(bytecode)
     }
 
     #[tracing::instrument(skip(self))]
@@ -612,27 +579,20 @@ impl ZksNamespace {
     pub async fn get_protocol_version_impl(
         &self,
         version_id: Option<u16>,
-    ) -> Option<ProtocolVersion> {
+    ) -> Result<Option<ProtocolVersion>, Web3Error> {
         const METHOD_NAME: &str = "get_protocol_version";
 
         let method_latency = API_METRICS.start_call(METHOD_NAME);
+        let mut storage = self.access_storage(METHOD_NAME).await?;
         let protocol_version = match version_id {
             Some(id) => {
-                self.state
-                    .connection_pool
-                    .access_storage()
-                    .await
-                    .unwrap()
+                storage
                     .protocol_versions_web3_dal()
                     .get_protocol_version_by_id(id)
                     .await
             }
             None => Some(
-                self.state
-                    .connection_pool
-                    .access_storage()
-                    .await
-                    .unwrap()
+                storage
                     .protocol_versions_web3_dal()
                     .get_latest_protocol_version()
                     .await,
@@ -640,7 +600,7 @@ impl ZksNamespace {
         };
 
         method_latency.observe();
-        protocol_version
+        Ok(protocol_version)
     }
 
     #[tracing::instrument(skip_all)]
@@ -652,11 +612,11 @@ impl ZksNamespace {
     ) -> Result<Proof, Web3Error> {
         const METHOD_NAME: &str = "get_proofs";
 
+        self.state.start_info.ensure_not_pruned(l1_batch_number)?;
         let hashed_keys = keys
             .iter()
             .map(|key| StorageKey::new(AccountTreeId::new(address), *key).hashed_key_u256())
             .collect();
-
         let storage_proof = self
             .state
             .tree_api
diff --git a/core/lib/zksync_core/src/api_server/web3/pubsub.rs b/core/lib/zksync_core/src/api_server/web3/pubsub.rs
index 07a5eeb64af2..1eded8e49ea5 100644
--- a/core/lib/zksync_core/src/api_server/web3/pubsub.rs
+++ b/core/lib/zksync_core/src/api_server/web3/pubsub.rs
@@ -24,6 +24,7 @@ use super::{
     metrics::{SubscriptionType, PUB_SUB_METRICS},
     namespaces::eth::EVENT_TOPIC_NUMBER_LIMIT,
 };
+use crate::api_server::execution_sandbox::BlockStartInfo;
 
 const BROADCAST_CHANNEL_CAPACITY: usize = 1024;
 const SUBSCRIPTION_SINK_SEND_TIMEOUT: Duration = Duration::from_secs(1);
@@ -55,15 +56,25 @@ struct PubSubNotifier {
 }
 
 impl PubSubNotifier {
-    async fn sealed_miniblock_number(&self) -> anyhow::Result<MiniblockNumber> {
-        self.connection_pool
+    /// Returns the miniblock number notifications should start from: the last sealed miniblock,
+    /// or — if no miniblocks are persisted yet (e.g. right after snapshot recovery) — the number
+    /// immediately preceding the first miniblock that will be stored locally.
+    async fn get_starting_miniblock_number(&self) -> anyhow::Result<MiniblockNumber> {
+        let mut storage = self
+            .connection_pool
             .access_storage_tagged("api")
             .await
-            .context("access_storage_tagged")?
-            .blocks_web3_dal()
+            .context("access_storage_tagged")?;
+        let sealed_miniblock_number = storage
+            .blocks_dal()
             .get_sealed_miniblock_number()
             .await
-            .context("get_sealed_miniblock_number()")
+            .context("get_sealed_miniblock_number()")?;
+        Ok(match sealed_miniblock_number {
+            Some(number) => number,
+            None => {
+                // We don't have miniblocks in the storage yet. Use the snapshot miniblock number instead.
+                let start_info = BlockStartInfo::new(&mut storage).await?;
+                // `saturating_sub` guards against a zero first miniblock (pre-genesis state).
+                MiniblockNumber(start_info.first_miniblock.saturating_sub(1))
+            }
+        })
     }
 
     fn emit_event(&self, event: PubSubEvent) {
@@ -75,7 +86,7 @@ impl PubSubNotifier {
 
 impl PubSubNotifier {
     async fn notify_blocks(self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
-        let mut last_block_number = self.sealed_miniblock_number().await?;
+        let mut last_block_number = self.get_starting_miniblock_number().await?;
         let mut timer = interval(self.polling_interval);
         loop {
             if *stop_receiver.borrow() {
@@ -159,7 +170,8 @@ impl PubSubNotifier {
     }
 
     async fn notify_logs(self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
-        let mut last_block_number = self.sealed_miniblock_number().await?;
+        let mut last_block_number = self.get_starting_miniblock_number().await?;
+
         let mut timer = interval(self.polling_interval);
         loop {
             if *stop_receiver.borrow() {
diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs
index 7212f67b0915..180e2de7211f 100644
--- a/core/lib/zksync_core/src/api_server/web3/state.rs
+++ b/core/lib/zksync_core/src/api_server/web3/state.rs
@@ -8,12 +8,12 @@ use std::{
 };
 
 use lru::LruCache;
-use tokio::sync::Mutex;
+use tokio::sync::{watch, Mutex};
 use vise::GaugeGuard;
 use zksync_config::configs::{api::Web3JsonRpcConfig, chain::NetworkConfig, ContractsConfig};
-use zksync_dal::ConnectionPool;
+use zksync_dal::{ConnectionPool, StorageProcessor};
 use zksync_types::{
-    api, l2::L2Tx, transaction_request::CallRequest, Address, L1ChainId, L2ChainId,
+    api, l2::L2Tx, transaction_request::CallRequest, Address, L1BatchNumber, L1ChainId, L2ChainId,
     MiniblockNumber, H256, U256, U64,
 };
 use zksync_web3_decl::{error::Web3Error, types::Filter};
@@ -21,14 +21,54 @@ use zksync_web3_decl::{error::Web3Error, types::Filter};
 use super::metrics::{FilterType, FILTER_METRICS};
 use crate::{
     api_server::{
-        execution_sandbox::BlockArgs,
+        execution_sandbox::{BlockArgs, BlockArgsError, BlockStartInfo},
         tree::TreeApiHttpClient,
         tx_sender::TxSender,
-        web3::{backend_jsonrpsee::internal_error, resolve_block, TypedFilter},
+        web3::{backend_jsonrpsee::internal_error, TypedFilter},
     },
     sync_layer::SyncState,
 };
 
+/// Identifier of a queried entity that can be checked against the node's pruning boundary:
+/// either a miniblock-based block ID, or an L1 batch number.
+#[derive(Debug)]
+pub(super) enum PruneQuery {
+    BlockId(api::BlockId),
+    L1Batch(L1BatchNumber),
+}
+
+impl From<api::BlockId> for PruneQuery {
+    fn from(id: api::BlockId) -> Self {
+        Self::BlockId(id)
+    }
+}
+
+impl From<MiniblockNumber> for PruneQuery {
+    // A miniblock number is treated as a numeric block ID.
+    fn from(number: MiniblockNumber) -> Self {
+        Self::BlockId(api::BlockId::Number(number.0.into()))
+    }
+}
+
+impl From<L1BatchNumber> for PruneQuery {
+    fn from(number: L1BatchNumber) -> Self {
+        Self::L1Batch(number)
+    }
+}
+
+impl BlockStartInfo {
+    /// Checks that the queried block / L1 batch is still locally available, i.e., was not
+    /// pruned as part of snapshot recovery.
+    ///
+    /// # Errors
+    ///
+    /// Returns [`Web3Error::PrunedBlock`] for a pruned block ID, or [`Web3Error::PrunedL1Batch`]
+    /// (carrying the first retained L1 batch number) for a pruned L1 batch.
+    pub(super) fn ensure_not_pruned(&self, query: impl Into<PruneQuery>) -> Result<(), Web3Error> {
+        match query.into() {
+            PruneQuery::BlockId(id) => self
+                .ensure_not_pruned_block(id)
+                .map_err(Web3Error::PrunedBlock),
+            PruneQuery::L1Batch(number) => {
+                if number < self.first_l1_batch {
+                    return Err(Web3Error::PrunedL1Batch(self.first_l1_batch));
+                }
+                Ok(())
+            }
+        }
+    }
+}
+
 /// Configuration values for the API.
 /// This structure is detached from `ZkSyncConfig`, since different node types (main, external, etc)
 /// may require different configuration layouts.
@@ -88,32 +128,30 @@ impl SealedMiniblockNumber {
     pub fn new(
         connection_pool: ConnectionPool,
         update_interval: Duration,
-    ) -> (Self, impl Future<Output = ()> + Send) {
+        stop_receiver: watch::Receiver<bool>,
+    ) -> (Self, impl Future<Output = anyhow::Result<()>>) {
         let this = Self(Arc::default());
         let number_updater = this.clone();
+
         let update_task = async move {
             loop {
-                if Arc::strong_count(&number_updater.0) == 1 {
-                    // The `sealed_miniblock_number` was dropped; there's no sense continuing updates.
+                if *stop_receiver.borrow() {
                     tracing::debug!("Stopping latest sealed miniblock updates");
-                    break;
+                    return Ok(());
                 }
 
                 let mut connection = connection_pool.access_storage_tagged("api").await.unwrap();
-                let last_sealed_miniblock = connection
-                    .blocks_web3_dal()
+                let Some(last_sealed_miniblock) = connection
+                    .blocks_dal()
                     .get_sealed_miniblock_number()
-                    .await;
+                    .await?
+                else {
+                    tokio::time::sleep(update_interval).await;
+                    continue;
+                };
                 drop(connection);
 
-                match last_sealed_miniblock {
-                    Ok(number) => {
-                        number_updater.update(number);
-                    }
-                    Err(err) => tracing::warn!(
-                        "Failed fetching latest sealed miniblock to update the watch channel: {err}"
-                    ),
-                }
+                number_updater.update(last_sealed_miniblock);
                 tokio::time::sleep(update_interval).await;
             }
         };
@@ -160,6 +198,9 @@ pub struct RpcState {
     pub tx_sender: TxSender,
     pub sync_state: Option<SyncState>,
     pub(super) api_config: InternalApiConfig,
+    /// Number of the first locally available miniblock / L1 batch. May differ from 0 if the node state was recovered
+    /// from a snapshot.
+    pub(super) start_info: BlockStartInfo,
     pub(super) last_sealed_miniblock: SealedMiniblockNumber,
 }
 
@@ -182,6 +223,34 @@ impl RpcState {
         }
     }
 
+    /// Resolves the given block ID to a miniblock number, first verifying that the block
+    /// was not pruned.
+    ///
+    /// # Errors
+    ///
+    /// - Propagates pruning errors from `BlockStartInfo::ensure_not_pruned()`.
+    /// - Returns [`Web3Error::NoBlock`] if the block is absent from the storage.
+    /// - Wraps DB errors as internal errors tagged with `method_name`.
+    pub(crate) async fn resolve_block(
+        &self,
+        connection: &mut StorageProcessor<'_>,
+        block: api::BlockId,
+        method_name: &'static str,
+    ) -> Result<MiniblockNumber, Web3Error> {
+        self.start_info.ensure_not_pruned(block)?;
+        let result = connection.blocks_web3_dal().resolve_block_id(block).await;
+        result
+            .map_err(|err| internal_error(method_name, err))?
+            .ok_or(Web3Error::NoBlock)
+    }
+
+    /// Converts the given block ID into sandbox `BlockArgs`, mapping block-argument errors
+    /// to their Web3 counterparts: pruned block, missing block, or an internal DB error
+    /// tagged with `method_name`.
+    pub(crate) async fn resolve_block_args(
+        &self,
+        connection: &mut StorageProcessor<'_>,
+        block: api::BlockId,
+        method_name: &'static str,
+    ) -> Result<BlockArgs, Web3Error> {
+        BlockArgs::new(connection, block, self.start_info)
+            .await
+            .map_err(|err| match err {
+                BlockArgsError::Pruned(number) => Web3Error::PrunedBlock(number),
+                BlockArgsError::Missing => Web3Error::NoBlock,
+                BlockArgsError::Database(err) => internal_error(method_name, err),
+            })
+    }
+
     pub async fn resolve_filter_block_number(
         &self,
         block_number: Option<api::BlockNumber>,
@@ -198,12 +267,10 @@ impl RpcState {
             .connection_pool
             .access_storage_tagged("api")
             .await
-            .unwrap();
-        Ok(conn
-            .blocks_web3_dal()
-            .resolve_block_id(block_id)
+            .map_err(|err| internal_error(METHOD_NAME, err))?;
+        Ok(self
+            .resolve_block(&mut conn, block_id, METHOD_NAME)
             .await
-            .map_err(|err| internal_error(METHOD_NAME, err))?
             .unwrap())
         // ^ `unwrap()` is safe: `resolve_block_id(api::BlockId::Number(_))` can only return `None`
         // if called with an explicit number, and we've handled this case earlier.
@@ -284,7 +351,9 @@ impl RpcState {
                 .access_storage_tagged("api")
                 .await
                 .unwrap();
-            let block_number = resolve_block(&mut connection, block_id, METHOD_NAME).await?;
+            let block_number = self
+                .resolve_block(&mut connection, block_id, METHOD_NAME)
+                .await?;
             let address_historical_nonce = connection
                 .storage_web3_dal()
                 .get_address_historical_nonce(from, block_number)
diff --git a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs
new file mode 100644
index 000000000000..874cc019a3db
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs
@@ -0,0 +1,164 @@
+//! Tests for the `debug` Web3 namespace.
+
+use zksync_types::{tx::TransactionExecutionResult, vm_trace::Call, BOOTLOADER_ADDRESS};
+use zksync_web3_decl::namespaces::DebugNamespaceClient;
+
+use super::*;
+
+/// Builds a mock L2 transaction execution result carrying two hard-coded call traces,
+/// so that `debug_trace*` endpoints have non-trivial data to return.
+fn execute_l2_transaction_with_traces() -> TransactionExecutionResult {
+    let first_call_trace = Call {
+        from: Address::repeat_byte(1),
+        to: Address::repeat_byte(2),
+        gas: 100,
+        gas_used: 42,
+        ..Call::default()
+    };
+    let second_call_trace = Call {
+        from: Address::repeat_byte(0xff),
+        to: Address::repeat_byte(0xab),
+        value: 123.into(),
+        gas: 58,
+        gas_used: 10,
+        input: b"input".to_vec(),
+        output: b"output".to_vec(),
+        ..Call::default()
+    };
+    TransactionExecutionResult {
+        call_traces: vec![first_call_trace, second_call_trace],
+        ..execute_l2_transaction(create_l2_transaction(1, 2))
+    }
+}
+
+/// Checks `debug_traceBlockByNumber` / `debug_traceBlockByHash` for a miniblock stored at
+/// the wrapped number, including the RPC error returned for a missing block.
+#[derive(Debug)]
+struct TraceBlockTest(MiniblockNumber);
+
+#[async_trait]
+impl HttpTest for TraceBlockTest {
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
+        let tx_results = [execute_l2_transaction_with_traces()];
+        let mut storage = pool.access_storage().await?;
+        let new_miniblock = store_miniblock(&mut storage, self.0, &tx_results).await?;
+        drop(storage);
+
+        // Query the same block by explicit number, by the `latest` alias, and by hash.
+        let block_ids = [
+            api::BlockId::Number((*self.0).into()),
+            api::BlockId::Number(api::BlockNumber::Latest),
+            api::BlockId::Hash(new_miniblock.hash),
+        ];
+        let expected_calls: Vec<_> = tx_results[0]
+            .call_traces
+            .iter()
+            .map(|call| api::DebugCall::from(call.clone()))
+            .collect();
+
+        for block_id in block_ids {
+            let block_traces = match block_id {
+                api::BlockId::Number(number) => client.trace_block_by_number(number, None).await?,
+                api::BlockId::Hash(hash) => client.trace_block_by_hash(hash, None).await?,
+            };
+
+            assert_eq!(block_traces.len(), 1); // one trace entry per transaction in the block
+            let api::ResultDebugCall { result } = &block_traces[0];
+            assert_eq!(result.from, Address::zero());
+            assert_eq!(result.to, BOOTLOADER_ADDRESS);
+            assert_eq!(result.gas, tx_results[0].transaction.gas_limit());
+            assert_eq!(result.calls, expected_calls);
+        }
+
+        // A block that was never stored must yield an "invalid params" RPC error.
+        let missing_block_number = api::BlockNumber::from(*self.0 + 100);
+        let error = client
+            .trace_block_by_number(missing_block_number, None)
+            .await
+            .unwrap_err();
+        if let ClientError::Call(error) = error {
+            assert_eq!(error.code(), ErrorCode::InvalidParams.code());
+            assert!(
+                error.message().contains("Block") && error.message().contains("doesn't exist"),
+                "{error:?}"
+            );
+            assert!(error.data().is_none(), "{error:?}");
+        } else {
+            panic!("Unexpected error: {error:?}");
+        }
+
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn tracing_block() {
+    test_http_server(TraceBlockTest(MiniblockNumber(1))).await;
+}
+
+/// Checks `debug_traceTransaction` for a stored transaction with call traces.
+#[derive(Debug)]
+struct TraceTransactionTest;
+
+#[async_trait]
+impl HttpTest for TraceTransactionTest {
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
+        let tx_results = [execute_l2_transaction_with_traces()];
+        let mut storage = pool.access_storage().await?;
+        store_miniblock(&mut storage, MiniblockNumber(1), &tx_results).await?;
+        drop(storage);
+
+        let expected_calls: Vec<_> = tx_results[0]
+            .call_traces
+            .iter()
+            .map(|call| api::DebugCall::from(call.clone()))
+            .collect();
+
+        let result = client
+            .trace_transaction(tx_results[0].hash, None)
+            .await?
+            .context("no transaction traces")?;
+        assert_eq!(result.from, Address::zero());
+        assert_eq!(result.to, BOOTLOADER_ADDRESS);
+        assert_eq!(result.gas, tx_results[0].transaction.gas_limit());
+        assert_eq!(result.calls, expected_calls);
+
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn tracing_transaction() {
+    test_http_server(TraceTransactionTest).await;
+}
+
+/// Checks that after snapshot recovery, tracing a pruned miniblock yields a "pruned block"
+/// error, while tracing a miniblock past the snapshot point still works.
+#[derive(Debug)]
+struct TraceBlockTestWithSnapshotRecovery;
+
+#[async_trait]
+impl HttpTest for TraceBlockTestWithSnapshotRecovery {
+    fn storage_initialization(&self) -> StorageInitialization {
+        StorageInitialization::empty_recovery()
+    }
+
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
+        let snapshot_miniblock_number =
+            MiniblockNumber(StorageInitialization::SNAPSHOT_RECOVERY_BLOCK);
+        // Blocks at or before the snapshot point are pruned and must be rejected.
+        let missing_miniblock_numbers = [
+            MiniblockNumber(0),
+            snapshot_miniblock_number - 1,
+            snapshot_miniblock_number,
+        ];
+
+        for number in missing_miniblock_numbers {
+            let error = client
+                .trace_block_by_number(number.0.into(), None)
+                .await
+                .unwrap_err();
+            // NOTE(review): `24` presumably equals `SNAPSHOT_RECOVERY_BLOCK + 1` (first retained
+            // miniblock) — TODO confirm against `assert_pruned_block_error` / the constant.
+            assert_pruned_block_error(&error, 24);
+        }
+
+        TraceBlockTest(snapshot_miniblock_number + 1)
+            .test(client, pool)
+            .await?;
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn tracing_block_after_snapshot_recovery() {
+    test_http_server(TraceBlockTestWithSnapshotRecovery).await;
+}
diff --git a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs
new file mode 100644
index 000000000000..3c21be1b4be7
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs
@@ -0,0 +1,261 @@
+//! Tests for filter-related methods in the `eth` namespace.
+
+use zksync_web3_decl::{jsonrpsee::core::ClientError as RpcError, types::FilterChanges};
+
+use super::*;
+
+/// Smoke test for block and pending-transaction filters (`eth_newBlockFilter`,
+/// `eth_newPendingTransactionFilter`, `eth_getFilterChanges`, `eth_uninstallFilter`),
+/// parameterized by whether the storage starts from genesis or from snapshot recovery.
+#[derive(Debug)]
+struct BasicFilterChangesTest {
+    snapshot_recovery: bool,
+}
+
+#[async_trait]
+impl HttpTest for BasicFilterChangesTest {
+    fn storage_initialization(&self) -> StorageInitialization {
+        if self.snapshot_recovery {
+            StorageInitialization::empty_recovery()
+        } else {
+            StorageInitialization::Genesis
+        }
+    }
+
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
+        let block_filter_id = client.new_block_filter().await?;
+        let tx_filter_id = client.new_pending_transaction_filter().await?;
+        let tx_result = execute_l2_transaction(create_l2_transaction(1, 2));
+        let new_tx_hash = tx_result.hash;
+        // Store the new miniblock right after the local chain start (snapshot or genesis).
+        let new_miniblock = store_miniblock(
+            &mut pool.access_storage().await?,
+            MiniblockNumber(if self.snapshot_recovery {
+                StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1
+            } else {
+                1
+            }),
+            &[tx_result],
+        )
+        .await?;
+
+        // First poll returns the new block hash; the second poll must be empty.
+        let block_filter_changes = client.get_filter_changes(block_filter_id).await?;
+        assert_matches!(
+            block_filter_changes,
+            FilterChanges::Hashes(hashes) if hashes == [new_miniblock.hash]
+        );
+        let block_filter_changes = client.get_filter_changes(block_filter_id).await?;
+        assert_matches!(block_filter_changes, FilterChanges::Hashes(hashes) if hashes.is_empty());
+
+        let tx_filter_changes = client.get_filter_changes(tx_filter_id).await?;
+        assert_matches!(
+            tx_filter_changes,
+            FilterChanges::Hashes(hashes) if hashes == [new_tx_hash]
+        );
+        let tx_filter_changes = client.get_filter_changes(tx_filter_id).await?;
+        assert_matches!(tx_filter_changes, FilterChanges::Hashes(hashes) if hashes.is_empty());
+
+        // Check uninstalling the filter.
+        let removed = client.uninstall_filter(block_filter_id).await?;
+        assert!(removed);
+        let removed = client.uninstall_filter(block_filter_id).await?;
+        assert!(!removed);
+
+        // Polling an uninstalled filter must produce an "invalid params" RPC error.
+        let err = client
+            .get_filter_changes(block_filter_id)
+            .await
+            .unwrap_err();
+        assert_matches!(err, RpcError::Call(err) if err.code() == ErrorCode::InvalidParams.code());
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn basic_filter_changes() {
+    test_http_server(BasicFilterChangesTest {
+        snapshot_recovery: false,
+    })
+    .await;
+}
+
+#[tokio::test]
+async fn basic_filter_changes_after_snapshot_recovery() {
+    test_http_server(BasicFilterChangesTest {
+        snapshot_recovery: true,
+    })
+    .await;
+}
+
+/// Checks log filters with address and topic criteria (`eth_newFilter` /
+/// `eth_getFilterChanges`), parameterized by genesis vs. snapshot-recovery storage.
+#[derive(Debug)]
+struct LogFilterChangesTest {
+    snapshot_recovery: bool,
+}
+
+#[async_trait]
+impl HttpTest for LogFilterChangesTest {
+    fn storage_initialization(&self) -> StorageInitialization {
+        if self.snapshot_recovery {
+            StorageInitialization::empty_recovery()
+        } else {
+            StorageInitialization::Genesis
+        }
+    }
+
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
+        let all_logs_filter_id = client.new_filter(Filter::default()).await?;
+        let address_filter = Filter {
+            address: Some(Address::repeat_byte(23).into()),
+            ..Filter::default()
+        };
+        let address_filter_id = client.new_filter(address_filter).await?;
+        let topics_filter = Filter {
+            topics: Some(vec![Some(H256::repeat_byte(42).into())]),
+            ..Filter::default()
+        };
+        let topics_filter_id = client.new_filter(topics_filter).await?;
+
+        let mut storage = pool.access_storage().await?;
+        let first_local_miniblock = if self.snapshot_recovery {
+            StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1
+        } else {
+            1
+        };
+        let (_, events) = store_events(&mut storage, first_local_miniblock, 0).await?;
+        drop(storage);
+        let events: Vec<_> = events.iter().collect();
+
+        // The catch-all filter must see every stored event.
+        let all_logs = client.get_filter_changes(all_logs_filter_id).await?;
+        let FilterChanges::Logs(all_logs) = all_logs else {
+            panic!("Unexpected getFilterChanges output: {:?}", all_logs);
+        };
+        assert_logs_match(&all_logs, &events);
+
+        // NOTE(review): the index expectations below assume `store_events` emits events #0 / #3
+        // with the filtered address, and #1 / #3 with the filtered topic — confirm against the
+        // `store_events` helper.
+        let address_logs = client.get_filter_changes(address_filter_id).await?;
+        let FilterChanges::Logs(address_logs) = address_logs else {
+            panic!("Unexpected getFilterChanges output: {:?}", address_logs);
+        };
+        assert_logs_match(&address_logs, &[events[0], events[3]]);
+
+        let topics_logs = client.get_filter_changes(topics_filter_id).await?;
+        let FilterChanges::Logs(topics_logs) = topics_logs else {
+            panic!("Unexpected getFilterChanges output: {:?}", topics_logs);
+        };
+        assert_logs_match(&topics_logs, &[events[1], events[3]]);
+
+        // A second poll without new events must be empty.
+        let new_all_logs = client.get_filter_changes(all_logs_filter_id).await?;
+        let FilterChanges::Hashes(new_all_logs) = new_all_logs else {
+            panic!("Unexpected getFilterChanges output: {:?}", new_all_logs);
+        };
+        assert!(new_all_logs.is_empty());
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn log_filter_changes() {
+    test_http_server(LogFilterChangesTest {
+        snapshot_recovery: false,
+    })
+    .await;
+}
+
+#[tokio::test]
+async fn log_filter_changes_after_snapshot_recovery() {
+    test_http_server(LogFilterChangesTest {
+        snapshot_recovery: true,
+    })
+    .await;
+}
+
+/// Checks log filters constrained by `from_block` / `to_block` boundaries across three
+/// successively stored miniblocks (#1..=#3).
+#[derive(Debug)]
+struct LogFilterChangesWithBlockBoundariesTest;
+
+#[async_trait]
+impl HttpTest for LogFilterChangesWithBlockBoundariesTest {
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
+        // Matches blocks >= 2.
+        let lower_bound_filter = Filter {
+            from_block: Some(api::BlockNumber::Number(2.into())),
+            ..Filter::default()
+        };
+        let lower_bound_filter_id = client.new_filter(lower_bound_filter).await?;
+        // Matches blocks <= 1.
+        let upper_bound_filter = Filter {
+            to_block: Some(api::BlockNumber::Number(1.into())),
+            ..Filter::default()
+        };
+        let upper_bound_filter_id = client.new_filter(upper_bound_filter).await?;
+        // Matches exactly block 1.
+        let bounded_filter = Filter {
+            from_block: Some(api::BlockNumber::Number(1.into())),
+            to_block: Some(api::BlockNumber::Number(1.into())),
+            ..Filter::default()
+        };
+        let bounded_filter_id = client.new_filter(bounded_filter).await?;
+
+        let mut storage = pool.access_storage().await?;
+        let (_, events) = store_events(&mut storage, 1, 0).await?;
+        drop(storage);
+        let events: Vec<_> = events.iter().collect();
+
+        let lower_bound_logs = client.get_filter_changes(lower_bound_filter_id).await?;
+        assert_matches!(
+            lower_bound_logs,
+            FilterChanges::Hashes(hashes) if hashes.is_empty()
+        );
+        // ^ Since `FilterChanges` is serialized w/o a tag, an empty array will be deserialized
+        // as `Hashes(_)` (the first declared variant).
+
+        let upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?;
+        let FilterChanges::Logs(upper_bound_logs) = upper_bound_logs else {
+            panic!("Unexpected getFilterChanges output: {:?}", upper_bound_logs);
+        };
+        assert_logs_match(&upper_bound_logs, &events);
+        let bounded_logs = client.get_filter_changes(bounded_filter_id).await?;
+        let FilterChanges::Logs(bounded_logs) = bounded_logs else {
+            panic!("Unexpected getFilterChanges output: {:?}", bounded_logs);
+        };
+        assert_eq!(bounded_logs, upper_bound_logs);
+
+        // Add another miniblock with events to the storage.
+        let mut storage = pool.access_storage().await?;
+        let (_, new_events) = store_events(&mut storage, 2, 4).await?;
+        drop(storage);
+        let new_events: Vec<_> = new_events.iter().collect();
+
+        // Block #2 is only visible to the lower-bound filter.
+        let lower_bound_logs = client.get_filter_changes(lower_bound_filter_id).await?;
+        let FilterChanges::Logs(lower_bound_logs) = lower_bound_logs else {
+            panic!("Unexpected getFilterChanges output: {:?}", lower_bound_logs);
+        };
+        assert_logs_match(&lower_bound_logs, &new_events);
+
+        let new_upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?;
+        assert_matches!(new_upper_bound_logs, FilterChanges::Hashes(hashes) if hashes.is_empty());
+        let new_bounded_logs = client.get_filter_changes(bounded_filter_id).await?;
+        assert_matches!(new_bounded_logs, FilterChanges::Hashes(hashes) if hashes.is_empty());
+
+        // Add miniblock #3. It should not be picked up by the bounded and upper bound filters,
+        // and should be picked up by the lower bound filter.
+        let mut storage = pool.access_storage().await?;
+        let (_, new_events) = store_events(&mut storage, 3, 8).await?;
+        drop(storage);
+        let new_events: Vec<_> = new_events.iter().collect();
+
+        let bounded_logs = client.get_filter_changes(bounded_filter_id).await?;
+        let FilterChanges::Hashes(bounded_logs) = bounded_logs else {
+            panic!("Unexpected getFilterChanges output: {:?}", bounded_logs);
+        };
+        assert!(bounded_logs.is_empty());
+
+        let upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?;
+        let FilterChanges::Hashes(upper_bound_logs) = upper_bound_logs else {
+            panic!("Unexpected getFilterChanges output: {:?}", upper_bound_logs);
+        };
+        assert!(upper_bound_logs.is_empty());
+
+        let lower_bound_logs = client.get_filter_changes(lower_bound_filter_id).await?;
+        let FilterChanges::Logs(lower_bound_logs) = lower_bound_logs else {
+            panic!("Unexpected getFilterChanges output: {:?}", lower_bound_logs);
+        };
+        assert_logs_match(&lower_bound_logs, &new_events);
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn log_filter_changes_with_block_boundaries() {
+    test_http_server(LogFilterChangesWithBlockBoundariesTest).await;
+}
diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs
index 50f3f69996f4..1cfd6af269fa 100644
--- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs
@@ -1,55 +1,57 @@
-use std::{sync::Arc, time::Instant};
+use std::{collections::HashMap, time::Instant};
 
 use assert_matches::assert_matches;
 use async_trait::async_trait;
+use jsonrpsee::core::ClientError;
 use tokio::sync::watch;
 use zksync_config::configs::{
     api::Web3JsonRpcConfig,
     chain::{NetworkConfig, StateKeeperConfig},
     ContractsConfig,
 };
-use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool};
+use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool, StorageProcessor};
 use zksync_health_check::CheckHealth;
-use zksync_state::PostgresStorageCaches;
-use zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE;
 use zksync_types::{
-    block::MiniblockHeader, fee::TransactionExecutionMetrics, tx::IncludedTxLocation, Address,
-    L1BatchNumber, VmEvent, H256, U64,
+    api,
+    block::{BlockGasCount, MiniblockHeader},
+    fee::TransactionExecutionMetrics,
+    get_nonce_key,
+    l2::L2Tx,
+    storage::get_code_key,
+    tx::{
+        tx_execution_info::TxExecutionStatus, ExecutionMetrics, IncludedTxLocation,
+        TransactionExecutionResult,
+    },
+    utils::storage_key_for_eth_balance,
+    AccountTreeId, Address, L1BatchNumber, Nonce, StorageKey, StorageLog, VmEvent, H256, U64,
 };
 use zksync_web3_decl::{
-    jsonrpsee::{core::ClientError as RpcError, http_client::HttpClient, types::error::ErrorCode},
+    jsonrpsee::{http_client::HttpClient, types::error::ErrorCode},
     namespaces::{EthNamespaceClient, ZksNamespaceClient},
-    types::FilterChanges,
 };
 
 use super::{metrics::ApiTransportLabel, *};
 use crate::{
-    api_server::tx_sender::TxSenderConfig,
+    api_server::{
+        execution_sandbox::testonly::MockTransactionExecutor,
+        tx_sender::tests::create_test_tx_sender,
+    },
     genesis::{ensure_genesis_state, GenesisParams},
-    l1_gas_price::L1GasPriceProvider,
-    utils::testonly::{create_l2_transaction, create_miniblock},
+    utils::testonly::{
+        create_l1_batch, create_l1_batch_metadata, create_l2_transaction, create_miniblock,
+        prepare_empty_recovery_snapshot, prepare_recovery_snapshot,
+    },
 };
 
+mod debug;
+mod filters;
 mod snapshots;
+mod vm;
 mod ws;
 
 const TEST_TIMEOUT: Duration = Duration::from_secs(10);
 const POLL_INTERVAL: Duration = Duration::from_millis(50);
 
-/// Mock [`L1GasPriceProvider`] that returns a constant value.
-#[derive(Debug)]
-struct MockL1GasPriceProvider(u64);
-
-impl L1GasPriceProvider for MockL1GasPriceProvider {
-    fn estimate_effective_gas_price(&self) -> u64 {
-        self.0
-    }
-
-    fn estimate_effective_pubdata_price(&self) -> u64 {
-        self.0 * L1_GAS_PER_PUBDATA_BYTE as u64
-    }
-}
-
 impl ApiServerHandles {
     /// Waits until the server health check reports the ready state.
     pub(crate) async fn wait_until_ready(&self) {
@@ -70,14 +72,15 @@ impl ApiServerHandles {
     pub(crate) async fn shutdown(self) {
         let stop_server = async {
             for task in self.tasks {
-                let task_result = task.await.unwrap_or_else(|err| {
-                    if err.is_cancelled() {
-                        Ok(())
-                    } else {
-                        panic!("Server panicked: {err:?}");
+                match task.await {
+                    Ok(Ok(())) => { /* Task successfully completed */ }
+                    Err(err) if err.is_cancelled() => {
+                        // Task was canceled since the server runtime which runs the task was dropped.
+                        // This is fine.
                     }
-                });
-                task_result.expect("Server task returned an error");
+                    Err(err) => panic!("Server task panicked: {err:?}"),
+                    Ok(Err(err)) => panic!("Server task failed: {err:?}"),
+                }
             }
         };
         tokio::time::timeout(TEST_TIMEOUT, stop_server)
@@ -89,14 +92,16 @@ impl ApiServerHandles {
 pub(crate) async fn spawn_http_server(
     network_config: &NetworkConfig,
     pool: ConnectionPool,
+    tx_executor: MockTransactionExecutor,
     stop_receiver: watch::Receiver<bool>,
 ) -> ApiServerHandles {
     spawn_server(
         ApiTransportLabel::Http,
         network_config,
         pool,
-        stop_receiver,
         None,
+        tx_executor,
+        stop_receiver,
     )
     .await
     .0
@@ -112,8 +117,9 @@ async fn spawn_ws_server(
         ApiTransportLabel::Ws,
         network_config,
         pool,
-        stop_receiver,
         websocket_requests_per_minute_limit,
+        MockTransactionExecutor::default(),
+        stop_receiver,
     )
     .await
 }
@@ -122,32 +128,19 @@ async fn spawn_server(
     transport: ApiTransportLabel,
     network_config: &NetworkConfig,
     pool: ConnectionPool,
-    stop_receiver: watch::Receiver<bool>,
     websocket_requests_per_minute_limit: Option<NonZeroU32>,
+    tx_executor: MockTransactionExecutor,
+    stop_receiver: watch::Receiver<bool>,
 ) -> (ApiServerHandles, mpsc::UnboundedReceiver<PubSubEvent>) {
     let contracts_config = ContractsConfig::for_tests();
     let web3_config = Web3JsonRpcConfig::for_tests();
-    let state_keeper_config = StateKeeperConfig::for_tests();
     let api_config = InternalApiConfig::new(network_config, &web3_config, &contracts_config);
-    let tx_sender_config =
-        TxSenderConfig::new(&state_keeper_config, &web3_config, api_config.l2_chain_id);
-
-    let storage_caches = PostgresStorageCaches::new(1, 1);
-    let gas_adjuster = Arc::new(MockL1GasPriceProvider(1));
-    let (tx_sender, vm_barrier) = crate::build_tx_sender(
-        &tx_sender_config,
-        &web3_config,
-        &state_keeper_config,
-        pool.clone(),
-        pool.clone(),
-        gas_adjuster,
-        storage_caches,
-    )
-    .await;
+    let (tx_sender, vm_barrier) =
+        create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor.into()).await;
     let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel();
 
     let mut namespaces = Namespace::DEFAULT.to_vec();
-    namespaces.push(Namespace::Snapshots);
+    namespaces.extend([Namespace::Debug, Namespace::Snapshots]);
 
     let server_builder = match transport {
         ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0),
@@ -174,27 +167,91 @@ async fn spawn_server(
 }
 
 #[async_trait]
-trait HttpTest {
+trait HttpTest: Send + Sync {
+    /// Prepares the storage before the server is started. The default implementation performs genesis.
+    fn storage_initialization(&self) -> StorageInitialization {
+        StorageInitialization::Genesis
+    }
+
+    fn transaction_executor(&self) -> MockTransactionExecutor {
+        MockTransactionExecutor::default()
+    }
+
     async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()>;
 }
 
+/// Storage initialization strategy.
+#[derive(Debug)]
+enum StorageInitialization {
+    Genesis,
+    Recovery {
+        logs: Vec<StorageLog>,
+        factory_deps: HashMap<H256, Vec<u8>>,
+    },
+}
+
+impl StorageInitialization {
+    const SNAPSHOT_RECOVERY_BLOCK: u32 = 23;
+
+    fn empty_recovery() -> Self {
+        Self::Recovery {
+            logs: vec![],
+            factory_deps: HashMap::new(),
+        }
+    }
+
+    async fn prepare_storage(
+        &self,
+        network_config: &NetworkConfig,
+        storage: &mut StorageProcessor<'_>,
+    ) -> anyhow::Result<()> {
+        match self {
+            Self::Genesis => {
+                if storage.blocks_dal().is_genesis_needed().await? {
+                    ensure_genesis_state(
+                        storage,
+                        network_config.zksync_network_id,
+                        &GenesisParams::mock(),
+                    )
+                    .await?;
+                }
+            }
+            Self::Recovery { logs, factory_deps } if logs.is_empty() && factory_deps.is_empty() => {
+                prepare_empty_recovery_snapshot(storage, Self::SNAPSHOT_RECOVERY_BLOCK).await;
+            }
+            Self::Recovery { logs, factory_deps } => {
+                prepare_recovery_snapshot(storage, Self::SNAPSHOT_RECOVERY_BLOCK, logs).await;
+                storage
+                    .storage_dal()
+                    .insert_factory_deps(
+                        MiniblockNumber(Self::SNAPSHOT_RECOVERY_BLOCK),
+                        factory_deps,
+                    )
+                    .await;
+            }
+        }
+        Ok(())
+    }
+}
+
 async fn test_http_server(test: impl HttpTest) {
     let pool = ConnectionPool::test_pool().await;
     let network_config = NetworkConfig::for_tests();
     let mut storage = pool.access_storage().await.unwrap();
-    if storage.blocks_dal().is_genesis_needed().await.unwrap() {
-        ensure_genesis_state(
-            &mut storage,
-            network_config.zksync_network_id,
-            &GenesisParams::mock(),
-        )
+    test.storage_initialization()
+        .prepare_storage(&network_config, &mut storage)
         .await
-        .unwrap();
-    }
+        .expect("Failed preparing storage for test");
     drop(storage);
 
     let (stop_sender, stop_receiver) = watch::channel(false);
-    let server_handles = spawn_http_server(&network_config, pool.clone(), stop_receiver).await;
+    let server_handles = spawn_http_server(
+        &network_config,
+        pool.clone(),
+        test.transaction_executor(),
+        stop_receiver,
+    )
+    .await;
     server_handles.wait_until_ready().await;
 
     let client = <HttpClient>::builder()
@@ -215,23 +272,66 @@ fn assert_logs_match(actual_logs: &[api::Log], expected_logs: &[&VmEvent]) {
     }
 }
 
+fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult {
+    TransactionExecutionResult {
+        hash: transaction.hash(),
+        transaction: transaction.into(),
+        execution_info: ExecutionMetrics::default(),
+        execution_status: TxExecutionStatus::Success,
+        refunded_gas: 0,
+        operator_suggested_refund: 0,
+        compressed_bytecodes: vec![],
+        call_traces: vec![],
+        revert_reason: None,
+    }
+}
+
+/// Stores a miniblock with the specified number and transaction results, returning the new miniblock header.
 async fn store_miniblock(
     storage: &mut StorageProcessor<'_>,
-) -> anyhow::Result<(MiniblockHeader, H256)> {
-    let new_tx = create_l2_transaction(1, 2);
-    let new_tx_hash = new_tx.hash();
-    let tx_submission_result = storage
+    number: MiniblockNumber,
+    transaction_results: &[TransactionExecutionResult],
+) -> anyhow::Result<MiniblockHeader> {
+    for result in transaction_results {
+        let l2_tx = result.transaction.clone().try_into().unwrap();
+        let tx_submission_result = storage
+            .transactions_dal()
+            .insert_transaction_l2(l2_tx, TransactionExecutionMetrics::default())
+            .await;
+        assert_matches!(tx_submission_result, L2TxSubmissionResult::Added);
+    }
+
+    let new_miniblock = create_miniblock(number.0);
+    storage
+        .blocks_dal()
+        .insert_miniblock(&new_miniblock)
+        .await?;
+    storage
         .transactions_dal()
-        .insert_transaction_l2(new_tx, TransactionExecutionMetrics::default())
+        .mark_txs_as_executed_in_miniblock(new_miniblock.number, transaction_results, 1.into())
         .await;
-    assert_matches!(tx_submission_result, L2TxSubmissionResult::Added);
+    Ok(new_miniblock)
+}
 
-    let new_miniblock = create_miniblock(1);
+async fn seal_l1_batch(
+    storage: &mut StorageProcessor<'_>,
+    number: L1BatchNumber,
+) -> anyhow::Result<()> {
+    let header = create_l1_batch(number.0);
     storage
         .blocks_dal()
-        .insert_miniblock(&new_miniblock)
+        .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[], 0)
+        .await?;
+    storage
+        .blocks_dal()
+        .mark_miniblocks_as_executed_in_l1_batch(number)
         .await?;
-    Ok((new_miniblock, new_tx_hash))
+    let metadata = create_l1_batch_metadata(number.0);
+    storage
+        .blocks_dal()
+        .save_l1_batch_metadata(number, &metadata, H256::zero(), false)
+        .await?;
+    Ok(())
 }
 
 async fn store_events(
@@ -240,6 +340,7 @@ async fn store_events(
     start_idx: u32,
 ) -> anyhow::Result<(IncludedTxLocation, Vec<VmEvent>)> {
     let new_miniblock = create_miniblock(miniblock_number);
+    let l1_batch_number = L1BatchNumber(miniblock_number);
     storage
         .blocks_dal()
         .insert_miniblock(&new_miniblock)
@@ -252,28 +353,28 @@ async fn store_events(
     let events = vec![
         // Matches address, doesn't match topics
         VmEvent {
-            location: (L1BatchNumber(1), start_idx),
+            location: (l1_batch_number, start_idx),
             address: Address::repeat_byte(23),
             indexed_topics: vec![],
             value: start_idx.to_le_bytes().to_vec(),
         },
         // Doesn't match address, matches topics
         VmEvent {
-            location: (L1BatchNumber(1), start_idx + 1),
+            location: (l1_batch_number, start_idx + 1),
             address: Address::zero(),
             indexed_topics: vec![H256::repeat_byte(42)],
             value: (start_idx + 1).to_le_bytes().to_vec(),
         },
         // Doesn't match address or topics
         VmEvent {
-            location: (L1BatchNumber(1), start_idx + 2),
+            location: (l1_batch_number, start_idx + 2),
             address: Address::zero(),
             indexed_topics: vec![H256::repeat_byte(1), H256::repeat_byte(42)],
             value: (start_idx + 2).to_le_bytes().to_vec(),
         },
         // Matches both address and topics
         VmEvent {
-            location: (L1BatchNumber(1), start_idx + 3),
+            location: (l1_batch_number, start_idx + 3),
             address: Address::repeat_byte(23),
             indexed_topics: vec![H256::repeat_byte(42), H256::repeat_byte(111)],
             value: (start_idx + 3).to_le_bytes().to_vec(),
@@ -316,201 +417,395 @@ async fn http_server_basics() {
 }
 
 #[derive(Debug)]
-struct BasicFilterChangesTest;
+struct BlockMethodsWithSnapshotRecovery;
 
 #[async_trait]
-impl HttpTest for BasicFilterChangesTest {
+impl HttpTest for BlockMethodsWithSnapshotRecovery {
+    fn storage_initialization(&self) -> StorageInitialization {
+        StorageInitialization::empty_recovery()
+    }
+
     async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
-        let block_filter_id = client.new_block_filter().await?;
-        let tx_filter_id = client.new_pending_transaction_filter().await?;
+        let error = client.get_block_number().await.unwrap_err();
+        if let ClientError::Call(error) = error {
+            assert_eq!(error.code(), ErrorCode::InvalidParams.code());
+        } else {
+            panic!("Unexpected error: {error:?}");
+        }
 
-        let (new_miniblock, new_tx_hash) =
-            store_miniblock(&mut pool.access_storage().await?).await?;
+        let block = client
+            .get_block_by_number(api::BlockNumber::Latest, false)
+            .await?;
+        assert!(block.is_none());
+        let block = client.get_block_by_number(1_000.into(), false).await?;
+        assert!(block.is_none());
 
-        let block_filter_changes = client.get_filter_changes(block_filter_id).await?;
-        assert_matches!(
-            block_filter_changes,
-            FilterChanges::Hashes(hashes) if hashes == [new_miniblock.hash]
-        );
-        let block_filter_changes = client.get_filter_changes(block_filter_id).await?;
-        assert_matches!(block_filter_changes, FilterChanges::Hashes(hashes) if hashes.is_empty());
+        let mut storage = pool.access_storage().await?;
+        store_miniblock(&mut storage, MiniblockNumber(24), &[]).await?;
+        drop(storage);
 
-        let tx_filter_changes = client.get_filter_changes(tx_filter_id).await?;
-        assert_matches!(
-            tx_filter_changes,
-            FilterChanges::Hashes(hashes) if hashes == [new_tx_hash]
-        );
-        let tx_filter_changes = client.get_filter_changes(tx_filter_id).await?;
-        assert_matches!(tx_filter_changes, FilterChanges::Hashes(hashes) if hashes.is_empty());
+        let block_number = client.get_block_number().await?;
+        let expected_block_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1;
+        assert_eq!(block_number, expected_block_number.into());
+
+        for block_number in [api::BlockNumber::Latest, expected_block_number.into()] {
+            let block = client
+                .get_block_by_number(block_number, false)
+                .await?
+                .context("no latest block")?;
+            assert_eq!(block.number, expected_block_number.into());
+        }
 
-        // Check uninstalling the filter.
-        let removed = client.uninstall_filter(block_filter_id).await?;
-        assert!(removed);
-        let removed = client.uninstall_filter(block_filter_id).await?;
-        assert!(!removed);
+        for number in [0, 1, expected_block_number - 1] {
+            let error = client
+                .get_block_details(MiniblockNumber(number))
+                .await
+                .unwrap_err();
+            assert_pruned_block_error(&error, expected_block_number);
+            let error = client
+                .get_raw_block_transactions(MiniblockNumber(number))
+                .await
+                .unwrap_err();
+            assert_pruned_block_error(&error, expected_block_number);
+
+            let error = client
+                .get_block_transaction_count_by_number(number.into())
+                .await
+                .unwrap_err();
+            assert_pruned_block_error(&error, expected_block_number);
+            let error = client
+                .get_block_by_number(number.into(), false)
+                .await
+                .unwrap_err();
+            assert_pruned_block_error(&error, expected_block_number);
+        }
 
-        let err = client
-            .get_filter_changes(block_filter_id)
-            .await
-            .unwrap_err();
-        assert_matches!(err, RpcError::Call(err) if err.code() == ErrorCode::InvalidParams.code());
         Ok(())
     }
 }
 
+fn assert_pruned_block_error(error: &ClientError, first_retained_block: u32) {
+    if let ClientError::Call(error) = error {
+        assert_eq!(error.code(), ErrorCode::InvalidParams.code());
+        assert!(
+            error
+                .message()
+                .contains(&format!("first retained block is {first_retained_block}")),
+            "{error:?}"
+        );
+        assert!(error.data().is_none(), "{error:?}");
+    } else {
+        panic!("Unexpected error: {error:?}");
+    }
+}
+
 #[tokio::test]
-async fn basic_filter_changes() {
-    test_http_server(BasicFilterChangesTest).await;
+async fn block_methods_with_snapshot_recovery() {
+    test_http_server(BlockMethodsWithSnapshotRecovery).await;
 }
 
 #[derive(Debug)]
-struct LogFilterChangesTest;
+struct L1BatchMethodsWithSnapshotRecovery;
 
 #[async_trait]
-impl HttpTest for LogFilterChangesTest {
+impl HttpTest for L1BatchMethodsWithSnapshotRecovery {
+    fn storage_initialization(&self) -> StorageInitialization {
+        StorageInitialization::empty_recovery()
+    }
+
     async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
-        let all_logs_filter_id = client.new_filter(Filter::default()).await?;
-        let address_filter = Filter {
-            address: Some(Address::repeat_byte(23).into()),
-            ..Filter::default()
-        };
-        let address_filter_id = client.new_filter(address_filter).await?;
-        let topics_filter = Filter {
-            topics: Some(vec![Some(H256::repeat_byte(42).into())]),
-            ..Filter::default()
-        };
-        let topics_filter_id = client.new_filter(topics_filter).await?;
+        let error = client.get_l1_batch_number().await.unwrap_err();
+        if let ClientError::Call(error) = error {
+            assert_eq!(error.code(), ErrorCode::InvalidParams.code());
+        } else {
+            panic!("Unexpected error: {error:?}");
+        }
 
         let mut storage = pool.access_storage().await?;
-        let (_, events) = store_events(&mut storage, 1, 0).await?;
+        let miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1;
+        store_miniblock(&mut storage, MiniblockNumber(miniblock_number), &[]).await?;
+        seal_l1_batch(&mut storage, L1BatchNumber(miniblock_number)).await?;
         drop(storage);
-        let events: Vec<_> = events.iter().collect();
 
-        let all_logs = client.get_filter_changes(all_logs_filter_id).await?;
-        let FilterChanges::Logs(all_logs) = all_logs else {
-            panic!("Unexpected getFilterChanges output: {:?}", all_logs);
-        };
-        assert_logs_match(&all_logs, &events);
+        let l1_batch_number = client.get_l1_batch_number().await?;
+        assert_eq!(l1_batch_number, miniblock_number.into());
 
-        let address_logs = client.get_filter_changes(address_filter_id).await?;
-        let FilterChanges::Logs(address_logs) = address_logs else {
-            panic!("Unexpected getFilterChanges output: {:?}", address_logs);
-        };
-        assert_logs_match(&address_logs, &[events[0], events[3]]);
+        // `get_miniblock_range` method
+        let miniblock_range = client
+            .get_miniblock_range(L1BatchNumber(miniblock_number))
+            .await?
+            .context("no range for sealed L1 batch")?;
+        assert_eq!(miniblock_range.0, miniblock_number.into());
+        assert_eq!(miniblock_range.1, miniblock_number.into());
 
-        let topics_logs = client.get_filter_changes(topics_filter_id).await?;
-        let FilterChanges::Logs(topics_logs) = topics_logs else {
-            panic!("Unexpected getFilterChanges output: {:?}", topics_logs);
-        };
-        assert_logs_match(&topics_logs, &[events[1], events[3]]);
+        let miniblock_range_for_future_batch = client
+            .get_miniblock_range(L1BatchNumber(miniblock_number) + 1)
+            .await?;
+        assert_eq!(miniblock_range_for_future_batch, None);
+
+        let error = client
+            .get_miniblock_range(L1BatchNumber(miniblock_number) - 1)
+            .await
+            .unwrap_err();
+        assert_pruned_l1_batch_error(&error, miniblock_number);
+
+        // `get_l1_batch_details` method
+        let details = client
+            .get_l1_batch_details(L1BatchNumber(miniblock_number))
+            .await?
+            .context("no details for sealed L1 batch")?;
+        assert_eq!(details.number, L1BatchNumber(miniblock_number));
+
+        let details_for_future_batch = client
+            .get_l1_batch_details(L1BatchNumber(miniblock_number) + 1)
+            .await?;
+        assert!(
+            details_for_future_batch.is_none(),
+            "{details_for_future_batch:?}"
+        );
+
+        let error = client
+            .get_l1_batch_details(L1BatchNumber(miniblock_number) - 1)
+            .await
+            .unwrap_err();
+        assert_pruned_l1_batch_error(&error, miniblock_number);
 
-        let new_all_logs = client.get_filter_changes(all_logs_filter_id).await?;
-        let FilterChanges::Hashes(new_all_logs) = new_all_logs else {
-            panic!("Unexpected getFilterChanges output: {:?}", new_all_logs);
-        };
-        assert!(new_all_logs.is_empty());
         Ok(())
     }
 }
 
+fn assert_pruned_l1_batch_error(error: &ClientError, first_retained_l1_batch: u32) {
+    if let ClientError::Call(error) = error {
+        assert_eq!(error.code(), ErrorCode::InvalidParams.code());
+        assert!(
+            error.message().contains(&format!(
+                "first retained L1 batch is {first_retained_l1_batch}"
+            )),
+            "{error:?}"
+        );
+        assert!(error.data().is_none(), "{error:?}");
+    } else {
+        panic!("Unexpected error: {error:?}");
+    }
+}
+
 #[tokio::test]
-async fn log_filter_changes() {
-    test_http_server(LogFilterChangesTest).await;
+async fn l1_batch_methods_with_snapshot_recovery() {
+    test_http_server(L1BatchMethodsWithSnapshotRecovery).await;
 }
 
 #[derive(Debug)]
-struct LogFilterChangesWithBlockBoundariesTest;
+struct StorageAccessWithSnapshotRecovery;
 
 #[async_trait]
-impl HttpTest for LogFilterChangesWithBlockBoundariesTest {
-    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
-        let lower_bound_filter = Filter {
-            from_block: Some(api::BlockNumber::Number(2.into())),
-            ..Filter::default()
-        };
-        let lower_bound_filter_id = client.new_filter(lower_bound_filter).await?;
-        let upper_bound_filter = Filter {
-            to_block: Some(api::BlockNumber::Number(1.into())),
-            ..Filter::default()
-        };
-        let upper_bound_filter_id = client.new_filter(upper_bound_filter).await?;
-        let bounded_filter = Filter {
-            from_block: Some(api::BlockNumber::Number(1.into())),
-            to_block: Some(api::BlockNumber::Number(1.into())),
-            ..Filter::default()
-        };
-        let bounded_filter_id = client.new_filter(bounded_filter).await?;
+impl HttpTest for StorageAccessWithSnapshotRecovery {
+    fn storage_initialization(&self) -> StorageInitialization {
+        let address = Address::repeat_byte(1);
+        let code_key = get_code_key(&address);
+        let code_hash = H256::repeat_byte(2);
+        let balance_key = storage_key_for_eth_balance(&address);
+        let logs = vec![
+            StorageLog::new_write_log(code_key, code_hash),
+            StorageLog::new_write_log(balance_key, H256::from_low_u64_be(123)),
+            StorageLog::new_write_log(
+                StorageKey::new(AccountTreeId::new(address), H256::zero()),
+                H256::repeat_byte(0xff),
+            ),
+        ];
+        let factory_deps = [(code_hash, b"code".to_vec())].into();
+        StorageInitialization::Recovery { logs, factory_deps }
+    }
 
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
         let mut storage = pool.access_storage().await?;
-        let (_, events) = store_events(&mut storage, 1, 0).await?;
+
+        let address = Address::repeat_byte(1);
+        let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1;
+        for number in [0, 1, first_local_miniblock - 1] {
+            let number = api::BlockIdVariant::BlockNumber(number.into());
+            let error = client.get_code(address, Some(number)).await.unwrap_err();
+            assert_pruned_block_error(&error, first_local_miniblock);
+            let error = client.get_balance(address, Some(number)).await.unwrap_err();
+            assert_pruned_block_error(&error, first_local_miniblock);
+            let error = client
+                .get_storage_at(address, 0.into(), Some(number))
+                .await
+                .unwrap_err();
+            assert_pruned_block_error(&error, 24);
+        }
+
+        store_miniblock(&mut storage, MiniblockNumber(first_local_miniblock), &[]).await?;
         drop(storage);
-        let events: Vec<_> = events.iter().collect();
 
-        let lower_bound_logs = client.get_filter_changes(lower_bound_filter_id).await?;
-        assert_matches!(
-            lower_bound_logs,
-            FilterChanges::Hashes(hashes) if hashes.is_empty()
-        );
-        // ^ Since `FilterChanges` is serialized w/o a tag, an empty array will be deserialized
-        // as `Hashes(_)` (the first declared variant).
+        for number in [api::BlockNumber::Latest, first_local_miniblock.into()] {
+            let number = api::BlockIdVariant::BlockNumber(number);
+            let code = client.get_code(address, Some(number)).await?;
+            assert_eq!(code.0, b"code");
+            let balance = client.get_balance(address, Some(number)).await?;
+            assert_eq!(balance, 123.into());
+            let storage_value = client
+                .get_storage_at(address, 0.into(), Some(number))
+                .await?;
+            assert_eq!(storage_value, H256::repeat_byte(0xff));
+        }
+        Ok(())
+    }
+}
 
-        let upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?;
-        let FilterChanges::Logs(upper_bound_logs) = upper_bound_logs else {
-            panic!("Unexpected getFilterChanges output: {:?}", upper_bound_logs);
-        };
-        assert_logs_match(&upper_bound_logs, &events);
-        let bounded_logs = client.get_filter_changes(bounded_filter_id).await?;
-        let FilterChanges::Logs(bounded_logs) = bounded_logs else {
-            panic!("Unexpected getFilterChanges output: {:?}", bounded_logs);
-        };
-        assert_eq!(bounded_logs, upper_bound_logs);
+#[tokio::test]
+async fn storage_access_with_snapshot_recovery() {
+    test_http_server(StorageAccessWithSnapshotRecovery).await;
+}
+
+#[derive(Debug)]
+struct TransactionCountTest;
 
-        // Add another miniblock with events to the storage.
+#[async_trait]
+impl HttpTest for TransactionCountTest {
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
+        let test_address = Address::repeat_byte(11);
         let mut storage = pool.access_storage().await?;
-        let (_, new_events) = store_events(&mut storage, 2, 4).await?;
-        drop(storage);
-        let new_events: Vec<_> = new_events.iter().collect();
+        let mut miniblock_number = MiniblockNumber(0);
+        for nonce in [0, 1] {
+            let mut committed_tx = create_l2_transaction(10, 200);
+            committed_tx.common_data.initiator_address = test_address;
+            committed_tx.common_data.nonce = Nonce(nonce);
+            miniblock_number += 1;
+            store_miniblock(
+                &mut storage,
+                miniblock_number,
+                &[execute_l2_transaction(committed_tx)],
+            )
+            .await?;
+            let nonce_log = StorageLog::new_write_log(
+                get_nonce_key(&test_address),
+                H256::from_low_u64_be((nonce + 1).into()),
+            );
+            storage
+                .storage_logs_dal()
+                .insert_storage_logs(miniblock_number, &[(H256::zero(), vec![nonce_log])])
+                .await;
+        }
 
-        let lower_bound_logs = client.get_filter_changes(lower_bound_filter_id).await?;
-        let FilterChanges::Logs(lower_bound_logs) = lower_bound_logs else {
-            panic!("Unexpected getFilterChanges output: {:?}", lower_bound_logs);
-        };
-        assert_logs_match(&lower_bound_logs, &new_events);
+        let pending_count = client.get_transaction_count(test_address, None).await?;
+        assert_eq!(pending_count, 2.into());
+
+        let mut pending_tx = create_l2_transaction(10, 200);
+        pending_tx.common_data.initiator_address = test_address;
+        pending_tx.common_data.nonce = Nonce(2);
+        storage
+            .transactions_dal()
+            .insert_transaction_l2(pending_tx, TransactionExecutionMetrics::default())
+            .await;
+
+        let pending_count = client.get_transaction_count(test_address, None).await?;
+        assert_eq!(pending_count, 3.into());
+
+        let latest_block_numbers = [api::BlockNumber::Latest, miniblock_number.0.into()];
+        for number in latest_block_numbers {
+            let number = api::BlockIdVariant::BlockNumber(number);
+            let latest_count = client
+                .get_transaction_count(test_address, Some(number))
+                .await?;
+            assert_eq!(latest_count, 2.into());
+        }
 
-        let new_upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?;
-        assert_matches!(new_upper_bound_logs, FilterChanges::Hashes(hashes) if hashes.is_empty());
-        let new_bounded_logs = client.get_filter_changes(bounded_filter_id).await?;
-        assert_matches!(new_bounded_logs, FilterChanges::Hashes(hashes) if hashes.is_empty());
+        let earliest_block_numbers = [api::BlockNumber::Earliest, 0.into()];
+        for number in earliest_block_numbers {
+            let number = api::BlockIdVariant::BlockNumber(number);
+            let historic_count = client
+                .get_transaction_count(test_address, Some(number))
+                .await?;
+            assert_eq!(historic_count, 0.into());
+        }
 
-        // Add miniblock #3. It should not be picked up by the bounded and upper bound filters,
-        // and should be picked up by the lower bound filter.
-        let mut storage = pool.access_storage().await?;
-        let (_, new_events) = store_events(&mut storage, 3, 8).await?;
-        drop(storage);
-        let new_events: Vec<_> = new_events.iter().collect();
+        let number = api::BlockIdVariant::BlockNumber(1.into());
+        let historic_count = client
+            .get_transaction_count(test_address, Some(number))
+            .await?;
+        assert_eq!(historic_count, 1.into());
 
-        let bounded_logs = client.get_filter_changes(bounded_filter_id).await?;
-        let FilterChanges::Hashes(bounded_logs) = bounded_logs else {
-            panic!("Unexpected getFilterChanges output: {:?}", bounded_logs);
-        };
-        assert!(bounded_logs.is_empty());
+        let number = api::BlockIdVariant::BlockNumber(100.into());
+        let error = client
+            .get_transaction_count(test_address, Some(number))
+            .await
+            .unwrap_err();
+        if let ClientError::Call(error) = error {
+            assert_eq!(error.code(), ErrorCode::InvalidParams.code());
+        } else {
+            panic!("Unexpected error: {error:?}");
+        }
+        Ok(())
+    }
+}
 
-        let upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?;
-        let FilterChanges::Hashes(upper_bound_logs) = upper_bound_logs else {
-            panic!("Unexpected getFilterChanges output: {:?}", upper_bound_logs);
-        };
-        assert!(upper_bound_logs.is_empty());
+#[tokio::test]
+async fn getting_transaction_count_for_account() {
+    test_http_server(TransactionCountTest).await;
+}
 
-        let lower_bound_logs = client.get_filter_changes(lower_bound_filter_id).await?;
-        let FilterChanges::Logs(lower_bound_logs) = lower_bound_logs else {
-            panic!("Unexpected getFilterChanges output: {:?}", lower_bound_logs);
-        };
-        assert_logs_match(&lower_bound_logs, &new_events);
+#[derive(Debug)]
+struct TransactionCountAfterSnapshotRecoveryTest;
+
+#[async_trait]
+impl HttpTest for TransactionCountAfterSnapshotRecoveryTest {
+    fn storage_initialization(&self) -> StorageInitialization {
+        let test_address = Address::repeat_byte(11);
+        let nonce_log =
+            StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(3));
+        StorageInitialization::Recovery {
+            logs: vec![nonce_log],
+            factory_deps: HashMap::new(),
+        }
+    }
+
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
+        let test_address = Address::repeat_byte(11);
+        let pending_count = client.get_transaction_count(test_address, None).await?;
+        assert_eq!(pending_count, 3.into());
+
+        let mut pending_tx = create_l2_transaction(10, 200);
+        pending_tx.common_data.initiator_address = test_address;
+        pending_tx.common_data.nonce = Nonce(3);
+        let mut storage = pool.access_storage().await?;
+        storage
+            .transactions_dal()
+            .insert_transaction_l2(pending_tx, TransactionExecutionMetrics::default())
+            .await;
+
+        let pending_count = client.get_transaction_count(test_address, None).await?;
+        assert_eq!(pending_count, 4.into());
+
+        let pruned_block_numbers = [
+            api::BlockNumber::Earliest,
+            0.into(),
+            StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.into(),
+        ];
+        for number in pruned_block_numbers {
+            let number = api::BlockIdVariant::BlockNumber(number);
+            let error = client
+                .get_transaction_count(test_address, Some(number))
+                .await
+                .unwrap_err();
+            assert_pruned_block_error(&error, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1);
+        }
+
+        let latest_miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1;
+        store_miniblock(&mut storage, MiniblockNumber(latest_miniblock_number), &[]).await?;
+
+        let latest_block_numbers = [api::BlockNumber::Latest, latest_miniblock_number.into()];
+        for number in latest_block_numbers {
+            let number = api::BlockIdVariant::BlockNumber(number);
+            let latest_count = client
+                .get_transaction_count(test_address, Some(number))
+                .await?;
+            assert_eq!(latest_count, 3.into());
+        }
         Ok(())
     }
 }
 
 #[tokio::test]
-async fn log_filter_changes_with_block_boundaries() {
-    test_http_server(LogFilterChangesWithBlockBoundariesTest).await;
+async fn getting_transaction_count_for_account_after_snapshot_recovery() {
+    test_http_server(TransactionCountAfterSnapshotRecoveryTest).await;
 }
diff --git a/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs
index 70ad7d28fa25..1765a7c2397d 100644
--- a/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/tests/snapshots.rs
@@ -2,32 +2,9 @@
 
 use std::collections::HashSet;
 
-use zksync_types::block::BlockGasCount;
 use zksync_web3_decl::namespaces::SnapshotsNamespaceClient;
 
 use super::*;
-use crate::utils::testonly::{create_l1_batch, create_l1_batch_metadata};
-
-async fn seal_l1_batch(
-    storage: &mut StorageProcessor<'_>,
-    number: L1BatchNumber,
-) -> anyhow::Result<()> {
-    let header = create_l1_batch(number.0);
-    storage
-        .blocks_dal()
-        .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[], 0)
-        .await?;
-    storage
-        .blocks_dal()
-        .mark_miniblocks_as_executed_in_l1_batch(number)
-        .await?;
-    let metadata = create_l1_batch_metadata(number.0);
-    storage
-        .blocks_dal()
-        .save_l1_batch_metadata(number, &metadata, H256::zero(), false)
-        .await?;
-    Ok(())
-}
 
 #[derive(Debug)]
 struct SnapshotBasicsTest {
@@ -52,7 +29,12 @@ impl SnapshotBasicsTest {
 impl HttpTest for SnapshotBasicsTest {
     async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
         let mut storage = pool.access_storage().await.unwrap();
-        store_miniblock(&mut storage).await?;
+        store_miniblock(
+            &mut storage,
+            MiniblockNumber(1),
+            &[execute_l2_transaction(create_l2_transaction(1, 2))],
+        )
+        .await?;
         seal_l1_batch(&mut storage, L1BatchNumber(1)).await?;
         storage
             .snapshots_dal()
diff --git a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs
new file mode 100644
index 000000000000..bc152a449ff9
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs
@@ -0,0 +1,237 @@
+//! Tests for the VM-instantiating methods (e.g., `eth_call`).
+
+// TODO: Test other VM methods (`debug_traceCall`, `eth_estimateGas`)
+
+use multivm::interface::ExecutionResult;
+use zksync_types::{
+    get_intrinsic_constants, transaction_request::CallRequest, L2ChainId, PackedEthSignature, U256,
+};
+use zksync_utils::u256_to_h256;
+
+use super::*;
+
+#[derive(Debug)]
+struct CallTest;
+
+impl CallTest {
+    fn call_request() -> CallRequest {
+        CallRequest {
+            from: Some(Address::repeat_byte(1)),
+            to: Some(Address::repeat_byte(2)),
+            data: Some(b"call".to_vec().into()),
+            ..CallRequest::default()
+        }
+    }
+}
+
+#[async_trait]
+impl HttpTest for CallTest {
+    fn transaction_executor(&self) -> MockTransactionExecutor {
+        let mut tx_executor = MockTransactionExecutor::default();
+        tx_executor.insert_call_response(
+            Self::call_request().data.unwrap().0,
+            ExecutionResult::Success {
+                output: b"output".to_vec(),
+            },
+        );
+        tx_executor
+    }
+
+    async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> {
+        let call_result = client.call(Self::call_request(), None).await?;
+        assert_eq!(call_result.0, b"output");
+
+        let valid_block_numbers = [
+            api::BlockNumber::Pending,
+            api::BlockNumber::Latest,
+            0.into(),
+        ];
+        for number in valid_block_numbers {
+            let number = api::BlockIdVariant::BlockNumber(number);
+            let call_result = client.call(Self::call_request(), Some(number)).await?;
+            assert_eq!(call_result.0, b"output");
+        }
+
+        let invalid_block_number = api::BlockNumber::from(100);
+        let number = api::BlockIdVariant::BlockNumber(invalid_block_number);
+        let error = client
+            .call(Self::call_request(), Some(number))
+            .await
+            .unwrap_err();
+        if let ClientError::Call(error) = error {
+            assert_eq!(error.code(), ErrorCode::InvalidParams.code());
+        } else {
+            panic!("Unexpected error: {error:?}");
+        }
+
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn call_method_basics() {
+    test_http_server(CallTest).await;
+}
+
+#[derive(Debug)]
+struct CallTestAfterSnapshotRecovery;
+
+#[async_trait]
+impl HttpTest for CallTestAfterSnapshotRecovery {
+    fn storage_initialization(&self) -> StorageInitialization {
+        StorageInitialization::empty_recovery()
+    }
+
+    fn transaction_executor(&self) -> MockTransactionExecutor {
+        CallTest.transaction_executor()
+    }
+
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
+        let call_result = client.call(CallTest::call_request(), None).await?;
+        assert_eq!(call_result.0, b"output");
+        let pending_block_number = api::BlockIdVariant::BlockNumber(api::BlockNumber::Pending);
+        let call_result = client
+            .call(CallTest::call_request(), Some(pending_block_number))
+            .await?;
+        assert_eq!(call_result.0, b"output");
+
+        let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1;
+        let first_miniblock_numbers = [api::BlockNumber::Latest, first_local_miniblock.into()];
+        for number in first_miniblock_numbers {
+            let number = api::BlockIdVariant::BlockNumber(number);
+            let error = client
+                .call(CallTest::call_request(), Some(number))
+                .await
+                .unwrap_err();
+            if let ClientError::Call(error) = error {
+                assert_eq!(error.code(), ErrorCode::InvalidParams.code());
+            } else {
+                panic!("Unexpected error: {error:?}");
+            }
+        }
+
+        let pruned_block_numbers = [0, 1, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK];
+        for number in pruned_block_numbers {
+            let number = api::BlockIdVariant::BlockNumber(number.into());
+            let error = client
+                .call(CallTest::call_request(), Some(number))
+                .await
+                .unwrap_err();
+            assert_pruned_block_error(&error, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1);
+        }
+
+        let mut storage = pool.access_storage().await?;
+        store_miniblock(&mut storage, MiniblockNumber(first_local_miniblock), &[]).await?;
+        drop(storage);
+
+        for number in first_miniblock_numbers {
+            let number = api::BlockIdVariant::BlockNumber(number);
+            let call_result = client.call(CallTest::call_request(), Some(number)).await?;
+            assert_eq!(call_result.0, b"output");
+        }
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn call_method_after_snapshot_recovery() {
+    test_http_server(CallTestAfterSnapshotRecovery).await;
+}
+
+#[derive(Debug)]
+struct SendRawTransactionTest {
+    snapshot_recovery: bool,
+}
+
+impl SendRawTransactionTest {
+    fn transaction_bytes_and_hash() -> (Vec<u8>, H256) {
+        let private_key = H256::repeat_byte(11);
+        let address = PackedEthSignature::address_from_private_key(&private_key).unwrap();
+
+        let tx_request = api::TransactionRequest {
+            chain_id: Some(L2ChainId::default().as_u64()),
+            from: Some(address),
+            to: Some(Address::repeat_byte(2)),
+            value: 123_456.into(),
+            gas: (get_intrinsic_constants().l2_tx_intrinsic_gas * 2).into(),
+            gas_price: StateKeeperConfig::for_tests().fair_l2_gas_price.into(),
+            input: vec![1, 2, 3, 4].into(),
+            ..api::TransactionRequest::default()
+        };
+        let mut rlp = Default::default();
+        tx_request.rlp(&mut rlp, L2ChainId::default().as_u64(), None);
+        let data = rlp.out();
+        let signed_message = PackedEthSignature::message_to_signed_bytes(&data);
+        let signature = PackedEthSignature::sign_raw(&private_key, &signed_message).unwrap();
+
+        let mut rlp = Default::default();
+        tx_request.rlp(&mut rlp, L2ChainId::default().as_u64(), Some(&signature));
+        let data = rlp.out();
+        let (_, tx_hash) =
+            api::TransactionRequest::from_bytes(&data, L2ChainId::default()).unwrap();
+        (data.into(), tx_hash)
+    }
+
+    fn balance_storage_log() -> StorageLog {
+        let private_key = H256::repeat_byte(11);
+        let address = PackedEthSignature::address_from_private_key(&private_key).unwrap();
+        let balance_key = storage_key_for_eth_balance(&address);
+        StorageLog::new_write_log(balance_key, u256_to_h256(U256::one() << 64))
+    }
+}
+
+#[async_trait]
+impl HttpTest for SendRawTransactionTest {
+    fn storage_initialization(&self) -> StorageInitialization {
+        if self.snapshot_recovery {
+            let logs = vec![Self::balance_storage_log()];
+            StorageInitialization::Recovery {
+                logs,
+                factory_deps: HashMap::default(),
+            }
+        } else {
+            StorageInitialization::Genesis
+        }
+    }
+
+    fn transaction_executor(&self) -> MockTransactionExecutor {
+        let mut tx_executor = MockTransactionExecutor::default();
+        tx_executor.insert_tx_response(
+            Self::transaction_bytes_and_hash().1,
+            ExecutionResult::Success { output: vec![] },
+        );
+        tx_executor
+    }
+
+    async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> {
+        if !self.snapshot_recovery {
+            // Manually set sufficient balance for the transaction account.
+            let mut storage = pool.access_storage().await?;
+            storage
+                .storage_dal()
+                .apply_storage_logs(&[(H256::zero(), vec![Self::balance_storage_log()])])
+                .await;
+        }
+
+        let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash();
+        let send_result = client.send_raw_transaction(tx_bytes.into()).await?;
+        assert_eq!(send_result, tx_hash);
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn send_raw_transaction_basics() {
+    test_http_server(SendRawTransactionTest {
+        snapshot_recovery: false,
+    })
+    .await;
+}
+
+#[tokio::test]
+async fn send_raw_transaction_after_snapshot_recovery() {
+    test_http_server(SendRawTransactionTest {
+        snapshot_recovery: true,
+    })
+    .await;
+}
diff --git a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs
index 509b1e194e7f..0a82c3d0f216 100644
--- a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs
+++ b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs
@@ -44,9 +44,9 @@ async fn wait_for_subscription(
 }
 
 #[allow(clippy::needless_pass_by_ref_mut)] // false positive
-async fn wait_for_notifier(
+async fn wait_for_notifiers(
     events: &mut mpsc::UnboundedReceiver<PubSubEvent>,
-    sub_type: SubscriptionType,
+    sub_types: &[SubscriptionType],
 ) {
     let wait_future = tokio::time::timeout(TEST_TIMEOUT, async {
         loop {
@@ -54,18 +54,67 @@ async fn wait_for_notifier(
                 .recv()
                 .await
                 .expect("Events emitter unexpectedly dropped");
-            if matches!(event, PubSubEvent::NotifyIterationFinished(ty) if ty == sub_type) {
+            if matches!(event, PubSubEvent::NotifyIterationFinished(ty) if sub_types.contains(&ty))
+            {
                 break;
             } else {
                 tracing::trace!(?event, "Skipping event");
             }
         }
     });
-    wait_future.await.expect("Timed out waiting for notifier")
+    wait_future.await.expect("Timed out waiting for notifier");
+}
+
+#[tokio::test]
+async fn notifiers_start_after_snapshot_recovery() {
+    let pool = ConnectionPool::test_pool().await;
+    let mut storage = pool.access_storage().await.unwrap();
+    prepare_empty_recovery_snapshot(&mut storage, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK)
+        .await;
+
+    let (stop_sender, stop_receiver) = watch::channel(false);
+    let (events_sender, mut events_receiver) = mpsc::unbounded_channel();
+    let mut subscribe_logic = EthSubscribe::new();
+    subscribe_logic.set_events_sender(events_sender);
+    let notifier_handles =
+        subscribe_logic.spawn_notifiers(pool.clone(), POLL_INTERVAL, stop_receiver);
+    assert!(!notifier_handles.is_empty());
+
+    // Wait briefly without producing any blocks and check that the notifier tasks are still active (i.e., have not panicked).
+    tokio::time::sleep(POLL_INTERVAL).await;
+    for handle in &notifier_handles {
+        assert!(!handle.is_finished());
+    }
+
+    // Emulate creating the first miniblock; check that notifiers react to it.
+    let first_local_miniblock = MiniblockNumber(StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1);
+    store_miniblock(&mut storage, first_local_miniblock, &[])
+        .await
+        .unwrap();
+
+    wait_for_notifiers(
+        &mut events_receiver,
+        &[
+            SubscriptionType::Blocks,
+            SubscriptionType::Txs,
+            SubscriptionType::Logs,
+        ],
+    )
+    .await;
+
+    stop_sender.send_replace(true);
+    for handle in notifier_handles {
+        handle.await.unwrap().expect("Notifier task failed");
+    }
 }
 
 #[async_trait]
-trait WsTest {
+trait WsTest: Send + Sync {
+    /// Prepares the storage before the server is started. The default implementation performs genesis.
+    fn storage_initialization(&self) -> StorageInitialization {
+        StorageInitialization::Genesis
+    }
+
     async fn test(
         &self,
         client: &WsClient,
@@ -82,15 +131,10 @@ async fn test_ws_server(test: impl WsTest) {
     let pool = ConnectionPool::test_pool().await;
     let network_config = NetworkConfig::for_tests();
     let mut storage = pool.access_storage().await.unwrap();
-    if storage.blocks_dal().is_genesis_needed().await.unwrap() {
-        ensure_genesis_state(
-            &mut storage,
-            network_config.zksync_network_id,
-            &GenesisParams::mock(),
-        )
+    test.storage_initialization()
+        .prepare_storage(&network_config, &mut storage)
         .await
-        .unwrap();
-    }
+        .expect("Failed preparing storage for test");
     drop(storage);
 
     let (stop_sender, stop_receiver) = watch::channel(false);
@@ -145,10 +189,20 @@ async fn ws_server_can_start() {
 }
 
 #[derive(Debug)]
-struct BasicSubscriptionsTest;
+struct BasicSubscriptionsTest {
+    snapshot_recovery: bool,
+}
 
 #[async_trait]
 impl WsTest for BasicSubscriptionsTest {
+    fn storage_initialization(&self) -> StorageInitialization {
+        if self.snapshot_recovery {
+            StorageInitialization::empty_recovery()
+        } else {
+            StorageInitialization::Genesis
+        }
+    }
+
     async fn test(
         &self,
         client: &WsClient,
@@ -157,8 +211,11 @@ impl WsTest for BasicSubscriptionsTest {
     ) -> anyhow::Result<()> {
         // Wait for the notifiers to get initialized so that they don't skip notifications
         // for the created subscriptions.
-        wait_for_notifier(&mut pub_sub_events, SubscriptionType::Blocks).await;
-        wait_for_notifier(&mut pub_sub_events, SubscriptionType::Txs).await;
+        wait_for_notifiers(
+            &mut pub_sub_events,
+            &[SubscriptionType::Blocks, SubscriptionType::Txs],
+        )
+        .await;
 
         let params = rpc_params!["newHeads"];
         let mut blocks_subscription = client
@@ -172,8 +229,16 @@ impl WsTest for BasicSubscriptionsTest {
             .await?;
         wait_for_subscription(&mut pub_sub_events, SubscriptionType::Txs).await;
 
-        let (new_miniblock, new_tx_hash) =
-            store_miniblock(&mut pool.access_storage().await?).await?;
+        let mut storage = pool.access_storage().await?;
+        let tx_result = execute_l2_transaction(create_l2_transaction(1, 2));
+        let new_tx_hash = tx_result.hash;
+        let miniblock_number = MiniblockNumber(if self.snapshot_recovery {
+            StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1
+        } else {
+            1
+        });
+        let new_miniblock = store_miniblock(&mut storage, miniblock_number, &[tx_result]).await?;
+        drop(storage);
 
         let received_tx_hash = tokio::time::timeout(TEST_TIMEOUT, txs_subscription.next())
             .await
@@ -182,11 +247,17 @@ impl WsTest for BasicSubscriptionsTest {
         assert_eq!(received_tx_hash, new_tx_hash);
         let received_block_header = tokio::time::timeout(TEST_TIMEOUT, blocks_subscription.next())
             .await
-            .context("Timed out waiting for new block hash")?
+            .context("Timed out waiting for new block header")?
             .context("New blocks subscription terminated")??;
-        assert_eq!(received_block_header.number, Some(1.into()));
+        assert_eq!(
+            received_block_header.number,
+            Some(new_miniblock.number.0.into())
+        );
         assert_eq!(received_block_header.hash, Some(new_miniblock.hash));
-        assert_eq!(received_block_header.timestamp, 1.into());
+        assert_eq!(
+            received_block_header.timestamp,
+            new_miniblock.timestamp.into()
+        );
         blocks_subscription.unsubscribe().await?;
         Ok(())
     }
@@ -194,27 +265,40 @@ impl WsTest for BasicSubscriptionsTest {
 
 #[tokio::test]
 async fn basic_subscriptions() {
-    test_ws_server(BasicSubscriptionsTest).await;
+    test_ws_server(BasicSubscriptionsTest {
+        snapshot_recovery: false,
+    })
+    .await;
+}
+
+#[tokio::test]
+async fn basic_subscriptions_after_snapshot_recovery() {
+    test_ws_server(BasicSubscriptionsTest {
+        snapshot_recovery: true,
+    })
+    .await;
 }
 
 #[derive(Debug)]
-struct LogSubscriptionsTest;
+struct LogSubscriptionsTest {
+    snapshot_recovery: bool,
+}
 
 #[derive(Debug)]
-struct Subscriptions {
+struct LogSubscriptions {
     all_logs_subscription: Subscription<api::Log>,
     address_subscription: Subscription<api::Log>,
     topic_subscription: Subscription<api::Log>,
 }
 
-impl Subscriptions {
+impl LogSubscriptions {
     async fn new(
         client: &WsClient,
         pub_sub_events: &mut mpsc::UnboundedReceiver<PubSubEvent>,
     ) -> anyhow::Result<Self> {
         // Wait for the notifier to get initialized so that it doesn't skip notifications
         // for the created subscriptions.
-        wait_for_notifier(pub_sub_events, SubscriptionType::Logs).await;
+        wait_for_notifiers(pub_sub_events, &[SubscriptionType::Logs]).await;
 
         let params = rpc_params!["logs"];
         let all_logs_subscription = client
@@ -250,20 +334,33 @@ impl Subscriptions {
 
 #[async_trait]
 impl WsTest for LogSubscriptionsTest {
+    fn storage_initialization(&self) -> StorageInitialization {
+        if self.snapshot_recovery {
+            StorageInitialization::empty_recovery()
+        } else {
+            StorageInitialization::Genesis
+        }
+    }
+
     async fn test(
         &self,
         client: &WsClient,
         pool: &ConnectionPool,
         mut pub_sub_events: mpsc::UnboundedReceiver<PubSubEvent>,
     ) -> anyhow::Result<()> {
-        let Subscriptions {
+        let LogSubscriptions {
             mut all_logs_subscription,
             mut address_subscription,
             mut topic_subscription,
-        } = Subscriptions::new(client, &mut pub_sub_events).await?;
+        } = LogSubscriptions::new(client, &mut pub_sub_events).await?;
 
         let mut storage = pool.access_storage().await?;
-        let (tx_location, events) = store_events(&mut storage, 1, 0).await?;
+        let miniblock_number = if self.snapshot_recovery {
+            StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1
+        } else {
+            1
+        };
+        let (tx_location, events) = store_events(&mut storage, miniblock_number, 0).await?;
         drop(storage);
         let events: Vec<_> = events.iter().collect();
 
@@ -272,7 +369,7 @@ impl WsTest for LogSubscriptionsTest {
             assert_eq!(log.transaction_index, Some(0.into()));
             assert_eq!(log.log_index, Some(i.into()));
             assert_eq!(log.transaction_hash, Some(tx_location.tx_hash));
-            assert_eq!(log.block_number, Some(1.into()));
+            assert_eq!(log.block_number, Some(miniblock_number.into()));
         }
         assert_logs_match(&all_logs, &events);
 
@@ -282,7 +379,7 @@ impl WsTest for LogSubscriptionsTest {
         let topic_logs = collect_logs(&mut topic_subscription, 2).await?;
         assert_logs_match(&topic_logs, &[events[1], events[3]]);
 
-        wait_for_notifier(&mut pub_sub_events, SubscriptionType::Logs).await;
+        wait_for_notifiers(&mut pub_sub_events, &[SubscriptionType::Logs]).await;
 
         // Check that no new notifications were sent to subscribers.
         tokio::time::timeout(POLL_INTERVAL, all_logs_subscription.next())
@@ -315,7 +412,18 @@ async fn collect_logs(
 
 #[tokio::test]
 async fn log_subscriptions() {
-    test_ws_server(LogSubscriptionsTest).await;
+    test_ws_server(LogSubscriptionsTest {
+        snapshot_recovery: false,
+    })
+    .await;
+}
+
+#[tokio::test]
+async fn log_subscriptions_after_snapshot_recovery() {
+    test_ws_server(LogSubscriptionsTest {
+        snapshot_recovery: true,
+    })
+    .await;
 }
 
 #[derive(Debug)]
@@ -329,11 +437,11 @@ impl WsTest for LogSubscriptionsWithNewBlockTest {
         pool: &ConnectionPool,
         mut pub_sub_events: mpsc::UnboundedReceiver<PubSubEvent>,
     ) -> anyhow::Result<()> {
-        let Subscriptions {
+        let LogSubscriptions {
             mut all_logs_subscription,
             mut address_subscription,
             ..
-        } = Subscriptions::new(client, &mut pub_sub_events).await?;
+        } = LogSubscriptions::new(client, &mut pub_sub_events).await?;
 
         let mut storage = pool.access_storage().await?;
         let (_, events) = store_events(&mut storage, 1, 0).await?;
@@ -377,11 +485,11 @@ impl WsTest for LogSubscriptionsWithManyBlocksTest {
         pool: &ConnectionPool,
         mut pub_sub_events: mpsc::UnboundedReceiver<PubSubEvent>,
     ) -> anyhow::Result<()> {
-        let Subscriptions {
+        let LogSubscriptions {
             mut all_logs_subscription,
             mut address_subscription,
             ..
-        } = Subscriptions::new(client, &mut pub_sub_events).await?;
+        } = LogSubscriptions::new(client, &mut pub_sub_events).await?;
 
         // Add two blocks in the storage atomically.
         let mut storage = pool.access_storage().await?;
@@ -431,7 +539,7 @@ impl WsTest for LogSubscriptionsWithDelayTest {
         while pub_sub_events.try_recv().is_ok() {
             // Drain all existing pub-sub events.
         }
-        wait_for_notifier(&mut pub_sub_events, SubscriptionType::Logs).await;
+        wait_for_notifiers(&mut pub_sub_events, &[SubscriptionType::Logs]).await;
 
         let params = rpc_params!["logs"];
         let mut all_logs_subscription = client
diff --git a/core/lib/zksync_core/src/consensus/storage/mod.rs b/core/lib/zksync_core/src/consensus/storage/mod.rs
index e0d8db17574c..5631574fa55f 100644
--- a/core/lib/zksync_core/src/consensus/storage/mod.rs
+++ b/core/lib/zksync_core/src/consensus/storage/mod.rs
@@ -46,7 +46,8 @@ impl<'a> CtxStorage<'a> {
         let number = ctx
             .wait(self.0.blocks_dal().get_sealed_miniblock_number())
             .await?
-            .context("sqlx")?;
+            .context("sqlx")?
+            .context("no miniblocks in storage")?; // FIXME (PLA-703): handle empty storage
         Ok(validator::BlockNumber(number.0.into()))
     }
 
diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs
index 83d68a812d4b..8de75757f335 100644
--- a/core/lib/zksync_core/src/consensus/testonly.rs
+++ b/core/lib/zksync_core/src/consensus/testonly.rs
@@ -297,6 +297,7 @@ impl StateKeeper {
     // Wait for all pushed miniblocks to be produced.
     pub async fn wait_for_miniblocks(&self, ctx: &ctx::Ctx) -> ctx::Result<()> {
         const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100);
+
         loop {
             let mut storage = CtxStorage::access(ctx, &self.pool).await.wrap("access()")?;
             if storage
diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs
index 09f80d80b5e1..180894f2fc7d 100644
--- a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs
+++ b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs
@@ -12,11 +12,7 @@ use zksync_config::configs::{
 };
 use zksync_health_check::{CheckHealth, ReactiveHealthCheck};
 use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction};
-use zksync_types::{
-    block::{L1BatchHeader, MiniblockHeader},
-    fee_model::BatchFeeInput,
-    L1BatchNumber, L2ChainId, ProtocolVersion, ProtocolVersionId, StorageLog,
-};
+use zksync_types::{L1BatchNumber, L2ChainId, StorageLog};
 use zksync_utils::h256_to_u256;
 
 use super::*;
@@ -30,6 +26,7 @@ use crate::{
         },
         MetadataCalculator, MetadataCalculatorConfig,
     },
+    utils::testonly::prepare_recovery_snapshot,
 };
 
 #[test]
@@ -103,7 +100,7 @@ async fn create_tree_recovery(path: PathBuf, l1_batch: L1BatchNumber) -> AsyncTr
 async fn basic_recovery_workflow() {
     let pool = ConnectionPool::test_pool().await;
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-    let snapshot_recovery = prepare_recovery_snapshot(&pool, &temp_dir).await;
+    let snapshot_recovery = prepare_recovery_snapshot_with_genesis(&pool, &temp_dir).await;
     let snapshot = SnapshotParameters::new(&pool, &snapshot_recovery)
         .await
         .unwrap();
@@ -134,7 +131,7 @@ async fn basic_recovery_workflow() {
     }
 }
 
-async fn prepare_recovery_snapshot(
+async fn prepare_recovery_snapshot_with_genesis(
     pool: &ConnectionPool,
     temp_dir: &TempDir,
 ) -> SnapshotRecoveryStatus {
@@ -213,7 +210,7 @@ impl HandleRecoveryEvent for TestEventListener {
 async fn recovery_fault_tolerance(chunk_count: usize) {
     let pool = ConnectionPool::test_pool().await;
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
-    let snapshot_recovery = prepare_recovery_snapshot(&pool, &temp_dir).await;
+    let snapshot_recovery = prepare_recovery_snapshot_with_genesis(&pool, &temp_dir).await;
 
     let tree_path = temp_dir.path().join("recovery");
     let tree = create_tree_recovery(tree_path.clone(), L1BatchNumber(1)).await;
@@ -283,7 +280,7 @@ async fn entire_recovery_workflow(case: RecoveryWorkflowCase) {
     // Emulate the recovered view of Postgres. Unlike with previous tests, we don't perform genesis.
     let snapshot_logs = gen_storage_logs(100..300, 1).pop().unwrap();
     let mut storage = pool.access_storage().await.unwrap();
-    let snapshot_recovery = prepare_clean_recovery_snapshot(&mut storage, &snapshot_logs).await;
+    let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &snapshot_logs).await;
 
     let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB");
     let merkle_tree_config = MerkleTreeConfig {
@@ -355,76 +352,3 @@ async fn entire_recovery_workflow(case: RecoveryWorkflowCase) {
     stop_sender.send_replace(true);
     calculator_task.await.expect("calculator panicked").unwrap();
 }
-
-/// Prepares a recovery snapshot without performing genesis.
-async fn prepare_clean_recovery_snapshot(
-    storage: &mut StorageProcessor<'_>,
-    snapshot_logs: &[StorageLog],
-) -> SnapshotRecoveryStatus {
-    let written_keys: Vec<_> = snapshot_logs.iter().map(|log| log.key).collect();
-    let tree_instructions: Vec<_> = snapshot_logs
-        .iter()
-        .enumerate()
-        .map(|(i, log)| TreeInstruction::write(log.key, i as u64 + 1, log.value))
-        .collect();
-    let l1_batch_root_hash = ZkSyncTree::process_genesis_batch(&tree_instructions).root_hash;
-
-    storage
-        .protocol_versions_dal()
-        .save_protocol_version_with_tx(ProtocolVersion::default())
-        .await;
-    // TODO (PLA-596): Don't insert L1 batches / miniblocks once the relevant foreign keys are removed
-    let miniblock = MiniblockHeader {
-        number: MiniblockNumber(23),
-        timestamp: 23,
-        hash: H256::zero(),
-        l1_tx_count: 0,
-        l2_tx_count: 0,
-        base_fee_per_gas: 100,
-        batch_fee_input: BatchFeeInput::l1_pegged(100, 100),
-        base_system_contracts_hashes: Default::default(),
-        protocol_version: Some(ProtocolVersionId::latest()),
-        virtual_blocks: 0,
-    };
-    storage
-        .blocks_dal()
-        .insert_miniblock(&miniblock)
-        .await
-        .unwrap();
-    let l1_batch = L1BatchHeader::new(
-        L1BatchNumber(23),
-        23,
-        Default::default(),
-        Default::default(),
-        ProtocolVersionId::latest(),
-    );
-    storage
-        .blocks_dal()
-        .insert_l1_batch(&l1_batch, &[], Default::default(), &[], &[], 0)
-        .await
-        .unwrap();
-
-    storage
-        .storage_logs_dedup_dal()
-        .insert_initial_writes(l1_batch.number, &written_keys)
-        .await;
-    storage
-        .storage_logs_dal()
-        .insert_storage_logs(miniblock.number, &[(H256::zero(), snapshot_logs.to_vec())])
-        .await;
-
-    let snapshot_recovery = SnapshotRecoveryStatus {
-        l1_batch_number: l1_batch.number,
-        l1_batch_root_hash,
-        miniblock_number: miniblock.number,
-        miniblock_root_hash: H256::zero(), // not used
-        last_finished_chunk_id: None,
-        total_chunk_count: 100,
-    };
-    storage
-        .snapshot_recovery_dal()
-        .set_applied_snapshot_status(&snapshot_recovery)
-        .await
-        .unwrap();
-    snapshot_recovery
-}
diff --git a/core/lib/zksync_core/src/reorg_detector/mod.rs b/core/lib/zksync_core/src/reorg_detector/mod.rs
index 106202389ad3..c399ed4c488b 100644
--- a/core/lib/zksync_core/src/reorg_detector/mod.rs
+++ b/core/lib/zksync_core/src/reorg_detector/mod.rs
@@ -254,8 +254,11 @@ impl ReorgDetector {
                 .get_last_l1_batch_number_with_metadata()
                 .await?
                 .context("L1 batches table unexpectedly emptied")?;
-            let sealed_miniblock_number =
-                storage.blocks_dal().get_sealed_miniblock_number().await?;
+            let sealed_miniblock_number = storage
+                .blocks_dal()
+                .get_sealed_miniblock_number()
+                .await?
+                .context("miniblocks table unexpectedly emptied")?;
             drop(storage);
 
             tracing::trace!(
diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
index 20af1b9b221d..9ba6ff0ac9fd 100644
--- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
@@ -426,7 +426,8 @@ impl MempoolIO {
             .blocks_dal()
             .get_sealed_miniblock_number()
             .await
-            .unwrap();
+            .unwrap()
+            .expect("empty storage not supported"); // FIXME (PLA-703): handle empty storage
 
         drop(storage);
 
diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
index 52a5f26dcfd3..af7e39122f8a 100644
--- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
@@ -394,14 +394,14 @@ async fn test_miniblock_and_l1_batch_processing(
             .get_sealed_miniblock_number()
             .await
             .unwrap(),
-        MiniblockNumber(2) // + fictive miniblock
+        Some(MiniblockNumber(2)) // + fictive miniblock
     );
     let l1_batch_header = conn
         .blocks_dal()
         .get_l1_batch_header(L1BatchNumber(1))
         .await
         .unwrap()
-        .unwrap();
+        .expect("No L1 batch #1");
     assert_eq!(l1_batch_header.l2_tx_count, 1);
     assert!(l1_batch_header.is_finished);
 }
diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs
index 0b5bef237b6e..5c09cba7ad75 100644
--- a/core/lib/zksync_core/src/sync_layer/external_io.rs
+++ b/core/lib/zksync_core/src/sync_layer/external_io.rs
@@ -82,7 +82,8 @@ impl ExternalIO {
             .blocks_dal()
             .get_sealed_miniblock_number()
             .await
-            .unwrap();
+            .unwrap()
+            .expect("empty storage not supported"); // FIXME (PLA-703): handle empty storage
         drop(storage);
 
         tracing::info!(
diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs
index 35de5e597df1..848ce625ad70 100644
--- a/core/lib/zksync_core/src/sync_layer/tests.rs
+++ b/core/lib/zksync_core/src/sync_layer/tests.rs
@@ -489,8 +489,13 @@ async fn fetcher_with_real_server() {
     // Start the API server.
     let network_config = NetworkConfig::for_tests();
     let (stop_sender, stop_receiver) = watch::channel(false);
-    let server_handles =
-        spawn_http_server(&network_config, pool.clone(), stop_receiver.clone()).await;
+    let server_handles = spawn_http_server(
+        &network_config,
+        pool.clone(),
+        Default::default(),
+        stop_receiver.clone(),
+    )
+    .await;
     server_handles.wait_until_ready().await;
     let server_addr = &server_handles.local_addr;
 
diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs
index c84754f7cd30..e6f50de91742 100644
--- a/core/lib/zksync_core/src/utils/testonly.rs
+++ b/core/lib/zksync_core/src/utils/testonly.rs
@@ -2,6 +2,7 @@
 
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_dal::StorageProcessor;
+use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction};
 use zksync_system_constants::ZKPORTER_IS_AVAILABLE;
 use zksync_types::{
     block::{L1BatchHeader, MiniblockHeader},
@@ -12,9 +13,11 @@ use zksync_types::{
     snapshots::SnapshotRecoveryStatus,
     transaction_request::PaymasterParams,
     Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersion, ProtocolVersionId,
-    H256, U256,
+    StorageLog, H256, U256,
 };
 
+use crate::l1_gas_price::L1GasPriceProvider;
+
 /// Creates a miniblock header with the specified number and deterministic contents.
 pub(crate) fn create_miniblock(number: u32) -> MiniblockHeader {
     MiniblockHeader {
@@ -96,6 +99,71 @@ pub(crate) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> L
     tx
 }
 
+/// Prepares a recovery snapshot without performing genesis.
+pub(crate) async fn prepare_recovery_snapshot(
+    storage: &mut StorageProcessor<'_>,
+    l1_batch_number: u32,
+    snapshot_logs: &[StorageLog],
+) -> SnapshotRecoveryStatus {
+    let mut storage = storage.start_transaction().await.unwrap();
+
+    let written_keys: Vec<_> = snapshot_logs.iter().map(|log| log.key).collect();
+    let tree_instructions: Vec<_> = snapshot_logs
+        .iter()
+        .enumerate()
+        .map(|(i, log)| TreeInstruction::write(log.key, i as u64 + 1, log.value))
+        .collect();
+    let l1_batch_root_hash = ZkSyncTree::process_genesis_batch(&tree_instructions).root_hash;
+
+    storage
+        .protocol_versions_dal()
+        .save_protocol_version_with_tx(ProtocolVersion::default())
+        .await;
+    // TODO (PLA-596): Don't insert L1 batches / miniblocks once the relevant foreign keys are removed
+    let miniblock = create_miniblock(l1_batch_number);
+    storage
+        .blocks_dal()
+        .insert_miniblock(&miniblock)
+        .await
+        .unwrap();
+    let l1_batch = create_l1_batch(l1_batch_number);
+    storage
+        .blocks_dal()
+        .insert_l1_batch(&l1_batch, &[], Default::default(), &[], &[], 0)
+        .await
+        .unwrap();
+
+    storage
+        .storage_logs_dedup_dal()
+        .insert_initial_writes(l1_batch.number, &written_keys)
+        .await;
+    storage
+        .storage_logs_dal()
+        .insert_storage_logs(miniblock.number, &[(H256::zero(), snapshot_logs.to_vec())])
+        .await;
+    storage
+        .storage_dal()
+        .apply_storage_logs(&[(H256::zero(), snapshot_logs.to_vec())])
+        .await;
+
+    let snapshot_recovery = SnapshotRecoveryStatus {
+        l1_batch_number: l1_batch.number,
+        l1_batch_root_hash,
+        miniblock_number: miniblock.number,
+        miniblock_root_hash: H256::zero(), // not used
+        last_finished_chunk_id: None,
+        total_chunk_count: 100,
+    };
+    storage
+        .snapshot_recovery_dal()
+        .set_applied_snapshot_status(&snapshot_recovery)
+        .await
+        .unwrap();
+    storage.commit().await.unwrap();
+    snapshot_recovery
+}
+
+// TODO (PLA-596): Replace with `prepare_recovery_snapshot(.., &[])`
 pub(crate) async fn prepare_empty_recovery_snapshot(
     storage: &mut StorageProcessor<'_>,
     l1_batch_number: u32,
@@ -120,3 +188,17 @@ pub(crate) async fn prepare_empty_recovery_snapshot(
         .unwrap();
     snapshot_recovery
 }
+
+/// Mock [`L1GasPriceProvider`] that returns a constant value.
+#[derive(Debug)]
+pub(crate) struct MockL1GasPriceProvider(pub u64);
+
+impl L1GasPriceProvider for MockL1GasPriceProvider {
+    fn estimate_effective_gas_price(&self) -> u64 {
+        self.0
+    }
+
+    fn estimate_effective_pubdata_price(&self) -> u64 {
+        self.0 * u64::from(zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE)
+    }
+}
diff --git a/prover/Cargo.lock b/prover/Cargo.lock
index 1ed73bdb4d1d..edd6e8280b77 100644
--- a/prover/Cargo.lock
+++ b/prover/Cargo.lock
@@ -89,6 +89,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a"
 dependencies = [
  "cfg-if 1.0.0",
+ "getrandom 0.2.11",
  "once_cell",
  "version_check",
  "zerocopy",
@@ -103,6 +104,12 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "allocator-api2"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
+
 [[package]]
 name = "android-tzdata"
 version = "0.1.1"
@@ -228,13 +235,23 @@ dependencies = [
 
 [[package]]
 name = "atoi"
-version = "0.4.0"
+version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "616896e05fc0e2649463a93a15183c6a16bf03413a7af88ef1285ddedfa9cda5"
+checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528"
 dependencies = [
  "num-traits",
 ]
 
+[[package]]
+name = "atomic-write-file"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edcdbedc2236483ab103a53415653d6b4442ea6141baf1ffa85df29635e88436"
+dependencies = [
+ "nix",
+ "rand 0.8.5",
+]
+
 [[package]]
 name = "atty"
 version = "0.2.14"
@@ -354,11 +371,11 @@ dependencies = [
 
 [[package]]
 name = "bigdecimal"
-version = "0.2.2"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1e50562e37200edf7c6c43e54a08e64a5553bfb59d9c297d5572512aa517256"
+checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa"
 dependencies = [
- "num-bigint 0.3.3",
+ "num-bigint 0.4.4",
  "num-integer",
  "num-traits",
  "serde",
@@ -446,6 +463,9 @@ name = "bitflags"
 version = "2.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
+dependencies = [
+ "serde",
+]
 
 [[package]]
 name = "bitvec"
@@ -633,6 +653,30 @@ dependencies = [
  "lazy_static",
 ]
 
+[[package]]
+name = "borsh"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26d4d6dafc1a3bb54687538972158f07b2c948bc57d5890df22c0739098b3028"
+dependencies = [
+ "borsh-derive",
+ "cfg_aliases",
+]
+
+[[package]]
+name = "borsh-derive"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0"
+dependencies = [
+ "once_cell",
+ "proc-macro-crate 2.0.1",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 2.0.39",
+ "syn_derive",
+]
+
 [[package]]
 name = "bumpalo"
 version = "3.14.0"
@@ -645,6 +689,28 @@ version = "1.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c"
 
+[[package]]
+name = "bytecheck"
+version = "0.6.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627"
+dependencies = [
+ "bytecheck_derive",
+ "ptr_meta",
+ "simdutf8",
+]
+
+[[package]]
+name = "bytecheck_derive"
+version = "0.6.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61"
+dependencies = [
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "bytecount"
 version = "0.6.7"
@@ -741,6 +807,12 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
+[[package]]
+name = "cfg_aliases"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
+
 [[package]]
 name = "chrono"
 version = "0.4.31"
@@ -1003,18 +1075,18 @@ dependencies = [
 
 [[package]]
 name = "crc"
-version = "2.1.0"
+version = "3.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49fc9a695bca7f35f5f4c15cddc84415f66a74ea78eef08e90c5024f2b540e23"
+checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe"
 dependencies = [
  "crc-catalog",
 ]
 
 [[package]]
 name = "crc-catalog"
-version = "1.1.1"
+version = "2.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403"
+checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
 
 [[package]]
 name = "crc32fast"
@@ -1392,7 +1464,7 @@ dependencies = [
  "hashbrown 0.14.2",
  "lock_api",
  "once_cell",
- "parking_lot_core 0.9.9",
+ "parking_lot_core",
 ]
 
 [[package]]
@@ -1476,35 +1548,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
 dependencies = [
  "block-buffer 0.10.4",
+ "const-oid",
  "crypto-common",
  "subtle",
 ]
 
 [[package]]
-name = "dirs"
-version = "4.0.0"
+name = "dotenvy"
+version = "0.15.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059"
-dependencies = [
- "dirs-sys",
-]
-
-[[package]]
-name = "dirs-sys"
-version = "0.3.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6"
-dependencies = [
- "libc",
- "redox_users",
- "winapi",
-]
-
-[[package]]
-name = "dotenv"
-version = "0.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
+checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
 
 [[package]]
 name = "dtoa"
@@ -1656,6 +1709,17 @@ dependencies = [
  "version_check",
 ]
 
+[[package]]
+name = "etcetera"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943"
+dependencies = [
+ "cfg-if 1.0.0",
+ "home",
+ "windows-sys",
+]
+
 [[package]]
 name = "ethabi"
 version = "18.0.0"
@@ -1847,6 +1911,17 @@ dependencies = [
  "miniz_oxide",
 ]
 
+[[package]]
+name = "flume"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "spin 0.9.8",
+]
+
 [[package]]
 name = "fnv"
 version = "1.0.7"
@@ -2008,13 +2083,13 @@ dependencies = [
 
 [[package]]
 name = "futures-intrusive"
-version = "0.4.2"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5"
+checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f"
 dependencies = [
  "futures-core",
  "lock_api",
- "parking_lot 0.11.2",
+ "parking_lot",
 ]
 
 [[package]]
@@ -2240,19 +2315,13 @@ dependencies = [
 
 [[package]]
 name = "hashbrown"
-version = "0.11.2"
+version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
 dependencies = [
  "ahash 0.7.7",
 ]
 
-[[package]]
-name = "hashbrown"
-version = "0.12.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
-
 [[package]]
 name = "hashbrown"
 version = "0.13.1"
@@ -2267,14 +2336,18 @@ name = "hashbrown"
 version = "0.14.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156"
+dependencies = [
+ "ahash 0.8.6",
+ "allocator-api2",
+]
 
 [[package]]
 name = "hashlink"
-version = "0.7.0"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf"
+checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7"
 dependencies = [
- "hashbrown 0.11.2",
+ "hashbrown 0.14.2",
 ]
 
 [[package]]
@@ -2594,15 +2667,6 @@ dependencies = [
  "hashbrown 0.14.2",
 ]
 
-[[package]]
-name = "instant"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
-dependencies = [
- "cfg-if 1.0.0",
-]
-
 [[package]]
 name = "ipnet"
 version = "2.9.0"
@@ -2611,9 +2675,12 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3"
 
 [[package]]
 name = "ipnetwork"
-version = "0.17.0"
+version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b"
+checksum = "bf466541e9d546596ee94f9f69590f89473455f88372423e0008fc1a7daf100e"
+dependencies = [
+ "serde",
+]
 
 [[package]]
 name = "is-terminal"
@@ -2644,6 +2711,15 @@ dependencies = [
  "either",
 ]
 
+[[package]]
+name = "itertools"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0"
+dependencies = [
+ "either",
+]
+
 [[package]]
 name = "itoa"
 version = "1.0.9"
@@ -2714,6 +2790,9 @@ name = "lazy_static"
 version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+dependencies = [
+ "spin 0.5.2",
+]
 
 [[package]]
 name = "lazycell"
@@ -2743,17 +2822,6 @@ version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058"
 
-[[package]]
-name = "libredox"
-version = "0.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8"
-dependencies = [
- "bitflags 2.4.1",
- "libc",
- "redox_syscall 0.4.1",
-]
-
 [[package]]
 name = "librocksdb-sys"
 version = "0.11.0+8.1.1"
@@ -2768,6 +2836,17 @@ dependencies = [
  "libz-sys",
 ]
 
+[[package]]
+name = "libsqlite3-sys"
+version = "0.27.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf4e226dcd58b4be396f7bd3c20da8fdee2911400705297ba7d2d7cc2c30f716"
+dependencies = [
+ "cc",
+ "pkg-config",
+ "vcpkg",
+]
+
 [[package]]
 name = "libz-sys"
 version = "1.1.12"
@@ -3084,6 +3163,7 @@ dependencies = [
  "zk_evm 1.3.1",
  "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)",
  "zk_evm 1.4.0",
+ "zkevm_test_harness 1.4.0",
  "zksync_contracts",
  "zksync_state",
  "zksync_system_constants",
@@ -3208,7 +3288,6 @@ dependencies = [
  "autocfg 1.1.0",
  "num-integer",
  "num-traits",
- "serde",
 ]
 
 [[package]]
@@ -3223,6 +3302,23 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "num-bigint-dig"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151"
+dependencies = [
+ "byteorder",
+ "lazy_static",
+ "libm",
+ "num-integer",
+ "num-iter",
+ "num-traits",
+ "rand 0.8.5",
+ "smallvec",
+ "zeroize",
+]
+
 [[package]]
 name = "num-complex"
 version = "0.3.1"
@@ -3230,7 +3326,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "747d632c0c558b87dbabbe6a82f3b4ae03720d0646ac5b7b4dae89394be5f2c5"
 dependencies = [
  "num-traits",
- "serde",
 ]
 
 [[package]]
@@ -3240,6 +3335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214"
 dependencies = [
  "num-traits",
+ "serde",
 ]
 
 [[package]]
@@ -3305,7 +3401,6 @@ dependencies = [
  "num-bigint 0.3.3",
  "num-integer",
  "num-traits",
- "serde",
 ]
 
 [[package]]
@@ -3318,6 +3413,7 @@ dependencies = [
  "num-bigint 0.4.4",
  "num-integer",
  "num-traits",
+ "serde",
 ]
 
 [[package]]
@@ -3355,7 +3451,7 @@ version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6"
 dependencies = [
- "proc-macro-crate",
+ "proc-macro-crate 1.3.1",
  "proc-macro2 1.0.69",
  "quote 1.0.33",
  "syn 2.0.39",
@@ -3563,7 +3659,7 @@ version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27"
 dependencies = [
- "proc-macro-crate",
+ "proc-macro-crate 1.3.1",
  "proc-macro2 1.0.69",
  "quote 1.0.33",
  "syn 1.0.109",
@@ -3575,23 +3671,12 @@ version = "3.6.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260"
 dependencies = [
- "proc-macro-crate",
+ "proc-macro-crate 1.3.1",
  "proc-macro2 1.0.69",
  "quote 1.0.33",
  "syn 1.0.109",
 ]
 
-[[package]]
-name = "parking_lot"
-version = "0.11.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
-dependencies = [
- "instant",
- "lock_api",
- "parking_lot_core 0.8.6",
-]
-
 [[package]]
 name = "parking_lot"
 version = "0.12.1"
@@ -3599,21 +3684,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
 dependencies = [
  "lock_api",
- "parking_lot_core 0.9.9",
-]
-
-[[package]]
-name = "parking_lot_core"
-version = "0.8.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
-dependencies = [
- "cfg-if 1.0.0",
- "instant",
- "libc",
- "redox_syscall 0.2.16",
- "smallvec",
- "winapi",
+ "parking_lot_core",
 ]
 
 [[package]]
@@ -3624,7 +3695,7 @@ checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e"
 dependencies = [
  "cfg-if 1.0.0",
  "libc",
- "redox_syscall 0.4.1",
+ "redox_syscall",
  "smallvec",
  "windows-targets",
 ]
@@ -3784,6 +3855,17 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
 
+[[package]]
+name = "pkcs1"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
+dependencies = [
+ "der 0.7.8",
+ "pkcs8 0.10.2",
+ "spki 0.7.2",
+]
+
 [[package]]
 name = "pkcs8"
 version = "0.9.0"
@@ -3908,6 +3990,16 @@ dependencies = [
  "toml_edit 0.19.15",
 ]
 
+[[package]]
+name = "proc-macro-crate"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97dc5fea232fc28d2f597b37c4876b348a40e33f3b02cc975c8d006d78d94b1a"
+dependencies = [
+ "toml_datetime",
+ "toml_edit 0.20.2",
+]
+
 [[package]]
 name = "proc-macro-error"
 version = "1.0.4"
@@ -3964,7 +4056,7 @@ checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2"
 dependencies = [
  "dtoa",
  "itoa",
- "parking_lot 0.12.1",
+ "parking_lot",
  "prometheus-client-derive-encode",
 ]
 
@@ -4108,6 +4200,26 @@ dependencies = [
  "thiserror",
 ]
 
+[[package]]
+name = "ptr_meta"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1"
+dependencies = [
+ "ptr_meta_derive",
+]
+
+[[package]]
+name = "ptr_meta_derive"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac"
+dependencies = [
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "pulldown-cmark"
 version = "0.9.3"
@@ -4421,15 +4533,6 @@ dependencies = [
  "rand_core 0.3.1",
 ]
 
-[[package]]
-name = "redox_syscall"
-version = "0.2.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
-dependencies = [
- "bitflags 1.3.2",
-]
-
 [[package]]
 name = "redox_syscall"
 version = "0.4.1"
@@ -4439,17 +4542,6 @@ dependencies = [
  "bitflags 1.3.2",
 ]
 
-[[package]]
-name = "redox_users"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4"
-dependencies = [
- "getrandom 0.2.11",
- "libredox",
- "thiserror",
-]
-
 [[package]]
 name = "regex"
 version = "1.10.2"
@@ -4494,6 +4586,15 @@ version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
 
+[[package]]
+name = "rend"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2571463863a6bd50c32f94402933f03457a3fbaf697a707c5be741e459f08fd"
+dependencies = [
+ "bytecheck",
+]
+
 [[package]]
 name = "reqwest"
 version = "0.11.22"
@@ -4633,6 +4734,35 @@ dependencies = [
  "opaque-debug",
 ]
 
+[[package]]
+name = "rkyv"
+version = "0.7.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "527a97cdfef66f65998b5f3b637c26f5a5ec09cc52a3f9932313ac645f4190f5"
+dependencies = [
+ "bitvec 1.0.1",
+ "bytecheck",
+ "bytes",
+ "hashbrown 0.12.3",
+ "ptr_meta",
+ "rend",
+ "rkyv_derive",
+ "seahash",
+ "tinyvec",
+ "uuid",
+]
+
+[[package]]
+name = "rkyv_derive"
+version = "0.7.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5c462a1328c8e67e4d6dbad1eb0355dd43e8ab432c6e227a43657f16ade5033"
+dependencies = [
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "rlp"
 version = "0.5.2"
@@ -4653,6 +4783,42 @@ dependencies = [
  "librocksdb-sys",
 ]
 
+[[package]]
+name = "rsa"
+version = "0.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af6c4b23d99685a1408194da11270ef8e9809aff951cc70ec9b17350b087e474"
+dependencies = [
+ "const-oid",
+ "digest 0.10.7",
+ "num-bigint-dig",
+ "num-integer",
+ "num-traits",
+ "pkcs1",
+ "pkcs8 0.10.2",
+ "rand_core 0.6.4",
+ "signature 2.2.0",
+ "spki 0.7.2",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "rust_decimal"
+version = "1.33.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06676aec5ccb8fc1da723cc8c0f9a46549f21ebb8753d3915c6c41db1e7f1dc4"
+dependencies = [
+ "arrayvec 0.7.4",
+ "borsh",
+ "bytes",
+ "num-traits",
+ "rand 0.8.5",
+ "rkyv",
+ "serde",
+ "serde_json",
+]
+
 [[package]]
 name = "rustc-demangle"
 version = "0.1.23"
@@ -4807,6 +4973,12 @@ dependencies = [
  "untrusted 0.9.0",
 ]
 
+[[package]]
+name = "seahash"
+version = "4.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
+
 [[package]]
 name = "sec1"
 version = "0.3.0"
@@ -5080,17 +5252,6 @@ dependencies = [
  "syn 1.0.109",
 ]
 
-[[package]]
-name = "sha-1"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c"
-dependencies = [
- "cfg-if 1.0.0",
- "cpufeatures",
- "digest 0.10.7",
-]
-
 [[package]]
 name = "sha1"
 version = "0.10.6"
@@ -5226,9 +5387,16 @@ version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de"
 dependencies = [
+ "digest 0.10.7",
  "rand_core 0.6.4",
 ]
 
+[[package]]
+name = "simdutf8"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a"
+
 [[package]]
 name = "simple_asn1"
 version = "0.6.2"
@@ -5321,6 +5489,9 @@ name = "spin"
 version = "0.9.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
+dependencies = [
+ "lock_api",
+]
 
 [[package]]
 name = "spki"
@@ -5350,85 +5521,94 @@ checksum = "c85070f382340e8b23a75808e83573ddf65f9ad9143df9573ca37c1ed2ee956a"
 
 [[package]]
 name = "sqlformat"
-version = "0.1.8"
+version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4b7922be017ee70900be125523f38bdd644f4f06a1b16e8fa5a8ee8c34bffd4"
+checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c"
 dependencies = [
- "itertools 0.10.5",
+ "itertools 0.12.0",
  "nom",
  "unicode_categories",
 ]
 
 [[package]]
 name = "sqlx"
-version = "0.5.13"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "551873805652ba0d912fec5bbb0f8b4cdd96baf8e2ebf5970e5671092966019b"
+checksum = "dba03c279da73694ef99763320dea58b51095dfe87d001b1d4b5fe78ba8763cf"
 dependencies = [
  "sqlx-core",
  "sqlx-macros",
+ "sqlx-mysql",
+ "sqlx-postgres",
+ "sqlx-sqlite",
 ]
 
 [[package]]
 name = "sqlx-core"
-version = "0.5.13"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e48c61941ccf5ddcada342cd59e3e5173b007c509e1e8e990dafc830294d9dc5"
+checksum = "d84b0a3c3739e220d94b3239fd69fb1f74bc36e16643423bd99de3b43c21bfbd"
 dependencies = [
- "ahash 0.7.7",
+ "ahash 0.8.6",
  "atoi",
- "base64 0.13.1",
  "bigdecimal",
- "bitflags 1.3.2",
  "byteorder",
  "bytes",
  "chrono",
  "crc",
  "crossbeam-queue 0.3.8",
- "dirs",
+ "dotenvy",
  "either",
  "event-listener",
  "futures-channel",
  "futures-core",
  "futures-intrusive",
+ "futures-io",
  "futures-util",
  "hashlink",
  "hex",
- "hkdf",
- "hmac 0.12.1",
- "indexmap 1.9.3",
+ "indexmap 2.1.0",
  "ipnetwork",
- "itoa",
- "libc",
  "log",
- "md-5",
  "memchr",
- "num-bigint 0.3.3",
+ "native-tls",
  "once_cell",
  "paste",
  "percent-encoding",
- "rand 0.8.5",
+ "rust_decimal",
  "serde",
  "serde_json",
- "sha-1",
  "sha2 0.10.8",
  "smallvec",
  "sqlformat",
- "sqlx-rt",
- "stringprep",
  "thiserror",
+ "tokio",
  "tokio-stream",
+ "tracing",
  "url",
- "whoami",
 ]
 
 [[package]]
 name = "sqlx-macros"
-version = "0.5.13"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "89961c00dc4d7dffb7aee214964b065072bff69e36ddb9e2c107541f75e4f2a5"
+dependencies = [
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "sqlx-core",
+ "sqlx-macros-core",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "sqlx-macros-core"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc0fba2b0cae21fc00fe6046f8baa4c7fcb49e379f0f592b04696607f69ed2e1"
+checksum = "d0bd4519486723648186a08785143599760f7cc81c52334a55d6a83ea1e20841"
 dependencies = [
- "dotenv",
+ "atomic-write-file",
+ "dotenvy",
  "either",
  "heck 0.4.1",
  "hex",
@@ -5439,21 +5619,126 @@ dependencies = [
  "serde_json",
  "sha2 0.10.8",
  "sqlx-core",
- "sqlx-rt",
+ "sqlx-mysql",
+ "sqlx-postgres",
+ "sqlx-sqlite",
  "syn 1.0.109",
+ "tempfile",
+ "tokio",
  "url",
 ]
 
 [[package]]
-name = "sqlx-rt"
-version = "0.5.13"
+name = "sqlx-mysql"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4db708cd3e459078f85f39f96a00960bd841f66ee2a669e90bf36907f5a79aae"
+checksum = "e37195395df71fd068f6e2082247891bc11e3289624bbc776a0cdfa1ca7f1ea4"
 dependencies = [
- "native-tls",
+ "atoi",
+ "base64 0.21.5",
+ "bigdecimal",
+ "bitflags 2.4.1",
+ "byteorder",
+ "bytes",
+ "chrono",
+ "crc",
+ "digest 0.10.7",
+ "dotenvy",
+ "either",
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-util",
+ "generic-array",
+ "hex",
+ "hkdf",
+ "hmac 0.12.1",
+ "itoa",
+ "log",
+ "md-5",
+ "memchr",
  "once_cell",
- "tokio",
- "tokio-native-tls",
+ "percent-encoding",
+ "rand 0.8.5",
+ "rsa",
+ "rust_decimal",
+ "serde",
+ "sha1",
+ "sha2 0.10.8",
+ "smallvec",
+ "sqlx-core",
+ "stringprep",
+ "thiserror",
+ "tracing",
+ "whoami",
+]
+
+[[package]]
+name = "sqlx-postgres"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6ac0ac3b7ccd10cc96c7ab29791a7dd236bd94021f31eec7ba3d46a74aa1c24"
+dependencies = [
+ "atoi",
+ "base64 0.21.5",
+ "bigdecimal",
+ "bitflags 2.4.1",
+ "byteorder",
+ "chrono",
+ "crc",
+ "dotenvy",
+ "etcetera",
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-util",
+ "hex",
+ "hkdf",
+ "hmac 0.12.1",
+ "home",
+ "ipnetwork",
+ "itoa",
+ "log",
+ "md-5",
+ "memchr",
+ "num-bigint 0.4.4",
+ "once_cell",
+ "rand 0.8.5",
+ "rust_decimal",
+ "serde",
+ "serde_json",
+ "sha1",
+ "sha2 0.10.8",
+ "smallvec",
+ "sqlx-core",
+ "stringprep",
+ "thiserror",
+ "tracing",
+ "whoami",
+]
+
+[[package]]
+name = "sqlx-sqlite"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "210976b7d948c7ba9fced8ca835b11cbb2d677c59c79de41ac0d397e14547490"
+dependencies = [
+ "atoi",
+ "chrono",
+ "flume",
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-intrusive",
+ "futures-util",
+ "libsqlite3-sys",
+ "log",
+ "percent-encoding",
+ "serde",
+ "sqlx-core",
+ "tracing",
+ "url",
+ "urlencoding",
 ]
 
 [[package]]
@@ -5576,6 +5861,18 @@ dependencies = [
  "unicode-ident",
 ]
 
+[[package]]
+name = "syn_derive"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b"
+dependencies = [
+ "proc-macro-error",
+ "proc-macro2 1.0.69",
+ "quote 1.0.33",
+ "syn 2.0.39",
+]
+
 [[package]]
 name = "sync_vm"
 version = "1.3.3"
@@ -5640,7 +5937,7 @@ checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5"
 dependencies = [
  "cfg-if 1.0.0",
  "fastrand",
- "redox_syscall 0.4.1",
+ "redox_syscall",
  "rustix",
  "windows-sys",
 ]
@@ -5796,7 +6093,7 @@ dependencies = [
  "libc",
  "mio",
  "num_cpus",
- "parking_lot 0.12.1",
+ "parking_lot",
  "pin-project-lite",
  "signal-hook-registry",
  "socket2 0.5.5",
@@ -5862,9 +6159,9 @@ dependencies = [
 
 [[package]]
 name = "toml_datetime"
-version = "0.6.5"
+version = "0.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
+checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b"
 
 [[package]]
 name = "toml_edit"
@@ -5888,6 +6185,17 @@ dependencies = [
  "winnow",
 ]
 
+[[package]]
+name = "toml_edit"
+version = "0.20.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338"
+dependencies = [
+ "indexmap 2.1.0",
+ "toml_datetime",
+ "winnow",
+]
+
 [[package]]
 name = "tower-service"
 version = "0.3.2"
@@ -5900,6 +6208,7 @@ version = "0.1.40"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"
 dependencies = [
+ "log",
  "pin-project-lite",
  "tracing-attributes",
  "tracing-core",
@@ -6387,7 +6696,7 @@ dependencies = [
  "jsonrpc-core",
  "log",
  "once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
  "pin-project",
  "reqwest",
  "rlp",
@@ -6421,10 +6730,6 @@ name = "whoami"
 version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50"
-dependencies = [
- "wasm-bindgen",
- "web-sys",
-]
 
 [[package]]
 name = "winapi"
@@ -6674,6 +6979,7 @@ version = "0.1.0"
 source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#32dd320953841aa78579d9da08abbc70bcaed175"
 dependencies = [
  "anyhow",
+ "num_enum",
  "serde",
  "static_assertions",
  "zkevm_opcode_defs 1.3.2",
@@ -6930,7 +7236,7 @@ dependencies = [
  "bincode",
  "hex",
  "itertools 0.10.5",
- "num 0.3.1",
+ "num 0.4.1",
  "once_cell",
  "prost",
  "rand 0.8.5",
@@ -7219,7 +7525,7 @@ dependencies = [
  "codegen 0.1.0",
  "ethereum-types 0.12.1",
  "hex",
- "num 0.3.1",
+ "num 0.4.1",
  "num_enum",
  "once_cell",
  "parity-crypto",
@@ -7253,7 +7559,7 @@ dependencies = [
  "hex",
  "itertools 0.10.5",
  "metrics",
- "num 0.3.1",
+ "num 0.4.1",
  "reqwest",
  "serde",
  "thiserror",