diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000000..f9478d13691 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +beacon_node/network/ @jxs +beacon_node/lighthouse_network/ @jxs diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 65663e0cf49..45f3b757e74 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -350,7 +350,7 @@ jobs: - name: Check formatting with cargo fmt run: make cargo-fmt - name: Lint code for quality and style with Clippy - run: make lint + run: make lint-full - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock - name: Typecheck benchmark code without running it diff --git a/Makefile b/Makefile index 958abf87058..8faf8a2e54b 100644 --- a/Makefile +++ b/Makefile @@ -204,7 +204,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. lint: - RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ + cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ -D clippy::fn_to_numeric_cast_any \ -D clippy::manual_let_else \ -D clippy::large_stack_frames \ @@ -220,6 +220,10 @@ lint: lint-fix: EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint +# Also run the lints on the optimized-only tests +lint-full: + RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" $(MAKE) lint + # Runs the makefile in the `ef_tests` repo. 
# # May download and extract an archive of test vectors from the ethereum diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 1680c0298d1..bd47e82215e 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -7,8 +7,9 @@ use std::sync::Arc; use types::beacon_block_body::KzgCommitments; use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError}; use types::{ - Blob, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256, - KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, + Blob, BlobSidecar, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecar, + DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, + SignedBeaconBlockHeader, SignedBlindedBeaconBlock, }; /// Converts a blob ssz List object to an array to be used with the kzg @@ -243,6 +244,83 @@ fn build_data_column_sidecars( Ok(sidecars) } +/// Reconstruct blobs from a subset of data column sidecars (requires at least 50%). +/// +/// If `blob_indices_opt` is `None`, this function attempts to reconstruct all blobs associated +/// with the block. +pub fn reconstruct_blobs( + kzg: &Kzg, + data_columns: &[Arc>], + blob_indices_opt: Option>, + signed_block: &SignedBlindedBeaconBlock, +) -> Result, String> { + // The data columns are from the database, so we assume their correctness. 
+ let first_data_column = data_columns + .first() + .ok_or("data_columns should have at least one element".to_string())?; + + let blob_indices: Vec = match blob_indices_opt { + Some(indices) => indices.into_iter().map(|i| i as usize).collect(), + None => { + let num_of_blobs = first_data_column.kzg_commitments.len(); + (0..num_of_blobs).collect() + } + }; + + let blob_sidecars = blob_indices + .into_par_iter() + .map(|row_index| { + let mut cells: Vec = vec![]; + let mut cell_ids: Vec = vec![]; + for data_column in data_columns { + let cell = data_column + .column + .get(row_index) + .ok_or(format!("Missing data column at row index {row_index}")) + .and_then(|cell| { + ssz_cell_to_crypto_cell::(cell).map_err(|e| format!("{e:?}")) + })?; + + cells.push(cell); + cell_ids.push(data_column.index); + } + + let (cells, _kzg_proofs) = kzg + .recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) + .map_err(|e| format!("Failed to recover cells and compute KZG proofs: {e:?}"))?; + + let num_cells_original_blob = cells.len() / 2; + let blob_bytes = cells + .into_iter() + .take(num_cells_original_blob) + .flat_map(|cell| cell.into_iter()) + .collect(); + + let blob = Blob::::new(blob_bytes).map_err(|e| format!("{e:?}"))?; + let kzg_commitment = first_data_column + .kzg_commitments + .get(row_index) + .ok_or(format!("Missing KZG commitment for blob {row_index}"))?; + let kzg_proof = compute_blob_kzg_proof::(kzg, &blob, *kzg_commitment) + .map_err(|e| format!("{e:?}"))?; + + BlobSidecar::::new_with_existing_proof( + row_index, + blob, + signed_block, + first_data_column.signed_block_header.clone(), + &first_data_column.kzg_commitments_inclusion_proof, + kzg_proof, + ) + .map(Arc::new) + .map_err(|e| format!("{e:?}")) + }) + .collect::, _>>()? + .into(); + + Ok(blob_sidecars) +} + /// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). 
pub fn reconstruct_data_columns( kzg: &Kzg, @@ -265,7 +343,7 @@ pub fn reconstruct_data_columns( for data_column in data_columns { let cell = data_column.column.get(row_index).ok_or( KzgError::InconsistentArrayLength(format!( - "Missing data column at index {row_index}" + "Missing data column at row index {row_index}" )), )?; @@ -289,12 +367,16 @@ pub fn reconstruct_data_columns( #[cfg(test)] mod test { - use crate::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; + use crate::kzg_utils::{ + blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns, + }; use bls::Signature; + use eth2::types::BlobsBundle; + use execution_layer::test_utils::generate_blobs; use kzg::{trusted_setup::get_trusted_setup, Kzg, KzgCommitment, TrustedSetup}; use types::{ - beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, - ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, + beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, BlobsList, ChainSpec, + EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, }; type E = MainnetEthSpec; @@ -308,6 +390,7 @@ mod test { test_build_data_columns_empty(&kzg, &spec); test_build_data_columns(&kzg, &spec); test_reconstruct_data_columns(&kzg, &spec); + test_reconstruct_blobs_from_data_columns(&kzg, &spec); } #[track_caller] @@ -379,6 +462,36 @@ mod test { } } + #[track_caller] + fn test_reconstruct_blobs_from_data_columns(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 6; + let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let blob_refs = blobs.iter().collect::>(); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + + // Now reconstruct + let signed_blinded_block = signed_block.into(); + let blob_indices = vec![3, 4, 5]; + let reconstructed_blobs = reconstruct_blobs( + kzg, + &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + 
Some(blob_indices.clone()), + &signed_blinded_block, + ) + .unwrap(); + + for i in blob_indices { + let reconstructed_blob = &reconstructed_blobs + .iter() + .find(|sidecar| sidecar.index == i) + .map(|sidecar| sidecar.blob.clone()) + .expect("reconstructed blob should exist"); + let original_blob = blobs.get(i as usize).unwrap(); + assert_eq!(reconstructed_blob, original_blob, "{i}"); + } + } + fn get_kzg() -> Kzg { let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) @@ -397,12 +510,20 @@ mod test { KzgCommitments::::new(vec![KzgCommitment::empty_for_testing(); num_of_blobs]) .unwrap(); - let signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + let mut signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + + let (blobs_bundle, _) = generate_blobs::(num_of_blobs).unwrap(); + let BlobsBundle { + blobs, + commitments, + proofs: _, + } = blobs_bundle; - let blobs = (0..num_of_blobs) - .map(|_| Blob::::default()) - .collect::>() - .into(); + *signed_block + .message_mut() + .body_mut() + .blob_kzg_commitments_mut() + .unwrap() = commitments; (signed_block, blobs) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index c6aa9fbcac7..ae3add7f032 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1656,7 +1656,7 @@ pub static BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: LazyLock> }); pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = LazyLock::new(|| { try_create_histogram_vec_with_buckets( - "data_column_sidecar_computation_seconds", + "beacon_data_column_sidecar_computation_seconds", "Time taken to compute data column sidecar, including cells, proofs and inclusion proof", Ok(vec![0.1, 0.15, 0.25, 0.35, 0.5, 0.7, 1.0, 2.5, 5.0, 10.0]), &["blob_count"], @@ -1665,7 +1665,7 @@ pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = 
Laz pub static DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION: LazyLock> = LazyLock::new(|| { try_create_histogram( - "data_column_sidecar_inclusion_proof_verification_seconds", + "beacon_data_column_sidecar_inclusion_proof_verification_seconds", "Time taken to verify data_column sidecar inclusion proof", ) }); @@ -1693,7 +1693,7 @@ pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_blobs_column_sidecar_processing_successes_total", + "beacon_data_column_sidecar_processing_successes_total", "Number of data column sidecars verified for gossip", ) }); @@ -1847,7 +1847,7 @@ pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock> = LazyLock: pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram_with_buckets( - "kzg_verification_data_column_single_seconds", + "beacon_kzg_verification_data_column_single_seconds", "Runtime of single data column kzg verification", Ok(vec![ 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.005, 0.007, 0.01, 0.02, 0.05, @@ -1857,7 +1857,7 @@ pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram_with_buckets( - "kzg_verification_data_column_batch_seconds", + "beacon_kzg_verification_data_column_batch_seconds", "Runtime of batched data column kzg verification", Ok(vec![ 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.015, 0.02, 0.03, 0.05, 0.07, @@ -1910,14 +1910,14 @@ pub static DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE: LazyLock> = LazyLock::new(|| { try_create_histogram( - "data_availability_reconstruction_time_seconds", + "beacon_data_availability_reconstruction_time_seconds", "Time taken to reconstruct columns", ) }); pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "data_availability_reconstructed_columns_total", + 
"beacon_data_availability_reconstructed_columns_total", "Total count of reconstructed columns", ) }); @@ -1925,7 +1925,7 @@ pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "kzg_data_column_reconstruction_attempts", + "beacon_kzg_data_column_reconstruction_attempts", "Count of times data column reconstruction has been attempted", ) }); @@ -1933,7 +1933,7 @@ pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock> pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "kzg_data_column_reconstruction_failures", + "beacon_kzg_data_column_reconstruction_failures", "Count of times data column reconstruction has failed", ) }); @@ -1941,7 +1941,7 @@ pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> pub static KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( - "kzg_data_column_reconstruction_incomplete_total", + "beacon_kzg_data_column_reconstruction_incomplete_total", "Count of times data column reconstruction attempts did not result in an import", &["reason"], ) diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index dba8eb1ef32..b9e48833184 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,4 +1,5 @@ use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; +use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlobIndicesQuery; use eth2::types::BlockId as CoreBlockId; @@ -9,6 +10,7 @@ use types::{ BlobSidecarList, EthSpec, FixedBytesExtended, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, }; +use warp::Rejection; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for 
a given /// `BlockId`. @@ -261,7 +263,7 @@ impl BlockId { #[allow(clippy::type_complexity)] pub fn get_blinded_block_and_blob_list_filtered( &self, - indices: BlobIndicesQuery, + query: BlobIndicesQuery, chain: &BeaconChain, ) -> Result< ( @@ -286,20 +288,32 @@ impl BlockId { // Return the `BlobSidecarList` identified by `self`. let blob_sidecar_list = if !blob_kzg_commitments.is_empty() { - chain - .store - .get_blobs(&root) - .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "no blobs stored for block {root}" - )) - })? + if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + Self::get_blobs_from_data_columns(chain, root, query.indices, &block)? + } else { + Self::get_blobs(chain, root, query.indices)? + } } else { BlobSidecarList::default() }; - let blob_sidecar_list_filtered = match indices.indices { + Ok((block, blob_sidecar_list, execution_optimistic, finalized)) + } + + fn get_blobs( + chain: &BeaconChain, + root: Hash256, + indices: Option>, + ) -> Result, Rejection> { + let blob_sidecar_list = chain + .store + .get_blobs(&root) + .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? 
+ .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("no blobs stored for block {root}")) + })?; + + let blob_sidecar_list_filtered = match indices { Some(vec) => { let list = blob_sidecar_list .into_iter() @@ -310,12 +324,48 @@ impl BlockId { } None => blob_sidecar_list, }; - Ok(( - block, - blob_sidecar_list_filtered, - execution_optimistic, - finalized, - )) + + Ok(blob_sidecar_list_filtered) + } + + fn get_blobs_from_data_columns( + chain: &BeaconChain, + root: Hash256, + blob_indices: Option>, + block: &SignedBlindedBeaconBlock<::EthSpec>, + ) -> Result, Rejection> { + let column_indices = chain.store.get_data_column_keys(root).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error fetching data column keys: {e:?}" + )) + })?; + + let num_found_column_keys = column_indices.len(); + let num_required_columns = chain.spec.number_of_columns / 2; + let is_blob_available = num_found_column_keys >= num_required_columns; + + if is_blob_available { + let data_columns = column_indices + .into_iter() + .filter_map( + |column_index| match chain.get_data_column(&root, &column_index) { + Ok(Some(data_column)) => Some(Ok(data_column)), + Ok(None) => None, + Err(e) => Some(Err(warp_utils::reject::beacon_chain_error(e))), + }, + ) + .collect::, _>>()?; + + reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error reconstructing blobs from data columns: {e:?}" + )) + }) + } else { + Err(warp_utils::reject::custom_server_error( + format!("Insufficient data columns to reconstruct blobs: required {num_required_columns}, but only {num_found_column_keys} were found.") + )) + } + } } diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 5a330388cce..302aa2a4c18 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,9 +1,9 @@ use crate::test_utils::TestRandom; -use crate::ForkName; use crate::{
beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, Epoch, EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, }; +use crate::{AbstractExecPayload, ForkName}; use crate::{ForkVersionDeserialize, KzgProofs, SignedBeaconBlock}; use bls::Signature; use derivative::Derivative; @@ -150,10 +150,10 @@ impl BlobSidecar { }) } - pub fn new_with_existing_proof( + pub fn new_with_existing_proof>( index: usize, blob: Blob, - signed_block: &SignedBeaconBlock, + signed_block: &SignedBeaconBlock, signed_block_header: SignedBeaconBlockHeader, kzg_commitments_inclusion_proof: &[Hash256], kzg_proof: KzgProof, diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index c5b303e4d18..1945399c86d 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -345,7 +345,7 @@ fn http_store_keystore_passwords_in_secrets_dir_present() { } #[test] -fn http_token_path_flag() { +fn http_token_path_flag_present() { let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() .flag("http", None) @@ -359,6 +359,19 @@ fn http_token_path_flag() { }); } +#[test] +fn http_token_path_default() { + CommandLineTest::new() + .flag("http", None) + .run() + .with_config(|config| { + assert_eq!( + config.http_api.http_token_path, + config.validator_dir.join("api-token.txt") + ); + }); +} + // Tests for Metrics flags. #[test] fn metrics_flag() { diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index f3dab3780c0..73ebe717af3 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -106,6 +106,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { + // This value is always overridden when building config from CLI. 
let http_token_path = dirs::home_dir() .unwrap_or_else(|| PathBuf::from(".")) .join(DEFAULT_ROOT_DIR) diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 0fecb5202d1..bb72ef81c80 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -314,10 +314,11 @@ impl Config { config.http_api.store_passwords_in_secrets_dir = true; } - if cli_args.get_one::("http-token-path").is_some() { - config.http_api.http_token_path = parse_required(cli_args, "http-token-path") - // For backward compatibility, default to the path under the validator dir if not provided. - .unwrap_or_else(|_| config.validator_dir.join(PK_FILENAME)); + if let Some(http_token_path) = cli_args.get_one::("http-token-path") { + config.http_api.http_token_path = PathBuf::from(http_token_path); + } else { + // For backward compatibility, default to the path under the validator dir if not provided. + config.http_api.http_token_path = config.validator_dir.join(PK_FILENAME); } /*