Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Merged by Bors] - Fix broken Nethermind integration tests #4836

Closed
wants to merge 14 commits into from
6 changes: 5 additions & 1 deletion .github/workflows/test-suite.yml
Original file line number Diff line number Diff line change
Expand Up @@ -317,17 +317,21 @@ jobs:
./doppelganger_protection.sh success genesis.json
execution-engine-integration-ubuntu:
name: execution-engine-integration-ubuntu
runs-on: ubuntu-latest
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }}
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I've just realized that we're one runner short of running the entire workflow in parallel after adding this one, it may be worth considering adding one more runner instance.

steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: moonrepo/setup-rust@v1
with:
channel: stable
cache-target: release
cache: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Add go compiler to $PATH
if: env.SELF_HOSTED_RUNNERS == 'true'
run: echo "/usr/local/go/bin" >> $GITHUB_PATH
- name: Run exec engine integration tests in release
run: make test-exec-engine
check-code:
Expand Down
2 changes: 1 addition & 1 deletion lighthouse/tests/beacon_node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -835,7 +835,7 @@ fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_fl
let id = "bn-1";
let version = "Lighthouse-v2.1.3";
CommandLineTest::new()
.flag("execution-endpoint", Some(execution_endpoint.clone()))
.flag("execution-endpoint", Some(execution_endpoint))
.flag(jwt_flag, dir.path().join(jwt_file).as_os_str().to_str())
.flag(jwt_id_flag, Some(id))
.flag(jwt_version_flag, Some(version))
Expand Down
1 change: 1 addition & 0 deletions testing/execution_engine_integration/src/build_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,7 @@ pub fn get_latest_release(repo_dir: &Path, branch_name: &str) -> Result<String,
Command::new("git")
.arg("fetch")
.arg("--tags")
.arg("--force")
.current_dir(repo_dir)
.output()
.map_err(|e| format!("Failed to fetch tags for {repo_dir:?}: Err: {e}"))?,
Expand Down
33 changes: 21 additions & 12 deletions testing/execution_engine_integration/src/genesis_json.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,10 @@ pub fn geth_genesis_json() -> Value {
"muirGlacierBlock":0,
"berlinBlock":0,
"londonBlock":0,
"clique": {
"period": 5,
"epoch": 30000
},
"terminalTotalDifficulty":0
"mergeNetsplitBlock": 0,
"shanghaiTime": 0,
"terminalTotalDifficulty": 0,
"terminalTotalDifficultyPassed": true
},
"nonce":"0x42",
"timestamp":"0x0",
Expand Down Expand Up @@ -72,8 +71,10 @@ pub fn nethermind_genesis_json() -> Value {
"accountStartNonce": "0x0",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"networkID": "0x1469ca",
"MergeForkIdTransition": "0x3e8",
"networkID": "0x00146A2E",
"MergeForkIdTransition": "0x0",
"maxCodeSize": "0x6000",
"maxCodeSizeTransition": "0x0",
"eip150Transition": "0x0",
"eip158Transition": "0x0",
"eip160Transition": "0x0",
Expand Down Expand Up @@ -101,7 +102,15 @@ pub fn nethermind_genesis_json() -> Value {
"eip1559Transition": "0x0",
"eip3198Transition": "0x0",
"eip3529Transition": "0x0",
"eip3541Transition": "0x0"
"eip3541Transition": "0x0",
"eip3540TransitionTimestamp": "0x0",
"eip3651TransitionTimestamp": "0x0",
"eip3670TransitionTimestamp": "0x0",
"eip3675TransitionTimestamp": "0x0",
"eip3855TransitionTimestamp": "0x0",
"eip3860TransitionTimestamp": "0x0",
"eip4895TransitionTimestamp": "0x0",
"terminalTotalDifficulty": "0x0"
},
"genesis": {
"seal": {
Expand All @@ -112,20 +121,20 @@ pub fn nethermind_genesis_json() -> Value {
},
"difficulty": "0x01",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x0",
"timestamp": "0x63585F88",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "",
"gasLimit": "0x1C9C380"
"gasLimit": "0x400000"
},
"accounts": {
"0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": {
"balance": "10000000000000000000000000"
},
"0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": {
"balance": "10000000000000000000000000"
},
}
},
"nodes": []
}
}
)
}
11 changes: 9 additions & 2 deletions testing/execution_engine_integration/src/geth.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use crate::execution_engine::GenericExecutionEngine;
use crate::genesis_json::geth_genesis_json;
use std::path::{Path, PathBuf};
use std::process::{Child, Command, Output};
use std::{env, fs::File};
use std::{env, fs};
use tempfile::TempDir;
use unused_port::unused_tcp4_port;

Expand Down Expand Up @@ -36,6 +36,13 @@ pub fn build(execution_clients_dir: &Path) {
});
}

/// Best-effort removal of the cloned go-ethereum repository under
/// `execution_clients_dir` to reclaim disk space after the tests run.
/// A failure (e.g. the directory was never created) is reported to
/// stderr but is deliberately non-fatal.
pub fn clean(execution_clients_dir: &Path) {
    match fs::remove_dir_all(execution_clients_dir.join("go-ethereum")) {
        Ok(()) => (),
        Err(e) => eprintln!("Error while deleting folder: {}", e),
    }
}

/*
* Geth-specific Implementation for GenericExecutionEngine
*/
Expand All @@ -60,7 +67,7 @@ impl GenericExecutionEngine for GethEngine {
let datadir = TempDir::new().unwrap();

let genesis_json_path = datadir.path().join("genesis.json");
let mut file = File::create(&genesis_json_path).unwrap();
let mut file = fs::File::create(&genesis_json_path).unwrap();
let json = geth_genesis_json();
serde_json::to_writer(&mut file, &json).unwrap();

Expand Down
3 changes: 3 additions & 0 deletions testing/execution_engine_integration/src/main.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
#![recursion_limit = "256"] // for inline json

/// This binary runs integration tests between Lighthouse and execution engines.
///
/// It will first attempt to build any supported integration clients, then it will run tests.
Expand Down Expand Up @@ -31,6 +33,7 @@ fn test_geth() {
let test_dir = build_utils::prepare_dir();
geth::build(&test_dir);
TestRig::new(GethEngine).perform_tests_blocking();
geth::clean(&test_dir);
}

fn test_nethermind() {
Expand Down
15 changes: 11 additions & 4 deletions testing/execution_engine_integration/src/nethermind.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ use crate::build_utils;
use crate::execution_engine::GenericExecutionEngine;
use crate::genesis_json::nethermind_genesis_json;
use std::env;
use std::fs::File;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Child, Command, Output};
use tempfile::TempDir;
Expand All @@ -11,7 +11,7 @@ use unused_port::unused_tcp4_port;
/// We've pinned the Nethermind version since our method of using the `master` branch to
/// find the latest tag isn't working. It appears Nethermind don't always tag on `master`.
/// We should fix this so we always pull the latest version of Nethermind.
const NETHERMIND_BRANCH: &str = "release/1.18.2";
const NETHERMIND_BRANCH: &str = "release/1.21.0";
const NETHERMIND_REPO_URL: &str = "https://github.com/NethermindEth/nethermind";

fn build_result(repo_dir: &Path) -> Output {
Expand Down Expand Up @@ -47,6 +47,12 @@ pub fn build(execution_clients_dir: &Path) {
build_utils::check_command_output(build_result(&repo_dir), || {
format!("nethermind build failed using release {last_release}")
});

// Cleanup some disk space by removing nethermind's tests
let tests_dir = execution_clients_dir.join("nethermind/src/tests");
if let Err(e) = fs::remove_dir_all(tests_dir) {
eprintln!("Error while deleting folder: {}", e);
}
Copy link
Member

@jimmygchen jimmygchen Oct 18, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Wow, I just realized these are 2.44 GB on my machine — perhaps we don't even need to clone them in the first place? I think we can optionally exclude the submodules during checkout:

output_to_result(
Command::new("git")
.arg("submodule")
.arg("update")
.arg("--init")
.arg("--recursive")

}

/*
Expand All @@ -68,15 +74,16 @@ impl NethermindEngine {
.join("bin")
.join("Release")
.join("net7.0")
.join("Nethermind.Runner")
.join("linux-x64")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this part of the path didn't exist on my machine; let's see if it exists on CI 🤞

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It no longer runs on macOS though :(

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

path on my machine: testing/execution_engine_integration/execution_clients/nethermind/src/Nethermind/Nethermind.Runner/bin/Release/net7.0/nethermind

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm deleting it. I think maybe this was why the job was failing

.join("nethermind")
}
}

impl GenericExecutionEngine for NethermindEngine {
fn init_datadir() -> TempDir {
let datadir = TempDir::new().unwrap();
let genesis_json_path = datadir.path().join("genesis.json");
let mut file = File::create(genesis_json_path).unwrap();
let mut file = fs::File::create(genesis_json_path).unwrap();
let json = nethermind_genesis_json();
serde_json::to_writer(&mut file, &json).unwrap();
datadir
Expand Down
51 changes: 31 additions & 20 deletions testing/execution_engine_integration/src/test_rig.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,9 @@ use types::{
Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader,
ForkName, FullPayload, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256,
};
const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(30);
const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(60);

const TEST_FORK: ForkName = ForkName::Capella;

struct ExecutionPair<E, T: EthSpec> {
/// The Lighthouse `ExecutionLayer` struct, connected to the `execution_engine` via HTTP.
Expand Down Expand Up @@ -110,7 +112,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
let (runtime_shutdown, exit) = exit_future::signal();
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
let executor = TaskExecutor::new(Arc::downgrade(&runtime), exit, log.clone(), shutdown_tx);
let mut spec = MainnetEthSpec::default_spec();
let mut spec = TEST_FORK.make_genesis_spec(MainnetEthSpec::default_spec());
spec.terminal_total_difficulty = Uint256::zero();

let fee_recipient = None;
Expand Down Expand Up @@ -269,12 +271,11 @@ impl<E: GenericExecutionEngine> TestRig<E> {
Slot::new(1), // Insert proposer for the next slot
head_root,
proposer_index,
// TODO: think about how to test different forks
PayloadAttributes::new(
timestamp,
prev_randao,
Address::repeat_byte(42),
None,
Some(vec![]),
None,
),
)
Expand Down Expand Up @@ -314,8 +315,13 @@ impl<E: GenericExecutionEngine> TestRig<E> {
.execution_layer
.get_suggested_fee_recipient(proposer_index)
.await;
let payload_attributes =
PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None);
let payload_attributes = PayloadAttributes::new(
timestamp,
prev_randao,
suggested_fee_recipient,
Some(vec![]),
None,
);
let valid_payload = self
.ee_a
.execution_layer
Expand All @@ -324,8 +330,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
&payload_attributes,
forkchoice_update_params,
builder_params,
// FIXME: think about how to test other forks
ForkName::Merge,
TEST_FORK,
&self.spec,
)
.await
Expand Down Expand Up @@ -456,8 +461,13 @@ impl<E: GenericExecutionEngine> TestRig<E> {
.execution_layer
.get_suggested_fee_recipient(proposer_index)
.await;
let payload_attributes =
PayloadAttributes::new(timestamp, prev_randao, suggested_fee_recipient, None, None);
let payload_attributes = PayloadAttributes::new(
timestamp,
prev_randao,
suggested_fee_recipient,
Some(vec![]),
None,
);
let second_payload = self
.ee_a
.execution_layer
Expand All @@ -466,8 +476,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
&payload_attributes,
forkchoice_update_params,
builder_params,
// FIXME: think about how to test other forks
ForkName::Merge,
TEST_FORK,
&self.spec,
)
.await
Expand Down Expand Up @@ -498,11 +507,15 @@ impl<E: GenericExecutionEngine> TestRig<E> {
*/
let head_block_hash = valid_payload.block_hash();
let finalized_block_hash = ExecutionBlockHash::zero();
// TODO: think about how to handle different forks
// To save sending proposer preparation data, just set the fee recipient
// to the fee recipient configured for EE A.
let payload_attributes =
PayloadAttributes::new(timestamp, prev_randao, Address::repeat_byte(42), None, None);
let payload_attributes = PayloadAttributes::new(
timestamp,
prev_randao,
Address::repeat_byte(42),
Some(vec![]),
None,
);
let slot = Slot::new(42);
let head_block_root = Hash256::repeat_byte(100);
let validator_index = 0;
Expand Down Expand Up @@ -536,11 +549,7 @@ impl<E: GenericExecutionEngine> TestRig<E> {
.notify_new_payload(second_payload.clone().try_into().unwrap())
.await
.unwrap();
// TODO: we should remove the `Accepted` status here once Geth fixes it
assert!(matches!(
status,
PayloadStatus::Syncing | PayloadStatus::Accepted
));
assert!(matches!(status, PayloadStatus::Syncing));

/*
* Execution Engine B:
Expand Down Expand Up @@ -641,11 +650,13 @@ async fn check_payload_reconstruction<E: GenericExecutionEngine>(
.get_engine_capabilities(None)
.await
.unwrap();

assert!(
// if the engine doesn't have these capabilities, we need to update the client in our tests
capabilities.get_payload_bodies_by_hash_v1 && capabilities.get_payload_bodies_by_range_v1,
"Testing engine does not support payload bodies methods"
);

let mut bodies = ee
.execution_layer
.get_payload_bodies_by_hash(vec![payload.block_hash()])
Expand Down