diff --git a/narwhal/primary/src/consensus/bullshark.rs b/narwhal/primary/src/consensus/bullshark.rs
index a40ad1a4aae86..4812fc1b0c9b6 100644
--- a/narwhal/primary/src/consensus/bullshark.rs
+++ b/narwhal/primary/src/consensus/bullshark.rs
@@ -399,10 +399,7 @@ impl Bullshark {
         leader_round: Round,
         reputation_scores: &ReputationScores,
     ) -> bool {
-        // Do not perform any update if the feature is disabled
-        if self.protocol_config.narwhal_new_leader_election_schedule()
-            && reputation_scores.final_of_schedule
-        {
+        if reputation_scores.final_of_schedule {
             // create the new swap table and update the scheduler
             self.leader_schedule
                 .update_leader_swap_table(LeaderSwapTable::new(
diff --git a/narwhal/primary/src/consensus/tests/bullshark_tests.rs b/narwhal/primary/src/consensus/tests/bullshark_tests.rs
index 123f487656979..dbf203f826d2f 100644
--- a/narwhal/primary/src/consensus/tests/bullshark_tests.rs
+++ b/narwhal/primary/src/consensus/tests/bullshark_tests.rs
@@ -88,23 +88,10 @@ async fn commit_one_with_leader_schedule_change() {
     }
 
     let test_cases: Vec<TestCase> = vec![
-        TestCase {
-            description: "When no schedule change is enabled, then all leaders commit in round robin fashion".to_string(),
-            protocol_config: latest_protocol_version(),
-            rounds: 11,
-            expected_leaders: VecDeque::from(vec![
-                AuthorityIdentifier(0),
-                AuthorityIdentifier(1),
-                AuthorityIdentifier(2),
-                AuthorityIdentifier(3),
-                AuthorityIdentifier(0),
-            ]),
-        },
         TestCase {
             description: "When schedule change is enabled, then authority 0 is bad node and swapped with authority 3".to_string(),
             protocol_config: {
                 let mut config: ProtocolConfig = latest_protocol_version();
-                config.set_narwhal_new_leader_election_schedule(true);
                 config.set_consensus_bad_nodes_stake_threshold(33);
                 config
             },
@@ -250,7 +237,6 @@ async fn not_enough_support_with_leader_schedule_change() {
     certificates.extend(out);
 
     let mut config: ProtocolConfig = latest_protocol_version();
-    config.set_narwhal_new_leader_election_schedule(true);
     config.set_consensus_bad_nodes_stake_threshold(33);
 
     let metrics = Arc::new(ConsensusMetrics::new(&Registry::new()));
diff --git a/narwhal/primary/src/consensus/tests/consensus_tests.rs b/narwhal/primary/src/consensus/tests/consensus_tests.rs
index 3cb3ceff48baa..ae5f3ee8a81ab 100644
--- a/narwhal/primary/src/consensus/tests/consensus_tests.rs
+++ b/narwhal/primary/src/consensus/tests/consensus_tests.rs
@@ -8,7 +8,7 @@ use prometheus::Registry;
 use storage::NodeStorage;
 use sui_protocol_config::ProtocolConfig;
 use telemetry_subscribers::TelemetryGuards;
-use test_utils::{get_protocol_config, latest_protocol_version};
+use test_utils::latest_protocol_version;
 use test_utils::{temp_dir, CommitteeFixture};
 use tokio::sync::watch;
 use types::{
@@ -32,21 +32,8 @@ use crate::NUM_SHUTDOWN_RECEIVERS;
 /// * no forks created
 #[tokio::test]
 async fn test_consensus_recovery_with_bullshark() {
-    let _guard = setup_tracing();
-
-    // TODO: remove once the new leader schedule has been enabled.
-    // Run with default config settings where the new leader schedule is disabled
-    let config: ProtocolConfig = get_protocol_config(19);
-    test_consensus_recovery_with_bullshark_with_config(config).await;
-
-    // Run with the new leader election schedule enabled
-    let mut config: ProtocolConfig = latest_protocol_version();
-    config.set_consensus_bad_nodes_stake_threshold(33);
-    test_consensus_recovery_with_bullshark_with_config(config).await;
-}
-
-async fn test_consensus_recovery_with_bullshark_with_config(config: ProtocolConfig) {
     // GIVEN
+    let _guard = setup_tracing();
     let num_sub_dags_per_schedule = 3;
 
     let storage = NodeStorage::reopen(temp_dir(), None);
@@ -57,6 +44,9 @@ async fn test_consensus_recovery_with_bullshark_with_config(config: ProtocolConf
     let fixture = CommitteeFixture::builder().build();
     let committee = fixture.committee();
 
+    let mut config: ProtocolConfig = latest_protocol_version();
+    config.set_consensus_bad_nodes_stake_threshold(33);
+
     // AND make certificates for rounds 1 to 7 (inclusive)
     let ids: Vec<_> = fixture.authorities().map(|a| a.id()).collect();
     let genesis = Certificate::genesis(&latest_protocol_version(), &committee)
diff --git a/narwhal/primary/src/consensus/tests/leader_schedule_tests.rs b/narwhal/primary/src/consensus/tests/leader_schedule_tests.rs
index 5e445f591b783..857e77c2f1229 100644
--- a/narwhal/primary/src/consensus/tests/leader_schedule_tests.rs
+++ b/narwhal/primary/src/consensus/tests/leader_schedule_tests.rs
@@ -203,17 +203,8 @@ async fn test_leader_schedule_from_store() {
         .write_consensus_state(&HashMap::new(), &sub_dag)
         .unwrap();
 
-    // WHEN flag is disabled for the new schedule algorithm
-    let protocol_config = ProtocolConfig::get_for_max_version_UNSAFE();
-    let schedule = LeaderSchedule::from_store(committee.clone(), store.clone(), protocol_config);
-
-    // THEN the default should be returned. In this case we detect since good/bad nodes will be empty
-    assert!(schedule.leader_swap_table.read().good_nodes.is_empty());
-    assert!(schedule.leader_swap_table.read().bad_nodes.is_empty());
-
-    // WHEN flag is enabled for the new schedule algorithm
+    // WHEN
     let mut protocol_config = ProtocolConfig::get_for_max_version_UNSAFE();
-    protocol_config.set_narwhal_new_leader_election_schedule(true);
     protocol_config.set_consensus_bad_nodes_stake_threshold(33);
 
     let schedule = LeaderSchedule::from_store(committee, store, protocol_config);
diff --git a/narwhal/primary/src/consensus/tests/randomized_tests.rs b/narwhal/primary/src/consensus/tests/randomized_tests.rs
index 3890d195c18da..0de4010f92887 100644
--- a/narwhal/primary/src/consensus/tests/randomized_tests.rs
+++ b/narwhal/primary/src/consensus/tests/randomized_tests.rs
@@ -23,9 +23,9 @@ use std::ops::RangeInclusive;
 use std::sync::Arc;
 use storage::ConsensusStore;
 use sui_protocol_config::ProtocolConfig;
+use test_utils::latest_protocol_version;
 use test_utils::mock_certificate_with_rand;
 use test_utils::CommitteeFixture;
-use test_utils::{get_protocol_config, latest_protocol_version};
 #[allow(unused_imports)]
 use tokio::sync::mpsc::channel;
 use types::CertificateAPI;
@@ -76,18 +76,6 @@ impl ExecutionPlan {
 #[ignore]
 #[tokio::test(flavor = "multi_thread", worker_threads = 8)]
 async fn bullshark_randomised_tests() {
-    // Run the consensus tests without the new consensus schedule changes
-    let protocol_config = get_protocol_config(19);
-    bullshark_randomised_tests_with_config(protocol_config).await;
-
-    // TODO: remove once the new leader election schedule feature is enabled on a protocol version
-    // Run the consensus tests with the new consensus schedule changes enabled
-    let mut config: ProtocolConfig = latest_protocol_version();
-    config.set_consensus_bad_nodes_stake_threshold(33);
-    bullshark_randomised_tests_with_config(config).await;
-}
-
-async fn bullshark_randomised_tests_with_config(protocol_config: ProtocolConfig) {
     // Configuration regarding the randomized tests. The tests will run for different values
     // on the below parameters to increase the different cases we can generate.
 
@@ -122,6 +110,9 @@ async fn bullshark_randomised_tests_with_config(protocol_config: ProtocolConfig)
         },
     ];
 
+    let mut config: ProtocolConfig = latest_protocol_version();
+    config.set_consensus_bad_nodes_stake_threshold(33);
+
     let mut test_execution_list = FuturesUnordered::new();
     let (tx, mut rx) = channel(1000);
 
@@ -186,7 +177,7 @@ async fn bullshark_randomised_tests_with_config(protocol_config: ProtocolConfig)
 
             let consensus_store = store.clone();
 
-            let config = protocol_config.clone();
+            let config = config.clone();
 
             let handle = tokio::spawn(async move {
                 // Create a randomized DAG
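
Note (commentary, not part of the patch): with the narwhal_new_leader_election_schedule flag removed, the swap-table update in Bullshark is gated solely by reputation_scores.final_of_schedule, so the new schedule is always on and the only remaining knob is the bad-nodes stake threshold. A minimal sketch of the post-change test setup, using only calls that appear in the diff above; committee and store are assumed to come from the surrounding test fixture, as in leader_schedule_tests.rs:

    // Sketch of how tests now opt into leader swaps (names as in the diff above).
    // The threshold is assumed to be a percentage of total stake that may be
    // flagged as bad leaders; the tests in this patch use 33.
    let mut config: ProtocolConfig = latest_protocol_version();
    config.set_consensus_bad_nodes_stake_threshold(33);
    let schedule = LeaderSchedule::from_store(committee, store, config);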