diff --git a/RELEASE_CHECKLIST.md b/RELEASE_CHECKLIST.md
index 33c1613b4..dc4a9792a 100644
--- a/RELEASE_CHECKLIST.md
+++ b/RELEASE_CHECKLIST.md
@@ -5,6 +5,9 @@ tagged as the final release.
 ## Pre-Prep
 
 - [ ] Inform relevant parties that you're preparing a release (e.g, by posting on Discord)
+- [ ] Manually run the e2e reshare test
+  (first compile the node with the `reshare-test` feature, then run
+  `cargo test --release --features=reshare-test -- --test test_reshare_e2e`)
 - [ ] Create a release branch, e.g., for release candidate `1`: `release/vX.Y.Z-rc.1`.
 
 ## Prep the Runtime and Node
diff --git a/crates/threshold-signature-server/Cargo.toml b/crates/threshold-signature-server/Cargo.toml
index f0471d3fd..8b20111d1 100644
--- a/crates/threshold-signature-server/Cargo.toml
+++ b/crates/threshold-signature-server/Cargo.toml
@@ -114,3 +114,5 @@ bob =[]
 # Enable this feature to run the integration tests for the wasm API of entropy-protocol
 # This requires the entropy-protocol node-js module to be built and so is not run by default
 wasm_test=[]
+# Sets a shorter session duration for the entropy-tss test_reshare_e2e
+reshare-test=[]
diff --git a/crates/threshold-signature-server/src/validator/tests.rs b/crates/threshold-signature-server/src/validator/tests.rs
index f3830c72f..870fe9716 100644
--- a/crates/threshold-signature-server/src/validator/tests.rs
+++ b/crates/threshold-signature-server/src/validator/tests.rs
@@ -239,6 +239,82 @@ async fn test_reshare_basic() {
     clean_tests();
 }
 
+#[cfg(feature = "reshare-test")]
+#[tokio::test]
+#[serial]
+async fn test_reshare_e2e() {
+    initialize_test_logger().await;
+    clean_tests();
+
+    let (_validator_ips, _validator_ids) =
+        spawn_testing_validators(crate::helpers::tests::ChainSpecType::IntegrationJumpStarted)
+            .await;
+
+    let force_authoring = true;
+    let context =
+        test_node_process_testing_state(ChainSpecType::IntegrationJumpStarted, force_authoring)
+            .await;
+    let api = get_api(&context[0].ws_url).await.unwrap();
+    let rpc = get_rpc(&context[0].ws_url).await.unwrap();
+    let client = reqwest::Client::new();
+
+    // Get current signers
+    let signer_query = entropy::storage().staking_extension().signers();
+    let signer_stash_accounts = query_chain(&api, &rpc, signer_query, None).await.unwrap().unwrap();
+    let old_signer_ids: HashSet<[u8; 32]> =
+        HashSet::from_iter(signer_stash_accounts.clone().into_iter().map(|id| id.0));
+    let key_share_before = unsafe_get(&client, hex::encode(NETWORK_PARENT_KEY), 3002).await;
+
+    let mut i = 0;
+    // Wait up to 2min for reshare to complete: check once every second if we have a new set of signers.
+    let old_signer_ids_2 = loop {
+        let new_signer_ids: HashSet<[u8; 32]> = {
+            let signer_query = entropy::storage().staking_extension().signers();
+            let signer_ids = query_chain(&api, &rpc, signer_query, None).await.unwrap().unwrap();
+            HashSet::from_iter(signer_ids.into_iter().map(|id| id.0))
+        };
+        if new_signer_ids != old_signer_ids {
+            break Ok(new_signer_ids);
+        }
+        if i > 240 {
+            break Err("Timed out waiting for reshare");
+        }
+        i += 1;
+        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+    }
+    .unwrap();
+    // wait for rotate keyshare
+    tokio::time::sleep(std::time::Duration::from_secs(10)).await;
+
+    let key_share_after = unsafe_get(&client, hex::encode(NETWORK_PARENT_KEY), 3002).await;
+    assert_ne!(key_share_before, key_share_after);
+
+    let key_share_before_2 = unsafe_get(&client, hex::encode(NETWORK_PARENT_KEY), 3003).await;
+
+    let _ = loop {
+        let new_signer_ids: HashSet<[u8; 32]> = {
+            let signer_query = entropy::storage().staking_extension().signers();
+            let signer_ids = query_chain(&api, &rpc, signer_query, None).await.unwrap().unwrap();
+            HashSet::from_iter(signer_ids.into_iter().map(|id| id.0))
+        };
+        if new_signer_ids != old_signer_ids_2 {
+            break Ok(new_signer_ids);
+        }
+        if i > 240 {
+            break Err("Timed out waiting for second reshare");
+        }
+        i += 1;
+        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+    }
+    .unwrap();
+
+    // wait for rotate keyshare 2
+    tokio::time::sleep(std::time::Duration::from_secs(10)).await;
+
+    let key_share_after_2 = unsafe_get(&client, hex::encode(NETWORK_PARENT_KEY), 3003).await;
+    assert_ne!(key_share_before_2, key_share_after_2);
+}
+
 #[tokio::test]
 #[serial]
 async fn test_reshare_none_called() {
diff --git a/node/cli/Cargo.toml b/node/cli/Cargo.toml
index a6f89f065..3944f50f9 100644
--- a/node/cli/Cargo.toml
+++ b/node/cli/Cargo.toml
@@ -122,3 +122,5 @@ try-runtime=["entropy-runtime/try-runtime", "try-runtime-cli/try-runtime"]
 # Enables real PCK certificate chain verification - which means TSS nodes must be runnning on TDX
 # hardware
 production=["entropy-runtime/production"]
+# Sets a shorter session duration for the entropy-tss test_reshare_e2e
+reshare-test=["entropy-runtime/reshare-test"]
diff --git a/node/cli/src/chain_spec/integration_tests.rs b/node/cli/src/chain_spec/integration_tests.rs
index 192f7f646..e3aeaab69 100644
--- a/node/cli/src/chain_spec/integration_tests.rs
+++ b/node/cli/src/chain_spec/integration_tests.rs
@@ -151,7 +151,9 @@ pub fn integration_tests_genesis_config(
         "staking": StakingConfig {
             validator_count: initial_authorities.len() as u32,
             minimum_validator_count: 0,
-            invulnerables: vec![],
+            invulnerables: initial_authorities
+                .iter()
+                .map(|x| {x.0.clone()}).collect::<Vec<_>>(),
             slash_reward_fraction: Perbill::from_percent(10),
             stakers,
             ..Default::default()
diff --git a/pallets/staking/src/lib.rs b/pallets/staking/src/lib.rs
index 516e6a3e6..367f53031 100644
--- a/pallets/staking/src/lib.rs
+++ b/pallets/staking/src/lib.rs
@@ -849,6 +849,7 @@ pub mod pallet {
             let mut new_signers: Vec<Vec<u8>> = vec![];
             let mut count = 0u32;
             let mut remove_indicies_len = 0;
+            let mut removed_signers = vec![];
             // removes first signer and pushes new signer to back if total signers not increased
             if current_signers_length >= signers_info.total_signers as usize {
                 let mut remove_indicies = vec![];
@@ -859,6 +860,7 @@ pub mod pallet {
                     }
                 }
                 if remove_indicies.is_empty() {
+                    removed_signers.push(current_signers[0].clone());
                     current_signers.remove(0);
                 } else {
                     remove_indicies_len = remove_indicies.len();
@@ -876,6 +878,7 @@ pub mod pallet {
                 };
                 for remove_index in truncated {
+                    removed_signers.push(current_signers[*remove_index].clone());
                     current_signers.remove(*remove_index);
                 }
             }
@@ -887,7 +890,9 @@ pub mod pallet {
             let mut next_signer_up = &current_signers[0].clone();
             let mut index;
             // loops to find signer in validator that is not already signer
-            while current_signers.contains(next_signer_up) {
+            while current_signers.contains(next_signer_up)
+                || removed_signers.contains(next_signer_up)
+            {
                 index = randomness.next_u32() % validators.len() as u32;
                 next_signer_up = &validators[index as usize];
                 count += 1;
diff --git a/pallets/staking/src/tests.rs b/pallets/staking/src/tests.rs
index 4d971598f..f37c88244 100644
--- a/pallets/staking/src/tests.rs
+++ b/pallets/staking/src/tests.rs
@@ -573,6 +573,7 @@ fn it_tests_new_session_handler_signer_size_changes() {
     new_test_ext().execute_with(|| {
         // Start with current validators as 5 and 6 based off the Mock `GenesisConfig`.
        Signers::<Test>::put(vec![5, 6]);
+        System::set_block_number(100);
 
         assert_ok!(Staking::new_session_handler(&[6, 5, 3, 4]));
         // Signer size increased is reflected as 5 is not removed from vec
diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml
index 56a257e8a..54376b238 100644
--- a/runtime/Cargo.toml
+++ b/runtime/Cargo.toml
@@ -276,3 +276,5 @@ try-runtime=[
 # Enables real PCK certificate chain verification - which means TSS nodes must be runnning on TDX
 # hardware
 production=["pallet-attestation/production"]
+# Sets a shorter session duration for the entropy-tss test_reshare_e2e
+reshare-test=[]
diff --git a/runtime/src/constants.rs b/runtime/src/constants.rs
index 80b83aa68..3ae7fbe21 100644
--- a/runtime/src/constants.rs
+++ b/runtime/src/constants.rs
@@ -78,7 +78,10 @@ pub mod time {
 
     // NOTE: Currently it is not possible to change the epoch duration after the chain has started.
     // Attempting to do so will brick block production.
+    #[cfg(not(feature = "reshare-test"))]
     pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 4 * HOURS;
+    #[cfg(feature = "reshare-test")]
+    pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 15 / (SECS_PER_BLOCK as BlockNumber);
 
     pub const EPOCH_DURATION_IN_SLOTS: u64 = {
         const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64;
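Note on the shortened epoch (last hunk above): with the `reshare-test` feature enabled, `EPOCH_DURATION_IN_BLOCKS` becomes `15 / SECS_PER_BLOCK` instead of `4 * HOURS`. The sketch below only spells out that arithmetic; `SECS_PER_BLOCK = 6` is an assumed value (a common Substrate default), not taken from `runtime/src/constants.rs`.

```rust
// Sketch only: SECS_PER_BLOCK = 6 is an assumption, not the runtime's actual constant.
type BlockNumber = u32;
const SECS_PER_BLOCK: u64 = 6;

// Feature off: 4 * HOURS of blocks. Feature on: 15 / SECS_PER_BLOCK, which
// integer-divides to 2 blocks, so a session lasts only a few seconds and a
// reshare can be observed within the test's polling window.
const EPOCH_DURATION_IN_BLOCKS: BlockNumber = (15 / SECS_PER_BLOCK) as BlockNumber;

fn main() {
    println!(
        "test epoch = {} blocks (~{} s)",
        EPOCH_DURATION_IN_BLOCKS,
        EPOCH_DURATION_IN_BLOCKS as u64 * SECS_PER_BLOCK
    );
}
```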
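The new `test_reshare_e2e` repeats the same one-second polling loop twice while waiting for the on-chain signer set to change. A possible helper is shown below purely as an untested sketch, not as part of the diff; it assumes the `EntropyConfig`, `entropy`, and `query_chain` items already imported by `tests.rs`, plus subxt's `OnlineClient` and `LegacyRpcMethods`.

```rust
use std::collections::HashSet;

/// Sketch of a helper: poll the `signers` storage item once per second, for up to
/// `timeout_secs` seconds, until it differs from `old_signer_ids`.
async fn wait_for_new_signers(
    api: &OnlineClient<EntropyConfig>,
    rpc: &LegacyRpcMethods<EntropyConfig>,
    old_signer_ids: &HashSet<[u8; 32]>,
    timeout_secs: u64,
) -> Result<HashSet<[u8; 32]>, &'static str> {
    for _ in 0..timeout_secs {
        let signer_query = entropy::storage().staking_extension().signers();
        let signer_ids = query_chain(api, rpc, signer_query, None).await.unwrap().unwrap();
        let new_signer_ids: HashSet<[u8; 32]> =
            HashSet::from_iter(signer_ids.into_iter().map(|id| id.0));
        if &new_signer_ids != old_signer_ids {
            return Ok(new_signer_ids);
        }
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
    }
    Err("Timed out waiting for reshare")
}
```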