Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

merge queue: embarking main (6c5c61b), #4062, #4019 and #4036 together #4066

Closed
wants to merge 17 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
- Validate validator metadata from on-chain validator creation and metadata
changes. ([\#4036](https://github.com/anoma/namada/pull/4036))
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
- The speculative shielded context now avoids updating its
state if the transaction failed. Added a test for it.
([\#4019](https://github.com/anoma/namada/pull/4019))
49 changes: 35 additions & 14 deletions .mergify.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,37 +2,54 @@ merge_queue:
max_parallel_checks: 3

queue_rules:
- name: duplicated main-queue from automatic merge to main or backport branch
batch_size: 3
queue_conditions:
- "#approved-reviews-by >= 1"
- base = main
- label = "merge"
- label != "do-not-merge"
- "#approved-reviews-by >= 1"
- &id001
or:
- base = main
- base = maint-0.45
merge_conditions: []
merge_method: merge
autosquash: true
- name: main-queue
batch_size: 3
queue_conditions:
- "#approved-reviews-by >= 1"
- base = main
merge_method: merge

- name: duplicated backport-0.45-queue from automatic merge to main or backport
branch
batch_size: 3
queue_conditions:
- "#approved-reviews-by >= 1"
- base = maint-0.45
- label = "merge"
- label != "do-not-merge"
- "#approved-reviews-by >= 1"
- *id001
merge_conditions: []
merge_method: merge
autosquash: true
- name: backport-0.45-queue
batch_size: 3
queue_conditions:
- "#approved-reviews-by >= 1"
- base = maint-0.45
merge_method: merge

pull_request_rules:
- name: automatic merge to main or backport branch
conditions:
- label = "merge"
- label != "do-not-merge"
- "#approved-reviews-by >= 1"
- or:
- base = main
- base = maint-0.45
actions:
queue:
autosquash: true

pull_request_rules:
- name: notify when a PR is removed from the queue
conditions:
- queue-dequeue-reason != none
- queue-dequeue-reason != pr-merged
- queue-dequeue-reason != none
- queue-dequeue-reason != pr-merged
actions:
comment:
message: >
Expand All @@ -49,3 +66,7 @@ pull_request_rules:
backport:
branches:
- "maint-0.45"
- name: refactored queue action rule
conditions: []
actions:
queue:
10 changes: 10 additions & 0 deletions crates/apps_lib/src/client/rpc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ use std::collections::{BTreeMap, BTreeSet};
use std::io;

use borsh::BorshDeserialize;
use color_eyre::owo_colors::OwoColorize;
use data_encoding::HEXLOWER;
use either::Either;
use masp_primitives::asset_type::AssetType;
Expand Down Expand Up @@ -395,6 +396,15 @@ async fn query_shielded_balance(
context: &impl Namada,
args: args::QueryBalance,
) {
display_line!(
context.io(),
"{}: {}\n",
"WARNING".bold().underline().yellow(),
"The resulting balance could be outdated, make sure to run `namadac \
shielded-sync` before querying the balance to get the most recent \
value."
);

let args::QueryBalance {
// Token owner (needs to be a viewing key)
owner,
Expand Down
103 changes: 96 additions & 7 deletions crates/apps_lib/src/client/tx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@ use std::io::Write;

use borsh::BorshDeserialize;
use borsh_ext::BorshSerializeExt;
use color_eyre::owo_colors::OwoColorize;
use ledger_namada_rs::{BIP44Path, NamadaApp};
use namada_core::masp::MaspTransaction;
use namada_sdk::address::{Address, ImplicitAddress};
use namada_sdk::args::TxBecomeValidator;
use namada_sdk::collections::HashSet;
Expand Down Expand Up @@ -637,11 +639,10 @@ pub async fn submit_become_validator(
.is_applied_and_valid(wrapper_hash.as_ref(), &cmt)
.is_none()
{
display_line!(
namada.io(),
return Err(error::Error::Tx(error::TxSubmitError::Other(
"Transaction failed. No key or addresses have been saved."
);
safe_exit(1)
.to_string(),
)));
}

// add validator address and keys to the wallet
Expand Down Expand Up @@ -829,13 +830,33 @@ pub async fn submit_shielded_transfer(
namada: &impl Namada,
args: args::TxShieldedTransfer,
) -> Result<(), error::Error> {
display_line!(
namada.io(),
"{}: {}\n",
"WARNING".bold().underline().yellow(),
"Some information might be leaked if your shielded wallet is not up \
to date, make sure to run `namadac shielded-sync` before running \
this command.",
);

let (mut tx, signing_data) = args.clone().build(namada).await?;

let masp_section = tx
.sections
.iter()
.find_map(|section| section.masp_tx())
.ok_or_else(|| {
error::Error::Other(
"Missing MASP section in shielded transaction".to_string(),
)
})?;
if args.tx.dump_tx || args.tx.dump_wrapper_tx {
tx::dump_tx(namada.io(), &args.tx, tx)?;
pre_cache_masp_data(namada, &masp_section).await;
} else {
sign(namada, &mut tx, &args.tx, signing_data).await?;
namada.submit(tx, &args.tx).await?;
let res = namada.submit(tx, &args.tx).await?;
pre_cache_masp_data_on_tx_result(namada, &res, &masp_section).await;
}
Ok(())
}
Expand Down Expand Up @@ -900,13 +921,33 @@ pub async fn submit_unshielding_transfer(
namada: &impl Namada,
args: args::TxUnshieldingTransfer,
) -> Result<(), error::Error> {
display_line!(
namada.io(),
"{}: {}\n",
"WARNING".bold().underline().yellow(),
"Some information might be leaked if your shielded wallet is not up \
to date, make sure to run `namadac shielded-sync` before running \
this command.",
);

let (mut tx, signing_data) = args.clone().build(namada).await?;

let masp_section = tx
.sections
.iter()
.find_map(|section| section.masp_tx())
.ok_or_else(|| {
error::Error::Other(
"Missing MASP section in shielded transaction".to_string(),
)
})?;
if args.tx.dump_tx || args.tx.dump_wrapper_tx {
tx::dump_tx(namada.io(), &args.tx, tx)?;
pre_cache_masp_data(namada, &masp_section).await;
} else {
sign(namada, &mut tx, &args.tx, signing_data).await?;
namada.submit(tx, &args.tx).await?;
let res = namada.submit(tx, &args.tx).await?;
pre_cache_masp_data_on_tx_result(namada, &res, &masp_section).await;
}
Ok(())
}
Expand All @@ -920,16 +961,25 @@ where
{
let (tx, signing_data, _) = args.build(namada).await?;

let opt_masp_section =
tx.sections.iter().find_map(|section| section.masp_tx());
if args.tx.dump_tx || args.tx.dump_wrapper_tx {
tx::dump_tx(namada.io(), &args.tx, tx)?;
if let Some(masp_section) = opt_masp_section {
pre_cache_masp_data(namada, &masp_section).await;
}
} else {
batch_opt_reveal_pk_and_submit(
let res = batch_opt_reveal_pk_and_submit(
namada,
&args.tx,
&[&args.source.effective_address()],
(tx, signing_data),
)
.await?;

if let Some(masp_section) = opt_masp_section {
pre_cache_masp_data_on_tx_result(namada, &res, &masp_section).await;
}
}
// NOTE that the tx could fail when its submission epoch doesn't match
// construction epoch
Expand Down Expand Up @@ -1482,3 +1532,42 @@ pub async fn gen_ibc_shielding_transfer(
}
Ok(())
}

// Best-effort pre-caching of the given MASP transaction into the shielded
// context. Any failure is reported to the user on stderr and deliberately
// swallowed: caching must never fail the command that triggered it.
async fn pre_cache_masp_data(namada: &impl Namada, masp_tx: &MaspTransaction) {
    let mut shielded_ctx = namada.shielded_mut().await;
    match shielded_ctx.pre_cache_transaction(masp_tx).await {
        Ok(_) => {}
        Err(err) => {
            // Surface the error but do not propagate it to the caller
            edisplay_line!(
                namada.io(),
                "Failed to pre-cache masp data: {}.",
                err
            );
        }
    }
}

// Inspect the outcome of a submitted transaction and pre-cache its MASP
// data only when the transaction (or its broadcast) can still take effect.
async fn pre_cache_masp_data_on_tx_result(
    namada: &impl Namada,
    tx_result: &ProcessTxResponse,
    masp_tx: &MaspTransaction,
) {
    let should_cache = match tx_result {
        ProcessTxResponse::Applied(resp) => {
            // If we have the masp data in an ibc transfer it
            // means we are unshielding, so there's no reveal pk
            // tx in the batch which contains only the ibc tx
            matches!(
                resp.batch_result().first().map(|(_, inner_res)| inner_res),
                Some(InnerTxResult::Success(_))
            )
        }
        // Broadcast-only responses give no inner result; cache optimistically
        ProcessTxResponse::Broadcast(_) => true,
        // Do not pre-cache when dry-running
        ProcessTxResponse::DryRun(_) => false,
    };

    if should_cache {
        pre_cache_masp_data(namada, masp_tx).await;
    }
}
55 changes: 4 additions & 51 deletions crates/apps_lib/src/config/genesis/transactions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ use namada_sdk::collections::HashSet;
use namada_sdk::dec::Dec;
use namada_sdk::key::common::PublicKey;
use namada_sdk::key::{common, ed25519, RefTo, SerializeWithBorsh, SigScheme};
use namada_sdk::proof_of_stake::parameters::MAX_VALIDATOR_METADATA_LEN;
use namada_sdk::proof_of_stake::types::ValidatorMetaData;
use namada_sdk::signing::{sign_tx, SigningTxData};
use namada_sdk::string_encoding::StringEncoded;
Expand Down Expand Up @@ -1385,60 +1384,14 @@ pub fn validate_validator_account(

// Check that the validator metadata is not too large
let metadata = &signed_tx.data.metadata;
if metadata.email.len() as u64 > MAX_VALIDATOR_METADATA_LEN {
let errors = metadata.validate();
if !errors.is_empty() {
panic!(
"The email metadata of the validator with address {} is too long, \
must be within {MAX_VALIDATOR_METADATA_LEN} characters",
"Metadata of the validator with address {} are invalid: \
{errors:#?}",
signed_tx.data.address
);
}
if let Some(description) = metadata.description.as_ref() {
if description.len() as u64 > MAX_VALIDATOR_METADATA_LEN {
panic!(
"The description metadata of the validator with address {} is \
too long, must be within {MAX_VALIDATOR_METADATA_LEN} \
characters",
signed_tx.data.address
);
}
}
if let Some(website) = metadata.website.as_ref() {
if website.len() as u64 > MAX_VALIDATOR_METADATA_LEN {
panic!(
"The website metadata of the validator with address {} is too \
long, must be within {MAX_VALIDATOR_METADATA_LEN} characters",
signed_tx.data.address
);
}
}
if let Some(discord_handle) = metadata.discord_handle.as_ref() {
if discord_handle.len() as u64 > MAX_VALIDATOR_METADATA_LEN {
panic!(
"The discord handle metadata of the validator with address {} \
is too long, must be within {MAX_VALIDATOR_METADATA_LEN} \
characters",
signed_tx.data.address
);
}
}
if let Some(avatar) = metadata.avatar.as_ref() {
if avatar.len() as u64 > MAX_VALIDATOR_METADATA_LEN {
panic!(
"The avatar metadata of the validator with address {} is too \
long, must be within {MAX_VALIDATOR_METADATA_LEN} characters",
signed_tx.data.address
);
}
}
if let Some(name) = metadata.name.as_ref() {
if name.len() as u64 > MAX_VALIDATOR_METADATA_LEN {
panic!(
"The name metadata of the validator with address {} is too \
long, must be within {MAX_VALIDATOR_METADATA_LEN} characters",
signed_tx.data.address
);
}
}

// Check signature
let mut is_valid = {
Expand Down
1 change: 0 additions & 1 deletion crates/node/src/bench_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1223,7 +1223,6 @@ impl BenchShieldedCtx {
vec![masp_transfer_data],
None,
expiration,
true,
)
.await
})
Expand Down
11 changes: 11 additions & 0 deletions crates/proof_of_stake/src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ use namada_core::chain::Epoch;
use namada_core::dec::Dec;
use thiserror::Error;

use crate::parameters::MAX_VALIDATOR_METADATA_LEN;
use crate::types::ValidatorState;
use crate::{rewards, Error};

Expand Down Expand Up @@ -165,6 +166,16 @@ pub enum ConsensusKeyChangeError {
MustBeEd25519,
}

/// Errors raised when validating a validator's metadata fields.
#[derive(Error, Debug)]
pub enum ValidatorMetaDataError {
    /// A metadata field exceeds the maximum allowed length
    /// ([`MAX_VALIDATOR_METADATA_LEN`]); the inner string names the
    /// offending field (e.g. "email", "description").
    #[error(
        "The {0} metadata is too long, must be within \
         {MAX_VALIDATOR_METADATA_LEN} characters"
    )]
    FieldTooLong(&'static str),
}

impl From<BecomeValidatorError> for Error {
fn from(err: BecomeValidatorError) -> Self {
Self::new(err)
Expand Down
Loading