Merge branch 'fraccaman/improve-cli-check' (#1258)
* fraccaman/improve-cli-check:
  ledger: wait for node to sync before running cli commands
  ledger: change wait time strategy
  fixup: improve retry logic
  ledger: wait for node to sync before running cli commands
  Namada 0.14.3
  changelog: add missing changelogs
  fix tests for new epoch
  Added record to changelog.
  Now load conversions from storage even for epoch 1.
  DB prune function given an epoch
  prune older merkle tree stores
  fix: review comments
  fix comments
  retry CI
  fix: logic
  misc: refactor + clippy + fmt
  ledger: check if pk is valid validator in pre-genesis setup
  e2e/ledger_tests: fix run_ledger_load_state_and_reset in debug build
  [ci] wasm checksums update
  fix to rebuild merkle tree before read
  fix the unit test
  compare keys as string
  fix for the first height
  [ci] wasm checksums update
  add changelog
  write Merkle tree stores less often
tzemanovic committed Mar 30, 2023
2 parents 4f8bec0 + 6354951 commit 17044de
Showing 44 changed files with 847 additions and 153 deletions.
2 changes: 2 additions & 0 deletions .changelog/v0.14.3/bug-fixes/1140-check-pre-genesis-pk.md
@@ -0,0 +1,2 @@
- Check if validators are valid in pre-genesis setup.
([#1140](https://github.com/anoma/namada/pull/1140))
2 changes: 2 additions & 0 deletions .changelog/v0.14.3/bug-fixes/1244-conversion-loading-fix.md
@@ -0,0 +1,2 @@
- Now load conversions from storage even for epoch 1.
([\#1244](https://github.com/anoma/namada/pull/1244))
2 changes: 2 additions & 0 deletions .changelog/v0.14.3/improvements/1113-write-tree-stores.md
@@ -0,0 +1,2 @@
- Write Merkle tree stores only when a new epoch begins
([#1113](https://github.com/anoma/namada/issues/1113))
2 changes: 2 additions & 0 deletions .changelog/v0.14.3/improvements/1237-prune_tree_stores.md
@@ -0,0 +1,2 @@
- Prune old Merkle tree stores.
([#1237](https://github.com/anoma/namada/pull/1237))
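Together, the two improvements above, writing Merkle tree stores only when a new epoch begins and pruning old stores (see also the commit subjects "DB prune function given an epoch" and "prune older merkle tree stores"), are what bound the ledger's disk usage in this release. The prune function itself lives in the storage code among the 44 changed files and is not shown on this page; the snippet below is only a minimal sketch of the idea, with a plain in-memory map standing in for the database and invented names (`TreeStores`, `prune_tree_stores`):

```rust
use std::collections::BTreeMap;

/// Hypothetical stand-in for per-epoch Merkle tree stores, keyed by epoch.
type TreeStores = BTreeMap<u64, Vec<u8>>;

/// Drop every store whose epoch is strictly older than `oldest_to_keep`.
fn prune_tree_stores(stores: &mut TreeStores, oldest_to_keep: u64) {
    // `split_off` returns the entries with keys >= the cutoff;
    // everything older is dropped along with the old map.
    let kept = stores.split_off(&oldest_to_keep);
    *stores = kept;
}

fn main() {
    // Pretend a store was written for each of epochs 0..=4.
    let mut stores: TreeStores = (0..5).map(|e| (e, vec![0u8; 4])).collect();
    prune_tree_stores(&mut stores, 3);
    assert_eq!(stores.keys().copied().collect::<Vec<_>>(), vec![3u64, 4]);
}
```

In the real ledger the cutoff would presumably be derived from the current epoch rather than passed in directly; treat all of the above as an assumption, not as the code merged here.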
2 changes: 2 additions & 0 deletions .changelog/v0.14.3/summary.md
@@ -0,0 +1,2 @@
Namada 0.14.3 is a bugfix release addressing mainly disk usage
inefficiencies.
@@ -0,0 +1,2 @@
- Fixed run_ledger_load_state_and_reset test in debug build.
([#1131](https://github.com/anoma/namada/pull/1131))
24 changes: 24 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,29 @@
# CHANGELOG

## v0.14.3

Namada 0.14.3 is a bugfix release addressing mainly disk usage
inefficiencies.

### BUG FIXES

- Check if validators are valid in pre-genesis setup.
([#1140](https://github.com/anoma/namada/pull/1140))
- Now load conversions from storage even for epoch 1.
([\#1244](https://github.com/anoma/namada/pull/1244))

### IMPROVEMENTS

- Write Merkle tree stores only when a new epoch begins
([#1113](https://github.com/anoma/namada/issues/1113))
- Prune old Merkle tree stores.
([#1237](https://github.com/anoma/namada/pull/1237))

### TESTING

- Fixed run_ledger_load_state_and_reset test in debug build.
([#1131](https://github.com/anoma/namada/pull/1131))

## v0.14.2

Namada 0.14.2 is a maintenance release addressing issues with
22 changes: 11 additions & 11 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion apps/Cargo.toml
@@ -6,7 +6,7 @@ license = "GPL-3.0"
name = "namada_apps"
readme = "../README.md"
resolver = "2"
version = "0.14.2"
version = "0.14.3"
default-run = "namada"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
96 changes: 95 additions & 1 deletion apps/src/bin/namada-client/cli.rs
@@ -1,9 +1,15 @@
//! Namada client CLI.
use std::time::Duration;

use color_eyre::eyre::Result;
use namada_apps::cli;
use namada_apps::cli::cmds::*;
use namada_apps::cli::{self, safe_exit};
use namada_apps::client::{rpc, tx, utils};
use namada_apps::facade::tendermint::block::Height;
use namada_apps::facade::tendermint_config::net::Address as TendermintAddress;
use namada_apps::facade::tendermint_rpc::{Client, HttpClient};
use tokio::time::sleep;

pub async fn main() -> Result<()> {
match cli::namada_client_cli()? {
@@ -13,86 +19,126 @@ pub async fn main() -> Result<()> {
match cmd {
// Ledger cmds
Sub::TxCustom(TxCustom(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_custom(ctx, args).await;
}
Sub::TxTransfer(TxTransfer(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_transfer(ctx, args).await;
}
Sub::TxIbcTransfer(TxIbcTransfer(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_ibc_transfer(ctx, args).await;
}
Sub::TxUpdateVp(TxUpdateVp(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_update_vp(ctx, args).await;
}
Sub::TxInitAccount(TxInitAccount(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_init_account(ctx, args).await;
}
Sub::TxInitValidator(TxInitValidator(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_init_validator(ctx, args).await;
}
Sub::TxInitProposal(TxInitProposal(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_init_proposal(ctx, args).await;
}
Sub::TxVoteProposal(TxVoteProposal(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_vote_proposal(ctx, args).await;
}
Sub::TxRevealPk(TxRevealPk(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_reveal_pk(ctx, args).await;
}
Sub::Bond(Bond(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_bond(ctx, args).await;
}
Sub::Unbond(Unbond(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_unbond(ctx, args).await;
}
Sub::Withdraw(Withdraw(args)) => {
wait_until_node_is_synched(&args.tx.ledger_address).await;
tx::submit_withdraw(ctx, args).await;
}
// Ledger queries
Sub::QueryEpoch(QueryEpoch(args)) => {
wait_until_node_is_synched(&args.ledger_address).await;
rpc::query_and_print_epoch(args).await;
}
Sub::QueryTransfers(QueryTransfers(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_transfers(ctx, args).await;
}
Sub::QueryConversions(QueryConversions(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_conversions(ctx, args).await;
}
Sub::QueryBlock(QueryBlock(args)) => {
wait_until_node_is_synched(&args.ledger_address).await;
rpc::query_block(args).await;
}
Sub::QueryBalance(QueryBalance(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_balance(ctx, args).await;
}
Sub::QueryBonds(QueryBonds(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_bonds(ctx, args).await.unwrap();
}
Sub::QueryBondedStake(QueryBondedStake(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_bonded_stake(ctx, args).await;
}
Sub::QueryCommissionRate(QueryCommissionRate(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_and_print_commission_rate(ctx, args).await;
}
Sub::QuerySlashes(QuerySlashes(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_slashes(ctx, args).await;
}
Sub::QueryDelegations(QueryDelegations(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_delegations(ctx, args).await;
}
Sub::QueryResult(QueryResult(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_result(ctx, args).await;
}
Sub::QueryRawBytes(QueryRawBytes(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_raw_bytes(ctx, args).await;
}

Sub::QueryProposal(QueryProposal(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_proposal(ctx, args).await;
}
Sub::QueryProposalResult(QueryProposalResult(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_proposal_result(ctx, args).await;
}
Sub::QueryProtocolParameters(QueryProtocolParameters(args)) => {
wait_until_node_is_synched(&args.query.ledger_address)
.await;
rpc::query_protocol_parameters(ctx, args).await;
}
}
@@ -115,3 +161,51 @@ pub async fn main() -> Result<()> {
}
Ok(())
}

/// Wait for a first block and node to be synced. Will attempt to
/// retry with an increasing delay until `MAX_TRIES` is exceeded,
/// then exit.
async fn wait_until_node_is_synched(ledger_address: &TendermintAddress) {
let client = HttpClient::new(ledger_address.clone()).unwrap();
let height_one = Height::try_from(1_u64).unwrap();
let mut try_count = 0_u64;
const MAX_TRIES: u64 = 5;

loop {
let node_status = client.status().await;
match node_status {
Ok(status) => {
let latest_block_height = status.sync_info.latest_block_height;
let is_catching_up = status.sync_info.catching_up;
let is_at_least_height_one = latest_block_height >= height_one;
if is_at_least_height_one && !is_catching_up {
return;
} else {
if try_count > MAX_TRIES {
println!(
"Node is still catching up, wait for it to finish \
syncing."
);
safe_exit(1)
} else {
println!(
" Waiting for {} ({}/{} tries)...",
if is_at_least_height_one {
"a first block"
} else {
"node to sync"
},
try_count + 1,
MAX_TRIES
);
sleep(Duration::from_secs((try_count + 1).pow(2)))
.await;
}
try_count += 1;
}
}
Err(e) => {
eprintln!("Failed to query node status with error: {}", e);
safe_exit(1)
}
}
}
}
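For reference, the retry delay in `wait_until_node_is_synched` grows quadratically with the attempt number: `(try_count + 1).pow(2)` seconds, i.e. 1, 4, 9, 16, 25 and 36 seconds over the six attempts before the client gives up and calls `safe_exit(1)`, while a failed status query exits immediately without retrying. The snippet below is illustration only (not part of this commit) and simply reproduces that schedule:

```rust
use std::time::Duration;

// Illustration only: reproduce the backoff schedule used by
// `wait_until_node_is_synched`, (try_count + 1)^2 seconds per retry.
fn main() {
    const MAX_TRIES: u64 = 5;
    let delays: Vec<Duration> = (0..=MAX_TRIES)
        .map(|try_count| Duration::from_secs((try_count + 1).pow(2)))
        .collect();
    // Prints 1s, 4s, 9s, 16s, 25s, 36s: roughly 91 seconds of waiting
    // in total before the client prints an error and exits.
    for (attempt, delay) in delays.iter().enumerate() {
        println!("attempt {}: sleep {:?}", attempt + 1, delay);
    }
}
```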