Fix typos
GoodDaisy committed Dec 17, 2023
1 parent 8f848b7 commit 2579f20
Showing 51 changed files with 77 additions and 77 deletions.
2 changes: 1 addition & 1 deletion accounts-db/src/account_info.rs
@@ -76,7 +76,7 @@ const CACHED_OFFSET: OffsetReduced = (1 << (OffsetReduced::BITS - 1)) - 1;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
pub struct PackedOffsetAndFlags {
- /// this provides 2^31 bits, which when multipled by 8 (sizeof(u64)) = 16G, which is the maximum size of an append vec
+ /// this provides 2^31 bits, which when multiplied by 8 (sizeof(u64)) = 16G, which is the maximum size of an append vec
offset_reduced: B31,
/// use 1 bit to specify that the entry is zero lamport
is_zero_lamport: bool,
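
Aside: the doc comment fixed above encodes a small capacity calculation. A minimal, self-contained sketch of the arithmetic (hypothetical code, not the crate's actual types):

    fn main() {
        // 31 bits of reduced offset, each unit covering 8 bytes (sizeof(u64)),
        // gives 2^31 * 8 bytes = 16 GiB, the stated maximum append-vec size.
        let max_bytes: u64 = (1u64 << 31) * 8;
        assert_eq!(max_bytes, 16 * 1024 * 1024 * 1024);
    }
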
2 changes: 1 addition & 1 deletion accounts-db/src/accounts.rs
@@ -2243,7 +2243,7 @@ mod tests {
);

/* This test assumes pubkey0 < pubkey1 < pubkey2.
- * But the keys created with new_unique() does not gurantee this
+ * But the keys created with new_unique() does not guarantee this
* order because of the endianness. new_unique() calls add 1 at each
* key generaration as the little endian integer. A pubkey stores its
* value in a 32-byte array bytes, and its eq-partial trait considers
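
Aside: the endianness caveat in this comment is easy to demonstrate. A self-contained sketch, with u32 keys standing in for 32-byte pubkeys (illustrative only):

    fn main() {
        // Keys bumped as little-endian integers put the fastest-changing byte
        // first, but byte arrays compare lexicographically from the first byte.
        let a = 1u32.to_le_bytes();   // [1, 0, 0, 0]
        let b = 256u32.to_le_bytes(); // [0, 1, 0, 0]
        assert!(b < a); // 256 > 1 numerically, yet b sorts before a byte-wise
    }
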
2 changes: 1 addition & 1 deletion accounts-db/src/accounts_hash.rs
@@ -1230,7 +1230,7 @@ pub enum ZeroLamportAccounts {
pub struct AccountHash(pub Hash);

// Ensure the newtype wrapper never changes size from the underlying Hash
- // This also ensures there are no padding bytes, which is requried to safely implement Pod
+ // This also ensures there are no padding bytes, which is required to safely implement Pod
const _: () = assert!(std::mem::size_of::<AccountHash>() == std::mem::size_of::<Hash>());

/// Hash of accounts
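
Aside: the const assert in this hunk is a compile-time guard for the Pod requirement the comment mentions. A standalone illustration of how padding changes a struct's size (hypothetical type, not from the crate):

    #[repr(C)]
    #[allow(dead_code)]
    struct Padded {
        a: u8,
        b: u64,
    }

    fn main() {
        // 7 padding bytes follow `a` so that `b` is 8-byte aligned: 16 bytes, not 9.
        // Uninitialized padding like this is what makes a type unsafe for Pod.
        assert_eq!(std::mem::size_of::<Padded>(), 16);
    }
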
2 changes: 1 addition & 1 deletion accounts-db/src/tiered_storage/byte_block.rs
@@ -65,7 +65,7 @@ impl ByteBlockWriter {

/// Write all the Some fields of the specified AccountMetaOptionalFields.
///
- /// Note that the existance of each optional field is stored separately in
+ /// Note that the existence of each optional field is stored separately in
/// AccountMetaFlags.
pub fn write_optional_fields(
&mut self,
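
Aside: the fixed doc comment describes a presence-flags layout, where each optional field's existence is a flag bit and only present values occupy bytes. A rough sketch of that pattern under hypothetical names (the real AccountMetaFlags layout lives in the crate):

    fn write_optional(rent_epoch: Option<u64>, flags: &mut u32, block: &mut Vec<u8>) {
        if let Some(epoch) = rent_epoch {
            *flags |= 1; // existence recorded in a flag bit...
            block.extend_from_slice(&epoch.to_le_bytes()); // ...value in the byte block
        } // a None field costs no bytes in the block itself
    }

    fn main() {
        let (mut flags, mut block) = (0u32, Vec::new());
        write_optional(Some(42), &mut flags, &mut block);
        assert_eq!((flags, block.len()), (1, 8));
    }
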
2 changes: 1 addition & 1 deletion bench-tps/src/bench.rs
@@ -238,7 +238,7 @@ where
// Move on to next chunk
self.chunk_index = (self.chunk_index + 1) % self.account_chunks.source.len();

- // Switch directions after transfering for each "chunk"
+ // Switch directions after transferring for each "chunk"
if self.chunk_index == 0 {
self.reclaim_lamports_back_to_source_account =
!self.reclaim_lamports_back_to_source_account;
2 changes: 1 addition & 1 deletion bucket_map/src/restart.rs
@@ -79,7 +79,7 @@ impl RestartableBucket {
bucket.random = random;
}
}
- /// retreive the file_name and random that were used prior to the current restart.
+ /// retrieve the file_name and random that were used prior to the current restart.
/// This was written into the restart file on the prior run by `set_file`.
pub(crate) fn get(&self) -> Option<(u128, u64)> {
self.restart.as_ref().map(|restart| {
4 changes: 2 additions & 2 deletions cli/src/cluster_query.rs
@@ -755,7 +755,7 @@ pub fn process_catchup(
if node_json_rpc_url.is_some() && node_json_rpc_url != gussed_default {
// go to new line to leave this message on console
println!(
"Prefering explicitly given rpc ({}) as us, although --our-localhost is given\n",
"Preferring explicitly given rpc ({}) as us, although --our-localhost is given\n",
node_json_rpc_url.as_ref().unwrap()
);
} else {
@@ -771,7 +771,7 @@ pub fn process_catchup(
(if node_pubkey.is_some() && node_pubkey != guessed_default {
// go to new line to leave this message on console
println!(
"Prefering explicitly given node pubkey ({}) as us, although --our-localhost \
"Preferring explicitly given node pubkey ({}) as us, although --our-localhost \
is given\n",
node_pubkey.unwrap()
);
2 changes: 1 addition & 1 deletion cli/src/spend_utils.rs
@@ -161,7 +161,7 @@ where
dummy_message.recent_blockhash = *blockhash;
get_fee_for_messages(rpc_client, &[&dummy_message])?
}
- None => 0, // Offline, cannot calulate fee
+ None => 0, // Offline, cannot calculate fee
};

match amount {
2 changes: 1 addition & 1 deletion client/src/connection_cache.rs
@@ -76,7 +76,7 @@ impl ConnectionCache {
Self::new_with_client_options(name, connection_pool_size, None, None, None)
}

- /// Create a quic conneciton_cache with more client options
+ /// Create a quic connection_cache with more client options
pub fn new_with_client_options(
name: &'static str,
connection_pool_size: usize,
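
Aside: the convenience wrapper above forwards three `None`s to new_with_client_options. A usage sketch grounded only in what this diff shows; the types and meanings of the trailing options are not visible here and are assumptions:

    use solana_client::connection_cache::ConnectionCache;

    fn main() {
        // Explicit name and pool size, with the client options left as None,
        // mirroring the wrapper call shown in the hunk.
        let _cache = ConnectionCache::new_with_client_options("my-client", 4, None, None, None);
    }
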
2 changes: 1 addition & 1 deletion core/src/banking_stage/latest_unprocessed_votes.rs
@@ -26,7 +26,7 @@ pub enum VoteSource {
Tpu,
}

- /// Holds deserialized vote messages as well as their source, foward status and slot
+ /// Holds deserialized vote messages as well as their source, forward status and slot
#[derive(Debug, Clone)]
pub struct LatestValidatorVotePacket {
vote_source: VoteSource,
4 changes: 2 additions & 2 deletions core/src/banking_stage/leader_slot_metrics.rs
@@ -47,7 +47,7 @@ pub(crate) struct ProcessTransactionsSummary {
// Total amount of time spent running the cost model
pub cost_model_us: u64,

- // Breakdown of time spent executing and comitting transactions
+ // Breakdown of time spent executing and committing transactions
pub execute_and_commit_timings: LeaderExecuteAndCommitTimings,

// Breakdown of all the transaction errors from transactions passed for execution
@@ -104,7 +104,7 @@ struct LeaderSlotPacketCountMetrics {

// total number of transactions that were executed, but failed to be committed into the Poh stream because
// the block ended. Some of these may be already counted in `nonretryable_errored_transactions_count` if they
- // then hit the age limit after failing to be comitted.
+ // then hit the age limit after failing to be committed.
executed_transactions_failed_commit_count: u64,

// total number of transactions that were excluded from the block because there were concurrent write locks active.
4 changes: 2 additions & 2 deletions core/src/consensus/heaviest_subtree_fork_choice.rs
@@ -585,7 +585,7 @@ impl HeaviestSubtreeForkChoice {
let mut update_operations: UpdateOperations = BTreeMap::new();
// Insert aggregate operations up to the root
self.insert_aggregate_operations(&mut update_operations, *slot_hash_key);
- // Remove child link so that this slot cannot be choosen as best or deepest
+ // Remove child link so that this slot cannot be chosen as best or deepest
assert!(self
.fork_infos
.get_mut(&parent)
@@ -1308,7 +1308,7 @@ impl ForkChoice for HeaviestSubtreeForkChoice {
// be for a slot that we currently do not have in our bank forks, so we
// return None.
//
- // We are guarenteed that we will eventually repair a duplicate confirmed version
+ // We are guaranteed that we will eventually repair a duplicate confirmed version
// of this slot because the state machine will never dump a slot unless it has
// observed a duplicate confirmed version of the slot.
//
2 changes: 1 addition & 1 deletion core/src/repair/duplicate_repair_status.rs
@@ -1123,7 +1123,7 @@ pub mod tests {
let request_slot = 100;
let mut test_setup = setup_add_response_test_pruned(request_slot, 10);

- // Insert all the correct ancestory
+ // Insert all the correct ancestry
let tree = test_setup
.correct_ancestors_response
.iter()
10 changes: 5 additions & 5 deletions core/src/repair/repair_weight.rs
@@ -338,7 +338,7 @@ impl RepairWeight {
}
Some(TreeRoot::PrunedRoot(subtree_root)) => {
// Even if these orphaned slots were previously pruned, they should be added back to
- // `self.trees` as we are no longer sure of their ancestory.
+ // `self.trees` as we are no longer sure of their ancestry.
// After they are repaired there is a chance that they are now part of the rooted path.
// This is possible for a duplicate slot with multiple ancestors, if the
// version we had pruned before had the wrong ancestor, and the correct version is
@@ -892,7 +892,7 @@ impl RepairWeight {
);
}

- /// Finds any ancestors avaiable from `blockstore` for `slot`.
+ /// Finds any ancestors available from `blockstore` for `slot`.
/// Ancestor search is stopped when finding one that chains to any
/// tree in `self.trees` or `self.pruned_trees` or if the ancestor is < self.root.
///
@@ -2201,21 +2201,21 @@ mod test {
let (blockstore, _, mut repair_weight) = setup_orphan_repair_weight();

// Ancestor of slot 4 is slot 2, with an existing subtree rooted at 0
- // because there wass a vote for a descendant
+ // because there was a vote for a descendant
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 4),
(VecDeque::from([2]), Some(TreeRoot::Root(0)))
);

// Ancestors of 5 are [1, 3], with an existing subtree rooted at 0
- // because there wass a vote for a descendant
+ // because there was a vote for a descendant
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 5),
(VecDeque::from([1, 3]), Some(TreeRoot::Root(0)))
);

// Ancestors of slot 23 are [20, 22], with an existing subtree of 20
- // because there wass a vote for 20
+ // because there was a vote for 20
assert_eq!(
repair_weight.find_ancestor_subtree_of_slot(&blockstore, 23),
(VecDeque::from([20, 22]), Some(TreeRoot::Root(20)))
2 changes: 1 addition & 1 deletion core/src/repair/serve_repair.rs
@@ -965,7 +965,7 @@ impl ServeRepair {
stats.dropped_requests_outbound_bandwidth += 1;
continue;
}
- // Bypass ping/pong check for requests comming from QUIC endpoint.
+ // Bypass ping/pong check for requests coming from QUIC endpoint.
if !matches!(&request, RepairProtocol::Pong(_)) && response_sender.is_none() {
let (check, ping_pkt) =
Self::check_ping_cache(ping_cache, &request, &from_addr, &identity_keypair);
2 changes: 1 addition & 1 deletion core/tests/snapshots.rs
@@ -73,7 +73,7 @@ struct SnapshotTestConfig {
full_snapshot_archives_dir: TempDir,
bank_snapshots_dir: TempDir,
accounts_dir: PathBuf,
- // as the underscore prefix indicates, this isn't explictly used; but it's needed to keep
+ // as the underscore prefix indicates, this isn't explicitly used; but it's needed to keep
// TempDir::drop from running to retain that dir for the duration of test
_accounts_tmp_dir: TempDir,
}
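
Aside: the underscore-prefix idiom in this comment is load-bearing. A small demonstration of why a named `_accounts_tmp_dir` binding keeps its TempDir alive while a bare `_` would not (illustrative types, not the crate's):

    struct Guard(&'static str);
    impl Drop for Guard {
        fn drop(&mut self) {
            println!("{} dropped", self.0);
        }
    }

    fn main() {
        let _ = Guard("wildcard");        // `_` binds nothing: dropped immediately
        let _kept = Guard("underscored"); // named binding lives to end of scope
        println!("end of main");          // prints before "underscored dropped"
    }
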
4 changes: 2 additions & 2 deletions gossip/src/cluster_info.rs
@@ -1218,7 +1218,7 @@ impl ClusterInfo {
}

/// Returns epoch-slots inserted since the given cursor.
- /// Excludes entries from nodes with unkown or different shred version.
+ /// Excludes entries from nodes with unknown or different shred version.
pub fn get_epoch_slots(&self, cursor: &mut Cursor) -> Vec<EpochSlots> {
let self_shred_version = Some(self.my_shred_version());
let gossip_crds = self.gossip.crds.read().unwrap();
@@ -1752,7 +1752,7 @@ impl ClusterInfo {
match gossip_crds.trim(cap, &keep, stakes, timestamp()) {
Err(err) => {
self.stats.trim_crds_table_failed.add_relaxed(1);
- // TODO: Stakes are comming from the root-bank. Debug why/when
+ // TODO: Stakes are coming from the root-bank. Debug why/when
// they are empty/zero.
debug!("crds table trim failed: {:?}", err);
}
2 changes: 1 addition & 1 deletion gossip/src/contact_info.rs
@@ -350,7 +350,7 @@ impl ContactInfo {
}

// Removes the IP address at the given index if
- // no socket entry refrences that index.
+ // no socket entry references that index.
fn maybe_remove_addr(&mut self, index: u8) {
if !self.sockets.iter().any(|entry| entry.index == index) {
self.addrs.remove(usize::from(index));
2 changes: 1 addition & 1 deletion gossip/src/crds_value.rs
@@ -1066,7 +1066,7 @@ mod test {
assert!(!other.check_duplicate(&node_crds));
assert_eq!(node.overrides(&other_crds), None);
assert_eq!(other.overrides(&node_crds), None);
- // Differnt crds value is not a duplicate.
+ // Different crds value is not a duplicate.
let other = LegacyContactInfo::new_rand(&mut rng, Some(pubkey));
let other = CrdsValue::new_unsigned(CrdsData::LegacyContactInfo(other));
assert!(!node.check_duplicate(&other));
8 changes: 4 additions & 4 deletions ledger/src/blockstore.rs
@@ -6341,7 +6341,7 @@ pub mod tests {
assert_eq!(
blockstore.find_missing_data_indexes(
slot,
- 0, // first_timestmap
+ 0, // first_timestamp
0, // defer_threshold_ticks
0, // start_index
gap - 1, // end_index
@@ -6352,7 +6352,7 @@
assert_eq!(
blockstore.find_missing_data_indexes(
slot,
- 0, // first_timestmap
+ 0, // first_timestamp
0, // defer_threshold_ticks
gap - 2, // start_index
gap, // end_index
@@ -9951,7 +9951,7 @@ pub mod tests {
}

#[test]
- fn test_rewards_protobuf_backward_compatability() {
+ fn test_rewards_protobuf_backward_compatibility() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();

@@ -9994,7 +9994,7 @@ pub mod tests {
// ledger archives, but typically those require contemporaraneous software for other reasons.
// However, we are persisting the test since the apis still exist in `blockstore_db`.
#[test]
- fn test_transaction_status_protobuf_backward_compatability() {
+ fn test_transaction_status_protobuf_backward_compatibility() {
let ledger_path = get_tmp_ledger_path_auto_delete!();
let blockstore = Blockstore::open(ledger_path.path()).unwrap();

2 changes: 1 addition & 1 deletion ledger/src/blockstore_cleanup_service.rs
@@ -303,7 +303,7 @@ mod tests {
fn flush_blockstore_contents_to_disk(blockstore: Blockstore) -> Blockstore {
// The find_slots_to_clean() routine uses a method that queries data
// from RocksDB SST files. On a running validator, these are created
- // fairly reguarly as new data is coming in and contents of memory are
+ // fairly regularly as new data is coming in and contents of memory are
// pushed to disk. In a unit test environment, we aren't pushing nearly
// enough data for this to happen organically. So, instead open and
// close the Blockstore which will perform the flush to SSTs.
8 changes: 4 additions & 4 deletions local-cluster/tests/local_cluster.rs
@@ -2385,7 +2385,7 @@ fn test_hard_fork_with_gap_in_roots() {
.reversed_rooted_slot_iterator(common_root)
.unwrap()
.collect::<Vec<_>>();
- // artifically restore the forcibly purged genesis only for the validator A just for the sake of
+ // artificially restore the forcibly purged genesis only for the validator A just for the sake of
// the final assertions.
slots_a.push(genesis_slot);
roots_a.push(genesis_slot);
@@ -4282,7 +4282,7 @@ fn test_leader_failure_4() {
//
// Validator A (60%)
// Validator B (40%)
- // / --- 10 --- [..] --- 16 (B is voting, due to network issues is initally not able to see the other fork at all)
+ // / --- 10 --- [..] --- 16 (B is voting, due to network issues is initially not able to see the other fork at all)
// /
// 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 (A votes 1 - 9 votes are landing normally. B does the same however votes are not landing)
// \
@@ -4478,7 +4478,7 @@ fn test_slot_hash_expiry() {
);
}

- // This test simulates a case where a leader sends a duplicate block with different ancestory. One
+ // This test simulates a case where a leader sends a duplicate block with different ancestry. One
// version builds off of the rooted path, however the other version builds off a pruned branch. The
// validators that receive the pruned version will need to repair in order to continue, which
// requires an ancestor hashes repair.
@@ -4507,7 +4507,7 @@
// reached as minority cannot pass threshold otherwise).
// 4) Let minority produce forks on pruned forks until out of leader slots then kill.
// 5) Truncate majority ledger past fork slot so it starts building off of fork slot.
- // 6) Restart majority and wait untill it starts producing blocks on main fork and roots something
+ // 6) Restart majority and wait until it starts producing blocks on main fork and roots something
// past the fork slot.
// 7) Construct our ledger by copying majority ledger and copying blocks from minority for the pruned path.
// 8) In our node's ledger, change the parent of the latest slot in majority fork to be the latest
2 changes: 1 addition & 1 deletion perf/src/recycler.rs
@@ -241,7 +241,7 @@ mod tests {
let count = rng.gen_range(1..128);
let _packets: Vec<_> = repeat_with(|| recycler.allocate("")).take(count).collect();
}
- // Assert that the gc size has shrinked.
+ // Assert that the gc size has shrunk.
assert_eq!(
recycler.recycler.gc.lock().unwrap().len(),
RECYCLER_SHRINK_SIZE
2 changes: 1 addition & 1 deletion poh/src/poh_recorder.rs
@@ -1608,7 +1608,7 @@ mod tests {
assert!(poh_recorder.working_bank.is_some());

// Drop entry receiver, and try to tick again. Because
- // the reciever is closed, the ticks will not be drained from the cache,
+ // the receiver is closed, the ticks will not be drained from the cache,
// and the working bank will be cleared
drop(entry_receiver);
poh_recorder.tick();
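
Aside: the behavior this test leans on is standard channel semantics: once the receiver is dropped, sends fail rather than queue. A dependency-free sketch with std::sync::mpsc (the crate uses a different channel implementation with similar close semantics):

    use std::sync::mpsc::channel;

    fn main() {
        let (sender, receiver) = channel::<u64>();
        drop(receiver);
        // With the receiver closed, send() returns Err and nothing is queued,
        // which is why the ticks stay cached and the working bank is cleared.
        assert!(sender.send(1).is_err());
    }
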
2 changes: 1 addition & 1 deletion pubsub-client/src/nonblocking/pubsub_client.rs
@@ -519,7 +519,7 @@ impl PubsubClient {
/// Receives messages of type [`SlotUpdate`] when various updates to a slot occur.
///
/// Note that this method operates differently than other subscriptions:
- /// instead of sending the message to a reciever on a channel, it accepts a
+ /// instead of sending the message to a receiver on a channel, it accepts a
/// `handler` callback that processes the message directly. This processing
/// occurs on another thread.
///
2 changes: 1 addition & 1 deletion pubsub-client/src/pubsub_client.rs
@@ -766,7 +766,7 @@ impl PubsubClient {
/// Receives messages of type [`SlotUpdate`] when various updates to a slot occur.
///
/// Note that this method operates differently than other subscriptions:
- /// instead of sending the message to a reciever on a channel, it accepts a
+ /// instead of sending the message to a receiver on a channel, it accepts a
/// `handler` callback that processes the message directly. This processing
/// occurs on another thread.
///
2 changes: 1 addition & 1 deletion quic-client/src/quic_client.rs
@@ -26,7 +26,7 @@ const SEND_DATA_TIMEOUT: Duration = Duration::from_secs(10);

/// A semaphore used for limiting the number of asynchronous tasks spawn to the
/// runtime. Before spawnning a task, use acquire. After the task is done (be it
- /// succsess or failure), call release.
+ /// success or failure), call release.
struct AsyncTaskSemaphore {
/// Keep the counter info about the usage
counter: Mutex<u64>,
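
Aside: the fixed comment describes a common throttling discipline: acquire before spawning, release when the task finishes either way. A sketch of the same idea using tokio's stock Semaphore (assumes a tokio runtime is available; the crate's hand-rolled counter-and-mutex type differs):

    use std::sync::Arc;
    use tokio::sync::Semaphore;

    #[tokio::main]
    async fn main() {
        let semaphore = Arc::new(Semaphore::new(2)); // at most 2 in-flight tasks
        let mut handles = Vec::new();
        for i in 0..5 {
            let permit = semaphore.clone().acquire_owned().await.unwrap(); // acquire first
            handles.push(tokio::spawn(async move {
                println!("task {i} running");
                drop(permit); // released on success or failure alike
            }));
        }
        for handle in handles {
            handle.await.unwrap();
        }
    }
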
2 changes: 1 addition & 1 deletion quic-client/tests/quic_client.rs
@@ -193,7 +193,7 @@ mod tests {
fn test_quic_bi_direction() {
/// This tests bi-directional quic communication. There are the following components
/// The request receiver -- responsible for receiving requests
- /// The request sender -- responsible sending requests to the request reciever using quic
+ /// The request sender -- responsible sending requests to the request receiver using quic
/// The response receiver -- responsible for receiving the responses to the requests
/// The response sender -- responsible for sending responses to the response receiver.
/// In this we demonstrate that the request sender and the response receiver use the
2 changes: 1 addition & 1 deletion rpc-client/src/mock_sender.rs
@@ -66,7 +66,7 @@ pub struct MockSender {
/// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
///
/// It is customary to set the `url` to "succeeds" for mocks that should
- /// return sucessfully, though this value is not actually interpreted.
+ /// return successfully, though this value is not actually interpreted.
///
/// Other possible values of `url` are specific to different `RpcRequest`
/// values. Read the implementation for specifics.
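
Aside: as a usage note for the doc comment above, mocks are normally reached through RpcClient::new_mock, which wires up a MockSender with the given url (sketch, assuming the solana-rpc-client crate):

    use solana_rpc_client::rpc_client::RpcClient;

    fn main() {
        // "succeeds" is the customary value; it is not interpreted, it just
        // signals intent to readers of the test.
        let _rpc_client = RpcClient::new_mock("succeeds".to_string());
    }
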