Move EpochRewardsHasher to solana-sdk #34934

Merged
Changes from 3 commits
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion programs/sbf/Cargo.lock

Some generated files are not rendered by default.

1 change: 0 additions & 1 deletion runtime/Cargo.toml
@@ -47,7 +47,6 @@ regex = { workspace = true }
serde = { workspace = true, features = ["rc"] }
serde_derive = { workspace = true }
serde_json = { workspace = true }
siphasher = { workspace = true }
solana-accounts-db = { workspace = true }
solana-address-lookup-table-program = { workspace = true }
solana-bpf-loader-program = { workspace = true }
182 changes: 2 additions & 180 deletions runtime/src/epoch_rewards_hasher.rs
@@ -1,45 +1,8 @@
use {
crate::bank::StakeRewards,
siphasher::sip::SipHasher13,
solana_sdk::{hash::Hash, pubkey::Pubkey},
std::hash::Hasher,
solana_sdk::{epoch_rewards_hasher::EpochRewardsHasher, hash::Hash},
};

#[derive(Debug, Clone)]
pub(crate) struct EpochRewardsHasher {
hasher: SipHasher13,
partitions: usize,
}

impl EpochRewardsHasher {
/// Use SipHasher13 keyed on the `seed` for calculating epoch reward partition
pub(crate) fn new(partitions: usize, seed: &Hash) -> Self {
let mut hasher = SipHasher13::new();
hasher.write(seed.as_ref());
Self { hasher, partitions }
}

/// Return partition index (0..partitions) by hashing `address` with the `hasher`
pub(crate) fn hash_address_to_partition(self, address: &Pubkey) -> usize {
let Self {
mut hasher,
partitions,
} = self;
hasher.write(address.as_ref());
let hash64 = hasher.finish();

hash_to_partition(hash64, partitions)
}
}

/// Compute the partition index by mapping the address hash onto the number of partitions without bias:
/// (rand_int * DESIRED_RANGE_MAX) / (RAND_MAX + 1)
fn hash_to_partition(hash: u64, partitions: usize) -> usize {
((partitions as u128)
.saturating_mul(u128::from(hash))
.saturating_div(u128::from(u64::MAX).saturating_add(1))) as usize
}

pub(crate) fn hash_rewards_into_partitions(
stake_rewards: StakeRewards,
parent_blockhash: &Hash,
@@ -62,148 +25,7 @@ pub(crate) fn hash_rewards_into_partitions(

#[cfg(test)]
mod tests {
use {
super::*,
solana_accounts_db::stake_rewards::StakeReward,
std::{collections::HashMap, ops::RangeInclusive},
};

#[test]
fn test_get_equal_partition_range() {
// show how 2 equal partition ranges are 0..=(max/2) and (max/2+1)..=max
// the inclusive upper bounds are tricky to reason about
let range = get_equal_partition_range(0, 2);
assert_eq!(*range.start(), 0);
assert_eq!(*range.end(), u64::MAX / 2);
let range = get_equal_partition_range(1, 2);
assert_eq!(*range.start(), u64::MAX / 2 + 1);
assert_eq!(*range.end(), u64::MAX);
}

#[test]
fn test_hash_to_partitions() {
let partitions = 16;
assert_eq!(hash_to_partition(0, partitions), 0);
assert_eq!(hash_to_partition(u64::MAX / 16, partitions), 0);
assert_eq!(hash_to_partition(u64::MAX / 16 + 1, partitions), 1);
assert_eq!(hash_to_partition(u64::MAX / 16 * 2, partitions), 1);
assert_eq!(hash_to_partition(u64::MAX / 16 * 2 + 1, partitions), 1);
assert_eq!(hash_to_partition(u64::MAX - 1, partitions), partitions - 1);
assert_eq!(hash_to_partition(u64::MAX, partitions), partitions - 1);
}

fn test_partitions(partition: usize, partitions: usize) {
let partition = partition.min(partitions - 1);
let range = get_equal_partition_range(partition, partitions);
// beginning and end of this partition
assert_eq!(hash_to_partition(*range.start(), partitions), partition);
assert_eq!(hash_to_partition(*range.end(), partitions), partition);
if partition < partitions - 1 {
// first index in next partition
assert_eq!(
hash_to_partition(*range.end() + 1, partitions),
partition + 1
);
} else {
assert_eq!(*range.end(), u64::MAX);
}
if partition > 0 {
// last index in previous partition
assert_eq!(
hash_to_partition(*range.start() - 1, partitions),
partition - 1
);
} else {
assert_eq!(*range.start(), 0);
}
}

#[test]
fn test_hash_to_partitions_equal_ranges() {
for partitions in [2, 4, 8, 16, 4096] {
assert_eq!(hash_to_partition(0, partitions), 0);
for partition in [0, 1, 2, partitions - 1] {
test_partitions(partition, partitions);
}

let range = get_equal_partition_range(0, partitions);
for partition in 1..partitions {
let this_range = get_equal_partition_range(partition, partitions);
assert_eq!(
this_range.end() - this_range.start(),
range.end() - range.start()
);
}
}
// verify non-evenly divisible partitions (partitions will be different sizes by at most 1 from any other partition)
for partitions in [3, 19, 1019, 4095] {
for partition in [0, 1, 2, partitions - 1] {
test_partitions(partition, partitions);
}
let expected_len_of_partition =
((u128::from(u64::MAX) + 1) / partitions as u128) as u64;
for partition in 0..partitions {
let this_range = get_equal_partition_range(partition, partitions);
let len = this_range.end() - this_range.start();
// size is same or 1 less
assert!(
len == expected_len_of_partition || len + 1 == expected_len_of_partition,
"{}, {}, {}, {}",
expected_len_of_partition,
len,
partition,
partitions
);
}
}
}

/// Return the start and inclusive end of the index range for `partition`, splitting the u64::MAX+1 possible hash values into equal `partitions`
/// The ranges are all the same size as long as (u64::MAX + 1) is evenly divisible by `partitions`
fn get_equal_partition_range(partition: usize, partitions: usize) -> RangeInclusive<u64> {
let max_inclusive = u128::from(u64::MAX);
let max_plus_1 = max_inclusive + 1;
let partition = partition as u128;
let partitions = partitions as u128;
let mut start = max_plus_1 * partition / partitions;
if partition > 0 && start * partitions / max_plus_1 == partition - 1 {
// partitions don't evenly divide and the start of this partition needs to be 1 greater
start += 1;
}

let mut end_inclusive = start + max_plus_1 / partitions - 1;
if partition < partitions.saturating_sub(1) {
let next = end_inclusive + 1;
if next * partitions / max_plus_1 == partition {
// this partition is far enough into partitions such that the len of this partition is 1 larger than expected
end_inclusive += 1;
}
} else {
end_inclusive = max_inclusive;
}
RangeInclusive::new(start as u64, end_inclusive as u64)
}

/// Make sure that each time hash_address_to_partition is called, it uses the initial seed state and that clone correctly copies the initial hasher state.
#[test]
fn test_hasher_copy() {
let seed = Hash::new_unique();
let partitions = 10;
let hasher = EpochRewardsHasher::new(partitions, &seed);

let pk = Pubkey::new_unique();

let b1 = hasher.clone().hash_address_to_partition(&pk);
let b2 = hasher.hash_address_to_partition(&pk);
assert_eq!(b1, b2);

// make sure b1 includes the seed's hash
let mut hasher = SipHasher13::new();
hasher.write(seed.as_ref());
hasher.write(pk.as_ref());
let partition = hash_to_partition(hasher.finish(), partitions);
assert_eq!(partition, b1);
}
use {super::*, solana_accounts_db::stake_rewards::StakeReward, std::collections::HashMap};

#[test]
fn test_hash_rewards_into_partitions() {
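
For context, a minimal sketch of how runtime callers reach the hasher through its new home after this move. The import path comes from the updated `use` statement above; it assumes the sdk exposes `new` and `hash_address_to_partition` publicly with the same signatures they had in the runtime (the seed, address, and partition count below are arbitrary).

```rust
use solana_sdk::{epoch_rewards_hasher::EpochRewardsHasher, hash::Hash, pubkey::Pubkey};

fn main() {
    // Seed the SipHasher13-backed hasher with a blockhash-style seed, then map a
    // stake account address onto one of `partitions` near-equal buckets.
    let seed = Hash::new_unique();
    let address = Pubkey::new_unique();
    let partitions = 4096;

    let partition = EpochRewardsHasher::new(partitions, &seed).hash_address_to_partition(&address);
    assert!(partition < partitions);
    println!("{address} -> partition {partition}");
}
```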
1 change: 1 addition & 0 deletions sdk/Cargo.toml
@@ -75,6 +75,7 @@ serde_json = { workspace = true, optional = true }
serde_with = { workspace = true, features = ["macros"] }
sha2 = { workspace = true }
sha3 = { workspace = true, optional = true }
siphasher = { workspace = true }
solana-frozen-abi = { workspace = true }
solana-frozen-abi-macro = { workspace = true }
solana-logger = { workspace = true, optional = true }
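
The new sdk source file itself is not rendered in this view, so below is a rough sketch of what the relocated module would contain, reconstructed from the code removed from runtime/src/epoch_rewards_hasher.rs above; the `siphasher` dependency added to sdk/Cargo.toml here is what it needs. The file path, `pub` visibility, and import paths are assumptions; the logic is taken from the diff.

```rust
// Hypothetical sketch of sdk/src/epoch_rewards_hasher.rs (the actual file may differ).
use {
    siphasher::sip::SipHasher13,
    // Inside solana-sdk these would be crate-local paths; solana-program re-exports are used
    // here so the sketch stands alone.
    solana_program::{hash::Hash, pubkey::Pubkey},
    std::hash::Hasher,
};

#[derive(Debug, Clone)]
pub struct EpochRewardsHasher {
    hasher: SipHasher13,
    partitions: usize,
}

impl EpochRewardsHasher {
    /// Use SipHasher13 keyed on the `seed` for calculating epoch reward partitions.
    pub fn new(partitions: usize, seed: &Hash) -> Self {
        let mut hasher = SipHasher13::new();
        hasher.write(seed.as_ref());
        Self { hasher, partitions }
    }

    /// Return the partition index (0..partitions) by hashing `address` with the seeded hasher.
    pub fn hash_address_to_partition(self, address: &Pubkey) -> usize {
        let Self { mut hasher, partitions } = self;
        hasher.write(address.as_ref());
        hash_to_partition(hasher.finish(), partitions)
    }
}

/// Map a 64-bit hash onto `partitions` buckets without modulo bias:
/// partition = (hash * partitions) / (u64::MAX + 1)
fn hash_to_partition(hash: u64, partitions: usize) -> usize {
    ((partitions as u128)
        .saturating_mul(u128::from(hash))
        .saturating_div(u128::from(u64::MAX).saturating_add(1))) as usize
}
```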