Skip to content

Commit

Permalink
Merge pull request #2202 from maqi/node_size_env_evm
Browse files Browse the repository at this point in the history
feat(node): env var MAX_STORAGE_SPACE for MAX_RECORDS_COUNT calculation
  • Loading branch information
RolandSherwin authored Oct 7, 2024
2 parents b170790 + 2baa523 commit 7dcf27e
Show file tree
Hide file tree
Showing 3 changed files with 67 additions and 23 deletions.
31 changes: 29 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions sn_networking/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ prometheus-client = { version = "0.22", optional = true }
rand = { version = "~0.8.5", features = ["small_rng"] }
rayon = "1.8.0"
rmp-serde = "1.1.1"
self_encryption = "~0.30.0"
serde = { version = "1.0.133", features = ["derive", "rc"] }
sn_build_info = { path="../sn_build_info", version = "0.1.14" }
sn_protocol = { path = "../sn_protocol", version = "0.17.10" }
Expand Down
58 changes: 37 additions & 21 deletions sn_networking/src/record_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ use aes_gcm_siv::{
};

use itertools::Itertools;
use lazy_static::lazy_static;
use libp2p::{
identity::PeerId,
kad::{
Expand All @@ -29,6 +30,7 @@ use libp2p::{
use prometheus_client::metrics::gauge::Gauge;
use rand::RngCore;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use self_encryption::MAX_CHUNK_SIZE;
use serde::{Deserialize, Serialize};
use sn_evm::{AttoTokens, QuotingMetrics};
use sn_protocol::{
Expand All @@ -48,13 +50,27 @@ use tokio::sync::mpsc;
use walkdir::{DirEntry, WalkDir};
use xor_name::XorName;

// A spend record is at the size of 4KB roughly.
// Given chunk record is maxed at size of 512KB.
// During Beta phase, it's almost one spend per chunk,
// which makes the average record size is around 256k.
// Given we are targeting node size to be 32GB,
// this shall allow around 128K records.
const MAX_RECORDS_COUNT: usize = 128 * 1024;
/// The default targeted maximum storage space: 32 GB.
/// Used when the `MAX_STORAGE_SPACE` env var is unset or does not parse as a `u64`.
const DEFAULT_MAX_STORAGE_SPACE: u64 = 32 * 1024 * 1024 * 1024;

lazy_static! {
    /// The max storage space for the records.
    /// A `node size` is to be defined as this plus the logging space assigned.
    ///
    /// NOTE(review): `option_env!` reads the env var at *compile* time, not at node
    /// startup — confirm this is the intended way for operators to configure it.
    pub static ref MAX_STORAGE_SPACE: u64 = std::option_env!("MAX_STORAGE_SPACE")
        // Parse directly; fall back to the default on a missing or malformed value.
        // (Avoids the needless allocation of stringifying the default just to re-parse it.)
        .and_then(|s| s.parse::<u64>().ok())
        .unwrap_or(DEFAULT_MAX_STORAGE_SPACE);

    // A spend record is roughly 2KB, and during the Beta phase there is almost one
    // spend per chunk, making the average record size about half of MAX_CHUNK_SIZE —
    // hence the factor of 2 below.
    static ref MAX_RECORDS_COUNT: usize = {
        let records_count: usize = ((*MAX_STORAGE_SPACE as f64 / *MAX_CHUNK_SIZE as f64) * 2.0) as usize;
        info!("MAX_STORAGE_SPACE is {}, MAX_CHUNK_SIZE is {}, MAX_RECORDS_COUNT is {records_count}",
            *MAX_STORAGE_SPACE, *MAX_CHUNK_SIZE);
        records_count
    };
}

/// The maximum number of records to cache in memory.
/// NOTE(review): cache capacity only — eviction policy is defined where the cache is used; verify there.
const MAX_RECORDS_CACHE_SIZE: usize = 100;
Expand Down Expand Up @@ -127,7 +143,7 @@ impl Default for NodeRecordStoreConfig {
Self {
storage_dir: historic_quote_dir.clone(),
historic_quote_dir,
max_records: MAX_RECORDS_COUNT,
max_records: *MAX_RECORDS_COUNT,
max_value_bytes: MAX_PACKET_SIZE,
records_cache_size: MAX_RECORDS_CACHE_SIZE,
}
Expand Down Expand Up @@ -461,7 +477,7 @@ impl NodeRecordStore {
// result in mis-calculation of relevant records.
pub fn cleanup_unrelevant_records(&mut self) {
let accumulated_records = self.records.len();
if accumulated_records < 6 * MAX_RECORDS_COUNT / 10 {
if accumulated_records < *MAX_RECORDS_COUNT * 6 / 10 {
return;
}

Expand Down Expand Up @@ -932,7 +948,7 @@ impl RecordStore for ClientRecordStore {
pub fn calculate_cost_for_records(records_stored: usize) -> u64 {
use std::cmp::{max, min};

let max_records = MAX_RECORDS_COUNT;
let max_records = *MAX_RECORDS_COUNT;

let ori_cost = positive_input_0_1_sigmoid(records_stored as f64 / max_records as f64)
* MAX_STORE_COST as f64;
Expand Down Expand Up @@ -1009,30 +1025,30 @@ mod tests {

#[test]
fn test_calculate_max_cost_for_records() {
    // One past the max record count must be charged just below the cost cap.
    // (The scrape fused the pre/post diff lines into a duplicate `let sut`;
    // the current `*MAX_RECORDS_COUNT` form is kept.)
    let sut = calculate_cost_for_records(*MAX_RECORDS_COUNT + 1);
    assert_eq!(sut, MAX_STORE_COST - 1);
}

#[test]
fn test_calculate_50_percent_cost_for_records() {
    let percent = *MAX_RECORDS_COUNT * 50 / 100;
    let sut = calculate_cost_for_records(percent);

    // At 50% fill the sigmoid midpoint yields half of the maximum store cost
    // (the old "at max cost" comment was stale — TODO confirm MAX_STORE_COST == 1_000_000).
    assert_eq!(sut, 500000);
}
#[test]
fn test_calculate_60_percent_cost_for_records() {
    let percent = *MAX_RECORDS_COUNT * 60 / 100;
    let sut = calculate_cost_for_records(percent);

    // The scrape kept both the old (952572) and new (952561) expected values;
    // the post-change value is the authoritative one.
    assert_eq!(sut, 952561);
}

#[test]
fn test_calculate_65_percent_cost_for_records() {
let percent = MAX_RECORDS_COUNT * 65 / 100;
let percent = *MAX_RECORDS_COUNT * 65 / 100;
let sut = calculate_cost_for_records(percent);

// at this point we should be at max cost
Expand All @@ -1041,7 +1057,7 @@ mod tests {

#[test]
fn test_calculate_70_percent_cost_for_records() {
let percent = MAX_RECORDS_COUNT * 70 / 100;
let percent = *MAX_RECORDS_COUNT * 70 / 100;
let sut = calculate_cost_for_records(percent);

// at this point we should be at max cost
Expand All @@ -1050,7 +1066,7 @@ mod tests {

#[test]
fn test_calculate_80_percent_cost_for_records() {
let percent = MAX_RECORDS_COUNT * 80 / 100;
let percent = *MAX_RECORDS_COUNT * 80 / 100;
let sut = calculate_cost_for_records(percent);

// at this point we should be at max cost
Expand All @@ -1059,7 +1075,7 @@ mod tests {

#[test]
fn test_calculate_90_percent_cost_for_records() {
let percent = MAX_RECORDS_COUNT * 90 / 100;
let percent = *MAX_RECORDS_COUNT * 90 / 100;
let sut = calculate_cost_for_records(percent);
// at this point we should be at max cost
assert_eq!(sut, 999993);
Expand Down Expand Up @@ -1750,8 +1766,8 @@ mod tests {
max_store_cost / min_store_cost
);
assert!(
(max_earned / min_earned) < 300000000,
"earning distribution is not balanced, expected to be < 200000000, but was {}",
(max_earned / min_earned) < 500000000,
"earning distribution is not balanced, expected to be < 500000000, but was {}",
max_earned / min_earned
);
break;
Expand Down Expand Up @@ -1779,7 +1795,7 @@ mod tests {
timestamp: std::time::SystemTime::now(),
quoting_metrics: QuotingMetrics {
close_records_stored: peer.records_stored.load(Ordering::Relaxed),
max_records: MAX_RECORDS_COUNT,
max_records: *MAX_RECORDS_COUNT,
received_payment_count: 1, // unimportant for cost calc
live_time: 0, // unimportant for cost calc
},
Expand Down

0 comments on commit 7dcf27e

Please sign in to comment.