Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(node): derive encrypt_details from self keypair #2379

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion .github/workflows/merge.yml
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,11 @@ jobs:
timeout-minutes: 25
run: cargo test --release --package sn_node --lib

- name: Run network tests
- name: Run network tests (with encrypt-records)
timeout-minutes: 25
run: cargo test --release --package sn_networking --features="open-metrics, encrypt-records"

- name: Run network tests (without encrypt-records)
timeout-minutes: 25
run: cargo test --release --package sn_networking --features="open-metrics"

Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/nightly.yml
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,7 @@ jobs:

- name: Run network tests
timeout-minutes: 25
run: cargo test --release --package sn_networking --features="open-metrics"
run: cargo test --release --package sn_networking --features="open-metrics, encrypt-records"

- name: Run protocol tests
timeout-minutes: 25
Expand Down
3 changes: 3 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 3 additions & 0 deletions sn_networking/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -73,11 +73,14 @@ tracing = { version = "~0.1.26" }
xor_name = "5.0.0"
backoff = { version = "0.4.0", features = ["tokio"] }
aes-gcm-siv = "0.11.1"
hkdf = "0.12"
sha2 = "0.10"
walkdir = "~2.5.0"
strum = { version = "0.26.2", features = ["derive"] }
void = "1.0.2"

[dev-dependencies]
assert_fs = "1.0.0"
bls = { package = "blsttc", version = "8.0.1" }
# add rand to libp2p
libp2p-identity = { version = "0.2.7", features = ["rand"] }
Expand Down
9 changes: 9 additions & 0 deletions sn_networking/src/driver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ use sn_protocol::{
use sn_registers::SignedRegister;
use std::{
collections::{btree_map::Entry, BTreeMap, HashMap, HashSet},
convert::TryInto,
fmt::Debug,
fs,
io::{Read, Write},
Expand Down Expand Up @@ -389,10 +390,18 @@ impl NetworkBuilder {
source: error,
});
}
let peer_id = PeerId::from(self.keypair.public());
let encryption_seed: [u8; 16] = peer_id
.to_bytes()
.get(..16)
.expect("Cann't get encryption_seed from keypair")
.try_into()
.expect("Cann't get 16 bytes from serialised key_pair");
NodeRecordStoreConfig {
max_value_bytes: MAX_PACKET_SIZE, // TODO, does this need to be _less_ than MAX_PACKET_SIZE
storage_dir: storage_dir_path,
historic_quote_dir: root_dir.clone(),
encryption_seed,
..Default::default()
}
};
Expand Down
137 changes: 127 additions & 10 deletions sn_networking/src/record_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,10 @@ use crate::send_local_swarm_cmd;
use crate::target_arch::{spawn, Instant};
use crate::{event::NetworkEvent, log_markers::Marker};
use aes_gcm_siv::{
aead::{Aead, KeyInit, OsRng},
Aes256GcmSiv, Nonce,
aead::{Aead, KeyInit},
Aes256GcmSiv, Key as AesKey, Nonce,
};

use hkdf::Hkdf;
use itertools::Itertools;
use libp2p::{
identity::PeerId,
Expand All @@ -27,9 +27,9 @@ use libp2p::{
};
#[cfg(feature = "open-metrics")]
use prometheus_client::metrics::gauge::Gauge;
use rand::RngCore;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use sn_evm::{AttoTokens, QuotingMetrics};
use sn_protocol::{
storage::{RecordHeader, RecordKind, RecordType},
Expand Down Expand Up @@ -67,6 +67,27 @@ const MAX_STORE_COST: u64 = 1_000_000;
// Min store cost for a chunk.
const MIN_STORE_COST: u64 = 1;

/// Derives a deterministic AES-256-GCM-SIV cipher and a 4-byte nonce prefix
/// from a 16-byte seed using HKDF-SHA256.
///
/// The derivation is deterministic: the same seed always yields the same
/// key/nonce pair, so records encrypted before a node restart remain
/// decryptable afterwards (provided the node keeps the same seed).
fn derive_aes256gcm_siv_from_seed(seed: &[u8; 16]) -> (Aes256GcmSiv, [u8; 4]) {
    // Domain-separation salt: must be unique per purpose so that the same
    // seed can safely be reused for unrelated derivations elsewhere.
    let salt = b"autonomi_record_store";

    let hk = Hkdf::<Sha256>::new(Some(salt), seed);

    // Expand to the 32 bytes required for an AES-256 key.
    let mut okm = [0u8; 32];
    hk.expand(b"", &mut okm)
        .expect("32 bytes is a valid length for HKDF output");

    let seeded_key = AesKey::<Aes256GcmSiv>::from_slice(&okm);

    // The first 4 seed bytes become the fixed nonce prefix; the remaining
    // nonce bytes are expected to be filled in per-record by the caller.
    let mut nonce_starter = [0u8; 4];
    let bytes_to_copy = seed.len().min(nonce_starter.len());
    nonce_starter[..bytes_to_copy].copy_from_slice(&seed[..bytes_to_copy]);

    // SECURITY: never log `seeded_key` — key material must not be written to
    // logs, even at trace level. Only the (non-secret) nonce prefix is logged.
    trace!("derived record-store cipher, nonce_starter is {nonce_starter:?}");

    (Aes256GcmSiv::new(seeded_key), nonce_starter)
}

/// FIFO simple cache of records to reduce read times
struct RecordCache {
records_cache: HashMap<Key, (Record, SystemTime)>,
Expand Down Expand Up @@ -163,6 +184,8 @@ pub struct NodeRecordStoreConfig {
pub max_value_bytes: usize,
/// The maximum number of records to cache in memory.
pub records_cache_size: usize,
/// The seed to generate record_store encryption_details
pub encryption_seed: [u8; 16],
}

impl Default for NodeRecordStoreConfig {
Expand All @@ -174,6 +197,7 @@ impl Default for NodeRecordStoreConfig {
max_records: MAX_RECORDS_COUNT,
max_value_bytes: MAX_PACKET_SIZE,
records_cache_size: MAX_RECORDS_CACHE_SIZE,
encryption_seed: [0u8; 16],
}
}
}
Expand Down Expand Up @@ -330,12 +354,8 @@ impl NodeRecordStore {
network_event_sender: mpsc::Sender<NetworkEvent>,
swarm_cmd_sender: mpsc::Sender<LocalSwarmCmd>,
) -> Self {
let key = Aes256GcmSiv::generate_key(&mut OsRng);
let cipher = Aes256GcmSiv::new(&key);
let mut nonce_starter = [0u8; 4];
OsRng.fill_bytes(&mut nonce_starter);

let encryption_details = (cipher, nonce_starter);
info!("Using encryption_seed of {:?}", config.encryption_seed);
let encryption_details = derive_aes256gcm_siv_from_seed(&config.encryption_seed);

// Recover the quoting_metrics first, as the historical file will be cleaned by
// the later on update_records_from_an_existing_store function
Expand Down Expand Up @@ -1021,6 +1041,7 @@ mod tests {
use bls::SecretKey;
use xor_name::XorName;

use assert_fs::TempDir;
use bytes::Bytes;
use eyre::{bail, ContextCompat};
use libp2p::kad::K_VALUE;
Expand Down Expand Up @@ -1221,6 +1242,102 @@ mod tests {
assert!(store.get(&r.key).is_none());
}

// Verifies that the record store's seed-derived encryption is stable across
// restarts: records written under one `encryption_seed` are still readable
// after recreating the store with the same seed, while a store recreated with
// a *different* seed can no longer read them (when the `encrypt-records`
// feature is enabled). Without that feature, records persist regardless of seed.
#[tokio::test]
async fn can_store_after_restart() -> eyre::Result<()> {
    let temp_dir = TempDir::new().expect("Should be able to create a temp dir.");
    let store_config = NodeRecordStoreConfig {
        storage_dir: temp_dir.to_path_buf(),
        // Fixed, non-default seed so the restarted store derives the same cipher.
        encryption_seed: [1u8; 16],
        ..Default::default()
    };
    let self_id = PeerId::random();
    let (network_event_sender, _) = mpsc::channel(1);
    let (swarm_cmd_sender, _) = mpsc::channel(1);

    let mut store = NodeRecordStore::with_config(
        self_id,
        store_config.clone(),
        network_event_sender.clone(),
        swarm_cmd_sender.clone(),
    );

    // Create a chunk
    let chunk_data = Bytes::from_static(b"Test chunk data");
    let chunk = Chunk::new(chunk_data);
    let chunk_address = *chunk.address();

    // Create a record from the chunk
    let record = Record {
        key: NetworkAddress::ChunkAddress(chunk_address).to_record_key(),
        value: try_serialize_record(&chunk, RecordKind::Chunk)?.to_vec(),
        expires: None,
        publisher: None,
    };

    // Store the chunk using put_verified
    assert!(store
        .put_verified(record.clone(), RecordType::Chunk)
        .is_ok());

    // Mark as stored (simulating the CompletedWrite event)
    store.mark_as_stored(record.key.clone(), RecordType::Chunk);

    // Verify the chunk is stored
    let stored_record = store.get(&record.key);
    assert!(stored_record.is_some(), "Chunk should be stored");

    // Sleep a while to let the OS complete the flush to disk
    sleep(Duration::from_secs(1)).await;

    // Restart the store with the same encryption_seed
    drop(store);
    let store = NodeRecordStore::with_config(
        self_id,
        store_config,
        network_event_sender.clone(),
        swarm_cmd_sender.clone(),
    );

    // Sleep a little to let the store finish restoring records from disk
    sleep(Duration::from_secs(1)).await;

    // Verify the record still exists
    let stored_record = store.get(&record.key);
    assert!(stored_record.is_some(), "Chunk should be stored");

    // Restart the store with a different encryption_seed (and a different peer,
    // pointing at the same storage directory)
    let self_id_diff = PeerId::random();
    let store_config_diff = NodeRecordStoreConfig {
        storage_dir: temp_dir.to_path_buf(),
        encryption_seed: [2u8; 16],
        ..Default::default()
    };
    let store_diff = NodeRecordStore::with_config(
        self_id_diff,
        store_config_diff,
        network_event_sender,
        swarm_cmd_sender,
    );

    // Sleep a little to let the store finish restoring from disk (if any)
    sleep(Duration::from_secs(1)).await;

    // Verify record existence: with encryption enabled the record cannot be
    // decrypted under the new seed, so the store shall no longer return it
    if cfg!(feature = "encrypt-records") {
        assert!(
            store_diff.get(&record.key).is_none(),
            "Chunk should be gone"
        );
    } else {
        assert!(
            store_diff.get(&record.key).is_some(),
            "Chunk shall persists without encryption"
        );
    }

    Ok(())
}

#[tokio::test]
async fn can_store_and_retrieve_chunk() {
let temp_dir = std::env::temp_dir();
Expand Down
Loading