Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Small farmer refactoring #1782

Merged
merged 2 commits into from
Aug 9, 2023
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 15 additions & 12 deletions crates/subspace-farmer-components/src/plotting.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@ use std::time::Duration;
use subspace_core_primitives::crypto::kzg::Kzg;
use subspace_core_primitives::crypto::Scalar;
use subspace_core_primitives::{
ArchivedHistorySegment, Piece, PieceIndex, PieceOffset, PublicKey, Record, RecordCommitment,
RecordWitness, SBucket, SectorId, SectorIndex,
ArchivedHistorySegment, Piece, PieceIndex, PieceOffset, PublicKey, Record, SBucket, SectorId,
SectorIndex,
};
use subspace_erasure_coding::ErasureCoding;
use subspace_proof_of_space::{Quality, Table, TableGenerator};
Expand Down Expand Up @@ -338,14 +338,19 @@ where
});
});

// Write sector to disk in form of following regions:
// * sector contents map
// * record chunks as s-buckets
// * record metadata
{
let (sector_contents_map_region, remainder) =
let (sector_contents_map_region, remaining_bytes) =
sector_output.split_at_mut(SectorContentsMap::encoded_size(pieces_in_sector));
// Write sector contents map so we can decode it later
sector_contents_map_region.copy_from_slice(sector_contents_map.as_ref());
// Slice remaining memory into belonging to s-buckets and metadata
let (s_buckets_region, metadata_region) =
remainder.split_at_mut(sector_record_chunks_size(pieces_in_sector));
remaining_bytes.split_at_mut(sector_record_chunks_size(pieces_in_sector));

// Write sector contents map so we can decode it later
sector_contents_map_region.copy_from_slice(sector_contents_map.as_ref());

let num_encoded_record_chunks = sector_contents_map.num_encoded_record_chunks();
let mut next_encoded_record_chunks_offset = vec![0_usize; pieces_in_sector.into()];
Expand Down Expand Up @@ -380,12 +385,10 @@ where
output.copy_from_slice(&raw_sector.records[usize::from(piece_offset)][chunk_position]);
}

for (record_metadata, output) in raw_sector.metadata.into_iter().zip(
metadata_region.array_chunks_mut::<{ RecordCommitment::SIZE + RecordWitness::SIZE }>(),
) {
let (commitment, witness) = output.split_at_mut(RecordCommitment::SIZE);
commitment.copy_from_slice(record_metadata.commitment.as_ref());
witness.copy_from_slice(record_metadata.witness.as_ref());
let metadata_chunks =
metadata_region.array_chunks_mut::<{ RecordMetadata::encoded_size() }>();
for (record_metadata, output) in raw_sector.metadata.into_iter().zip(metadata_chunks) {
record_metadata.encode_to(&mut output.as_mut_slice());
}
}

Expand Down
136 changes: 67 additions & 69 deletions crates/subspace-farmer-components/src/proving.rs
Original file line number Diff line number Diff line change
Expand Up @@ -210,80 +210,78 @@ where
.derive_evaluation_seed(piece_offset, self.sector_metadata.history_size),
);

let maybe_chunk_cache: Result<_, ProvingError> = try {
let sector_record_chunks = read_sector_record_chunks(
piece_offset,
self.sector_metadata.pieces_in_sector,
&self.s_bucket_offsets,
&self.sector_contents_map,
&pos_table,
self.sector,
)?;

let chunk = sector_record_chunks
.get(usize::from(self.s_bucket))
.expect("Within s-bucket range; qed")
.expect("Winning chunk was plotted; qed");

let source_chunks_polynomial = self
.erasure_coding
.recover_poly(sector_record_chunks.as_slice())
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
let maybe_chunk_cache: Result<_, ProvingError> =
try {
let sector_record_chunks = read_sector_record_chunks(
piece_offset,
error,
})?;
drop(sector_record_chunks);

let (record_commitment, record_witness) = read_record_metadata(
piece_offset,
self.sector_metadata.pieces_in_sector,
self.sector,
)?;

let record_commitment =
Commitment::try_from_bytes(&record_commitment).map_err(|error| {
ProvingError::FailedToDecodeMetadataForRecord {
self.sector_metadata.pieces_in_sector,
&self.s_bucket_offsets,
&self.sector_contents_map,
&pos_table,
self.sector,
)?;

let chunk = sector_record_chunks
.get(usize::from(self.s_bucket))
.expect("Within s-bucket range; qed")
.expect("Winning chunk was plotted; qed");

let source_chunks_polynomial = self
.erasure_coding
.recover_poly(sector_record_chunks.as_slice())
.map_err(|error| ReadingError::FailedToErasureDecodeRecord {
piece_offset,
error,
}
})?;
let record_witness = Witness::try_from_bytes(&record_witness).map_err(|error| {
ProvingError::FailedToDecodeMetadataForRecord {
piece_offset,
error,
}
})?;

let proof_of_space = pos_table
.find_quality(self.s_bucket.into())
.expect(
"Quality exists for this s-bucket, otherwise it wouldn't be a winning \
chunk; qed",
)
.create_proof();

let chunk_witness = self
.kzg
.create_witness(
&source_chunks_polynomial,
Record::NUM_S_BUCKETS,
self.s_bucket.into(),
)
.map_err(|error| ProvingError::FailedToCreateChunkWitness {
})?;
drop(sector_record_chunks);

let record_metadata = read_record_metadata(
piece_offset,
self.sector_metadata.pieces_in_sector,
self.sector,
)?;

let record_commitment = Commitment::try_from_bytes(&record_metadata.commitment)
.map_err(|error| ProvingError::FailedToDecodeMetadataForRecord {
piece_offset,
error,
})?;
let record_witness = Witness::try_from_bytes(&record_metadata.witness)
.map_err(|error| ProvingError::FailedToDecodeMetadataForRecord {
piece_offset,
error,
})?;

let proof_of_space = pos_table
.find_quality(self.s_bucket.into())
.expect(
"Quality exists for this s-bucket, otherwise it wouldn't be a \
winning chunk; qed",
)
.create_proof();

let chunk_witness = self
.kzg
.create_witness(
&source_chunks_polynomial,
Record::NUM_S_BUCKETS,
self.s_bucket.into(),
)
.map_err(|error| ProvingError::FailedToCreateChunkWitness {
piece_offset,
chunk_offset,
error,
})?;

ChunkCache {
chunk,
chunk_offset,
error,
})?;

ChunkCache {
chunk,
chunk_offset,
record_commitment,
record_witness,
chunk_witness,
proof_of_space,
}
};
record_commitment,
record_witness,
chunk_witness,
proof_of_space,
}
};

let chunk_cache = match maybe_chunk_cache {
Ok(chunk_cache) => chunk_cache,
Expand Down
39 changes: 17 additions & 22 deletions crates/subspace-farmer-components/src/reading.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
use crate::sector::{
sector_record_chunks_size, sector_size, SectorContentsMap, SectorContentsMapFromBytesError,
SectorMetadata,
sector_record_chunks_size, sector_size, RecordMetadata, SectorContentsMap,
SectorContentsMapFromBytesError, SectorMetadata,
};
use parity_scale_codec::Decode;
use rayon::prelude::*;
use std::mem::ManuallyDrop;
use std::simd::Simd;
Expand Down Expand Up @@ -219,11 +220,11 @@ pub fn recover_source_record_chunks(
}

/// Read metadata (commitment and witness) for record
pub fn read_record_metadata(
pub(crate) fn read_record_metadata(
piece_offset: PieceOffset,
pieces_in_sector: u16,
sector: &[u8],
) -> Result<(RecordCommitment, RecordWitness), ReadingError> {
) -> Result<RecordMetadata, ReadingError> {
if sector.len() != sector_size(pieces_in_sector) {
return Err(ReadingError::WrongSectorSize {
expected: sector_size(pieces_in_sector),
Expand All @@ -233,21 +234,15 @@ pub fn read_record_metadata(

let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
+ sector_record_chunks_size(pieces_in_sector);
let commitment_witness_size = RecordCommitment::SIZE + RecordWitness::SIZE;
let (record_commitment, record_witness) = sector[sector_metadata_start..]
// Move to the beginning of the commitment and witness we care about
[commitment_witness_size * usize::from(piece_offset)..]
// Take commitment and witness we care about
[..commitment_witness_size]
// Separate commitment from witness
.split_at(RecordCommitment::SIZE);

let record_commitment = RecordCommitment::try_from(record_commitment)
.expect("Correctly sliced above from correctly sized plot; qed");
let record_witness = RecordWitness::try_from(record_witness)
.expect("Correctly sliced above from correctly sized plot; qed");

Ok((record_commitment, record_witness))
// Move to the beginning of the commitment and witness we care about
let record_metadata_bytes = &sector[sector_metadata_start..]
[RecordMetadata::encoded_size() * usize::from(piece_offset)..];
let record_metadata = RecordMetadata::decode(&mut &*record_metadata_bytes).expect(
"Length is correct and checked above, contents doesn't have specific structure to \
it; qed",
);

Ok(record_metadata)
}

/// Read piece from sector
Expand Down Expand Up @@ -294,7 +289,7 @@ where
erasure_coding,
)?;

let (commitment, witness) = read_record_metadata(piece_offset, pieces_in_sector, sector)?;
let record_metadata = read_record_metadata(piece_offset, pieces_in_sector, sector)?;

let mut piece = Piece::default();

Expand All @@ -306,8 +301,8 @@ where
*output = input.to_bytes();
});

*piece.commitment_mut() = commitment;
*piece.witness_mut() = witness;
*piece.commitment_mut() = record_metadata.commitment;
*piece.witness_mut() = record_metadata.witness;

Ok(piece)
}
32 changes: 19 additions & 13 deletions crates/subspace-farmer-components/src/sector.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,23 +16,23 @@ pub const fn sector_record_chunks_size(pieces_in_sector: u16) -> usize {
pieces_in_sector as usize * Record::SIZE
}

/// Size of the part of the plot containing commitments and witnesses for records.
/// Size of the part of the plot containing record metadata.
///
/// Total size of the plot can be computed with [`sector_size()`].
pub const fn sector_commitments_witnesses_size(pieces_in_sector: u16) -> usize {
pieces_in_sector as usize * (RecordWitness::SIZE + RecordCommitment::SIZE)
pub const fn sector_record_metadata_size(pieces_in_sector: u16) -> usize {
pieces_in_sector as usize * RecordMetadata::encoded_size()
}

/// Exact sector plot size (sector contents map, record chunks, record commitments and witnesses).
/// Exact sector plot size (sector contents map, record chunks, record metadata).
///
/// NOTE: Each sector also has corresponding fixed size metadata whose size can be obtained with
/// [`SectorMetadata::encoded_size()`], size of the record chunks (s-buckets) with
/// [`sector_record_chunks_size()`] and size of record commitments and witnesses with
/// [`sector_commitments_witnesses_size()`]. This function just combines those three together for
/// [`sector_record_metadata_size()`]. This function just combines those three together for
/// convenience.
pub const fn sector_size(pieces_in_sector: u16) -> usize {
sector_record_chunks_size(pieces_in_sector)
+ sector_commitments_witnesses_size(pieces_in_sector)
+ sector_record_metadata_size(pieces_in_sector)
+ SectorContentsMap::encoded_size(pieces_in_sector)
}

Expand Down Expand Up @@ -92,25 +92,31 @@ impl SectorMetadata {

/// Commitment and witness corresponding to the same record
#[derive(Debug, Default, Clone, Encode, Decode)]
pub struct RecordMetadata {
pub(crate) struct RecordMetadata {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Don't you think pub(crate) for the struct only is enough? Is that somewhere in the Rust guidelines?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fields and methods were also made private — what do you mean by "struct only"?

I was just hiding data structures that are internal to subspace-farmer-components and not exposed in the public API. This helps ensure that when I add checksums in a later PR, all users of these data structures handle checksums in the same, consistent way.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I meant that by making the struct private you prevent leaking the type outside of the crate, and all of its methods and fields become inaccessible as a result. pub methods and fields are less noisy than pub(crate). But maybe that's considered malpractice by the Rust guidelines for a good reason.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I mean, if you need to make something visible publicly you use pub; if it is only needed in the parent module you use pub(super); and if it is only used inside the crate you use pub(crate). Using pub when an item is not actually exposed publicly is misleading, and it also results in data structures not being reported as unused after refactoring. So everything that can be private should be private — and as private as possible.

/// Record commitment
pub commitment: RecordCommitment,
pub(crate) commitment: RecordCommitment,
/// Record witness
pub witness: RecordWitness,
pub(crate) witness: RecordWitness,
}

impl RecordMetadata {
pub(crate) const fn encoded_size() -> usize {
RecordWitness::SIZE + RecordCommitment::SIZE
}
}

/// Raw sector before it is transformed and written to plot, used during plotting
#[derive(Debug, Clone)]
pub struct RawSector {
pub(crate) struct RawSector {
/// List of records, likely downloaded from the network
pub records: Vec<Record>,
pub(crate) records: Vec<Record>,
/// Metadata (commitment and witness) corresponding to the same record
pub metadata: Vec<RecordMetadata>,
pub(crate) metadata: Vec<RecordMetadata>,
}

impl RawSector {
/// Create new raw sector with internal vectors being allocated and filled with default values
pub fn new(pieces_in_sector: u16) -> Self {
pub(crate) fn new(pieces_in_sector: u16) -> Self {
Self {
records: Record::new_zero_vec(usize::from(pieces_in_sector)),
metadata: vec![RecordMetadata::default(); usize::from(pieces_in_sector)],
Expand Down
Loading