Node batch verification #94

Merged
merged 29 commits into from Dec 15, 2023
Changes from 21 commits
15 changes: 15 additions & 0 deletions core/data.go
@@ -171,3 +171,18 @@ func (cb Bundles) Size() int64 {
    }
    return size
}

// Sample is a chunk with associated metadata used by the Universal Batch Verifier
type Sample struct {
    Commitment      *Commitment
    Chunk           *Chunk
    AssignmentIndex ChunkNumber
    BlobIndex       int
}

// SubBatch is a part of the whole Batch with identical Encoding Parameters, i.e. (ChunkLen, NumChunk)
// Blobs with the same encoding parameters are collected in a single subBatch
type SubBatch struct {
    Samples  []Sample
    NumBlobs int
}
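
For orientation (not part of the diff): blobs that share the same encoding parameters end up in the same sub-batch. A minimal sketch of how such a grouping might be built, assuming EncodingParams is a comparable struct so it can key a map, and that each blob's samples and parameters are already known; the helper name buildSubBatches is illustrative only:

// buildSubBatches is an illustrative helper (not part of this PR): it groups samples
// by their blob's encoding parameters, so each SubBatch holds blobs that can be
// verified together by the universal verifier.
func buildSubBatches(samplesByBlob [][]Sample, paramsByBlob []EncodingParams) map[EncodingParams]*SubBatch {
    subBatches := make(map[EncodingParams]*SubBatch)
    for blobIndex, samples := range samplesByBlob {
        params := paramsByBlob[blobIndex]
        sb, ok := subBatches[params]
        if !ok {
            sb = &SubBatch{}
            subBatches[params] = sb
        }
        sb.Samples = append(sb.Samples, samples...)
        sb.NumBlobs++ // every sample of a given blob lands in the same sub-batch
    }
    return subBatches
}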
3 changes: 3 additions & 0 deletions core/encoding.go
@@ -38,6 +38,9 @@ type Encoder interface {
    // VerifyChunks takes in the chunks, indices, commitments, and encoding parameters and returns an error if the chunks are invalid.
    VerifyChunks(chunks []*Chunk, indices []ChunkNumber, commitments BlobCommitments, params EncodingParams) error

    // UniversalVerifySubBatch takes in the encoding parameters, samples, and the number of blobs, and returns an error if a chunk in any sample is invalid.
    UniversalVerifySubBatch(params EncodingParams, samples []Sample, numBlobs int) error

    // VerifyBlobLength takes in the commitments and returns an error if the blob length is invalid.
    VerifyBlobLength(commitments BlobCommitments) error

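A caller-side sketch of the new interface method (illustrative, not from this PR): once samples are grouped into sub-batches, each group is verified with a single call, so verification is amortized across all blobs that share encoding parameters. The verifySubBatches helper below is hypothetical and assumes fmt is imported:

// verifySubBatches is a hypothetical caller of the new interface method:
// one UniversalVerifySubBatch call per distinct set of encoding parameters.
func verifySubBatches(enc Encoder, subBatches map[EncodingParams]*SubBatch) error {
    for params, sb := range subBatches {
        if err := enc.UniversalVerifySubBatch(params, sb.Samples, sb.NumBlobs); err != nil {
            return fmt.Errorf("sub-batch verification failed: %w", err)
        }
    }
    return nil
}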
19 changes: 19 additions & 0 deletions core/encoding/encoder.go
@@ -131,6 +131,25 @@ func (e *Encoder) VerifyChunks(chunks []*core.Chunk, indices []core.ChunkNumber,

}

// UniversalVerifySubBatch converts the samples into the structs understood by the crypto library and delegates batch verification to it
func (e *Encoder) UniversalVerifySubBatch(params core.EncodingParams, samplesCore []core.Sample, numBlobs int) error {
    encParams := toEncParams(params)
    samples := make([]kzgEncoder.Sample, len(samplesCore))

    for i, sc := range samplesCore {
        sample := kzgEncoder.Sample{
            Commitment: *sc.Commitment.G1Point,
            Proof:      sc.Chunk.Proof,
            Row:        sc.BlobIndex,
            Coeffs:     sc.Chunk.Coeffs,
            X:          sc.AssignmentIndex,
        }
        samples[i] = sample
    }

    return e.EncoderGroup.UniversalVerify(encParams, samples, numBlobs)
}

// Decode takes in the chunks, indices, and encoding parameters and returns the decoded blob
// The result is trimmed to the given maxInputSize.
func (e *Encoder) Decode(chunks []*core.Chunk, indices []core.ChunkNumber, params core.EncodingParams, maxInputSize uint64) ([]byte, error) {
6 changes: 6 additions & 0 deletions core/encoding/mock_encoder.go
@@ -27,6 +27,12 @@ func (e *MockEncoder) VerifyChunks(chunks []*core.Chunk, indices []core.ChunkNum
    return args.Error(0)
}

func (e *MockEncoder) UniversalVerifySubBatch(params core.EncodingParams, samples []core.Sample, numBlobs int) error {
    args := e.Called(params, samples, numBlobs)
    time.Sleep(e.Delay)
    return args.Error(0)
}

func (e *MockEncoder) VerifyBlobLength(commitments core.BlobCommitments) error {

    args := e.Called(commitments)
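A minimal usage sketch for the new mock method (illustrative, not from this PR; assumes the package is imported as encoding and that MockEncoder embeds testify's mock.Mock, as the Called/Delay usage above suggests):

// illustrative test wiring only: stub the new mock method to report success
func TestUniversalVerifySubBatchStub(t *testing.T) {
    mockEnc := &encoding.MockEncoder{}
    mockEnc.On("UniversalVerifySubBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil)

    err := mockEnc.UniversalVerifySubBatch(core.EncodingParams{}, []core.Sample{}, 0)
    assert.NoError(t, err)
    mockEnc.AssertExpectations(t)
}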
5 changes: 5 additions & 0 deletions core/mock/validator.go
@@ -23,6 +23,11 @@ func NewMockChunkValidator() *MockChunkValidator {
    return &MockChunkValidator{}
}

func (v *MockChunkValidator) ValidateBatch(blobs []*core.BlobMessage, operatorState *core.OperatorState) error {
    args := v.Called(blobs, operatorState)
    return args.Error(0)
}

func (v *MockChunkValidator) ValidateBlob(blob *core.BlobMessage, operatorState *core.OperatorState) error {
    args := v.Called(blob, operatorState)
    return args.Error(0)
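The real ValidateBatch implementation is not shown in this excerpt; conceptually it has to turn each BlobMessage into Sample values before grouping them by encoding parameters. A rough sketch of that step for a single operator, assuming the operator's assignment indices for the blob are already known (the indices parameter below; the PR's actual assignment lookup lives elsewhere) and that the blob's commitment is a *Commitment:

// rough sketch only: building core.Sample values for one operator's chunks of one blob;
// indices is assumed to hold one assignment index per chunk in the operator's bundle
func samplesForBlob(blob *core.BlobMessage, quorum core.QuorumID, blobIndex int, indices []core.ChunkNumber) []core.Sample {
    chunks := blob.Bundles[quorum]
    samples := make([]core.Sample, 0, len(chunks))
    for i, chunk := range chunks {
        samples = append(samples, core.Sample{
            Commitment:      blob.BlobHeader.BlobCommitments.Commitment,
            Chunk:           chunk,
            AssignmentIndex: indices[i], // the chunk's position within the blob's full encoding
            BlobIndex:       blobIndex,  // the row used by the universal verifier
        })
    }
    return samples
}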
186 changes: 114 additions & 72 deletions core/test/core_test.go
@@ -67,82 +67,88 @@ func makeTestBlob(t *testing.T, length int, securityParams []*core.SecurityParam
return blob
}

// prepareBatch takes in a single blob, encodes it, generates the associated assignments, and the batch header.
// prepareBatch takes in multiple blobs, encodes them, and generates the associated assignments and the batch header.
// These are the products that a disperser will need in order to disperse data to the DA nodes.
func prepareBatch(t *testing.T, cst core.IndexedChainState, blob core.Blob, quorumIndex uint, quantizationFactor uint, bn uint) (core.EncodedBlob, core.BatchHeader) {
func prepareBatch(t *testing.T, cst core.IndexedChainState, blobs []core.Blob, quorumIndex uint, quantizationFactor uint, bn uint) ([]core.EncodedBlob, core.BatchHeader) {

quorumID := blob.RequestHeader.SecurityParams[quorumIndex].QuorumID
quorums := []core.QuorumID{quorumID}

state, err := cst.GetOperatorState(context.Background(), bn, quorums)
if err != nil {
t.Fatal(err)
batchHeader := core.BatchHeader{
ReferenceBlockNumber: bn,
BatchRoot: [32]byte{},
}

assignments, info, err := asn.GetAssignments(state, quorumID, quantizationFactor)
if err != nil {
t.Fatal(err)
}
numBlob := len(blobs)
var encodedBlobs []core.EncodedBlob = make([]core.EncodedBlob, numBlob)

blobSize := uint(len(blob.Data))
blobLength := core.GetBlobLength(blobSize)
adversaryThreshold := blob.RequestHeader.SecurityParams[quorumIndex].AdversaryThreshold
quorumThreshold := blob.RequestHeader.SecurityParams[quorumIndex].QuorumThreshold
for z, blob := range blobs {
quorumID := blob.RequestHeader.SecurityParams[quorumIndex].QuorumID
quorums := []core.QuorumID{quorumID}

numOperators := uint(len(state.Operators[quorumID]))
state, err := cst.GetOperatorState(context.Background(), bn, quorums)
if err != nil {
t.Fatal(err)
}

chunkLength, err := asn.GetMinimumChunkLength(numOperators, blobLength, quantizationFactor, quorumThreshold, adversaryThreshold)
if err != nil {
t.Fatal(err)
}
assignments, info, err := asn.GetAssignments(state, quorumID, quantizationFactor)
if err != nil {
t.Fatal(err)
}

params, err := core.GetEncodingParams(chunkLength, info.TotalChunks)
if err != nil {
t.Fatal(err)
}
blobSize := uint(len(blob.Data))
blobLength := core.GetBlobLength(blobSize)
adversaryThreshold := blob.RequestHeader.SecurityParams[quorumIndex].AdversaryThreshold
quorumThreshold := blob.RequestHeader.SecurityParams[quorumIndex].QuorumThreshold

commitments, chunks, err := enc.Encode(blob.Data, params)
if err != nil {
t.Fatal(err)
}
numOperators := uint(len(state.Operators[quorumID]))

quorumHeader := &core.BlobQuorumInfo{
SecurityParam: core.SecurityParam{
QuorumID: quorumID,
AdversaryThreshold: adversaryThreshold,
QuorumThreshold: quorumThreshold,
},
QuantizationFactor: quantizationFactor,
EncodedBlobLength: params.ChunkLength * quantizationFactor * numOperators,
}
chunkLength, err := asn.GetMinimumChunkLength(numOperators, blobLength, quantizationFactor, quorumThreshold, adversaryThreshold)
if err != nil {
t.Fatal(err)
}

blobHeader := &core.BlobHeader{
BlobCommitments: core.BlobCommitments{
Commitment: commitments.Commitment,
LengthProof: commitments.LengthProof,
Length: commitments.Length,
},
QuorumInfos: []*core.BlobQuorumInfo{quorumHeader},
}
params, err := core.GetEncodingParams(chunkLength, info.TotalChunks)
if err != nil {
t.Fatal(err)
}

batchHeader := core.BatchHeader{
ReferenceBlockNumber: bn,
BatchRoot: [32]byte{},
}
commitments, chunks, err := enc.Encode(blob.Data, params)
if err != nil {
t.Fatal(err)
}

var encodedBlob core.EncodedBlob = make(map[core.OperatorID]*core.BlobMessage, len(assignments))
quorumHeader := &core.BlobQuorumInfo{
SecurityParam: core.SecurityParam{
QuorumID: quorumID,
AdversaryThreshold: adversaryThreshold,
QuorumThreshold: quorumThreshold,
},
QuantizationFactor: quantizationFactor,
EncodedBlobLength: params.ChunkLength * quantizationFactor * numOperators,
}

for id, assignment := range assignments {
bundles := map[core.QuorumID]core.Bundle{
quorumID: chunks[assignment.StartIndex : assignment.StartIndex+assignment.NumChunks],
blobHeader := &core.BlobHeader{
BlobCommitments: core.BlobCommitments{
Commitment: commitments.Commitment,
LengthProof: commitments.LengthProof,
Length: commitments.Length,
},
QuorumInfos: []*core.BlobQuorumInfo{quorumHeader},
}
encodedBlob[id] = &core.BlobMessage{
BlobHeader: blobHeader,
Bundles: bundles,

var encodedBlob core.EncodedBlob = make(map[core.OperatorID]*core.BlobMessage, len(assignments))
for id, assignment := range assignments {
bundles := map[core.QuorumID]core.Bundle{
quorumID: chunks[assignment.StartIndex : assignment.StartIndex+assignment.NumChunks],
}
encodedBlob[id] = &core.BlobMessage{
BlobHeader: blobHeader,
Bundles: bundles,
}
}
encodedBlobs[z] = encodedBlob

}

return encodedBlob, batchHeader
return encodedBlobs, batchHeader

}

@@ -155,18 +161,38 @@ func checkBatch(t *testing.T, cst core.IndexedChainState, encodedBlob core.Encod
state, _ := cst.GetIndexedOperatorState(context.Background(), header.ReferenceBlockNumber, quorums)

for id := range state.IndexedOperators {

val.UpdateOperatorID(id)
blobMessage := encodedBlob[id]
err := val.ValidateBlob(blobMessage, state.OperatorState)
assert.NoError(t, err)
}

}

// checkBatchByUniversalVerifier runs the verification logic for each DA node in the current OperatorState, and fails the test if any of
// the DA nodes' validation checks fail
func checkBatchByUniversalVerifier(t *testing.T, cst core.IndexedChainState, encodedBlobs []core.EncodedBlob, header core.BatchHeader) {
val := core.NewChunkValidator(enc, asn, cst, [32]byte{})

quorums := []core.QuorumID{0}
state, _ := cst.GetIndexedOperatorState(context.Background(), header.ReferenceBlockNumber, quorums)
numBlob := len(encodedBlobs)

for id := range state.IndexedOperators {
val.UpdateOperatorID(id)
err := val.ValidateBlob(blobMessage, state.OperatorState)
var blobMessages []*core.BlobMessage = make([]*core.BlobMessage, numBlob)
for z, encodedBlob := range encodedBlobs {
blobMessages[z] = encodedBlob[id]
}
err := val.ValidateBatch(blobMessages, state.OperatorState)
assert.NoError(t, err)
}

}

func TestCoreLibrary(t *testing.T) {

numBlob := 1 // must be greater than 0
blobLengths := []int{1, 64, 1000}
quantizationFactors := []uint{1, 10}
operatorCounts := []uint{1, 2, 4, 10, 30}
@@ -184,28 +210,44 @@ func TestCoreLibrary(t *testing.T) {
},
}

for _, blobLength := range blobLengths {
for _, quantizationFactor := range quantizationFactors {
for _, operatorCount := range operatorCounts {
for _, securityParam := range securityParams {
quorumIndex := uint(0)
bn := uint(0)

t.Run(fmt.Sprintf("blobLength=%v, quantizationFactor=%v, operatorCount=%v, securityParams=%v", blobLength, quantizationFactor, operatorCount, securityParam), func(t *testing.T) {
for _, operatorCount := range operatorCounts {
cst, err := mock.NewChainDataMock(core.OperatorIndex(operatorCount))
assert.NoError(t, err)
batches := make([]core.EncodedBlob, 0)
batchHeader := core.BatchHeader{
ReferenceBlockNumber: bn,
BatchRoot: [32]byte{},
}
// the batch can only be tested per operatorCount, because the assignments would otherwise be wrong
for _, blobLength := range blobLengths {

blob := makeTestBlob(t, blobLength, []*core.SecurityParam{securityParam})
for _, quantizationFactor := range quantizationFactors {
for _, securityParam := range securityParams {

cst, err := mock.NewChainDataMock(core.OperatorIndex(operatorCount))
assert.NoError(t, err)
t.Run(fmt.Sprintf("blobLength=%v, quantizationFactor=%v, operatorCount=%v, securityParams=%v", blobLength, quantizationFactor, operatorCount, securityParam), func(t *testing.T) {

quorumIndex := uint(0)
bn := uint(0)
blobs := make([]core.Blob, numBlob)
for i := 0; i < numBlob; i++ {
blobs[i] = makeTestBlob(t, blobLength, []*core.SecurityParam{securityParam})
}

batch, header := prepareBatch(t, cst, blob, quorumIndex, quantizationFactor, bn)
batch, header := prepareBatch(t, cst, blobs, quorumIndex, quantizationFactor, bn)
batches = append(batches, batch...)

checkBatch(t, cst, batch, header)
checkBatch(t, cst, batch[0], header)
})
}

}

}
t.Run(fmt.Sprintf("universal verifier operatorCount=%v over %v blobs", operatorCount, len(batches)), func(t *testing.T) {
checkBatchByUniversalVerifier(t, cst, batches, batchHeader)
})

}

}
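
To exercise both the per-blob path (checkBatch) and the new universal-verifier path locally, the modified test can be run on its own from the repository root, e.g.:

    go test -v -run TestCoreLibrary ./core/test/...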