Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Tests: Batching Benchmarks #9927

Merged
merged 12 commits into from
Mar 28, 2024
233 changes: 233 additions & 0 deletions op-node/benchmarks/batchbuilding_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,233 @@
package benchmarks

import (
"fmt"
"math/big"
"math/rand"
"testing"
"time"

"github.com/ethereum-optimism/optimism/op-batcher/compressor"
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
"github.com/stretchr/testify/require"
)

var (

// compressors used in the benchmark
rc, _ = compressor.NewRatioCompressor(compressor.Config{
TargetOutputSize: 100_000_000_000,
ApproxComprRatio: 0.4,
})
sc, _ = compressor.NewShadowCompressor(compressor.Config{
TargetOutputSize: 100_000_000_000,
})
nc, _ = compressor.NewNonCompressor(compressor.Config{
TargetOutputSize: 100_000_000_000,
})

compressors = map[string]derive.Compressor{
"NonCompressor": nc,
"RatioCompressor": rc,
"ShadowCompressor": sc,
}

// batch types used in the benchmark
batchTypes = []uint{
derive.SpanBatchType,
// uncomment to include singular batches in the benchmark
//derive.SingularBatchType,
axelKingsley marked this conversation as resolved.
Show resolved Hide resolved
}
)

// a test case for the benchmark controls the number of batches and transactions per batch,
// as well as the batch type and compressor used
type BatchingBenchmarkTC struct {
	BatchType  uint   // derive.SpanBatchType or derive.SingularBatchType
	BatchCount int    // number of batches to add to the channel out
	txPerBatch int    // number of random transactions generated per batch
	compKey    string // key into the package-level compressors map
}

// String renders the test case as a human-readable sub-benchmark name,
// including the batch type, transactions per batch, batch count, and compressor.
func (t BatchingBenchmarkTC) String() string {
	btype := ""
	switch t.BatchType {
	case derive.SingularBatchType:
		btype = "Singular"
	case derive.SpanBatchType:
		btype = "Span"
	}
	return fmt.Sprintf("BatchType=%s, txPerBatch=%d, BatchCount=%d, Compressor=%s", btype, t.txPerBatch, t.BatchCount, t.compKey)
}

// BenchmarkChannelOut benchmarks the performance of adding singular batches to a channel out
// this exercises the compression and batching logic, as well as any batch-building logic
// Every Compressor in the compressor map is benchmarked for each test case
// The results of the Benchmark measure *only* the time to add the final batch to the channel out,
// not the time to send all the batches through the channel out
// Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits
func BenchmarkFinalBatchChannelOut(b *testing.B) {
// Targets define the number of batches and transactions per batch to test
type target struct{ bs, tpb int }
targets := []target{
{10, 1},
{100, 1},
{1000, 1},
//{10000, 1},
axelKingsley marked this conversation as resolved.
Show resolved Hide resolved

{10, 100},
{100, 100},
}

// build a set of test cases for each batch type, compressor, and target-pair
tests := []BatchingBenchmarkTC{}
for _, bt := range batchTypes {
for compkey := range compressors {
for _, t := range targets {
tests = append(tests, BatchingBenchmarkTC{bt, t.bs, t.tpb, compkey})
}
}
}

for _, tc := range tests {
chainID := big.NewInt(333)
rng := rand.New(rand.NewSource(0x543331))
// pre-generate batches to keep the benchmark from including the random generation
batches := make([]*derive.SingularBatch, tc.BatchCount)
t := time.Now()
for i := 0; i < tc.BatchCount; i++ {
batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID)
// set the timestamp to increase with each batch
// to leverage optimizations in the Batch Linked List
batches[i].Timestamp = uint64(t.Add(time.Duration(i) * time.Second).Unix())
}
b.Run(tc.String(), func(b *testing.B) {
// reset the compressor used in the test case
for bn := 0; bn < b.N; bn++ {
// don't measure the setup time
b.StopTimer()
compressors[tc.compKey].Reset()
spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID)
cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder)
// add all but the final batch to the channel out
for i := 0; i < tc.BatchCount-1; i++ {
_, err := cout.AddSingularBatch(batches[i], 0)
require.NoError(b, err)
}
// measure the time to add the final batch
b.StartTimer()
// add the final batch to the channel out
_, err := cout.AddSingularBatch(batches[tc.BatchCount-1], 0)
require.NoError(b, err)
}
})
}
}

// BenchmarkAllBatchesChannelOut benchmarks the performance of adding singular batches to a channel out
// this exercises the compression and batching logic, as well as any batch-building logic
// Every Compressor in the compressor map is benchmarked for each test case
// The results of the Benchmark measure the time to add the *all batches* to the channel out,
// not the time to send all the batches through the channel out
// Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits
func BenchmarkAllBatchesChannelOut(b *testing.B) {
// Targets define the number of batches and transactions per batch to test
type target struct{ bs, tpb int }
targets := []target{
{10, 1},
{100, 1},
{1000, 1},
//{10000, 1},
axelKingsley marked this conversation as resolved.
Show resolved Hide resolved

{10, 100},
{100, 100},
}

// build a set of test cases for each batch type, compressor, and target-pair
tests := []BatchingBenchmarkTC{}
for _, bt := range batchTypes {
for compkey := range compressors {
for _, t := range targets {
tests = append(tests, BatchingBenchmarkTC{bt, t.bs, t.tpb, compkey})
}
}
}

for _, tc := range tests {
chainID := big.NewInt(333)
rng := rand.New(rand.NewSource(0x543331))
// pre-generate batches to keep the benchmark from including the random generation
batches := make([]*derive.SingularBatch, tc.BatchCount)
t := time.Now()
for i := 0; i < tc.BatchCount; i++ {
batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID)
// set the timestamp to increase with each batch
// to leverage optimizations in the Batch Linked List
batches[i].Timestamp = uint64(t.Add(time.Duration(i) * time.Second).Unix())
}
b.Run(tc.String(), func(b *testing.B) {
// reset the compressor used in the test case
for bn := 0; bn < b.N; bn++ {
// don't measure the setup time
b.StopTimer()
compressors[tc.compKey].Reset()
spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID)
cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder)
b.StartTimer()
// add all batches to the channel out
for i := 0; i < tc.BatchCount; i++ {
_, err := cout.AddSingularBatch(batches[i], 0)
require.NoError(b, err)
}
}
})
}
}

// BenchmarkGetRawSpanBatch benchmarks the performance of building a span batch from singular batches.
// This exercises the span batch building logic directly.
// The adding of batches to the span batch builder is not included in the benchmark,
// only the final build to RawSpanBatch.
func BenchmarkGetRawSpanBatch(b *testing.B) {
	// Targets define the number of batches and transactions per batch to test
	type target struct{ bs, tpb int }
	targets := []target{
		{10, 1},
		{100, 1},
		{1000, 1},
		{10000, 1},

		{10, 100},
		{100, 100},
		{1000, 100},
	}

	// span-batch building is compressor-independent, so only one
	// (non-)compressor is exercised here
	tests := make([]BatchingBenchmarkTC, 0, len(targets))
	for _, tgt := range targets {
		tests = append(tests, BatchingBenchmarkTC{derive.SpanBatchType, tgt.bs, tgt.tpb, "NonCompressor"})
	}

	for _, tc := range tests {
		chainID := big.NewInt(333)
		rng := rand.New(rand.NewSource(0x543331))
		// pre-generate batches to keep the benchmark from including the random generation
		batches := make([]*derive.SingularBatch, tc.BatchCount)
		now := time.Now()
		for i := range batches {
			batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID)
			// strictly increasing timestamps, one second apart
			batches[i].Timestamp = uint64(now.Add(time.Duration(i) * time.Second).Unix())
		}
		b.Run(tc.String(), func(b *testing.B) {
			for run := 0; run < b.N; run++ {
				// exclude builder setup and batch appending from the measurement
				b.StopTimer()
				builder := derive.NewSpanBatchBuilder(0, chainID)
				for _, batch := range batches {
					builder.AppendSingularBatch(batch, 0)
				}
				// measure only the final build to RawSpanBatch
				b.StartTimer()
				_, err := builder.GetRawSpanBatch()
				require.NoError(b, err)
			}
		})
	}
}
22 changes: 0 additions & 22 deletions op-node/rollup/derive/batch_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,28 +76,6 @@ func RandomRawSpanBatch(rng *rand.Rand, chainId *big.Int) *RawSpanBatch {
return &rawSpanBatch
}

func RandomSingularBatch(rng *rand.Rand, txCount int, chainID *big.Int) *SingularBatch {
signer := types.NewLondonSigner(chainID)
baseFee := big.NewInt(rng.Int63n(300_000_000_000))
txsEncoded := make([]hexutil.Bytes, 0, txCount)
// force each tx to have equal chainID
for i := 0; i < txCount; i++ {
tx := testutils.RandomTx(rng, baseFee, signer)
txEncoded, err := tx.MarshalBinary()
if err != nil {
panic("tx Marshal binary" + err.Error())
}
txsEncoded = append(txsEncoded, hexutil.Bytes(txEncoded))
}
return &SingularBatch{
ParentHash: testutils.RandomHash(rng),
EpochNum: rollup.Epoch(1 + rng.Int63n(100_000_000)),
EpochHash: testutils.RandomHash(rng),
Timestamp: uint64(rng.Int63n(2_000_000_000)),
Transactions: txsEncoded,
}
}

func RandomValidConsecutiveSingularBatches(rng *rand.Rand, chainID *big.Int) []*SingularBatch {
blockCount := 2 + rng.Intn(128)
l2BlockTime := uint64(2)
Expand Down
33 changes: 33 additions & 0 deletions op-node/rollup/derive/batch_test_utils.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
package derive

import (
"math/big"
"math/rand"
sebastianst marked this conversation as resolved.
Show resolved Hide resolved

"github.com/ethereum-optimism/optimism/op-node/rollup"
"github.com/ethereum-optimism/optimism/op-service/testutils"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)

// RandomSingularBatch returns a SingularBatch containing txCount randomly
// generated transactions, all signed with a London signer for chainID.
// The parent hash, epoch, epoch hash, and timestamp are drawn from rng.
// It panics if a generated transaction fails to marshal, since this is a
// test-only helper.
func RandomSingularBatch(rng *rand.Rand, txCount int, chainID *big.Int) *SingularBatch {
	signer := types.NewLondonSigner(chainID)
	baseFee := big.NewInt(rng.Int63n(300_000_000_000))
	txsEncoded := make([]hexutil.Bytes, 0, txCount)
	// force each tx to have equal chainID
	for i := 0; i < txCount; i++ {
		tx := testutils.RandomTx(rng, baseFee, signer)
		txEncoded, err := tx.MarshalBinary()
		if err != nil {
			// separator added so the message doesn't run into the error text
			panic("tx marshal binary: " + err.Error())
		}
		txsEncoded = append(txsEncoded, hexutil.Bytes(txEncoded))
	}
	return &SingularBatch{
		ParentHash:   testutils.RandomHash(rng),
		EpochNum:     rollup.Epoch(1 + rng.Int63n(100_000_000)),
		EpochHash:    testutils.RandomHash(rng),
		Timestamp:    uint64(rng.Int63n(2_000_000_000)),
		Transactions: txsEncoded,
	}
}