From 133b505e23eb9567b1f292e164b55adc63d043b9 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 20 Mar 2024 14:20:03 -0500 Subject: [PATCH 01/12] Add Benchmark for AddSingularBatch --- op-node/benchmarks/batchbuilding_test.go | 128 +++++++++++++++++++++++ 1 file changed, 128 insertions(+) create mode 100644 op-node/benchmarks/batchbuilding_test.go diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go new file mode 100644 index 000000000000..edecf610c798 --- /dev/null +++ b/op-node/benchmarks/batchbuilding_test.go @@ -0,0 +1,128 @@ +package benchmarks + +import ( + "fmt" + "math/big" + "math/rand" + "testing" + + "github.com/ethereum-optimism/optimism/op-batcher/compressor" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +func RandomSingularBatch(rng *rand.Rand, txCount int, chainID *big.Int) *derive.SingularBatch { + signer := types.NewLondonSigner(chainID) + baseFee := big.NewInt(rng.Int63n(300_000_000_000)) + txsEncoded := make([]hexutil.Bytes, 0, txCount) + // force each tx to have equal chainID + for i := 0; i < txCount; i++ { + tx := testutils.RandomTx(rng, baseFee, signer) + txEncoded, err := tx.MarshalBinary() + if err != nil { + panic("tx Marshal binary" + err.Error()) + } + txsEncoded = append(txsEncoded, hexutil.Bytes(txEncoded)) + } + return &derive.SingularBatch{ + ParentHash: testutils.RandomHash(rng), + EpochNum: rollup.Epoch(1 + rng.Int63n(100_000_000)), + EpochHash: testutils.RandomHash(rng), + Timestamp: uint64(rng.Int63n(2_000_000_000)), + Transactions: txsEncoded, + } +} + +type BatchingBenchmarkTC struct { + BatchType uint + BatchCount int + txPerBatch int +} + +func (t BatchingBenchmarkTC) String(cName string) string { + var btype string + if t.BatchType == derive.SingularBatchType { + btype = "Singular" + } + if t.BatchType == derive.SpanBatchType { + btype = "Span" + } + return fmt.Sprintf("BatchType=%s, txPerBatch=%d, BatchCount=%d, Compressor=%s", btype, t.txPerBatch, t.BatchCount, cName) +} + +// BenchmarkChannelOut benchmarks the performance of adding singular batches to a channel out +// this exercises the compression and batching logic, as well as any batch-building logic +// Every Compressor in the compressor map is benchmarked for each test case +func BenchmarkChannelOut(b *testing.B) { + rc, _ := compressor.NewRatioCompressor(compressor.Config{ + TargetFrameSize: 100000, + TargetNumFrames: 1, + ApproxComprRatio: 0.4, + }) + sc, _ := compressor.NewShadowCompressor(compressor.Config{ + TargetFrameSize: 100000, + TargetNumFrames: 1, + ApproxComprRatio: 0.4, + }) + nc, _ := compressor.NewNonCompressor(compressor.Config{ + TargetFrameSize: 100000, + TargetNumFrames: 1, + ApproxComprRatio: 0.4, + }) + + compressors := map[string]derive.Compressor{ + "RatioCompressor": rc, + "ShadowCompressor": sc, + "NonCompressor": nc, + } + + tests := []BatchingBenchmarkTC{ + // Singular Batch Tests + // low-throughput chains + {derive.SingularBatchType, 10, 1}, + {derive.SingularBatchType, 50, 1}, + {derive.SingularBatchType, 100, 1}, + {derive.SingularBatchType, 200, 1}, + {derive.SingularBatchType, 1000, 1}, + + // higher-throughput chains + {derive.SingularBatchType, 10, 10}, + {derive.SingularBatchType, 100, 10}, + + // Span Batch Tests + // 
low-throughput chains + {derive.SpanBatchType, 10, 1}, + {derive.SpanBatchType, 50, 1}, + {derive.SpanBatchType, 100, 1}, + {derive.SpanBatchType, 200, 1}, + {derive.SpanBatchType, 1000, 1}, + + // higher-throughput chains + {derive.SpanBatchType, 10, 10}, + {derive.SpanBatchType, 100, 10}, + } + + // for each compressor, run each the tests + for cName, c := range compressors { + for _, tc := range tests { + chainID := big.NewInt(333) + spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID) + rng := rand.New(rand.NewSource(0x543331)) + c.Reset() + // pre-generate batches to keep the benchmark from including the random generation + batches := make([]*derive.SingularBatch, tc.BatchCount) + for i := 0; i < tc.BatchCount; i++ { + batches[i] = RandomSingularBatch(rng, tc.txPerBatch, chainID) + } + b.Run(tc.String(cName), func(b *testing.B) { + cout, _ := derive.NewChannelOut(tc.BatchType, c, spanBatchBuilder) + for i := 0; i < tc.BatchCount; i++ { + cout.AddSingularBatch(batches[i], 0) + } + }) + } + } +} From cf23d257b040fcab3db3d5d90a01ea9974cfed60 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 20 Mar 2024 14:55:57 -0500 Subject: [PATCH 02/12] update compressor configs ; address PR comments --- op-node/benchmarks/batchbuilding_test.go | 38 ++++++++++++++++-------- 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index edecf610c798..4eb374df0a33 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -58,19 +58,14 @@ func (t BatchingBenchmarkTC) String(cName string) string { // Every Compressor in the compressor map is benchmarked for each test case func BenchmarkChannelOut(b *testing.B) { rc, _ := compressor.NewRatioCompressor(compressor.Config{ - TargetFrameSize: 100000, - TargetNumFrames: 1, + TargetOutputSize: 100_000_000, ApproxComprRatio: 0.4, }) sc, _ := compressor.NewShadowCompressor(compressor.Config{ - TargetFrameSize: 100000, - TargetNumFrames: 1, - ApproxComprRatio: 0.4, + TargetOutputSize: 100_000_000, }) nc, _ := compressor.NewNonCompressor(compressor.Config{ - TargetFrameSize: 100000, - TargetNumFrames: 1, - ApproxComprRatio: 0.4, + TargetOutputSize: 100_000_000, }) compressors := map[string]derive.Compressor{ @@ -87,10 +82,17 @@ func BenchmarkChannelOut(b *testing.B) { {derive.SingularBatchType, 100, 1}, {derive.SingularBatchType, 200, 1}, {derive.SingularBatchType, 1000, 1}, + {derive.SingularBatchType, 10000, 1}, // higher-throughput chains - {derive.SingularBatchType, 10, 10}, - {derive.SingularBatchType, 100, 10}, + {derive.SingularBatchType, 10, 100}, + {derive.SingularBatchType, 100, 100}, + {derive.SingularBatchType, 1000, 100}, + + // even higher-throughput chains + {derive.SingularBatchType, 10, 500}, + {derive.SingularBatchType, 100, 500}, + {derive.SingularBatchType, 1000, 500}, // Span Batch Tests // low-throughput chains @@ -99,10 +101,17 @@ func BenchmarkChannelOut(b *testing.B) { {derive.SpanBatchType, 100, 1}, {derive.SpanBatchType, 200, 1}, {derive.SpanBatchType, 1000, 1}, + {derive.SpanBatchType, 10000, 1}, // higher-throughput chains - {derive.SpanBatchType, 10, 10}, - {derive.SpanBatchType, 100, 10}, + {derive.SpanBatchType, 10, 100}, + {derive.SpanBatchType, 100, 100}, + {derive.SpanBatchType, 1000, 100}, + + // even higher-throughput chains + {derive.SpanBatchType, 10, 500}, + {derive.SpanBatchType, 100, 500}, + {derive.SpanBatchType, 1000, 500}, } // for each compressor, run each the tests @@ -120,6 
+129,11 @@ func BenchmarkChannelOut(b *testing.B) { b.Run(tc.String(cName), func(b *testing.B) { cout, _ := derive.NewChannelOut(tc.BatchType, c, spanBatchBuilder) for i := 0; i < tc.BatchCount; i++ { + // if the channel is full, break out of the loop + // consider removing this if the cost of FullErr() is significant + if cout.FullErr() != nil { + break + } cout.AddSingularBatch(batches[i], 0) } }) From 65d62bc655979961391bd0477dadc1ea57954349 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 20 Mar 2024 17:17:37 -0500 Subject: [PATCH 03/12] Add b.N --- op-node/benchmarks/batchbuilding_test.go | 35 +++++++----------------- 1 file changed, 10 insertions(+), 25 deletions(-) diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index 4eb374df0a33..b4d6a366485b 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" ) func RandomSingularBatch(rng *rand.Rand, txCount int, chainID *big.Int) *derive.SingularBatch { @@ -58,29 +59,27 @@ func (t BatchingBenchmarkTC) String(cName string) string { // Every Compressor in the compressor map is benchmarked for each test case func BenchmarkChannelOut(b *testing.B) { rc, _ := compressor.NewRatioCompressor(compressor.Config{ - TargetOutputSize: 100_000_000, + TargetOutputSize: 100_000_000_000, ApproxComprRatio: 0.4, }) sc, _ := compressor.NewShadowCompressor(compressor.Config{ - TargetOutputSize: 100_000_000, + TargetOutputSize: 100_000_000_000, }) nc, _ := compressor.NewNonCompressor(compressor.Config{ - TargetOutputSize: 100_000_000, + TargetOutputSize: 100_000_000_000, }) compressors := map[string]derive.Compressor{ + "NonCompressor": nc, "RatioCompressor": rc, "ShadowCompressor": sc, - "NonCompressor": nc, } tests := []BatchingBenchmarkTC{ // Singular Batch Tests // low-throughput chains {derive.SingularBatchType, 10, 1}, - {derive.SingularBatchType, 50, 1}, {derive.SingularBatchType, 100, 1}, - {derive.SingularBatchType, 200, 1}, {derive.SingularBatchType, 1000, 1}, {derive.SingularBatchType, 10000, 1}, @@ -89,17 +88,10 @@ func BenchmarkChannelOut(b *testing.B) { {derive.SingularBatchType, 100, 100}, {derive.SingularBatchType, 1000, 100}, - // even higher-throughput chains - {derive.SingularBatchType, 10, 500}, - {derive.SingularBatchType, 100, 500}, - {derive.SingularBatchType, 1000, 500}, - // Span Batch Tests // low-throughput chains {derive.SpanBatchType, 10, 1}, - {derive.SpanBatchType, 50, 1}, {derive.SpanBatchType, 100, 1}, - {derive.SpanBatchType, 200, 1}, {derive.SpanBatchType, 1000, 1}, {derive.SpanBatchType, 10000, 1}, @@ -107,11 +99,6 @@ func BenchmarkChannelOut(b *testing.B) { {derive.SpanBatchType, 10, 100}, {derive.SpanBatchType, 100, 100}, {derive.SpanBatchType, 1000, 100}, - - // even higher-throughput chains - {derive.SpanBatchType, 10, 500}, - {derive.SpanBatchType, 100, 500}, - {derive.SpanBatchType, 1000, 500}, } // for each compressor, run each the tests @@ -127,14 +114,12 @@ func BenchmarkChannelOut(b *testing.B) { batches[i] = RandomSingularBatch(rng, tc.txPerBatch, chainID) } b.Run(tc.String(cName), func(b *testing.B) { - cout, _ := derive.NewChannelOut(tc.BatchType, c, spanBatchBuilder) - for i := 0; i < tc.BatchCount; i++ { - // if the channel is full, break out of the loop - // 
consider removing this if the cost of FullErr() is significant - if cout.FullErr() != nil { - break + for bn := 0; bn < b.N; bn++ { + cout, _ := derive.NewChannelOut(tc.BatchType, c, spanBatchBuilder) + for i := 0; i < tc.BatchCount; i++ { + _, err := cout.AddSingularBatch(batches[i], 0) + require.NoError(b, err) } - cout.AddSingularBatch(batches[i], 0) } }) } From 13e442cd626701956392aa2bfc69a311e0153f42 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 20 Mar 2024 17:30:58 -0500 Subject: [PATCH 04/12] Export RandomSingularBatch through batch_test_util.go --- op-node/benchmarks/batchbuilding_test.go | 28 +------------------ op-node/rollup/derive/batch_test.go | 22 --------------- op-node/rollup/derive/batch_test_utils.go | 33 +++++++++++++++++++++++ 3 files changed, 34 insertions(+), 49 deletions(-) create mode 100644 op-node/rollup/derive/batch_test_utils.go diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index b4d6a366485b..54bda147035f 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -7,36 +7,10 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-batcher/compressor" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-service/testutils" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/require" ) -func RandomSingularBatch(rng *rand.Rand, txCount int, chainID *big.Int) *derive.SingularBatch { - signer := types.NewLondonSigner(chainID) - baseFee := big.NewInt(rng.Int63n(300_000_000_000)) - txsEncoded := make([]hexutil.Bytes, 0, txCount) - // force each tx to have equal chainID - for i := 0; i < txCount; i++ { - tx := testutils.RandomTx(rng, baseFee, signer) - txEncoded, err := tx.MarshalBinary() - if err != nil { - panic("tx Marshal binary" + err.Error()) - } - txsEncoded = append(txsEncoded, hexutil.Bytes(txEncoded)) - } - return &derive.SingularBatch{ - ParentHash: testutils.RandomHash(rng), - EpochNum: rollup.Epoch(1 + rng.Int63n(100_000_000)), - EpochHash: testutils.RandomHash(rng), - Timestamp: uint64(rng.Int63n(2_000_000_000)), - Transactions: txsEncoded, - } -} - type BatchingBenchmarkTC struct { BatchType uint BatchCount int @@ -111,7 +85,7 @@ func BenchmarkChannelOut(b *testing.B) { // pre-generate batches to keep the benchmark from including the random generation batches := make([]*derive.SingularBatch, tc.BatchCount) for i := 0; i < tc.BatchCount; i++ { - batches[i] = RandomSingularBatch(rng, tc.txPerBatch, chainID) + batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID) } b.Run(tc.String(cName), func(b *testing.B) { for bn := 0; bn < b.N; bn++ { diff --git a/op-node/rollup/derive/batch_test.go b/op-node/rollup/derive/batch_test.go index 4668a7e035f7..3dc554a59325 100644 --- a/op-node/rollup/derive/batch_test.go +++ b/op-node/rollup/derive/batch_test.go @@ -76,28 +76,6 @@ func RandomRawSpanBatch(rng *rand.Rand, chainId *big.Int) *RawSpanBatch { return &rawSpanBatch } -func RandomSingularBatch(rng *rand.Rand, txCount int, chainID *big.Int) *SingularBatch { - signer := types.NewLondonSigner(chainID) - baseFee := big.NewInt(rng.Int63n(300_000_000_000)) - txsEncoded := make([]hexutil.Bytes, 0, txCount) - // force each tx to have equal chainID - for i := 0; i < txCount; i++ { - tx := testutils.RandomTx(rng, 
baseFee, signer) - txEncoded, err := tx.MarshalBinary() - if err != nil { - panic("tx Marshal binary" + err.Error()) - } - txsEncoded = append(txsEncoded, hexutil.Bytes(txEncoded)) - } - return &SingularBatch{ - ParentHash: testutils.RandomHash(rng), - EpochNum: rollup.Epoch(1 + rng.Int63n(100_000_000)), - EpochHash: testutils.RandomHash(rng), - Timestamp: uint64(rng.Int63n(2_000_000_000)), - Transactions: txsEncoded, - } -} - func RandomValidConsecutiveSingularBatches(rng *rand.Rand, chainID *big.Int) []*SingularBatch { blockCount := 2 + rng.Intn(128) l2BlockTime := uint64(2) diff --git a/op-node/rollup/derive/batch_test_utils.go b/op-node/rollup/derive/batch_test_utils.go new file mode 100644 index 000000000000..3b931cc4f03b --- /dev/null +++ b/op-node/rollup/derive/batch_test_utils.go @@ -0,0 +1,33 @@ +package derive + +import ( + "math/big" + "math/rand" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +func RandomSingularBatch(rng *rand.Rand, txCount int, chainID *big.Int) *SingularBatch { + signer := types.NewLondonSigner(chainID) + baseFee := big.NewInt(rng.Int63n(300_000_000_000)) + txsEncoded := make([]hexutil.Bytes, 0, txCount) + // force each tx to have equal chainID + for i := 0; i < txCount; i++ { + tx := testutils.RandomTx(rng, baseFee, signer) + txEncoded, err := tx.MarshalBinary() + if err != nil { + panic("tx Marshal binary" + err.Error()) + } + txsEncoded = append(txsEncoded, hexutil.Bytes(txEncoded)) + } + return &SingularBatch{ + ParentHash: testutils.RandomHash(rng), + EpochNum: rollup.Epoch(1 + rng.Int63n(100_000_000)), + EpochHash: testutils.RandomHash(rng), + Timestamp: uint64(rng.Int63n(2_000_000_000)), + Transactions: txsEncoded, + } +} From 84146deb308f87944e2ee0b52194c17443a7bb00 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Thu, 21 Mar 2024 11:48:36 -0500 Subject: [PATCH 05/12] measure only the final batch ; other organizational improvements --- op-node/benchmarks/batchbuilding_test.go | 143 +++++++++++++---------- 1 file changed, 82 insertions(+), 61 deletions(-) diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index 54bda147035f..bb2f4acb9e2d 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -11,13 +11,43 @@ import ( "github.com/stretchr/testify/require" ) +var ( + + // compressors used in the benchmark + rc, _ = compressor.NewRatioCompressor(compressor.Config{ + TargetOutputSize: 100_000_000_000, + ApproxComprRatio: 0.4, + }) + sc, _ = compressor.NewShadowCompressor(compressor.Config{ + TargetOutputSize: 100_000_000_000, + }) + nc, _ = compressor.NewNonCompressor(compressor.Config{ + TargetOutputSize: 100_000_000_000, + }) + + compressors = map[string]derive.Compressor{ + "NonCompressor": nc, + "RatioCompressor": rc, + "ShadowCompressor": sc, + } + + // batch types used in the benchmark + batchTypes = []uint{ + derive.SingularBatchType, + derive.SpanBatchType, + } +) + +// a test case for the benchmark controlls the number of batches and transactions per batch, +// as well as the batch type and compressor used type BatchingBenchmarkTC struct { BatchType uint BatchCount int txPerBatch int + compKey string } -func (t BatchingBenchmarkTC) String(cName string) string { +func (t BatchingBenchmarkTC) String() string { var btype string if 
t.BatchType == derive.SingularBatchType { btype = "Singular" @@ -25,77 +55,68 @@ func (t BatchingBenchmarkTC) String(cName string) string { if t.BatchType == derive.SpanBatchType { btype = "Span" } - return fmt.Sprintf("BatchType=%s, txPerBatch=%d, BatchCount=%d, Compressor=%s", btype, t.txPerBatch, t.BatchCount, cName) + return fmt.Sprintf("BatchType=%s, txPerBatch=%d, BatchCount=%d, Compressor=%s", btype, t.txPerBatch, t.BatchCount, t.compKey) } // BenchmarkChannelOut benchmarks the performance of adding singular batches to a channel out // this exercises the compression and batching logic, as well as any batch-building logic // Every Compressor in the compressor map is benchmarked for each test case +// The results of the Benchmark measure *only* the time to add the final batch to the channel out, +// not the time to send all the batches through the channel out +// Hint: Remove the Start/Stop timers to measure the time to send all the batches through the channel out +// Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits func BenchmarkChannelOut(b *testing.B) { - rc, _ := compressor.NewRatioCompressor(compressor.Config{ - TargetOutputSize: 100_000_000_000, - ApproxComprRatio: 0.4, - }) - sc, _ := compressor.NewShadowCompressor(compressor.Config{ - TargetOutputSize: 100_000_000_000, - }) - nc, _ := compressor.NewNonCompressor(compressor.Config{ - TargetOutputSize: 100_000_000_000, - }) - - compressors := map[string]derive.Compressor{ - "NonCompressor": nc, - "RatioCompressor": rc, - "ShadowCompressor": sc, - } - - tests := []BatchingBenchmarkTC{ - // Singular Batch Tests - // low-throughput chains - {derive.SingularBatchType, 10, 1}, - {derive.SingularBatchType, 100, 1}, - {derive.SingularBatchType, 1000, 1}, - {derive.SingularBatchType, 10000, 1}, - - // higher-throughput chains - {derive.SingularBatchType, 10, 100}, - {derive.SingularBatchType, 100, 100}, - {derive.SingularBatchType, 1000, 100}, - // Span Batch Tests - // low-throughput chains - {derive.SpanBatchType, 10, 1}, - {derive.SpanBatchType, 100, 1}, - {derive.SpanBatchType, 1000, 1}, - {derive.SpanBatchType, 10000, 1}, + // Targets define the number of batches and transactions per batch to test + type target struct{ bs, tpb int } + targets := []target{ + {10, 1}, + {100, 1}, + {1000, 1}, + {10000, 1}, - // higher-throughput chains - {derive.SpanBatchType, 10, 100}, - {derive.SpanBatchType, 100, 100}, - {derive.SpanBatchType, 1000, 100}, + {10, 100}, + {100, 100}, + {1000, 100}, } - // for each compressor, run each the tests - for cName, c := range compressors { - for _, tc := range tests { - chainID := big.NewInt(333) - spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID) - rng := rand.New(rand.NewSource(0x543331)) - c.Reset() - // pre-generate batches to keep the benchmark from including the random generation - batches := make([]*derive.SingularBatch, tc.BatchCount) - for i := 0; i < tc.BatchCount; i++ { - batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID) + // build a set of test cases for each batch type, compressor, and target-pair + tests := []BatchingBenchmarkTC{} + for _, bt := range batchTypes { + for compkey := range compressors { + for _, t := range targets { + tests = append(tests, BatchingBenchmarkTC{bt, t.bs, t.tpb, compkey}) } - b.Run(tc.String(cName), func(b *testing.B) { - for bn := 0; bn < b.N; bn++ { - cout, _ := derive.NewChannelOut(tc.BatchType, c, spanBatchBuilder) - for i := 0; i < tc.BatchCount; i++ { - _, err := 
cout.AddSingularBatch(batches[i], 0) - require.NoError(b, err) - } - } - }) } } + + for _, tc := range tests { + chainID := big.NewInt(333) + spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID) + rng := rand.New(rand.NewSource(0x543331)) + // pre-generate batches to keep the benchmark from including the random generation + batches := make([]*derive.SingularBatch, tc.BatchCount) + for i := 0; i < tc.BatchCount; i++ { + batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID) + } + b.Run(tc.String(), func(b *testing.B) { + // reset the compressor used in the test case + compressors[tc.compKey].Reset() + for bn := 0; bn < b.N; bn++ { + // don't measure the setup time + b.StopTimer() + cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder) + // add all but the final batche to the channel out + for i := 0; i < tc.BatchCount-1; i++ { + _, err := cout.AddSingularBatch(batches[i], 0) + require.NoError(b, err) + } + // measure the time to add the final batch + b.StartTimer() + // add the final batch to the channel out + _, err := cout.AddSingularBatch(batches[tc.BatchCount-1], 0) + require.NoError(b, err) + } + }) + } } From a55886cc7d58e58e7e96b018351da21c72766c25 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Thu, 21 Mar 2024 18:49:58 -0500 Subject: [PATCH 06/12] Add Benchmark for ToRawSpanBatch --- op-node/benchmarks/batchbuilding_test.go | 49 ++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index bb2f4acb9e2d..3243c54ecea6 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -5,6 +5,7 @@ import ( "math/big" "math/rand" "testing" + "time" "github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -33,7 +34,7 @@ var ( // batch types used in the benchmark batchTypes = []uint{ - derive.SingularBatchType, + // derive.SingularBatchType, derive.SpanBatchType, } ) @@ -66,7 +67,6 @@ func (t BatchingBenchmarkTC) String() string { // Hint: Remove the Start/Stop timers to measure the time to send all the batches through the channel out // Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits func BenchmarkChannelOut(b *testing.B) { - // Targets define the number of batches and transactions per batch to test type target struct{ bs, tpb int } targets := []target{ @@ -92,12 +92,12 @@ func BenchmarkChannelOut(b *testing.B) { for _, tc := range tests { chainID := big.NewInt(333) - spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID) rng := rand.New(rand.NewSource(0x543331)) // pre-generate batches to keep the benchmark from including the random generation batches := make([]*derive.SingularBatch, tc.BatchCount) for i := 0; i < tc.BatchCount; i++ { batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID) + batches[i].Timestamp = uint64(time.Now().Add(time.Duration(i) * time.Second).Unix()) } b.Run(tc.String(), func(b *testing.B) { // reset the compressor used in the test case @@ -105,6 +105,7 @@ func BenchmarkChannelOut(b *testing.B) { for bn := 0; bn < b.N; bn++ { // don't measure the setup time b.StopTimer() + spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID) cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder) // add all but the final batche to the channel out for i := 0; i < tc.BatchCount-1; i++ { @@ -120,3 
+121,45 @@ func BenchmarkChannelOut(b *testing.B) { }) } } +func BenchmarkToRawSpanBatch(b *testing.B) { + // Targets define the number of batches and transactions per batch to test + type target struct{ bs, tpb int } + targets := []target{ + {10, 1}, + {100, 1}, + {1000, 1}, + {10000, 1}, + + {10, 100}, + {100, 100}, + {1000, 100}, + } + + tests := []BatchingBenchmarkTC{} + for _, t := range targets { + tests = append(tests, BatchingBenchmarkTC{derive.SpanBatchType, t.bs, t.tpb, "NonCompressor"}) + } + + for _, tc := range tests { + chainID := big.NewInt(333) + rng := rand.New(rand.NewSource(0x543331)) + // pre-generate batches to keep the benchmark from including the random generation + batches := make([]*derive.SingularBatch, tc.BatchCount) + for i := 0; i < tc.BatchCount; i++ { + batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID) + batches[i].Timestamp = uint64(time.Now().Add(time.Duration(i) * time.Second).Unix()) + } + b.Run(tc.String(), func(b *testing.B) { + for bn := 0; bn < b.N; bn++ { + // don't measure the setup time + b.StopTimer() + spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID) + for i := 0; i < tc.BatchCount; i++ { + spanBatchBuilder.AppendSingularBatch(batches[i], 0) + } + b.StartTimer() + spanBatchBuilder.GetRawSpanBatch() + } + }) + } +} From 375270b330e05b619f4c85e359847b382bc83e2a Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Fri, 22 Mar 2024 13:06:36 -0500 Subject: [PATCH 07/12] update tests --- op-node/benchmarks/batchbuilding_test.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index 3243c54ecea6..b9b4fa46fe75 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -34,7 +34,7 @@ var ( // batch types used in the benchmark batchTypes = []uint{ - // derive.SingularBatchType, + derive.SingularBatchType, derive.SpanBatchType, } ) @@ -95,16 +95,19 @@ func BenchmarkChannelOut(b *testing.B) { rng := rand.New(rand.NewSource(0x543331)) // pre-generate batches to keep the benchmark from including the random generation batches := make([]*derive.SingularBatch, tc.BatchCount) + t := time.Now() for i := 0; i < tc.BatchCount; i++ { batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID) - batches[i].Timestamp = uint64(time.Now().Add(time.Duration(i) * time.Second).Unix()) + // set the timestamp to increase with each batch + // to leverage optimizations in the Batch Linked List + batches[i].Timestamp = uint64(t.Add(time.Duration(i) * time.Second).Unix()) } b.Run(tc.String(), func(b *testing.B) { // reset the compressor used in the test case - compressors[tc.compKey].Reset() for bn := 0; bn < b.N; bn++ { // don't measure the setup time b.StopTimer() + compressors[tc.compKey].Reset() spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID) cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder) // add all but the final batche to the channel out @@ -121,7 +124,7 @@ func BenchmarkChannelOut(b *testing.B) { }) } } -func BenchmarkToRawSpanBatch(b *testing.B) { +func BenchmarkGetRawSpanBatch(b *testing.B) { // Targets define the number of batches and transactions per batch to test type target struct{ bs, tpb int } targets := []target{ @@ -145,9 +148,10 @@ func BenchmarkToRawSpanBatch(b *testing.B) { rng := rand.New(rand.NewSource(0x543331)) // pre-generate batches to keep the benchmark from including the random generation batches := 
make([]*derive.SingularBatch, tc.BatchCount) + t := time.Now() for i := 0; i < tc.BatchCount; i++ { batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID) - batches[i].Timestamp = uint64(time.Now().Add(time.Duration(i) * time.Second).Unix()) + batches[i].Timestamp = uint64(t.Add(time.Duration(i) * time.Second).Unix()) } b.Run(tc.String(), func(b *testing.B) { for bn := 0; bn < b.N; bn++ { From 95b884a84a3ea32f015407202f10ef6279695b91 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Fri, 22 Mar 2024 15:34:54 -0500 Subject: [PATCH 08/12] minor fixup --- op-node/benchmarks/batchbuilding_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index b9b4fa46fe75..a86011b7f96d 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -39,7 +39,7 @@ var ( } ) -// a test case for the benchmark controlls the number of batches and transactions per batch, +// a test case for the benchmark controls the number of batches and transactions per batch, // as well as the batch type and compressor used type BatchingBenchmarkTC struct { BatchType uint @@ -162,7 +162,8 @@ func BenchmarkGetRawSpanBatch(b *testing.B) { spanBatchBuilder.AppendSingularBatch(batches[i], 0) } b.StartTimer() - spanBatchBuilder.GetRawSpanBatch() + _, err := spanBatchBuilder.GetRawSpanBatch() + require.NoError(b, err) } }) } From 52c08171611fd1abc7dd8db6a2bfcecafe16b991 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Mon, 25 Mar 2024 09:39:29 -0500 Subject: [PATCH 09/12] Add Benchmark for adding *All* Span Batches --- op-node/benchmarks/batchbuilding_test.go | 70 +++++++++++++++++++++++- 1 file changed, 68 insertions(+), 2 deletions(-) diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index a86011b7f96d..63bb29caf7a7 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -64,9 +64,8 @@ func (t BatchingBenchmarkTC) String() string { // Every Compressor in the compressor map is benchmarked for each test case // The results of the Benchmark measure *only* the time to add the final batch to the channel out, // not the time to send all the batches through the channel out -// Hint: Remove the Start/Stop timers to measure the time to send all the batches through the channel out // Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits -func BenchmarkChannelOut(b *testing.B) { +func BenchmarkFinalBatchChannelOut(b *testing.B) { // Targets define the number of batches and transactions per batch to test type target struct{ bs, tpb int } targets := []target{ @@ -124,6 +123,73 @@ func BenchmarkChannelOut(b *testing.B) { }) } } + +// BenchmarkAllBatchesChannelOut benchmarks the performance of adding singular batches to a channel out +// this exercises the compression and batching logic, as well as any batch-building logic +// Every Compressor in the compressor map is benchmarked for each test case +// The results of the Benchmark measure the time to add the *all batches* to the channel out, +// not the time to send all the batches through the channel out +// Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits +func BenchmarkAllBatchesChannelOut(b *testing.B) { + // Targets define the number of batches and transactions per batch to test + type target struct{ bs, tpb int } + targets := []target{ + {10, 1}, + {100, 1}, + 
{1000, 1}, + {10000, 1}, + + {10, 100}, + {100, 100}, + {1000, 100}, + } + + // build a set of test cases for each batch type, compressor, and target-pair + tests := []BatchingBenchmarkTC{} + for _, bt := range batchTypes { + for compkey := range compressors { + for _, t := range targets { + tests = append(tests, BatchingBenchmarkTC{bt, t.bs, t.tpb, compkey}) + } + } + } + + for _, tc := range tests { + chainID := big.NewInt(333) + rng := rand.New(rand.NewSource(0x543331)) + // pre-generate batches to keep the benchmark from including the random generation + batches := make([]*derive.SingularBatch, tc.BatchCount) + t := time.Now() + for i := 0; i < tc.BatchCount; i++ { + batches[i] = derive.RandomSingularBatch(rng, tc.txPerBatch, chainID) + // set the timestamp to increase with each batch + // to leverage optimizations in the Batch Linked List + batches[i].Timestamp = uint64(t.Add(time.Duration(i) * time.Second).Unix()) + } + b.Run(tc.String(), func(b *testing.B) { + // reset the compressor used in the test case + for bn := 0; bn < b.N; bn++ { + // don't measure the setup time + b.StopTimer() + compressors[tc.compKey].Reset() + spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID) + cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder) + // add all but the final batche to the channel out + // measure the time to add the final batch + b.StartTimer() + // add the final batch to the channel out + for i := 0; i < tc.BatchCount; i++ { + _, err := cout.AddSingularBatch(batches[i], 0) + require.NoError(b, err) + } + } + }) + } +} + +// BenchmarkGetRawSpanBatch benchmarks the performance of building a span batch from singular batches +// this exercises the span batch building logic directly +// The adding of batches to the span batch builder is not included in the benchmark, only the final build to RawSpanBatch func BenchmarkGetRawSpanBatch(b *testing.B) { // Targets define the number of batches and transactions per batch to test type target struct{ bs, tpb int } From 296fb0916934431979fdf7259f5e39f0e6a33202 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 27 Mar 2024 09:42:56 -0500 Subject: [PATCH 10/12] comment fixups --- op-node/benchmarks/batchbuilding_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index 63bb29caf7a7..6300f0446290 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -109,7 +109,7 @@ func BenchmarkFinalBatchChannelOut(b *testing.B) { compressors[tc.compKey].Reset() spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID) cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder) - // add all but the final batche to the channel out + // add all but the final batch to the channel out for i := 0; i < tc.BatchCount-1; i++ { _, err := cout.AddSingularBatch(batches[i], 0) require.NoError(b, err) @@ -174,10 +174,8 @@ func BenchmarkAllBatchesChannelOut(b *testing.B) { compressors[tc.compKey].Reset() spanBatchBuilder := derive.NewSpanBatchBuilder(0, chainID) cout, _ := derive.NewChannelOut(tc.BatchType, compressors[tc.compKey], spanBatchBuilder) - // add all but the final batche to the channel out - // measure the time to add the final batch b.StartTimer() - // add the final batch to the channel out + // add all batches to the channel out for i := 0; i < tc.BatchCount; i++ { _, err := cout.AddSingularBatch(batches[i], 0) require.NoError(b, err) 
From 51fb6f942755b2bee4b05d9b1864f13ace09a570 Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 27 Mar 2024 14:22:18 -0500 Subject: [PATCH 11/12] narrow tests to only test span batches that won't exceed RLP limit --- op-node/benchmarks/batchbuilding_test.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index 6300f0446290..a54992615648 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -34,8 +34,9 @@ var ( // batch types used in the benchmark batchTypes = []uint{ - derive.SingularBatchType, derive.SpanBatchType, + // uncomment to include singular batches in the benchmark + //derive.SingularBatchType, } ) @@ -72,11 +73,10 @@ func BenchmarkFinalBatchChannelOut(b *testing.B) { {10, 1}, {100, 1}, {1000, 1}, - {10000, 1}, + //{10000, 1}, {10, 100}, {100, 100}, - {1000, 100}, } // build a set of test cases for each batch type, compressor, and target-pair @@ -137,11 +137,10 @@ func BenchmarkAllBatchesChannelOut(b *testing.B) { {10, 1}, {100, 1}, {1000, 1}, - {10000, 1}, + //{10000, 1}, {10, 100}, {100, 100}, - {1000, 100}, } // build a set of test cases for each batch type, compressor, and target-pair From 43d5248861fc1d7fb2627c30a75651b1faa604ac Mon Sep 17 00:00:00 2001 From: axelKingsley Date: Wed, 27 Mar 2024 19:00:34 -0500 Subject: [PATCH 12/12] address pr comments --- op-node/benchmarks/batchbuilding_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index a54992615648..7b95304acbaa 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -36,6 +36,7 @@ var ( batchTypes = []uint{ derive.SpanBatchType, // uncomment to include singular batches in the benchmark + // singular batches are not included by default because they are not the target of the benchmark //derive.SingularBatchType, } ) @@ -65,7 +66,7 @@ func (t BatchingBenchmarkTC) String() string { // Every Compressor in the compressor map is benchmarked for each test case // The results of the Benchmark measure *only* the time to add the final batch to the channel out, // not the time to send all the batches through the channel out -// Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits +// Hint: Raise the derive.MaxRLPBytesPerChannel to 10_000_000_000 to avoid hitting limits if adding larger test cases func BenchmarkFinalBatchChannelOut(b *testing.B) { // Targets define the number of batches and transactions per batch to test type target struct{ bs, tpb int } @@ -73,7 +74,6 @@ func BenchmarkFinalBatchChannelOut(b *testing.B) { {10, 1}, {100, 1}, {1000, 1}, - //{10000, 1}, {10, 100}, {100, 100}, @@ -137,7 +137,6 @@ func BenchmarkAllBatchesChannelOut(b *testing.B) { {10, 1}, {100, 1}, {1000, 1}, - //{10000, 1}, {10, 100}, {100, 100},
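
---

The measurement approach introduced in PATCH 05 and kept through PATCH 12 relies on Go's benchmark timer controls: all per-iteration setup (compressor Reset, channel construction, adding every batch except the last) runs with the timer stopped, so b.N times only the final AddSingularBatch call. Below is a minimal, self-contained sketch of that timer-isolation pattern using only the standard library. The package, function, and variable names are illustrative stand-ins and are not part of this patch series; it is a pattern sketch, not the PR's implementation.

package benchpattern

import (
	"strings"
	"testing"
)

// BenchmarkFinalWrite isolates the cost of a single final write, mirroring the
// shape of BenchmarkFinalBatchChannelOut above: everything before the last
// write runs with the benchmark timer stopped, so only the final write
// contributes to the reported per-iteration time.
func BenchmarkFinalWrite(b *testing.B) {
	const priorWrites = 1000
	payload := strings.Repeat("x", 256) // stand-in for an encoded singular batch
	for n := 0; n < b.N; n++ {
		// per-iteration setup, excluded from the measurement
		b.StopTimer()
		var sink strings.Builder // stand-in for the channel out / compressor
		for i := 0; i < priorWrites; i++ {
			sink.WriteString(payload)
		}
		// measure only the final write
		b.StartTimer()
		sink.WriteString(payload)
	}
}

A file like this runs with "go test -bench=BenchmarkFinalWrite"; the same invocation pattern applies to the benchmarks added in this series under op-node/benchmarks.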