diff --git a/eth/stagedsync/stage_bodies_test.go b/eth/stagedsync/stage_bodies_test.go
index 872de8191b8..e0c93d766a4 100644
--- a/eth/stagedsync/stage_bodies_test.go
+++ b/eth/stagedsync/stage_bodies_test.go
@@ -28,6 +28,7 @@ import (
"github.com/ledgerwatch/erigon-lib/config3"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
+ "github.com/ledgerwatch/erigon-lib/log/v3"
"github.com/stretchr/testify/require"
"github.com/ledgerwatch/erigon/core/rawdb"
@@ -50,6 +51,9 @@ func testingHeaderBody(t *testing.T) (h *types.Header, b *types.RawBody) {
}
func TestBodiesCanonical(t *testing.T) {
+ defer log.Root().SetHandler(log.Root().GetHandler())
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StderrHandler))
+
m := mock.Mock(t)
tx, err := m.DB.BeginRw(m.Ctx)
require := require.New(t)
diff --git a/eth/stagedsync/stage_bor_heimdall_test.go b/eth/stagedsync/stage_bor_heimdall_test.go
index be13bcc8eac..a3fe1b1e33f 100644
--- a/eth/stagedsync/stage_bor_heimdall_test.go
+++ b/eth/stagedsync/stage_bor_heimdall_test.go
@@ -47,7 +47,7 @@ func TestBorHeimdallForwardPersistsSpans(t *testing.T) {
testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
GenerateChainNumBlocks: numBlocks,
- LogLvl: log.LvlInfo,
+ LogLvl: log.LvlError,
})
// pretend-update previous stage progress
testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks))
@@ -77,7 +77,7 @@ func TestBorHeimdallForwardFetchesFirstSpanDuringSecondSprintStart(t *testing.T)
testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
GenerateChainNumBlocks: numBlocks,
- LogLvl: log.LvlInfo,
+ LogLvl: log.LvlError,
})
// pretend-update previous stage progress
testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks))
@@ -112,7 +112,7 @@ func TestBorHeimdallForwardFetchesFirstSpanAfterSecondSprintStart(t *testing.T)
testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
GenerateChainNumBlocks: numBlocks,
- LogLvl: log.LvlInfo,
+ LogLvl: log.LvlError,
})
// pretend-update previous stage progress
testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks))
@@ -143,7 +143,7 @@ func TestBorHeimdallForwardFetchesNextSpanDuringLastSprintOfCurrentSpan(t *testi
testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
GenerateChainNumBlocks: numBlocks,
- LogLvl: log.LvlInfo,
+ LogLvl: log.LvlError,
})
// pretend-update previous stage progress
testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks))
@@ -174,7 +174,7 @@ func TestBorHeimdallForwardPersistsStateSyncEvents(t *testing.T) {
testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
GenerateChainNumBlocks: numBlocks,
- LogLvl: log.LvlInfo,
+ LogLvl: log.LvlError,
})
// pretend-update previous stage progress
testHarness.SaveStageProgress(ctx, t, stages.Headers, uint64(numBlocks))
@@ -211,7 +211,7 @@ func TestBorHeimdallForwardErrHeaderValidatorsLengthMismatch(t *testing.T) {
testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
GenerateChainNumBlocks: numBlocks,
- LogLvl: log.LvlInfo,
+ LogLvl: log.LvlError,
HeimdallProducersOverride: map[uint64][]valset.Validator{
1: {
*valset.NewValidator(crypto.PubkeyToAddress(validatorKey1.PublicKey), 1),
@@ -236,7 +236,7 @@ func TestBorHeimdallForwardErrHeaderValidatorsBytesMismatch(t *testing.T) {
testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
GenerateChainNumBlocks: numBlocks,
- LogLvl: log.LvlInfo,
+ LogLvl: log.LvlError,
HeimdallProducersOverride: map[uint64][]valset.Validator{
1: {
*valset.NewValidator(crypto.PubkeyToAddress(validatorKey1.PublicKey), 1),
@@ -260,7 +260,7 @@ func TestBorHeimdallForwardDetectsUnauthorizedSignerError(t *testing.T) {
testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
ChainConfig: chainConfig,
GenerateChainNumBlocks: numBlocks,
- LogLvl: log.LvlInfo,
+ LogLvl: log.LvlError,
})
// prepare invalid header and insert it in the db
diff --git a/eth/stagedsync/stage_mining_bor_heimdall_test.go b/eth/stagedsync/stage_mining_bor_heimdall_test.go
index ac3489a960e..4c4dc4626c8 100644
--- a/eth/stagedsync/stage_mining_bor_heimdall_test.go
+++ b/eth/stagedsync/stage_mining_bor_heimdall_test.go
@@ -37,7 +37,7 @@ func TestMiningBorHeimdallForwardPersistsSpans(t *testing.T) {
testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
GenerateChainNumBlocks: numBlocks,
- LogLvl: log.LvlInfo,
+ LogLvl: log.LvlError,
})
// pretend-update previous stage progress
testHarness.SetMiningBlockEmptyHeader(ctx, t, uint64(numBlocks))
@@ -69,7 +69,7 @@ func TestMiningBorHeimdallForwardPersistsStateSyncEvents(t *testing.T) {
testHarness := stagedsynctest.InitHarness(ctx, t, stagedsynctest.HarnessCfg{
ChainConfig: stagedsynctest.BorDevnetChainConfigWithNoBlockSealDelays(),
GenerateChainNumBlocks: numBlocks,
- LogLvl: log.LvlInfo,
+ LogLvl: log.LvlError,
})
// pretend-update previous stage progress
testHarness.SetMiningBlockEmptyHeader(ctx, t, uint64(numBlocks))
diff --git a/go.mod b/go.mod
index bcefb039a8d..4a2778567a5 100644
--- a/go.mod
+++ b/go.mod
@@ -277,7 +277,7 @@ require (
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/mod v0.19.0 // indirect
golang.org/x/text v0.16.0 // indirect
- golang.org/x/tools v0.23.0 // indirect
+ golang.org/x/tools v0.23.0
gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gotest.tools/v3 v3.5.1 // indirect
diff --git a/rlp/encbuffer.go b/rlp/encbuffer.go
new file mode 100644
index 00000000000..d62dcd9b6a1
--- /dev/null
+++ b/rlp/encbuffer.go
@@ -0,0 +1,227 @@
+// Copyright 2014 The go-ethereum Authors
+// (original work)
+// Copyright 2024 The Erigon Authors
+// (modifications)
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <https://www.gnu.org/licenses/>.
+
+package rlp
+
+import (
+ "io"
+ "reflect"
+ "sync"
+)
+
+type encBuffer struct {
+ str []byte // string data, contains everything except list headers
+ lheads []listhead // all list headers
+ lhsize int // sum of sizes of all encoded list headers
+ sizebuf [9]byte // auxiliary buffer for uint encoding
+ bufvalue reflect.Value // used in writeByteArrayCopy
+}
+
+// encBuffers are pooled.
+var encBufferPool = sync.Pool{
+ New: func() interface{} {
+ var bytes []byte
+ return &encBuffer{bufvalue: reflect.ValueOf(&bytes).Elem()}
+ },
+}
+
+func (w *encBuffer) reset() {
+ w.lhsize = 0
+ w.str = w.str[:0]
+ w.lheads = w.lheads[:0]
+}
+
+// encBuffer implements io.Writer so it can be passed into EncodeRLP.
+func (w *encBuffer) Write(b []byte) (int, error) {
+ w.str = append(w.str, b...)
+ return len(b), nil
+}
+
+func (w *encBuffer) encode(val interface{}) error {
+ rval := reflect.ValueOf(val)
+ writer, err := cachedWriter(rval.Type())
+ if err != nil {
+ return err
+ }
+ return writer(rval, w)
+}
+
+func (w *encBuffer) encodeStringHeader(size int) {
+ if size < 56 {
+ w.str = append(w.str, EmptyStringCode+byte(size))
+ } else {
+ sizesize := putint(w.sizebuf[1:], uint64(size))
+ w.sizebuf[0] = 0xB7 + byte(sizesize)
+ w.str = append(w.str, w.sizebuf[:sizesize+1]...)
+ }
+}
+
+func (w *encBuffer) encodeString(b []byte) {
+ if len(b) == 1 && b[0] <= 0x7F {
+ // fits single byte, no string header
+ w.str = append(w.str, b[0])
+ } else {
+ w.encodeStringHeader(len(b))
+ w.str = append(w.str, b...)
+ }
+}
+
+func (w *encBuffer) encodeUint(i uint64) {
+ if i == 0 {
+ w.str = append(w.str, 0x80)
+ } else if i < 128 {
+ // fits single byte
+ w.str = append(w.str, byte(i))
+ } else {
+ s := putint(w.sizebuf[1:], i)
+ w.sizebuf[0] = 0x80 + byte(s)
+ w.str = append(w.str, w.sizebuf[:s+1]...)
+ }
+}
+
+// list adds a new list header to the header stack. It returns the index
+// of the header. The caller must call listEnd with this index after encoding
+// the content of the list.
+func (w *encBuffer) list() int {
+ w.lheads = append(w.lheads, listhead{offset: len(w.str), size: w.lhsize})
+ return len(w.lheads) - 1
+}
+
+func (w *encBuffer) listEnd(index int) {
+ lh := &w.lheads[index]
+ lh.size = w.size() - lh.offset - lh.size
+ if lh.size < 56 {
+ w.lhsize++ // length encoded into kind tag
+ } else {
+ w.lhsize += 1 + intsize(uint64(lh.size))
+ }
+}
+
+func (w *encBuffer) size() int {
+ return len(w.str) + w.lhsize
+}
+
+func (w *encBuffer) toBytes() []byte {
+ out := make([]byte, w.size())
+ strpos := 0
+ pos := 0
+ for _, head := range w.lheads {
+ // write string data before header
+ n := copy(out[pos:], w.str[strpos:head.offset])
+ pos += n
+ strpos += n
+ // write the header
+ enc := head.encode(out[pos:])
+ pos += len(enc)
+ }
+ // copy string data after the last list header
+ copy(out[pos:], w.str[strpos:])
+ return out
+}
+
+func (w *encBuffer) toWriter(out io.Writer) (err error) {
+ strpos := 0
+ for _, head := range w.lheads {
+ // write string data before header
+ if head.offset-strpos > 0 {
+ n, nErr := out.Write(w.str[strpos:head.offset])
+ strpos += n
+ if nErr != nil {
+ return nErr
+ }
+ }
+ // write the header
+ enc := head.encode(w.sizebuf[:])
+ if _, wErr := out.Write(enc); wErr != nil {
+ return wErr
+ }
+ }
+ if strpos < len(w.str) {
+ // write string data after the last list header
+ _, err = out.Write(w.str[strpos:])
+ }
+ return err
+}
+
+// encReader is the io.Reader returned by EncodeToReader.
+// It releases its encBuffer at EOF.
+type encReader struct {
+ buf *encBuffer // the buffer we're reading from. this is nil when we're at EOF.
+ lhpos int // index of list header that we're reading
+ strpos int // current position in string buffer
+ piece []byte // next piece to be read
+}
+
+func (r *encReader) Read(b []byte) (n int, err error) {
+ for {
+ if r.piece = r.next(); r.piece == nil {
+ // Put the encode buffer back into the pool at EOF when it
+ // is first encountered. Subsequent calls still return EOF
+ // as the error but the buffer is no longer valid.
+ if r.buf != nil {
+ encBufferPool.Put(r.buf)
+ r.buf = nil
+ }
+ return n, io.EOF
+ }
+ nn := copy(b[n:], r.piece)
+ n += nn
+ if nn < len(r.piece) {
+ // piece didn't fit, see you next time.
+ r.piece = r.piece[nn:]
+ return n, nil
+ }
+ r.piece = nil
+ }
+}
+
+// next returns the next piece of data to be read.
+// it returns nil at EOF.
+func (r *encReader) next() []byte {
+ switch {
+ case r.buf == nil:
+ return nil
+
+ case r.piece != nil:
+ // There is still data available for reading.
+ return r.piece
+
+ case r.lhpos < len(r.buf.lheads):
+ // We're before the last list header.
+ head := r.buf.lheads[r.lhpos]
+ sizebefore := head.offset - r.strpos
+ if sizebefore > 0 {
+ // String data before header.
+ p := r.buf.str[r.strpos:head.offset]
+ r.strpos += sizebefore
+ return p
+ }
+ r.lhpos++
+ return head.encode(r.buf.sizebuf[:])
+
+ case r.strpos < len(r.buf.str):
+ // String data at the end, after all list headers.
+ p := r.buf.str[r.strpos:]
+ r.strpos = len(r.buf.str)
+ return p
+
+ default:
+ return nil
+ }
+}
diff --git a/rlp/encode.go b/rlp/encode.go
index 7298c7fbba6..6c8e5e28385 100644
--- a/rlp/encode.go
+++ b/rlp/encode.go
@@ -21,12 +21,12 @@ package rlp
import (
"encoding/binary"
+ "errors"
"fmt"
"io"
"math/big"
"math/bits"
"reflect"
- "sync"
"github.com/holiman/uint256"
@@ -41,6 +41,8 @@ const (
EmptyListCode = 0xC0
)
+var ErrNegativeBigInt = errors.New("rlp: cannot encode negative big.Int")
+
var (
// Common encoded values.
// These are useful when implementing EncodeRLP.
@@ -68,13 +70,13 @@ type Encoder interface {
//
// Please see package-level documentation of encoding rules.
func Encode(w io.Writer, val interface{}) error {
- if outer, ok := w.(*encbuf); ok {
+ if outer, ok := w.(*encBuffer); ok {
// Encode was called by some type's EncodeRLP.
- // Avoid copying by writing to the outer encbuf directly.
+ // Avoid copying by writing to the outer encBuffer directly.
return outer.encode(val)
}
- eb := encbufPool.Get().(*encbuf)
- defer encbufPool.Put(eb)
+ eb := encBufferPool.Get().(*encBuffer)
+ defer encBufferPool.Put(eb)
eb.reset()
if err := eb.encode(val); err != nil {
return err
@@ -83,9 +85,9 @@ func Encode(w io.Writer, val interface{}) error {
}
func Write(w io.Writer, val []byte) error {
- if outer, ok := w.(*encbuf); ok {
+ if outer, ok := w.(*encBuffer); ok {
// Encode was called by some type's EncodeRLP.
- // Avoid copying by writing to the outer encbuf directly.
+ // Avoid copying by writing to the outer encBuffer directly.
_, err := outer.Write(val)
return err
}
@@ -97,8 +99,8 @@ func Write(w io.Writer, val []byte) error {
// EncodeToBytes returns the RLP encoding of val.
// Please see package-level documentation for the encoding rules.
func EncodeToBytes(val interface{}) ([]byte, error) {
- eb := encbufPool.Get().(*encbuf)
- defer encbufPool.Put(eb)
+ eb := encBufferPool.Get().(*encBuffer)
+ defer encBufferPool.Put(eb)
eb.reset()
if err := eb.encode(val); err != nil {
return nil, err
@@ -112,7 +114,7 @@ func EncodeToBytes(val interface{}) ([]byte, error) {
//
// Please see the documentation of Encode for the encoding rules.
func EncodeToReader(val interface{}) (size int, r io.Reader, err error) {
- eb := encbufPool.Get().(*encbuf)
+ eb := encBufferPool.Get().(*encBuffer)
eb.reset()
if err := eb.encode(val); err != nil {
return 0, nil, err
@@ -152,207 +154,6 @@ func puthead(buf []byte, smalltag, largetag byte, size uint64) int {
return sizesize + 1
}
-type encbuf struct {
- str []byte // string data, contains everything except list headers
- lheads []listhead // all list headers
- lhsize int // sum of sizes of all encoded list headers
- sizebuf [9]byte // auxiliary buffer for uint encoding
- bufvalue reflect.Value // used in writeByteArrayCopy
-}
-
-// encbufs are pooled.
-var encbufPool = sync.Pool{
- New: func() interface{} {
- var bytes []byte
- return &encbuf{bufvalue: reflect.ValueOf(&bytes).Elem()}
- },
-}
-
-func (w *encbuf) reset() {
- w.lhsize = 0
- w.str = w.str[:0]
- w.lheads = w.lheads[:0]
-}
-
-// encbuf implements io.Writer so it can be passed it into EncodeRLP.
-func (w *encbuf) Write(b []byte) (int, error) {
- w.str = append(w.str, b...)
- return len(b), nil
-}
-
-func (w *encbuf) encode(val interface{}) error {
- rval := reflect.ValueOf(val)
- writer, err := cachedWriter(rval.Type())
- if err != nil {
- return err
- }
- return writer(rval, w)
-}
-
-func (w *encbuf) encodeStringHeader(size int) {
- if size < 56 {
- w.str = append(w.str, EmptyStringCode+byte(size))
- } else {
- sizesize := putint(w.sizebuf[1:], uint64(size))
- w.sizebuf[0] = 0xB7 + byte(sizesize)
- w.str = append(w.str, w.sizebuf[:sizesize+1]...)
- }
-}
-
-func (w *encbuf) encodeString(b []byte) {
- if len(b) == 1 && b[0] <= 0x7F {
- // fits single byte, no string header
- w.str = append(w.str, b[0])
- } else {
- w.encodeStringHeader(len(b))
- w.str = append(w.str, b...)
- }
-}
-
-func (w *encbuf) encodeUint(i uint64) {
- if i == 0 {
- w.str = append(w.str, 0x80)
- } else if i < 128 {
- // fits single byte
- w.str = append(w.str, byte(i))
- } else {
- s := putint(w.sizebuf[1:], i)
- w.sizebuf[0] = 0x80 + byte(s)
- w.str = append(w.str, w.sizebuf[:s+1]...)
- }
-}
-
-// list adds a new list header to the header stack. It returns the index
-// of the header. The caller must call listEnd with this index after encoding
-// the content of the list.
-func (w *encbuf) list() int {
- w.lheads = append(w.lheads, listhead{offset: len(w.str), size: w.lhsize})
- return len(w.lheads) - 1
-}
-
-func (w *encbuf) listEnd(index int) {
- lh := &w.lheads[index]
- lh.size = w.size() - lh.offset - lh.size
- if lh.size < 56 {
- w.lhsize++ // length encoded into kind tag
- } else {
- w.lhsize += 1 + intsize(uint64(lh.size))
- }
-}
-
-func (w *encbuf) size() int {
- return len(w.str) + w.lhsize
-}
-
-func (w *encbuf) toBytes() []byte {
- out := make([]byte, w.size())
- strpos := 0
- pos := 0
- for _, head := range w.lheads {
- // write string data before header
- n := copy(out[pos:], w.str[strpos:head.offset])
- pos += n
- strpos += n
- // write the header
- enc := head.encode(out[pos:])
- pos += len(enc)
- }
- // copy string data after the last list header
- copy(out[pos:], w.str[strpos:])
- return out
-}
-
-func (w *encbuf) toWriter(out io.Writer) (err error) {
- strpos := 0
- for _, head := range w.lheads {
- // write string data before header
- if head.offset-strpos > 0 {
- n, nErr := out.Write(w.str[strpos:head.offset])
- strpos += n
- if nErr != nil {
- return nErr
- }
- }
- // write the header
- enc := head.encode(w.sizebuf[:])
- if _, wErr := out.Write(enc); wErr != nil {
- return wErr
- }
- }
- if strpos < len(w.str) {
- // write string data after the last list header
- _, err = out.Write(w.str[strpos:])
- }
- return err
-}
-
-// encReader is the io.Reader returned by EncodeToReader.
-// It releases its encbuf at EOF.
-type encReader struct {
- buf *encbuf // the buffer we're reading from. this is nil when we're at EOF.
- lhpos int // index of list header that we're reading
- strpos int // current position in string buffer
- piece []byte // next piece to be read
-}
-
-func (r *encReader) Read(b []byte) (n int, err error) {
- for {
- if r.piece = r.next(); r.piece == nil {
- // Put the encode buffer back into the pool at EOF when it
- // is first encountered. Subsequent calls still return EOF
- // as the error but the buffer is no longer valid.
- if r.buf != nil {
- encbufPool.Put(r.buf)
- r.buf = nil
- }
- return n, io.EOF
- }
- nn := copy(b[n:], r.piece)
- n += nn
- if nn < len(r.piece) {
- // piece didn't fit, see you next time.
- r.piece = r.piece[nn:]
- return n, nil
- }
- r.piece = nil
- }
-}
-
-// next returns the next piece of data to be read.
-// it returns nil at EOF.
-func (r *encReader) next() []byte {
- switch {
- case r.buf == nil:
- return nil
-
- case r.piece != nil:
- // There is still data available for reading.
- return r.piece
-
- case r.lhpos < len(r.buf.lheads):
- // We're before the last list header.
- head := r.buf.lheads[r.lhpos]
- sizebefore := head.offset - r.strpos
- if sizebefore > 0 {
- // String data before header.
- p := r.buf.str[r.strpos:head.offset]
- r.strpos += sizebefore
- return p
- }
- r.lhpos++
- return head.encode(r.buf.sizebuf[:])
-
- case r.strpos < len(r.buf.str):
- // String data at the end, after all list headers.
- p := r.buf.str[r.strpos:]
- r.strpos = len(r.buf.str)
- return p
-
- default:
- return nil
- }
-}
-
var encoderInterface = reflect.TypeOf(new(Encoder)).Elem()
// makeWriter creates a writer function for the given type.
@@ -394,17 +195,17 @@ func makeWriter(typ reflect.Type, ts tags) (writer, error) {
}
}
-func writeRawValue(val reflect.Value, w *encbuf) error {
+func writeRawValue(val reflect.Value, w *encBuffer) error {
w.str = append(w.str, val.Bytes()...)
return nil
}
-func writeUint(val reflect.Value, w *encbuf) error {
+func writeUint(val reflect.Value, w *encBuffer) error {
w.encodeUint(val.Uint())
return nil
}
-func writeBool(val reflect.Value, w *encbuf) error {
+func writeBool(val reflect.Value, w *encBuffer) error {
if val.Bool() {
w.str = append(w.str, 0x01)
} else {
@@ -413,7 +214,7 @@ func writeBool(val reflect.Value, w *encbuf) error {
return nil
}
-func writeBigIntPtr(val reflect.Value, w *encbuf) error {
+func writeBigIntPtr(val reflect.Value, w *encBuffer) error {
ptr := val.Interface().(*big.Int)
if ptr == nil {
w.str = append(w.str, EmptyStringCode)
@@ -422,7 +223,7 @@ func writeBigIntPtr(val reflect.Value, w *encbuf) error {
return writeBigInt(ptr, w)
}
-func writeBigIntNoPtr(val reflect.Value, w *encbuf) error {
+func writeBigIntNoPtr(val reflect.Value, w *encBuffer) error {
i := val.Interface().(big.Int)
return writeBigInt(&i, w)
}
@@ -430,7 +231,7 @@ func writeBigIntNoPtr(val reflect.Value, w *encbuf) error {
// wordBytes is the number of bytes in a big.Word
const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8
-func writeBigInt(i *big.Int, w *encbuf) error {
+func writeBigInt(i *big.Int, w *encBuffer) error {
if i.Sign() == -1 {
return fmt.Errorf("rlp: cannot encode negative *big.Int")
}
@@ -457,7 +258,7 @@ func writeBigInt(i *big.Int, w *encbuf) error {
return nil
}
-func writeUint256Ptr(val reflect.Value, w *encbuf) error {
+func writeUint256Ptr(val reflect.Value, w *encBuffer) error {
ptr := val.Interface().(*uint256.Int)
if ptr == nil {
w.str = append(w.str, EmptyStringCode)
@@ -466,12 +267,12 @@ func writeUint256Ptr(val reflect.Value, w *encbuf) error {
return writeUint256(ptr, w)
}
-func writeUint256NoPtr(val reflect.Value, w *encbuf) error {
+func writeUint256NoPtr(val reflect.Value, w *encBuffer) error {
i := val.Interface().(uint256.Int)
return writeUint256(&i, w)
}
-func writeUint256(i *uint256.Int, w *encbuf) error {
+func writeUint256(i *uint256.Int, w *encBuffer) error {
if i.IsZero() {
w.str = append(w.str, EmptyStringCode)
} else if i.LtUint64(0x80) {
@@ -486,7 +287,7 @@ func writeUint256(i *uint256.Int, w *encbuf) error {
return nil
}
-func writeBytes(val reflect.Value, w *encbuf) error {
+func writeBytes(val reflect.Value, w *encBuffer) error {
w.encodeString(val.Bytes())
return nil
}
@@ -503,18 +304,18 @@ func makeByteArrayWriter(typ reflect.Type) writer {
if typ.Elem() != byteType {
return writeNamedByteArray
}
- return func(val reflect.Value, w *encbuf) error {
+ return func(val reflect.Value, w *encBuffer) error {
writeByteArrayCopy(length, val, w)
return nil
}
}
-func writeLengthZeroByteArray(val reflect.Value, w *encbuf) error {
+func writeLengthZeroByteArray(val reflect.Value, w *encBuffer) error {
w.str = append(w.str, 0x80)
return nil
}
-func writeLengthOneByteArray(val reflect.Value, w *encbuf) error {
+func writeLengthOneByteArray(val reflect.Value, w *encBuffer) error {
b := byte(val.Index(0).Uint())
if b <= 0x7f {
w.str = append(w.str, b)
@@ -526,7 +327,7 @@ func writeLengthOneByteArray(val reflect.Value, w *encbuf) error {
// writeByteArrayCopy encodes byte arrays using reflect.Copy. This is
// the fast path for [N]byte where N > 1.
-func writeByteArrayCopy(length int, val reflect.Value, w *encbuf) {
+func writeByteArrayCopy(length int, val reflect.Value, w *encBuffer) {
w.encodeStringHeader(length)
offset := len(w.str)
w.str = append(w.str, make([]byte, length)...)
@@ -536,7 +337,7 @@ func writeByteArrayCopy(length int, val reflect.Value, w *encbuf) {
// writeNamedByteArray encodes byte arrays with named element type.
// This exists because reflect.Copy can't be used with such types.
-func writeNamedByteArray(val reflect.Value, w *encbuf) error {
+func writeNamedByteArray(val reflect.Value, w *encBuffer) error {
if !val.CanAddr() {
// Slice requires the value to be addressable.
// Make it addressable by copying.
@@ -550,7 +351,7 @@ func writeNamedByteArray(val reflect.Value, w *encbuf) error {
return nil
}
-func writeString(val reflect.Value, w *encbuf) error {
+func writeString(val reflect.Value, w *encBuffer) error {
s := val.String()
if len(s) == 1 && s[0] <= 0x7f {
// fits single byte, no string header
@@ -562,7 +363,7 @@ func writeString(val reflect.Value, w *encbuf) error {
return nil
}
-func writeInterface(val reflect.Value, w *encbuf) error {
+func writeInterface(val reflect.Value, w *encBuffer) error {
if val.IsNil() {
// Write empty list. This is consistent with the previous RLP
// encoder that we had and should therefore avoid any
@@ -583,7 +384,7 @@ func makeSliceWriter(typ reflect.Type, ts tags) (writer, error) {
if etypeinfo.writerErr != nil {
return nil, etypeinfo.writerErr
}
- writer := func(val reflect.Value, w *encbuf) error {
+ writer := func(val reflect.Value, w *encBuffer) error {
if !ts.tail {
defer w.listEnd(w.list())
}
@@ -613,7 +414,7 @@ func makeStructWriter(typ reflect.Type) (writer, error) {
firstOptionalField := firstOptionalField(fields)
if firstOptionalField == len(fields) {
// This is the writer function for structs without any optional fields.
- writer = func(val reflect.Value, w *encbuf) error {
+ writer = func(val reflect.Value, w *encBuffer) error {
lh := w.list()
for _, f := range fields {
if err := f.info.writer(val.Field(f.index), w); err != nil {
@@ -626,7 +427,7 @@ func makeStructWriter(typ reflect.Type) (writer, error) {
} else {
// If there are any "optional" fields, the writer needs to perform additional
// checks to determine the output list length.
- writer = func(val reflect.Value, w *encbuf) error {
+ writer = func(val reflect.Value, w *encBuffer) error {
lastField := len(fields) - 1
for ; lastField >= firstOptionalField; lastField-- {
if !val.Field(fields[lastField].index).IsZero() {
@@ -659,7 +460,7 @@ func makePtrWriter(typ reflect.Type, ts tags) (writer, error) {
nilKind = defaultNilKind(typ.Elem())
}
- writer := func(val reflect.Value, w *encbuf) error {
+ writer := func(val reflect.Value, w *encBuffer) error {
if val.IsNil() {
if nilKind == String {
w.str = append(w.str, EmptyStringCode)
@@ -675,11 +476,11 @@ func makePtrWriter(typ reflect.Type, ts tags) (writer, error) {
func makeEncoderWriter(typ reflect.Type) writer {
if typ.Implements(encoderInterface) {
- return func(val reflect.Value, w *encbuf) error {
+ return func(val reflect.Value, w *encBuffer) error {
return val.Interface().(Encoder).EncodeRLP(w)
}
}
- w := func(val reflect.Value, w *encbuf) error {
+ w := func(val reflect.Value, w *encBuffer) error {
if !val.CanAddr() {
// package json simply doesn't call MarshalJSON for this case, but encodes the
// value as if it didn't implement the interface. We don't want to handle it that
diff --git a/rlp/encode_test.go b/rlp/encode_test.go
index aeb9caba9a9..89066b846f6 100644
--- a/rlp/encode_test.go
+++ b/rlp/encode_test.go
@@ -453,7 +453,7 @@ func TestEncodeToReaderPiecewise(t *testing.T) {
}
// This is a regression test verifying that encReader
-// returns its encbuf to the pool only once.
+// returns its encBuffer to the pool only once.
func TestEncodeToReaderReturnToPool(t *testing.T) {
buf := make([]byte, 50)
wg := new(sync.WaitGroup)
diff --git a/rlp/internal/rlpstruct/rlpstruct.go b/rlp/internal/rlpstruct/rlpstruct.go
new file mode 100644
index 00000000000..b5e9e0aa7b9
--- /dev/null
+++ b/rlp/internal/rlpstruct/rlpstruct.go
@@ -0,0 +1,213 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package rlpstruct implements struct processing for RLP encoding/decoding.
+//
+// In particular, this package handles all rules around field filtering,
+// struct tags and nil value determination.
+package rlpstruct
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Field represents a struct field.
+type Field struct {
+ Name string
+ Index int
+ Exported bool
+ Type Type
+ Tag string
+}
+
+// Type represents the attributes of a Go type.
+type Type struct {
+ Name string
+ Kind reflect.Kind
+ IsEncoder bool // whether type implements rlp.Encoder
+ IsDecoder bool // whether type implements rlp.Decoder
+ Elem *Type // non-nil for Kind values of Ptr, Slice, Array
+}
+
+// DefaultNilValue determines whether a nil pointer to t encodes/decodes
+// as an empty string or empty list.
+func (t Type) DefaultNilValue() NilKind {
+ k := t.Kind
+ if isUint(k) || k == reflect.String || k == reflect.Bool || isByteArray(t) {
+ return NilKindString
+ }
+ return NilKindList
+}
+
+// NilKind is the RLP value encoded in place of nil pointers.
+type NilKind uint8
+
+const (
+ NilKindString NilKind = 0x80
+ NilKindList NilKind = 0xC0
+)
+
+// Tags represents struct tags.
+type Tags struct {
+ // rlp:"nil" controls whether empty input results in a nil pointer.
+ // nilKind is the kind of empty value allowed for the field.
+ NilKind NilKind
+ NilOK bool
+
+ // rlp:"optional" allows for a field to be missing in the input list.
+ // If this is set, all subsequent fields must also be optional.
+ Optional bool
+
+ // rlp:"tail" controls whether this field swallows additional list elements. It can
+ // only be set for the last field, which must be of slice type.
+ Tail bool
+
+ // rlp:"-" ignores fields.
+ Ignored bool
+}
+
+// TagError is raised for invalid struct tags.
+type TagError struct {
+ StructType string
+
+ // These are set by this package.
+ Field string
+ Tag string
+ Err string
+}
+
+func (e TagError) Error() string {
+ field := "field " + e.Field
+ if e.StructType != "" {
+ field = e.StructType + "." + e.Field
+ }
+ return fmt.Sprintf("rlp: invalid struct tag %q for %s (%s)", e.Tag, field, e.Err)
+}
+
+// ProcessFields filters the given struct fields, returning only fields
+// that should be considered for encoding/decoding.
+func ProcessFields(allFields []Field) ([]Field, []Tags, error) {
+ lastPublic := lastPublicField(allFields)
+
+ // Gather all exported fields and their tags.
+ var fields []Field //nolint:prealloc
+ var tags []Tags //nolint:prealloc
+ for _, field := range allFields {
+ if !field.Exported {
+ continue
+ }
+ ts, err := parseTag(field, lastPublic)
+ if err != nil {
+ return nil, nil, err
+ }
+ if ts.Ignored {
+ continue
+ }
+ fields = append(fields, field)
+ tags = append(tags, ts)
+ }
+
+ // Verify optional field consistency. If any optional field exists,
+ // all fields after it must also be optional. Note: optional + tail
+ // is supported.
+ var anyOptional bool
+ var firstOptionalName string
+ for i, ts := range tags {
+ name := fields[i].Name
+ if ts.Optional || ts.Tail {
+ if !anyOptional {
+ firstOptionalName = name
+ }
+ anyOptional = true
+ } else {
+ if anyOptional {
+ msg := fmt.Sprintf("must be optional because preceding field %q is optional", firstOptionalName)
+ return nil, nil, TagError{Field: name, Err: msg}
+ }
+ }
+ }
+ return fields, tags, nil
+}
+
+func parseTag(field Field, lastPublic int) (Tags, error) {
+ name := field.Name
+ tag := reflect.StructTag(field.Tag)
+ var ts Tags
+ for _, t := range strings.Split(tag.Get("rlp"), ",") {
+ switch t = strings.TrimSpace(t); t {
+ case "":
+ // empty tag is allowed for some reason
+ case "-":
+ ts.Ignored = true
+ case "nil", "nilString", "nilList":
+ ts.NilOK = true
+ if field.Type.Kind != reflect.Ptr {
+ return ts, TagError{Field: name, Tag: t, Err: "field is not a pointer"}
+ }
+ switch t {
+ case "nil":
+ ts.NilKind = field.Type.Elem.DefaultNilValue()
+ case "nilString":
+ ts.NilKind = NilKindString
+ case "nilList":
+ ts.NilKind = NilKindList
+ }
+ case "optional":
+ ts.Optional = true
+ if ts.Tail {
+ return ts, TagError{Field: name, Tag: t, Err: `also has "tail" tag`}
+ }
+ case "tail":
+ ts.Tail = true
+ if field.Index != lastPublic {
+ return ts, TagError{Field: name, Tag: t, Err: "must be on last field"}
+ }
+ if ts.Optional {
+ return ts, TagError{Field: name, Tag: t, Err: `also has "optional" tag`}
+ }
+ if field.Type.Kind != reflect.Slice {
+ return ts, TagError{Field: name, Tag: t, Err: "field type is not slice"}
+ }
+ default:
+ return ts, TagError{Field: name, Tag: t, Err: "unknown tag"}
+ }
+ }
+ return ts, nil
+}
+
+func lastPublicField(fields []Field) int {
+ last := 0
+ for _, f := range fields {
+ if f.Exported {
+ last = f.Index
+ }
+ }
+ return last
+}
+
+func isUint(k reflect.Kind) bool {
+ return k >= reflect.Uint && k <= reflect.Uintptr
+}
+
+func isByte(typ Type) bool {
+ return typ.Kind == reflect.Uint8 && !typ.IsEncoder
+}
+
+func isByteArray(typ Type) bool {
+ return (typ.Kind == reflect.Slice || typ.Kind == reflect.Array) && isByte(*typ.Elem)
+}
diff --git a/rlp/rlpgen/gen.go b/rlp/rlpgen/gen.go
new file mode 100644
index 00000000000..9c53943db72
--- /dev/null
+++ b/rlp/rlpgen/gen.go
@@ -0,0 +1,803 @@
+// Copyright 2014 The go-ethereum Authors
+// (original work)
+// Copyright 2024 The Erigon Authors
+// (modifications)
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "go/types"
+ "sort"
+
+ "github.com/ledgerwatch/erigon/rlp/internal/rlpstruct"
+)
+
+// buildContext keeps the data needed for make*Op.
+type buildContext struct {
+ topType *types.Named // the type we're creating methods for
+
+ encoderIface *types.Interface
+ decoderIface *types.Interface
+ rawValueType *types.Named
+
+ typeToStructCache map[types.Type]*rlpstruct.Type
+}
+
+func newBuildContext(packageRLP *types.Package) *buildContext {
+ enc := packageRLP.Scope().Lookup("Encoder").Type().Underlying()
+ dec := packageRLP.Scope().Lookup("Decoder").Type().Underlying()
+ rawv := packageRLP.Scope().Lookup("RawValue").Type()
+ return &buildContext{
+ typeToStructCache: make(map[types.Type]*rlpstruct.Type),
+ encoderIface: enc.(*types.Interface),
+ decoderIface: dec.(*types.Interface),
+ rawValueType: rawv.(*types.Named),
+ }
+}
+
+func (bctx *buildContext) isEncoder(typ types.Type) bool {
+ return types.Implements(typ, bctx.encoderIface)
+}
+
+func (bctx *buildContext) isDecoder(typ types.Type) bool {
+ return types.Implements(typ, bctx.decoderIface)
+}
+
+// typeToStructType converts typ to rlpstruct.Type.
+func (bctx *buildContext) typeToStructType(typ types.Type) *rlpstruct.Type {
+ if prev := bctx.typeToStructCache[typ]; prev != nil {
+ return prev // short-circuit for recursive types.
+ }
+
+ // Resolve named types to their underlying type, but keep the name.
+ name := types.TypeString(typ, nil)
+ for {
+ utype := typ.Underlying()
+ if utype == typ {
+ break
+ }
+ typ = utype
+ }
+
+ // Create the type and store it in cache.
+ t := &rlpstruct.Type{
+ Name: name,
+ Kind: typeReflectKind(typ),
+ IsEncoder: bctx.isEncoder(typ),
+ IsDecoder: bctx.isDecoder(typ),
+ }
+ bctx.typeToStructCache[typ] = t
+
+ // Assign element type.
+ switch typ.(type) {
+ case *types.Array, *types.Slice, *types.Pointer:
+ etype := typ.(interface{ Elem() types.Type }).Elem()
+ t.Elem = bctx.typeToStructType(etype)
+ }
+ return t
+}
+
+// genContext is passed to the gen* methods of op when generating
+// the output code. It tracks packages to be imported by the output
+// file and assigns unique names of temporary variables.
+type genContext struct {
+ inPackage *types.Package
+ imports map[string]struct{}
+ tempCounter int
+}
+
+func newGenContext(inPackage *types.Package) *genContext {
+ return &genContext{
+ inPackage: inPackage,
+ imports: make(map[string]struct{}),
+ }
+}
+
+func (ctx *genContext) temp() string {
+ v := fmt.Sprintf("_tmp%d", ctx.tempCounter)
+ ctx.tempCounter++
+ return v
+}
+
+func (ctx *genContext) resetTemp() {
+ ctx.tempCounter = 0
+}
+
+func (ctx *genContext) addImport(path string) {
+ if path == ctx.inPackage.Path() {
+ return // avoid importing the package that we're generating in.
+ }
+ // TODO: renaming?
+ ctx.imports[path] = struct{}{}
+}
+
+// importsList returns all packages that need to be imported.
+func (ctx *genContext) importsList() []string {
+ imp := make([]string, 0, len(ctx.imports))
+ for k := range ctx.imports {
+ imp = append(imp, k)
+ }
+ sort.Strings(imp)
+ return imp
+}
+
+// qualify is the types.Qualifier used for printing types.
+func (ctx *genContext) qualify(pkg *types.Package) string {
+ if pkg.Path() == ctx.inPackage.Path() {
+ return ""
+ }
+ ctx.addImport(pkg.Path())
+ // TODO: renaming?
+ return pkg.Name()
+}
+
+type op interface {
+ // genWrite creates the encoder. The generated code should write v,
+ // which is any Go expression, to the rlp.EncoderBuffer 'w'.
+ genWrite(ctx *genContext, v string) string
+
+ // genDecode creates the decoder. The generated code should read
+ // a value from the rlp.Stream 'dec' and store it to dst.
+ genDecode(ctx *genContext) (string, string)
+}
+
+// basicOp handles basic types bool, uint*, string.
+type basicOp struct {
+ typ types.Type
+ writeMethod string // EncoderBuffer writer method name
+ writeArgType types.Type // parameter type of writeMethod
+ decMethod string
+ decResultType types.Type // return type of decMethod
+ decUseBitSize bool // if true, result bit size is appended to decMethod
+}
+
+func (*buildContext) makeBasicOp(typ *types.Basic) (op, error) {
+ op := basicOp{typ: typ}
+ kind := typ.Kind()
+ switch {
+ case kind == types.Bool:
+ op.writeMethod = "WriteBool"
+ op.writeArgType = types.Typ[types.Bool]
+ op.decMethod = "Bool"
+ op.decResultType = types.Typ[types.Bool]
+ case kind >= types.Uint8 && kind <= types.Uint64:
+ op.writeMethod = "WriteUint64"
+ op.writeArgType = types.Typ[types.Uint64]
+ op.decMethod = "Uint"
+ op.decResultType = typ
+ op.decUseBitSize = true
+ case kind == types.String:
+ op.writeMethod = "WriteString"
+ op.writeArgType = types.Typ[types.String]
+ op.decMethod = "String"
+ op.decResultType = types.Typ[types.String]
+ default:
+ return nil, fmt.Errorf("unhandled basic type: %v", typ)
+ }
+ return op, nil
+}
+
+func (*buildContext) makeByteSliceOp(typ *types.Slice) op {
+ if !isByte(typ.Elem()) {
+ panic("non-byte slice type in makeByteSliceOp")
+ }
+ bslice := types.NewSlice(types.Typ[types.Uint8])
+ return basicOp{
+ typ: typ,
+ writeMethod: "WriteBytes",
+ writeArgType: bslice,
+ decMethod: "Bytes",
+ decResultType: bslice,
+ }
+}
+
+func (bctx *buildContext) makeRawValueOp() op {
+ bslice := types.NewSlice(types.Typ[types.Uint8])
+ return basicOp{
+ typ: bctx.rawValueType,
+ writeMethod: "Write",
+ writeArgType: bslice,
+ decMethod: "Raw",
+ decResultType: bslice,
+ }
+}
+
+func (op basicOp) writeNeedsConversion() bool {
+ return !types.AssignableTo(op.typ, op.writeArgType)
+}
+
+func (op basicOp) decodeNeedsConversion() bool {
+ return !types.AssignableTo(op.decResultType, op.typ)
+}
+
+func (op basicOp) genWrite(ctx *genContext, v string) string {
+ if op.writeNeedsConversion() {
+ v = fmt.Sprintf("%s(%s)", op.writeArgType, v)
+ }
+ return fmt.Sprintf("w.%s(%s)\n", op.writeMethod, v)
+}
+
+func (op basicOp) genDecode(ctx *genContext) (string, string) {
+ var (
+ resultV = ctx.temp()
+ result = resultV
+ method = op.decMethod
+ )
+ if op.decUseBitSize {
+ // Note: For now, this only works for platform-independent integer
+ // sizes. makeBasicOp forbids the platform-dependent types.
+ var sizes types.StdSizes
+ method = fmt.Sprintf("%s%d", op.decMethod, sizes.Sizeof(op.typ)*8)
+ }
+
+ // Call the decoder method.
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%s, err := dec.%s()\n", resultV, method)
+ fmt.Fprintf(&b, "if err != nil { return err }\n")
+ if op.decodeNeedsConversion() {
+ conv := ctx.temp()
+ fmt.Fprintf(&b, "%s := %s(%s)\n", conv, types.TypeString(op.typ, ctx.qualify), resultV)
+ result = conv
+ }
+ return result, b.String()
+}
+
+// byteArrayOp handles [...]byte.
+type byteArrayOp struct {
+ typ types.Type
+ name types.Type // name != typ for named byte array types (e.g. common.Address)
+}
+
+func (bctx *buildContext) makeByteArrayOp(name *types.Named, typ *types.Array) byteArrayOp {
+ nt := types.Type(name)
+ if name == nil {
+ nt = typ
+ }
+ return byteArrayOp{typ, nt}
+}
+
+func (op byteArrayOp) genWrite(ctx *genContext, v string) string {
+ return fmt.Sprintf("w.WriteBytes(%s[:])\n", v)
+}
+
+func (op byteArrayOp) genDecode(ctx *genContext) (string, string) {
+ var resultV = ctx.temp()
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "var %s %s\n", resultV, types.TypeString(op.name, ctx.qualify))
+ fmt.Fprintf(&b, "if err := dec.ReadBytes(%s[:]); err != nil { return err }\n", resultV)
+ return resultV, b.String()
+}
+
+// bigIntOp handles big.Int.
+// This exists because big.Int has its own decoder operation on rlp.Stream,
+// but the decode method returns *big.Int, so it needs to be dereferenced.
+type bigIntOp struct {
+ pointer bool
+}
+
+func (op bigIntOp) genWrite(ctx *genContext, v string) string {
+ var b bytes.Buffer
+
+ fmt.Fprintf(&b, "if %s.Sign() == -1 {\n", v)
+ fmt.Fprintf(&b, " return rlp.ErrNegativeBigInt\n")
+ fmt.Fprintf(&b, "}\n")
+ dst := v
+ if !op.pointer {
+ dst = "&" + v
+ }
+ fmt.Fprintf(&b, "w.WriteBigInt(%s)\n", dst)
+
+ // Wrap with nil check.
+ if op.pointer {
+ code := b.String()
+ b.Reset()
+ fmt.Fprintf(&b, "if %s == nil {\n", v)
+ fmt.Fprintf(&b, " w.Write(rlp.EmptyString)")
+ fmt.Fprintf(&b, "} else {\n")
+ fmt.Fprint(&b, code)
+ fmt.Fprintf(&b, "}\n")
+ }
+
+ return b.String()
+}
+
+func (op bigIntOp) genDecode(ctx *genContext) (string, string) {
+ var resultV = ctx.temp()
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%s, err := dec.BigInt()\n", resultV)
+ fmt.Fprintf(&b, "if err != nil { return err }\n")
+
+ result := resultV
+ if !op.pointer {
+ result = "(*" + resultV + ")"
+ }
+ return result, b.String()
+}
+
+// uint256Op handles "github.com/holiman/uint256".Int
+type uint256Op struct {
+ pointer bool
+}
+
+func (op uint256Op) genWrite(ctx *genContext, v string) string {
+ var b bytes.Buffer
+
+ dst := v
+ if !op.pointer {
+ dst = "&" + v
+ }
+ fmt.Fprintf(&b, "w.WriteUint256(%s)\n", dst)
+
+ // Wrap with nil check.
+ if op.pointer {
+ code := b.String()
+ b.Reset()
+ fmt.Fprintf(&b, "if %s == nil {\n", v)
+ fmt.Fprintf(&b, " w.Write(rlp.EmptyString)")
+ fmt.Fprintf(&b, "} else {\n")
+ fmt.Fprint(&b, code)
+ fmt.Fprintf(&b, "}\n")
+ }
+
+ return b.String()
+}
+
+func (op uint256Op) genDecode(ctx *genContext) (string, string) {
+ ctx.addImport("github.com/holiman/uint256")
+
+ var b bytes.Buffer
+ resultV := ctx.temp()
+ fmt.Fprintf(&b, "var %s uint256.Int\n", resultV)
+ fmt.Fprintf(&b, "if err := dec.ReadUint256(&%s); err != nil { return err }\n", resultV)
+
+ result := resultV
+ if op.pointer {
+ result = "&" + resultV
+ }
+ return result, b.String()
+}
+
+// encoderDecoderOp handles rlp.Encoder and rlp.Decoder.
+// In order to be used with this, the type must implement both interfaces.
+// This restriction may be lifted in the future by creating separate ops for
+// encoding and decoding.
+type encoderDecoderOp struct {
+ typ types.Type
+}
+
+func (op encoderDecoderOp) genWrite(ctx *genContext, v string) string {
+ return fmt.Sprintf("if err := %s.EncodeRLP(w); err != nil { return err }\n", v)
+}
+
+func (op encoderDecoderOp) genDecode(ctx *genContext) (string, string) {
+ // DecodeRLP must have pointer receiver, and this is verified in makeOp.
+ etyp := op.typ.(*types.Pointer).Elem()
+ var resultV = ctx.temp()
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%s := new(%s)\n", resultV, types.TypeString(etyp, ctx.qualify))
+ fmt.Fprintf(&b, "if err := %s.DecodeRLP(dec); err != nil { return err }\n", resultV)
+ return resultV, b.String()
+}
+
+// ptrOp handles pointer types.
+type ptrOp struct {
+ elemTyp types.Type
+ elem op
+ nilOK bool
+ nilValue rlpstruct.NilKind
+}
+
+func (bctx *buildContext) makePtrOp(elemTyp types.Type, tags rlpstruct.Tags) (op, error) {
+ elemOp, err := bctx.makeOp(nil, elemTyp, rlpstruct.Tags{})
+ if err != nil {
+ return nil, err
+ }
+ op := ptrOp{elemTyp: elemTyp, elem: elemOp}
+
+ // Determine nil value.
+ if tags.NilOK {
+ op.nilOK = true
+ op.nilValue = tags.NilKind
+ } else {
+ styp := bctx.typeToStructType(elemTyp)
+ op.nilValue = styp.DefaultNilValue()
+ }
+ return op, nil
+}
+
+func (op ptrOp) genWrite(ctx *genContext, v string) string {
+ // Note: in writer functions, accesses to v are read-only, i.e. v is any Go
+ // expression. To make all accesses work through the pointer, we substitute
+ // v with (*v). This is required for most accesses including `v`, `call(v)`,
+ // and `v[index]` on slices.
+ //
+ // For `v.field` and `v[:]` on arrays, the dereference operation is not required.
+ var vv string
+ _, isStruct := op.elem.(structOp)
+ _, isByteArray := op.elem.(byteArrayOp)
+ if isStruct || isByteArray {
+ vv = v
+ } else {
+ vv = fmt.Sprintf("(*%s)", v)
+ }
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "if %s == nil {\n", v)
+ fmt.Fprintf(&b, " w.Write([]byte{0x%X})\n", op.nilValue)
+ fmt.Fprintf(&b, "} else {\n")
+ fmt.Fprintf(&b, " %s", op.elem.genWrite(ctx, vv))
+ fmt.Fprintf(&b, "}\n")
+ return b.String()
+}
+
+func (op ptrOp) genDecode(ctx *genContext) (string, string) {
+ result, code := op.elem.genDecode(ctx)
+ if !op.nilOK {
+ // If nil pointers are not allowed, we can just decode the element.
+ return "&" + result, code
+ }
+
+ // nil is allowed, so check the kind and size first.
+ // If size is zero and kind matches the nilKind of the type,
+ // the value decodes as a nil pointer.
+ var (
+ resultV = ctx.temp()
+ kindV = ctx.temp()
+ sizeV = ctx.temp()
+ wantKind string
+ )
+ if op.nilValue == rlpstruct.NilKindList {
+ wantKind = "rlp.List"
+ } else {
+ wantKind = "rlp.String"
+ }
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "var %s %s\n", resultV, types.TypeString(types.NewPointer(op.elemTyp), ctx.qualify))
+ fmt.Fprintf(&b, "if %s, %s, err := dec.Kind(); err != nil {\n", kindV, sizeV)
+ fmt.Fprintf(&b, " return err\n")
+ fmt.Fprintf(&b, "} else if %s != 0 || %s != %s {\n", sizeV, kindV, wantKind)
+ fmt.Fprint(&b, code)
+ fmt.Fprintf(&b, " %s = &%s\n", resultV, result)
+ fmt.Fprintf(&b, "}\n")
+ return resultV, b.String()
+}
+
+// structOp handles struct types.
+type structOp struct {
+ named *types.Named
+ typ *types.Struct
+ fields []*structField
+ optionalFields []*structField
+}
+
+type structField struct {
+ name string
+ typ types.Type
+ elem op
+}
+
+func (bctx *buildContext) makeStructOp(named *types.Named, typ *types.Struct) (op, error) {
+ // Convert fields to []rlpstruct.Field.
+ var allStructFields []rlpstruct.Field
+ for i := 0; i < typ.NumFields(); i++ {
+ f := typ.Field(i)
+ allStructFields = append(allStructFields, rlpstruct.Field{
+ Name: f.Name(),
+ Exported: f.Exported(),
+ Index: i,
+ Tag: typ.Tag(i),
+ Type: *bctx.typeToStructType(f.Type()),
+ })
+ }
+
+ // Filter/validate fields.
+ fields, tags, err := rlpstruct.ProcessFields(allStructFields)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create field ops.
+ var op = structOp{named: named, typ: typ}
+ for i, field := range fields {
+ // Advanced struct tags are not supported yet.
+ tag := tags[i]
+ if err := checkUnsupportedTags(field.Name, tag); err != nil {
+ return nil, err
+ }
+ typ := typ.Field(field.Index).Type()
+ elem, err := bctx.makeOp(nil, typ, tags[i])
+ if err != nil {
+ return nil, fmt.Errorf("field %s: %v", field.Name, err)
+ }
+ f := &structField{name: field.Name, typ: typ, elem: elem}
+ if tag.Optional {
+ op.optionalFields = append(op.optionalFields, f)
+ } else {
+ op.fields = append(op.fields, f)
+ }
+ }
+ return op, nil
+}
+
+func checkUnsupportedTags(field string, tag rlpstruct.Tags) error {
+ if tag.Tail {
+ return fmt.Errorf(`field %s has unsupported struct tag "tail"`, field)
+ }
+ return nil
+}
+
+func (op structOp) genWrite(ctx *genContext, v string) string {
+ var b bytes.Buffer
+ var listMarker = ctx.temp()
+ fmt.Fprintf(&b, "%s := w.List()\n", listMarker)
+ for _, field := range op.fields {
+ selector := v + "." + field.name
+ fmt.Fprint(&b, field.elem.genWrite(ctx, selector))
+ }
+ op.writeOptionalFields(&b, ctx, v)
+ fmt.Fprintf(&b, "w.ListEnd(%s)\n", listMarker)
+ return b.String()
+}
+
+func (op structOp) writeOptionalFields(b *bytes.Buffer, ctx *genContext, v string) {
+ if len(op.optionalFields) == 0 {
+ return
+ }
+ // First check zero-ness of all optional fields.
+ var zeroV = make([]string, len(op.optionalFields))
+ for i, field := range op.optionalFields {
+ selector := v + "." + field.name
+ zeroV[i] = ctx.temp()
+ fmt.Fprintf(b, "%s := %s\n", zeroV[i], nonZeroCheck(selector, field.typ, ctx.qualify))
+ }
+ // Now write the fields.
+ for i, field := range op.optionalFields {
+ selector := v + "." + field.name
+ cond := ""
+ for j := i; j < len(op.optionalFields); j++ {
+ if j > i {
+ cond += " || "
+ }
+ cond += zeroV[j]
+ }
+ fmt.Fprintf(b, "if %s {\n", cond)
+ fmt.Fprint(b, field.elem.genWrite(ctx, selector))
+ fmt.Fprintf(b, "}\n")
+ }
+}
+
+func (op structOp) genDecode(ctx *genContext) (string, string) {
+ // Get the string representation of the type.
+ // Here, named types are handled separately because the output
+ // would contain a copy of the struct definition otherwise.
+ var typeName string
+ if op.named != nil {
+ typeName = types.TypeString(op.named, ctx.qualify)
+ } else {
+ typeName = types.TypeString(op.typ, ctx.qualify)
+ }
+
+ // Create struct object.
+ var resultV = ctx.temp()
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "var %s %s\n", resultV, typeName)
+
+ // Decode fields.
+ fmt.Fprintf(&b, "{\n")
+ fmt.Fprintf(&b, "if _, err := dec.List(); err != nil { return err }\n")
+ for _, field := range op.fields {
+ result, code := field.elem.genDecode(ctx)
+ fmt.Fprintf(&b, "// %s:\n", field.name)
+ fmt.Fprint(&b, code)
+ fmt.Fprintf(&b, "%s.%s = %s\n", resultV, field.name, result)
+ }
+ op.decodeOptionalFields(&b, ctx, resultV)
+ fmt.Fprintf(&b, "if err := dec.ListEnd(); err != nil { return err }\n")
+ fmt.Fprintf(&b, "}\n")
+ return resultV, b.String()
+}
+
+func (op structOp) decodeOptionalFields(b *bytes.Buffer, ctx *genContext, resultV string) {
+ var suffix bytes.Buffer
+ for _, field := range op.optionalFields {
+ result, code := field.elem.genDecode(ctx)
+ fmt.Fprintf(b, "// %s:\n", field.name)
+ fmt.Fprintf(b, "if dec.MoreDataInList() {\n")
+ fmt.Fprint(b, code)
+ fmt.Fprintf(b, "%s.%s = %s\n", resultV, field.name, result)
+ fmt.Fprintf(&suffix, "}\n")
+ }
+ suffix.WriteTo(b)
+}
+
+// sliceOp handles slice types.
+type sliceOp struct {
+ typ *types.Slice
+ elemOp op
+}
+
+func (bctx *buildContext) makeSliceOp(typ *types.Slice) (op, error) {
+ elemOp, err := bctx.makeOp(nil, typ.Elem(), rlpstruct.Tags{})
+ if err != nil {
+ return nil, err
+ }
+ return sliceOp{typ: typ, elemOp: elemOp}, nil
+}
+
+func (op sliceOp) genWrite(ctx *genContext, v string) string {
+ var (
+ listMarker = ctx.temp() // holds return value of w.List()
+ iterElemV = ctx.temp() // iteration variable
+ elemCode = op.elemOp.genWrite(ctx, iterElemV)
+ )
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%s := w.List()\n", listMarker)
+ fmt.Fprintf(&b, "for _, %s := range %s {\n", iterElemV, v)
+ fmt.Fprint(&b, elemCode)
+ fmt.Fprintf(&b, "}\n")
+ fmt.Fprintf(&b, "w.ListEnd(%s)\n", listMarker)
+ return b.String()
+}
+
+func (op sliceOp) genDecode(ctx *genContext) (string, string) {
+ var sliceV = ctx.temp() // holds the output slice
+ elemResult, elemCode := op.elemOp.genDecode(ctx)
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "var %s %s\n", sliceV, types.TypeString(op.typ, ctx.qualify))
+ fmt.Fprintf(&b, "if _, err := dec.List(); err != nil { return err }\n")
+ fmt.Fprintf(&b, "for dec.MoreDataInList() {\n")
+ fmt.Fprintf(&b, " %s", elemCode)
+ fmt.Fprintf(&b, " %s = append(%s, %s)\n", sliceV, sliceV, elemResult)
+ fmt.Fprintf(&b, "}\n")
+ fmt.Fprintf(&b, "if err := dec.ListEnd(); err != nil { return err }\n")
+ return sliceV, b.String()
+}
+
+func (bctx *buildContext) makeOp(name *types.Named, typ types.Type, tags rlpstruct.Tags) (op, error) {
+ switch typ := typ.(type) {
+ case *types.Named:
+ if isBigInt(typ) {
+ return bigIntOp{}, nil
+ }
+ if isUint256(typ) {
+ return uint256Op{}, nil
+ }
+ if typ == bctx.rawValueType {
+ return bctx.makeRawValueOp(), nil
+ }
+ if bctx.isDecoder(typ) {
+ return nil, fmt.Errorf("type %v implements rlp.Decoder with non-pointer receiver", typ)
+ }
+ // TODO: same check for encoder?
+ return bctx.makeOp(typ, typ.Underlying(), tags)
+ case *types.Pointer:
+ if isBigInt(typ.Elem()) {
+ return bigIntOp{pointer: true}, nil
+ }
+ if isUint256(typ.Elem()) {
+ return uint256Op{pointer: true}, nil
+ }
+ // Encoder/Decoder interfaces.
+ if bctx.isEncoder(typ) {
+ if bctx.isDecoder(typ) {
+ return encoderDecoderOp{typ}, nil
+ }
+ return nil, fmt.Errorf("type %v implements rlp.Encoder but not rlp.Decoder", typ)
+ }
+ if bctx.isDecoder(typ) {
+ return nil, fmt.Errorf("type %v implements rlp.Decoder but not rlp.Encoder", typ)
+ }
+ // Default pointer handling.
+ return bctx.makePtrOp(typ.Elem(), tags)
+ case *types.Basic:
+ return bctx.makeBasicOp(typ)
+ case *types.Struct:
+ return bctx.makeStructOp(name, typ)
+ case *types.Slice:
+ etyp := typ.Elem()
+ if isByte(etyp) && !bctx.isEncoder(etyp) {
+ return bctx.makeByteSliceOp(typ), nil
+ }
+ return bctx.makeSliceOp(typ)
+ case *types.Array:
+ etyp := typ.Elem()
+ if isByte(etyp) && !bctx.isEncoder(etyp) {
+ return bctx.makeByteArrayOp(name, typ), nil
+ }
+ return nil, fmt.Errorf("unhandled array type: %v", typ)
+ default:
+ return nil, fmt.Errorf("unhandled type: %v", typ)
+ }
+}
+
+// generateDecoder generates the DecodeRLP method on 'typ'.
+func generateDecoder(ctx *genContext, typ string, op op) []byte {
+ ctx.resetTemp()
+ ctx.addImport(pathOfPackageRLP)
+
+ result, code := op.genDecode(ctx)
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "func (obj *%s) DecodeRLP(dec *rlp.Stream) error {\n", typ)
+ fmt.Fprint(&b, code)
+ fmt.Fprintf(&b, " *obj = %s\n", result)
+ fmt.Fprintf(&b, " return nil\n")
+ fmt.Fprintf(&b, "}\n")
+ return b.Bytes()
+}
+
+// generateEncoder generates the EncodeRLP method on 'typ'.
+func generateEncoder(ctx *genContext, typ string, op op) []byte {
+ ctx.resetTemp()
+ ctx.addImport("io")
+ ctx.addImport(pathOfPackageRLP)
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "func (obj *%s) EncodeRLP(_w io.Writer) error {\n", typ)
+ fmt.Fprintf(&b, " w := rlp.NewEncoderBuffer(_w)\n")
+ fmt.Fprint(&b, op.genWrite(ctx, "obj"))
+ fmt.Fprintf(&b, " return w.Flush()\n")
+ fmt.Fprintf(&b, "}\n")
+ return b.Bytes()
+}
+
+func (bctx *buildContext) generate(typ *types.Named, encoder, decoder bool) ([]byte, error) {
+ bctx.topType = typ
+
+ pkg := typ.Obj().Pkg()
+ op, err := bctx.makeOp(nil, typ, rlpstruct.Tags{})
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ ctx = newGenContext(pkg)
+ encSource []byte
+ decSource []byte
+ )
+ if encoder {
+ encSource = generateEncoder(ctx, typ.Obj().Name(), op)
+ }
+ if decoder {
+ decSource = generateDecoder(ctx, typ.Obj().Name(), op)
+ }
+
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "package %s\n\n", pkg.Name())
+ for _, imp := range ctx.importsList() {
+ fmt.Fprintf(&b, "import %q\n", imp)
+ }
+ if encoder {
+ fmt.Fprintln(&b)
+ b.Write(encSource)
+ }
+ if decoder {
+ fmt.Fprintln(&b)
+ b.Write(decSource)
+ }
+
+ source := b.Bytes()
+ // fmt.Println(string(source))
+ return format.Source(source)
+}
diff --git a/rlp/rlpgen/gen_test.go b/rlp/rlpgen/gen_test.go
new file mode 100644
index 00000000000..eab759acb39
--- /dev/null
+++ b/rlp/rlpgen/gen_test.go
@@ -0,0 +1,110 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "fmt"
+ "go/ast"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "os"
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// The rlp package is loaded only once and reused across all tests.
+var (
+ testFset = token.NewFileSet()
+ testImporter = importer.ForCompiler(testFset, "source", nil).(types.ImporterFrom)
+ testPackageRLP *types.Package
+)
+
+func init() {
+ cwd, err := os.Getwd()
+ if err != nil {
+ panic(err)
+ }
+ testPackageRLP, err = testImporter.ImportFrom(pathOfPackageRLP, cwd, 0)
+ if err != nil {
+ panic(fmt.Errorf("can't load package RLP: %v", err))
+ }
+}
+
+var tests = []string{"uints", "nil", "rawvalue", "optional", "bigint", "uint256"}
+
+func TestOutput(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("skipping on Windows: \r\n line endings would complicate the output comparison")
+ }
+ for _, test := range tests {
+ test := test
+ t.Run(test, func(t *testing.T) {
+ inputFile := filepath.Join("testdata", test+".in.txt")
+ outputFile := filepath.Join("testdata", test+".out.txt")
+ bctx, typ, err := loadTestSource(inputFile, "Test")
+ if err != nil {
+ t.Fatal("error loading test source:", err)
+ }
+ output, err := bctx.generate(typ, true, true)
+ if err != nil {
+ t.Fatal("error in generate:", err)
+ }
+
+ // Set this environment variable to regenerate the test outputs.
+ if os.Getenv("WRITE_TEST_FILES") != "" {
+ os.WriteFile(outputFile, output, 0644)
+ }
+
+ // Check if output matches.
+ wantOutput, err := os.ReadFile(outputFile)
+ if err != nil {
+ t.Fatal("error loading expected test output:", err)
+ }
+ assert.Equal(t, string(wantOutput), string(output))
+ })
+ }
+}
+
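+// loadTestSource parses and type-checks a testdata input file, returning the build context and the named struct type to generate for.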
+func loadTestSource(file string, typeName string) (*buildContext, *types.Named, error) {
+ // Load the test input.
+ content, err := os.ReadFile(file)
+ if err != nil {
+ return nil, nil, err
+ }
+ f, err := parser.ParseFile(testFset, file, content, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ conf := types.Config{Importer: testImporter}
+ pkg, err := conf.Check("test", testFset, []*ast.File{f}, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Find the test struct.
+ bctx := newBuildContext(testPackageRLP)
+ typ, err := lookupStructType(pkg.Scope(), typeName)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't find type %s: %v", typeName, err)
+ }
+ return bctx, typ, nil
+}
diff --git a/rlp/rlpgen/main.go b/rlp/rlpgen/main.go
new file mode 100644
index 00000000000..9fe092a9fed
--- /dev/null
+++ b/rlp/rlpgen/main.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The go-ethereum Authors
+// (original work)
+// Copyright 2024 The Erigon Authors
+// (modifications)
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "go/types"
+ "os"
+
+ "golang.org/x/tools/go/packages"
+)
+
+const pathOfPackageRLP = "github.com/ledgerwatch/erigon/rlp"
+
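+// Typical invocation (illustrative; MyType and the output file name are placeholders):
+//
+//	rlpgen -dir . -type MyType -encoder -decoder -out mytype_rlp.go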
+func main() {
+ var (
+ pkgdir = flag.String("dir", ".", "input package")
+ output = flag.String("out", "-", "output file (default is stdout)")
+ genEncoder = flag.Bool("encoder", true, "generate EncodeRLP?")
+ genDecoder = flag.Bool("decoder", false, "generate DecodeRLP?")
+ typename = flag.String("type", "", "type to generate methods for")
+ )
+ flag.Parse()
+
+ cfg := Config{
+ Dir: *pkgdir,
+ Type: *typename,
+ GenerateEncoder: *genEncoder,
+ GenerateDecoder: *genDecoder,
+ }
+ code, err := cfg.process()
+ if err != nil {
+ fatal(err)
+ }
+ if *output == "-" {
+ os.Stdout.Write(code)
+ } else if err := os.WriteFile(*output, code, 0600); err != nil {
+ fatal(err)
+ }
+}
+
+func fatal(args ...interface{}) {
+ fmt.Fprintln(os.Stderr, args...)
+ os.Exit(1)
+}
+
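+// Config holds the rlpgen settings collected from the command-line flags.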
+type Config struct {
+ Dir string // input package directory
+ Type string
+
+ GenerateEncoder bool
+ GenerateDecoder bool
+}
+
+// process loads the input package, locates the requested type, and generates its RLP encoder/decoder code.
+func (cfg *Config) process() (code []byte, err error) {
+ // Load packages.
+ pcfg := &packages.Config{
+ Mode: packages.NeedName | packages.NeedTypes,
+ Dir: cfg.Dir,
+ }
+ ps, err := packages.Load(pcfg, pathOfPackageRLP, ".")
+ if err != nil {
+ return nil, err
+ }
+ if len(ps) == 0 {
+ return nil, fmt.Errorf("no Go package found in %s", cfg.Dir)
+ }
+ packages.PrintErrors(ps)
+
+ // Find the packages that were loaded.
+ var (
+ pkg *types.Package
+ packageRLP *types.Package
+ )
+ for _, p := range ps {
+ if len(p.Errors) > 0 {
+ return nil, fmt.Errorf("package %s has errors", p.PkgPath)
+ }
+ if p.PkgPath == pathOfPackageRLP {
+ packageRLP = p.Types
+ } else {
+ pkg = p.Types
+ }
+ }
+ bctx := newBuildContext(packageRLP)
+
+ // Find the type and generate.
+ typ, err := lookupStructType(pkg.Scope(), cfg.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't find %s in %s: %v", cfg.Type, pkg, err)
+ }
+ code, err = bctx.generate(typ, cfg.GenerateEncoder, cfg.GenerateDecoder)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add the "Code generated" header comment.
+ // This is done here to avoid processing these lines with gofmt.
+ var header bytes.Buffer
+ fmt.Fprint(&header, "// Code generated by rlpgen. DO NOT EDIT.\n\n")
+ return append(header.Bytes(), code...), nil
+}
+
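+// lookupStructType resolves 'name' in scope and checks that it refers to a struct type.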
+func lookupStructType(scope *types.Scope, name string) (*types.Named, error) {
+ typ, err := lookupType(scope, name)
+ if err != nil {
+ return nil, err
+ }
+ _, ok := typ.Underlying().(*types.Struct)
+ if !ok {
+ return nil, errors.New("not a struct type")
+ }
+ return typ, nil
+}
+
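+// lookupType resolves 'name' in scope to a named type declaration.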
+func lookupType(scope *types.Scope, name string) (*types.Named, error) {
+ obj := scope.Lookup(name)
+ if obj == nil {
+ return nil, errors.New("no such identifier")
+ }
+ typ, ok := obj.(*types.TypeName)
+ if !ok {
+ return nil, errors.New("not a type")
+ }
+ return typ.Type().(*types.Named), nil
+}
diff --git a/rlp/rlpgen/testdata/bigint.in.txt b/rlp/rlpgen/testdata/bigint.in.txt
new file mode 100644
index 00000000000..d23d84a2876
--- /dev/null
+++ b/rlp/rlpgen/testdata/bigint.in.txt
@@ -0,0 +1,10 @@
+// -*- mode: go -*-
+
+package test
+
+import "math/big"
+
+type Test struct {
+ Int *big.Int
+ IntNoPtr big.Int
+}
diff --git a/rlp/rlpgen/testdata/bigint.out.txt b/rlp/rlpgen/testdata/bigint.out.txt
new file mode 100644
index 00000000000..366498c5bae
--- /dev/null
+++ b/rlp/rlpgen/testdata/bigint.out.txt
@@ -0,0 +1,49 @@
+package test
+
+import "github.com/ledgerwatch/erigon/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ if obj.Int == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ if obj.Int.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(obj.Int)
+ }
+ if obj.IntNoPtr.Sign() == -1 {
+ return rlp.ErrNegativeBigInt
+ }
+ w.WriteBigInt(&obj.IntNoPtr)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // Int:
+ _tmp1, err := dec.BigInt()
+ if err != nil {
+ return err
+ }
+ _tmp0.Int = _tmp1
+ // IntNoPtr:
+ _tmp2, err := dec.BigInt()
+ if err != nil {
+ return err
+ }
+ _tmp0.IntNoPtr = (*_tmp2)
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/testdata/nil.in.txt b/rlp/rlpgen/testdata/nil.in.txt
new file mode 100644
index 00000000000..a28ff344874
--- /dev/null
+++ b/rlp/rlpgen/testdata/nil.in.txt
@@ -0,0 +1,30 @@
+// -*- mode: go -*-
+
+package test
+
+type Aux struct{
+ A uint32
+}
+
+type Test struct{
+ Uint8 *byte `rlp:"nil"`
+ Uint8List *byte `rlp:"nilList"`
+
+ Uint32 *uint32 `rlp:"nil"`
+ Uint32List *uint32 `rlp:"nilList"`
+
+ Uint64 *uint64 `rlp:"nil"`
+ Uint64List *uint64 `rlp:"nilList"`
+
+ String *string `rlp:"nil"`
+ StringList *string `rlp:"nilList"`
+
+ ByteArray *[3]byte `rlp:"nil"`
+ ByteArrayList *[3]byte `rlp:"nilList"`
+
+ ByteSlice *[]byte `rlp:"nil"`
+ ByteSliceList *[]byte `rlp:"nilList"`
+
+ Struct *Aux `rlp:"nil"`
+ StructString *Aux `rlp:"nilString"`
+}
diff --git a/rlp/rlpgen/testdata/nil.out.txt b/rlp/rlpgen/testdata/nil.out.txt
new file mode 100644
index 00000000000..654a8176316
--- /dev/null
+++ b/rlp/rlpgen/testdata/nil.out.txt
@@ -0,0 +1,289 @@
+package test
+
+import "github.com/ledgerwatch/erigon/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ if obj.Uint8 == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteUint64(uint64((*obj.Uint8)))
+ }
+ if obj.Uint8List == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteUint64(uint64((*obj.Uint8List)))
+ }
+ if obj.Uint32 == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteUint64(uint64((*obj.Uint32)))
+ }
+ if obj.Uint32List == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteUint64(uint64((*obj.Uint32List)))
+ }
+ if obj.Uint64 == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteUint64((*obj.Uint64))
+ }
+ if obj.Uint64List == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteUint64((*obj.Uint64List))
+ }
+ if obj.String == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteString((*obj.String))
+ }
+ if obj.StringList == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteString((*obj.StringList))
+ }
+ if obj.ByteArray == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteBytes(obj.ByteArray[:])
+ }
+ if obj.ByteArrayList == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteBytes(obj.ByteArrayList[:])
+ }
+ if obj.ByteSlice == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteBytes((*obj.ByteSlice))
+ }
+ if obj.ByteSliceList == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ w.WriteBytes((*obj.ByteSliceList))
+ }
+ if obj.Struct == nil {
+ w.Write([]byte{0xC0})
+ } else {
+ _tmp1 := w.List()
+ w.WriteUint64(uint64(obj.Struct.A))
+ w.ListEnd(_tmp1)
+ }
+ if obj.StructString == nil {
+ w.Write([]byte{0x80})
+ } else {
+ _tmp2 := w.List()
+ w.WriteUint64(uint64(obj.StructString.A))
+ w.ListEnd(_tmp2)
+ }
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // Uint8:
+ var _tmp2 *byte
+ if _tmp3, _tmp4, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp4 != 0 || _tmp3 != rlp.String {
+ _tmp1, err := dec.Uint8()
+ if err != nil {
+ return err
+ }
+ _tmp2 = &_tmp1
+ }
+ _tmp0.Uint8 = _tmp2
+ // Uint8List:
+ var _tmp6 *byte
+ if _tmp7, _tmp8, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp8 != 0 || _tmp7 != rlp.List {
+ _tmp5, err := dec.Uint8()
+ if err != nil {
+ return err
+ }
+ _tmp6 = &_tmp5
+ }
+ _tmp0.Uint8List = _tmp6
+ // Uint32:
+ var _tmp10 *uint32
+ if _tmp11, _tmp12, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp12 != 0 || _tmp11 != rlp.String {
+ _tmp9, err := dec.Uint32()
+ if err != nil {
+ return err
+ }
+ _tmp10 = &_tmp9
+ }
+ _tmp0.Uint32 = _tmp10
+ // Uint32List:
+ var _tmp14 *uint32
+ if _tmp15, _tmp16, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp16 != 0 || _tmp15 != rlp.List {
+ _tmp13, err := dec.Uint32()
+ if err != nil {
+ return err
+ }
+ _tmp14 = &_tmp13
+ }
+ _tmp0.Uint32List = _tmp14
+ // Uint64:
+ var _tmp18 *uint64
+ if _tmp19, _tmp20, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp20 != 0 || _tmp19 != rlp.String {
+ _tmp17, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp18 = &_tmp17
+ }
+ _tmp0.Uint64 = _tmp18
+ // Uint64List:
+ var _tmp22 *uint64
+ if _tmp23, _tmp24, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp24 != 0 || _tmp23 != rlp.List {
+ _tmp21, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp22 = &_tmp21
+ }
+ _tmp0.Uint64List = _tmp22
+ // String:
+ var _tmp26 *string
+ if _tmp27, _tmp28, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp28 != 0 || _tmp27 != rlp.String {
+ _tmp25, err := dec.String()
+ if err != nil {
+ return err
+ }
+ _tmp26 = &_tmp25
+ }
+ _tmp0.String = _tmp26
+ // StringList:
+ var _tmp30 *string
+ if _tmp31, _tmp32, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp32 != 0 || _tmp31 != rlp.List {
+ _tmp29, err := dec.String()
+ if err != nil {
+ return err
+ }
+ _tmp30 = &_tmp29
+ }
+ _tmp0.StringList = _tmp30
+ // ByteArray:
+ var _tmp34 *[3]byte
+ if _tmp35, _tmp36, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp36 != 0 || _tmp35 != rlp.String {
+ var _tmp33 [3]byte
+ if err := dec.ReadBytes(_tmp33[:]); err != nil {
+ return err
+ }
+ _tmp34 = &_tmp33
+ }
+ _tmp0.ByteArray = _tmp34
+ // ByteArrayList:
+ var _tmp38 *[3]byte
+ if _tmp39, _tmp40, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp40 != 0 || _tmp39 != rlp.List {
+ var _tmp37 [3]byte
+ if err := dec.ReadBytes(_tmp37[:]); err != nil {
+ return err
+ }
+ _tmp38 = &_tmp37
+ }
+ _tmp0.ByteArrayList = _tmp38
+ // ByteSlice:
+ var _tmp42 *[]byte
+ if _tmp43, _tmp44, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp44 != 0 || _tmp43 != rlp.String {
+ _tmp41, err := dec.Bytes()
+ if err != nil {
+ return err
+ }
+ _tmp42 = &_tmp41
+ }
+ _tmp0.ByteSlice = _tmp42
+ // ByteSliceList:
+ var _tmp46 *[]byte
+ if _tmp47, _tmp48, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp48 != 0 || _tmp47 != rlp.List {
+ _tmp45, err := dec.Bytes()
+ if err != nil {
+ return err
+ }
+ _tmp46 = &_tmp45
+ }
+ _tmp0.ByteSliceList = _tmp46
+ // Struct:
+ var _tmp51 *Aux
+ if _tmp52, _tmp53, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp53 != 0 || _tmp52 != rlp.List {
+ var _tmp49 Aux
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ _tmp50, err := dec.Uint32()
+ if err != nil {
+ return err
+ }
+ _tmp49.A = _tmp50
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp51 = &_tmp49
+ }
+ _tmp0.Struct = _tmp51
+ // StructString:
+ var _tmp56 *Aux
+ if _tmp57, _tmp58, err := dec.Kind(); err != nil {
+ return err
+ } else if _tmp58 != 0 || _tmp57 != rlp.String {
+ var _tmp54 Aux
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ _tmp55, err := dec.Uint32()
+ if err != nil {
+ return err
+ }
+ _tmp54.A = _tmp55
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp56 = &_tmp54
+ }
+ _tmp0.StructString = _tmp56
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/testdata/optional.in.txt b/rlp/rlpgen/testdata/optional.in.txt
new file mode 100644
index 00000000000..f1ac9f7899d
--- /dev/null
+++ b/rlp/rlpgen/testdata/optional.in.txt
@@ -0,0 +1,17 @@
+// -*- mode: go -*-
+
+package test
+
+type Aux struct {
+ A uint64
+}
+
+type Test struct {
+ Uint64 uint64 `rlp:"optional"`
+ Pointer *uint64 `rlp:"optional"`
+ String string `rlp:"optional"`
+ Slice []uint64 `rlp:"optional"`
+ Array [3]byte `rlp:"optional"`
+ NamedStruct Aux `rlp:"optional"`
+ AnonStruct struct{ A string } `rlp:"optional"`
+}
diff --git a/rlp/rlpgen/testdata/optional.out.txt b/rlp/rlpgen/testdata/optional.out.txt
new file mode 100644
index 00000000000..3219730057e
--- /dev/null
+++ b/rlp/rlpgen/testdata/optional.out.txt
@@ -0,0 +1,153 @@
+package test
+
+import "github.com/ledgerwatch/erigon/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ _tmp1 := obj.Uint64 != 0
+ _tmp2 := obj.Pointer != nil
+ _tmp3 := obj.String != ""
+ _tmp4 := len(obj.Slice) > 0
+ _tmp5 := obj.Array != ([3]byte{})
+ _tmp6 := obj.NamedStruct != (Aux{})
+ _tmp7 := obj.AnonStruct != (struct{ A string }{})
+ if _tmp1 || _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
+ w.WriteUint64(obj.Uint64)
+ }
+ if _tmp2 || _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
+ if obj.Pointer == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.WriteUint64((*obj.Pointer))
+ }
+ }
+ if _tmp3 || _tmp4 || _tmp5 || _tmp6 || _tmp7 {
+ w.WriteString(obj.String)
+ }
+ if _tmp4 || _tmp5 || _tmp6 || _tmp7 {
+ _tmp8 := w.List()
+ for _, _tmp9 := range obj.Slice {
+ w.WriteUint64(_tmp9)
+ }
+ w.ListEnd(_tmp8)
+ }
+ if _tmp5 || _tmp6 || _tmp7 {
+ w.WriteBytes(obj.Array[:])
+ }
+ if _tmp6 || _tmp7 {
+ _tmp10 := w.List()
+ w.WriteUint64(obj.NamedStruct.A)
+ w.ListEnd(_tmp10)
+ }
+ if _tmp7 {
+ _tmp11 := w.List()
+ w.WriteString(obj.AnonStruct.A)
+ w.ListEnd(_tmp11)
+ }
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // Uint64:
+ if dec.MoreDataInList() {
+ _tmp1, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp0.Uint64 = _tmp1
+ // Pointer:
+ if dec.MoreDataInList() {
+ _tmp2, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp0.Pointer = &_tmp2
+ // String:
+ if dec.MoreDataInList() {
+ _tmp3, err := dec.String()
+ if err != nil {
+ return err
+ }
+ _tmp0.String = _tmp3
+ // Slice:
+ if dec.MoreDataInList() {
+ var _tmp4 []uint64
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ _tmp5, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp4 = append(_tmp4, _tmp5)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp0.Slice = _tmp4
+ // Array:
+ if dec.MoreDataInList() {
+ var _tmp6 [3]byte
+ if err := dec.ReadBytes(_tmp6[:]); err != nil {
+ return err
+ }
+ _tmp0.Array = _tmp6
+ // NamedStruct:
+ if dec.MoreDataInList() {
+ var _tmp7 Aux
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ _tmp8, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp7.A = _tmp8
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp0.NamedStruct = _tmp7
+ // AnonStruct:
+ if dec.MoreDataInList() {
+ var _tmp9 struct{ A string }
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ _tmp10, err := dec.String()
+ if err != nil {
+ return err
+ }
+ _tmp9.A = _tmp10
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp0.AnonStruct = _tmp9
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/testdata/rawvalue.in.txt b/rlp/rlpgen/testdata/rawvalue.in.txt
new file mode 100644
index 00000000000..8290bf6f96b
--- /dev/null
+++ b/rlp/rlpgen/testdata/rawvalue.in.txt
@@ -0,0 +1,11 @@
+// -*- mode: go -*-
+
+package test
+
+import "github.com/ledgerwatch/erigon/rlp"
+
+type Test struct {
+ RawValue rlp.RawValue
+ PointerToRawValue *rlp.RawValue
+ SliceOfRawValue []rlp.RawValue
+}
diff --git a/rlp/rlpgen/testdata/rawvalue.out.txt b/rlp/rlpgen/testdata/rawvalue.out.txt
new file mode 100644
index 00000000000..406f9d86621
--- /dev/null
+++ b/rlp/rlpgen/testdata/rawvalue.out.txt
@@ -0,0 +1,64 @@
+package test
+
+import "github.com/ledgerwatch/erigon/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.Write(obj.RawValue)
+ if obj.PointerToRawValue == nil {
+ w.Write([]byte{0x80})
+ } else {
+ w.Write((*obj.PointerToRawValue))
+ }
+ _tmp1 := w.List()
+ for _, _tmp2 := range obj.SliceOfRawValue {
+ w.Write(_tmp2)
+ }
+ w.ListEnd(_tmp1)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // RawValue:
+ _tmp1, err := dec.Raw()
+ if err != nil {
+ return err
+ }
+ _tmp0.RawValue = _tmp1
+ // PointerToRawValue:
+ _tmp2, err := dec.Raw()
+ if err != nil {
+ return err
+ }
+ _tmp0.PointerToRawValue = &_tmp2
+ // SliceOfRawValue:
+ var _tmp3 []rlp.RawValue
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ _tmp4, err := dec.Raw()
+ if err != nil {
+ return err
+ }
+ _tmp3 = append(_tmp3, _tmp4)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp0.SliceOfRawValue = _tmp3
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/testdata/uint256.in.txt b/rlp/rlpgen/testdata/uint256.in.txt
new file mode 100644
index 00000000000..ed16e0a7882
--- /dev/null
+++ b/rlp/rlpgen/testdata/uint256.in.txt
@@ -0,0 +1,10 @@
+// -*- mode: go -*-
+
+package test
+
+import "github.com/holiman/uint256"
+
+type Test struct {
+ Int *uint256.Int
+ IntNoPtr uint256.Int
+}
diff --git a/rlp/rlpgen/testdata/uint256.out.txt b/rlp/rlpgen/testdata/uint256.out.txt
new file mode 100644
index 00000000000..5520241c0dc
--- /dev/null
+++ b/rlp/rlpgen/testdata/uint256.out.txt
@@ -0,0 +1,44 @@
+package test
+
+import "github.com/holiman/uint256"
+import "github.com/ledgerwatch/erigon/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ if obj.Int == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ w.WriteUint256(obj.Int)
+ }
+ w.WriteUint256(&obj.IntNoPtr)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // Int:
+ var _tmp1 uint256.Int
+ if err := dec.ReadUint256(&_tmp1); err != nil {
+ return err
+ }
+ _tmp0.Int = &_tmp1
+ // IntNoPtr:
+ var _tmp2 uint256.Int
+ if err := dec.ReadUint256(&_tmp2); err != nil {
+ return err
+ }
+ _tmp0.IntNoPtr = _tmp2
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/testdata/uints.in.txt b/rlp/rlpgen/testdata/uints.in.txt
new file mode 100644
index 00000000000..8095da997d9
--- /dev/null
+++ b/rlp/rlpgen/testdata/uints.in.txt
@@ -0,0 +1,10 @@
+// -*- mode: go -*-
+
+package test
+
+type Test struct{
+ A uint8
+ B uint16
+ C uint32
+ D uint64
+}
diff --git a/rlp/rlpgen/testdata/uints.out.txt b/rlp/rlpgen/testdata/uints.out.txt
new file mode 100644
index 00000000000..c18ce0c2dcc
--- /dev/null
+++ b/rlp/rlpgen/testdata/uints.out.txt
@@ -0,0 +1,53 @@
+package test
+
+import "github.com/ledgerwatch/erigon/rlp"
+import "io"
+
+func (obj *Test) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.WriteUint64(uint64(obj.A))
+ w.WriteUint64(uint64(obj.B))
+ w.WriteUint64(uint64(obj.C))
+ w.WriteUint64(obj.D)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *Test) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 Test
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // A:
+ _tmp1, err := dec.Uint8()
+ if err != nil {
+ return err
+ }
+ _tmp0.A = _tmp1
+ // B:
+ _tmp2, err := dec.Uint16()
+ if err != nil {
+ return err
+ }
+ _tmp0.B = _tmp2
+ // C:
+ _tmp3, err := dec.Uint32()
+ if err != nil {
+ return err
+ }
+ _tmp0.C = _tmp3
+ // D:
+ _tmp4, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp0.D = _tmp4
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/rlp/rlpgen/types.go b/rlp/rlpgen/types.go
new file mode 100644
index 00000000000..2464a3793e2
--- /dev/null
+++ b/rlp/rlpgen/types.go
@@ -0,0 +1,127 @@
+// Copyright 2014 The go-ethereum Authors
+// (original work)
+// Copyright 2024 The Erigon Authors
+// (modifications)
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "fmt"
+ "go/types"
+ "reflect"
+)
+
+// typeReflectKind gives the reflect.Kind that represents typ.
+func typeReflectKind(typ types.Type) reflect.Kind {
+ switch typ := typ.(type) {
+ case *types.Basic:
+ k := typ.Kind()
+ if k >= types.Bool && k <= types.Complex128 {
+ // value order matches for Bool..Complex128
+ return reflect.Bool + reflect.Kind(k-types.Bool)
+ }
+ if k == types.String {
+ return reflect.String
+ }
+ if k == types.UnsafePointer {
+ return reflect.UnsafePointer
+ }
+ panic(fmt.Errorf("unhandled BasicKind %v", k))
+ case *types.Array:
+ return reflect.Array
+ case *types.Chan:
+ return reflect.Chan
+ case *types.Interface:
+ return reflect.Interface
+ case *types.Map:
+ return reflect.Map
+ case *types.Pointer:
+ return reflect.Ptr
+ case *types.Signature:
+ return reflect.Func
+ case *types.Slice:
+ return reflect.Slice
+ case *types.Struct:
+ return reflect.Struct
+ default:
+ panic(fmt.Errorf("unhandled type %T", typ))
+ }
+}
+
+// nonZeroCheck returns the expression that checks whether 'v' is a non-zero value of type 'vtyp'.
+func nonZeroCheck(v string, vtyp types.Type, qualify types.Qualifier) string {
+ // Resolve type name.
+ typ := resolveUnderlying(vtyp)
+ switch typ := typ.(type) {
+ case *types.Basic:
+ k := typ.Kind()
+ switch {
+ case k == types.Bool:
+ return v
+ case k >= types.Uint && k <= types.Complex128:
+ return fmt.Sprintf("%s != 0", v)
+ case k == types.String:
+ return fmt.Sprintf(`%s != ""`, v)
+ default:
+ panic(fmt.Errorf("unhandled BasicKind %v", k))
+ }
+ case *types.Array, *types.Struct:
+ return fmt.Sprintf("%s != (%s{})", v, types.TypeString(vtyp, qualify))
+ case *types.Interface, *types.Pointer, *types.Signature:
+ return fmt.Sprintf("%s != nil", v)
+ case *types.Slice, *types.Map:
+ return fmt.Sprintf("len(%s) > 0", v)
+ default:
+ panic(fmt.Errorf("unhandled type %T", typ))
+ }
+}
+
+// isBigInt checks whether 'typ' is "math/big".Int.
+func isBigInt(typ types.Type) bool {
+ named, ok := typ.(*types.Named)
+ if !ok {
+ return false
+ }
+ name := named.Obj()
+ return name.Pkg().Path() == "math/big" && name.Name() == "Int"
+}
+
+// isUint256 checks whether 'typ' is "github.com/holiman/uint256".Int.
+func isUint256(typ types.Type) bool {
+ named, ok := typ.(*types.Named)
+ if !ok {
+ return false
+ }
+ name := named.Obj()
+ return name.Pkg().Path() == "github.com/holiman/uint256" && name.Name() == "Int"
+}
+
+// isByte checks whether the underlying type of 'typ' is uint8.
+func isByte(typ types.Type) bool {
+ basic, ok := resolveUnderlying(typ).(*types.Basic)
+ return ok && basic.Kind() == types.Uint8
+}
+
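+// resolveUnderlying follows Underlying() until the non-named underlying type is reached.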
+func resolveUnderlying(typ types.Type) types.Type {
+ for {
+ t := typ.Underlying()
+ if t == typ {
+ return t
+ }
+ typ = t
+ }
+}
diff --git a/rlp/typecache.go b/rlp/typecache.go
index bc941d6627e..e003125a13f 100644
--- a/rlp/typecache.go
+++ b/rlp/typecache.go
@@ -66,7 +66,7 @@ type typekey struct {
type decoder func(*Stream, reflect.Value) error
-type writer func(reflect.Value, *encbuf) error
+type writer func(reflect.Value, *encBuffer) error
func cachedDecoder(typ reflect.Type) (decoder, error) {
info := cachedTypeInfo(typ, tags{})
diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go
index 2f87b0234b2..a32181fa988 100644
--- a/turbo/stages/mock/mock_sentry.go
+++ b/turbo/stages/mock/mock_sentry.go
@@ -275,7 +275,8 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
cfg.DeprecatedTxPool.Disable = !withTxPool
cfg.DeprecatedTxPool.StartOnInit = true
- logger := log.New()
+ logger := log.Root()
+ logger.SetHandler(log.LvlFilterHandler(log.LvlError, log.StderrHandler))
ctx, ctxCancel := context.WithCancel(context.Background())
db, agg := temporaltest.NewTestDB(nil, dirs)