diff --git a/libs/appconsts/appconsts.go b/libs/appconsts/appconsts.go
new file mode 100644
index 00000000000..8a5281e94cd
--- /dev/null
+++ b/libs/appconsts/appconsts.go
@@ -0,0 +1,101 @@
+package appconsts
+
+// These constants were originally sourced from:
+// https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/consensus.md#constants
+const (
+	// NamespaceVersionSize is the size of a namespace version in bytes.
+	NamespaceVersionSize = 1
+
+	// NamespaceIDSize is the size of a namespace ID in bytes.
+	NamespaceIDSize = 32
+
+	// NamespaceSize is the size of a namespace (version + ID) in bytes.
+	NamespaceSize = NamespaceVersionSize + NamespaceIDSize
+
+	// ShareSize is the size of a share in bytes.
+	ShareSize = 512
+
+	// ShareInfoBytes is the number of bytes reserved for information. The info
+	// byte contains the share version and a sequence start indicator.
+	ShareInfoBytes = 1
+
+	// SequenceLenBytes is the number of bytes reserved for the sequence length
+	// that is present in the first share of a sequence.
+	SequenceLenBytes = 4
+
+	// ShareVersionZero is the first share version format.
+	ShareVersionZero = uint8(0)
+
+	// DefaultShareVersion is the de facto share version. Use this if you are
+	// unsure of which version to use.
+	DefaultShareVersion = ShareVersionZero
+
+	// CompactShareReservedBytes is the number of bytes reserved for the location of
+	// the first unit (transaction, ISR) in a compact share.
+	CompactShareReservedBytes = 4
+
+	// FirstCompactShareContentSize is the number of bytes usable for data in
+	// the first compact share of a sequence.
+	FirstCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - CompactShareReservedBytes
+
+	// ContinuationCompactShareContentSize is the number of bytes usable for
+	// data in a continuation compact share of a sequence.
+	ContinuationCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - CompactShareReservedBytes
+
+	// FirstSparseShareContentSize is the number of bytes usable for data in the
+	// first sparse share of a sequence.
+	FirstSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes
+
+	// ContinuationSparseShareContentSize is the number of bytes usable for data
+	// in a continuation sparse share of a sequence.
+	ContinuationSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes
+
+	// DefaultMaxSquareSize is the maximum original square width.
+	//
+	// Note: 128 shares in a row * 128 shares in a column * 512 bytes in a share
+	// = 8 MiB
+	DefaultMaxSquareSize = 128
+
+	// MaxShareCount is the maximum number of shares allowed in the original
+	// data square.
+	MaxShareCount = DefaultMaxSquareSize * DefaultMaxSquareSize
+
+	// DefaultMinSquareSize is the smallest original square width.
+	DefaultMinSquareSize = 1
+
+	// MinShareCount is the minimum number of shares allowed in the original
+	// data square.
+	MinShareCount = DefaultMinSquareSize * DefaultMinSquareSize
+
+	// MaxShareVersion is the maximum value a share version can be.
+	MaxShareVersion = 127
+
+	// DefaultGasPerBlobByte is the default gas cost deducted per byte of blob
+	// included in a PayForBlobs transaction.
+	DefaultGasPerBlobByte = 8
+
+	// TransactionsPerBlockLimit is the maximum number of transactions a block
+	// producer will include in a block.
+	//
+	// NOTE: Currently this value is set at roughly the number of PFBs that
+	// would fill one quarter of the max square size.
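// For reference, the sizes above pin down the usable payload per share: a
// 512-byte share minus the 33-byte namespace (1 version byte + 32 ID bytes),
// the 1-byte info byte, and, where present, the 4-byte sequence length and 4
// reserved bytes. A minimal, self-contained sketch of that arithmetic using
// plain literals that mirror the constants defined in this file:

package main

import "fmt"

func main() {
	const (
		shareSize     = 512 // ShareSize
		namespaceSize = 33  // NamespaceSize
		infoBytes     = 1   // ShareInfoBytes
		seqLenBytes   = 4   // SequenceLenBytes
		reservedBytes = 4   // CompactShareReservedBytes
	)
	fmt.Println(shareSize - namespaceSize - infoBytes - seqLenBytes - reservedBytes) // 470 = FirstCompactShareContentSize
	fmt.Println(shareSize - namespaceSize - infoBytes - reservedBytes)               // 474 = ContinuationCompactShareContentSize
	fmt.Println(shareSize - namespaceSize - infoBytes - seqLenBytes)                 // 474 = FirstSparseShareContentSize
	fmt.Println(shareSize - namespaceSize - infoBytes)                               // 478 = ContinuationSparseShareContentSize
	fmt.Println(128 * 128 * shareSize)                                               // 8388608 bytes = 8 MiB for a full 128x128 square
}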
+	TransactionsPerBlockLimit = 5090
+)
+
+var (
+
+	// TODO: Consider commenting back in. Removed to reduce unneeded dependency
+
+	// // NewBaseHashFunc is the base hash function used by NMT. Change accordingly
+	// // if another hash.Hash should be used as a base hasher in the NMT.
+	// NewBaseHashFunc = consts.NewBaseHashFunc
+	// // DefaultCodec is the default codec creator used for data erasure.
+	// DefaultCodec = rsmt2d.NewLeoRSCodec
+
+	// // DataCommitmentBlocksLimit is the limit to the number of blocks we can
+	// // generate a data commitment for.
+	// DataCommitmentBlocksLimit = consts.DataCommitmentBlocksLimit
+
+	// SupportedShareVersions is a list of supported share versions.
+	SupportedShareVersions = []uint8{ShareVersionZero}
+)
diff --git a/libs/appconsts/consensus_consts.go b/libs/appconsts/consensus_consts.go
new file mode 100644
index 00000000000..f2f12736a1b
--- /dev/null
+++ b/libs/appconsts/consensus_consts.go
@@ -0,0 +1,8 @@
+package appconsts
+
+import "time"
+
+const (
+	TimeoutPropose = time.Second * 10
+	TimeoutCommit = time.Second * 10
+)
diff --git a/libs/namespace/consts.go b/libs/namespace/consts.go
new file mode 100644
index 00000000000..c8b320238ac
--- /dev/null
+++ b/libs/namespace/consts.go
@@ -0,0 +1,70 @@
+package namespace
+
+import (
+	"bytes"
+	"math"
+
+	"github.com/rollkit/rollkit/libs/appconsts"
+)
+
+const (
+	// NamespaceVersionSize is the size of a namespace version in bytes.
+	NamespaceVersionSize = appconsts.NamespaceVersionSize
+
+	// NamespaceIDSize is the size of a namespace ID in bytes.
+	NamespaceIDSize = appconsts.NamespaceIDSize
+
+	// NamespaceSize is the size of a namespace (version + ID) in bytes.
+	NamespaceSize = appconsts.NamespaceSize
+
+	// NamespaceVersionZero is the first namespace version.
+	NamespaceVersionZero = uint8(0)
+
+	// NamespaceVersionMax is the max namespace version.
+	NamespaceVersionMax = math.MaxUint8
+
+	// NamespaceVersionZeroPrefixSize is the number of `0` bytes that are prefixed to
+	// namespace IDs for version 0.
+	NamespaceVersionZeroPrefixSize = 22
+
+	// NamespaceVersionZeroIDSize is the number of bytes available for
+	// user-specified namespace ID in a namespace ID for version 0.
+	NamespaceVersionZeroIDSize = NamespaceIDSize - NamespaceVersionZeroPrefixSize
+)
+
+var (
+	// NamespaceVersionZeroPrefix is the prefix of a namespace ID for version 0.
+	NamespaceVersionZeroPrefix = bytes.Repeat([]byte{0}, NamespaceVersionZeroPrefixSize)
+
+	// TxNamespace is the namespace reserved for transaction data.
+	TxNamespace = MustNewV0([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 1})
+
+	// IntermediateStateRootsNamespace is the namespace reserved for
+	// intermediate state root data.
+	IntermediateStateRootsNamespace = MustNewV0([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 2})
+
+	// PayForBlobNamespace is the namespace reserved for PayForBlobs transactions.
+	PayForBlobNamespace = MustNewV0([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 4})
+
+	// ReservedPaddingNamespace is the namespace used for padding after all
+	// reserved namespaces. In practice this padding is after transactions
+	// (ordinary and PFBs) but before blobs.
+	ReservedPaddingNamespace = MustNewV0([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 255})
+
+	// MaxReservedNamespace is lexicographically the largest namespace that is
+	// reserved for protocol use.
+	MaxReservedNamespace = MustNewV0([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 255})
+
+	// TailPaddingNamespace is the namespace reserved for tail padding. All data
+	// with this namespace will be ignored.
+ TailPaddingNamespace = Namespace{ + Version: math.MaxUint8, + ID: append(bytes.Repeat([]byte{0xFF}, NamespaceIDSize-1), 0xFE), + } + + // ParitySharesNamespace is the namespace reserved for erasure coded data. + ParitySharesNamespace = Namespace{ + Version: math.MaxUint8, + ID: bytes.Repeat([]byte{0xFF}, NamespaceIDSize), + } +) diff --git a/libs/namespace/namespace.go b/libs/namespace/namespace.go new file mode 100644 index 00000000000..0d4c95d080a --- /dev/null +++ b/libs/namespace/namespace.go @@ -0,0 +1,130 @@ +package namespace + +import ( + "bytes" + "fmt" +) + +type Namespace struct { + Version uint8 + ID []byte +} + +// New returns a new namespace with the provided version and id. +func New(version uint8, id []byte) (Namespace, error) { + err := validateVersion(version) + if err != nil { + return Namespace{}, err + } + + err = validateID(version, id) + if err != nil { + return Namespace{}, err + } + + return Namespace{ + Version: version, + ID: id, + }, nil +} + +// MustNew returns a new namespace with the provided version and id. It panics +// if the provided version or id are not supported. +func MustNew(version uint8, id []byte) Namespace { + ns, err := New(version, id) + if err != nil { + panic(err) + } + return ns +} + +// MustNewV0 returns a new namespace with version 0 and the provided id. This +// function panics if the provided id is not exactly NamespaceVersionZeroIDSize bytes. +func MustNewV0(id []byte) Namespace { + if len(id) != NamespaceVersionZeroIDSize { + panic(fmt.Sprintf("invalid namespace id length: %v must be %v", len(id), NamespaceVersionZeroIDSize)) + } + + ns, err := New(NamespaceVersionZero, append(NamespaceVersionZeroPrefix, id...)) + if err != nil { + panic(err) + } + return ns +} + +// From returns a namespace from the provided byte slice. +func From(b []byte) (Namespace, error) { + if len(b) != NamespaceSize { + return Namespace{}, fmt.Errorf("invalid namespace length: %v must be %v", len(b), NamespaceSize) + } + rawVersion := b[0] + rawNamespace := b[1:] + return New(rawVersion, rawNamespace) +} + +// Bytes returns this namespace as a byte slice. +func (n Namespace) Bytes() []byte { + return append([]byte{n.Version}, n.ID...) +} + +// ValidateBlobNamespace returns an error if this namespace is not a valid blob namespace. +func (n Namespace) ValidateBlobNamespace() error { + if n.IsReserved() { + return fmt.Errorf("invalid blob namespace: %v cannot use a reserved namespace ID, want > %v", n.Bytes(), MaxReservedNamespace.Bytes()) + } + + if n.IsParityShares() { + return fmt.Errorf("invalid blob namespace: %v cannot use parity shares namespace ID", n.Bytes()) + } + + if n.IsTailPadding() { + return fmt.Errorf("invalid blob namespace: %v cannot use tail padding namespace ID", n.Bytes()) + } + + return nil +} + +// validateVersion returns an error if the version is not supported. +func validateVersion(version uint8) error { + if version != NamespaceVersionZero && version != NamespaceVersionMax { + return fmt.Errorf("unsupported namespace version %v", version) + } + return nil +} + +// validateID returns an error if the provided id does not meet the requirements +// for the provided version. 
+func validateID(version uint8, id []byte) error { + if len(id) != NamespaceIDSize { + return fmt.Errorf("unsupported namespace id length: id %v must be %v bytes but it was %v bytes", id, NamespaceIDSize, len(id)) + } + + if version == NamespaceVersionZero && !bytes.HasPrefix(id, NamespaceVersionZeroPrefix) { + return fmt.Errorf("unsupported namespace id with version %v. ID %v must start with %v leading zeros", version, id, len(NamespaceVersionZeroPrefix)) + } + return nil +} + +func (n Namespace) IsReserved() bool { + return bytes.Compare(n.Bytes(), MaxReservedNamespace.Bytes()) < 1 +} + +func (n Namespace) IsParityShares() bool { + return bytes.Equal(n.Bytes(), ParitySharesNamespace.Bytes()) +} + +func (n Namespace) IsTailPadding() bool { + return bytes.Equal(n.Bytes(), TailPaddingNamespace.Bytes()) +} + +func (n Namespace) IsReservedPadding() bool { + return bytes.Equal(n.Bytes(), ReservedPaddingNamespace.Bytes()) +} + +func (n Namespace) IsTx() bool { + return bytes.Equal(n.Bytes(), TxNamespace.Bytes()) +} + +func (n Namespace) IsPayForBlob() bool { + return bytes.Equal(n.Bytes(), PayForBlobNamespace.Bytes()) +} diff --git a/libs/namespace/namespace_test.go b/libs/namespace/namespace_test.go new file mode 100644 index 00000000000..712fb7073f0 --- /dev/null +++ b/libs/namespace/namespace_test.go @@ -0,0 +1,151 @@ +package namespace + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +var ( + validID = append(NamespaceVersionZeroPrefix, bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize)...) + tooShortID = append(NamespaceVersionZeroPrefix, []byte{1}...) + tooLongID = append(NamespaceVersionZeroPrefix, bytes.Repeat([]byte{1}, NamespaceSize)...) + invalidPrefixID = bytes.Repeat([]byte{1}, NamespaceSize) +) + +func TestNew(t *testing.T) { + type testCase struct { + name string + version uint8 + id []byte + wantErr bool + want Namespace + } + + testCases := []testCase{ + { + name: "valid namespace", + version: NamespaceVersionZero, + id: validID, + wantErr: false, + want: Namespace{ + Version: NamespaceVersionZero, + ID: validID, + }, + }, + { + name: "unsupported version", + version: uint8(1), + id: validID, + wantErr: true, + }, + { + name: "unsupported id: too short", + version: NamespaceVersionZero, + id: tooShortID, + wantErr: true, + }, + { + name: "unsupported id: too long", + version: NamespaceVersionZero, + id: tooLongID, + wantErr: true, + }, + { + name: "unsupported id: invalid prefix", + version: NamespaceVersionZero, + id: invalidPrefixID, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := New(tc.version, tc.id) + if tc.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestFrom(t *testing.T) { + type testCase struct { + name string + bytes []byte + wantErr bool + want Namespace + } + validNamespace := []byte{} + validNamespace = append(validNamespace, NamespaceVersionZero) + validNamespace = append(validNamespace, NamespaceVersionZeroPrefix...) + validNamespace = append(validNamespace, bytes.Repeat([]byte{0x1}, NamespaceVersionZeroIDSize)...) 
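// For orientation, a version-0 namespace is always 33 bytes: the version byte,
// the 22-byte zero prefix, and 10 user-chosen ID bytes, which is exactly how
// validNamespace is assembled above. A minimal standalone sketch of that
// layout using plain literals:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	version := []byte{0}                  // NamespaceVersionZero
	prefix := bytes.Repeat([]byte{0}, 22) // NamespaceVersionZeroPrefix
	id := bytes.Repeat([]byte{1}, 10)     // user-specified part (NamespaceVersionZeroIDSize bytes)

	namespace := append(append(version, prefix...), id...)
	fmt.Println(len(namespace)) // 33 = NamespaceSize
}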
+	parityNamespace := bytes.Repeat([]byte{0xFF}, NamespaceSize)
+
+	testCases := []testCase{
+		{
+			name: "valid namespace",
+			bytes: validNamespace,
+			wantErr: false,
+			want: Namespace{
+				Version: NamespaceVersionZero,
+				ID: validID,
+			},
+		},
+		{
+			name: "parity namespace",
+			bytes: parityNamespace,
+			wantErr: false,
+			want: Namespace{
+				Version: NamespaceVersionMax,
+				ID: bytes.Repeat([]byte{0xFF}, NamespaceIDSize),
+			},
+		},
+		{
+			name: "unsupported version",
+			bytes: append([]byte{1}, append(NamespaceVersionZeroPrefix, bytes.Repeat([]byte{1}, NamespaceSize-len(NamespaceVersionZeroPrefix))...)...),
+			wantErr: true,
+		},
+		{
+			name: "unsupported id: too short",
+			bytes: append([]byte{NamespaceVersionZero}, tooShortID...),
+			wantErr: true,
+		},
+		{
+			name: "unsupported id: too long",
+			bytes: append([]byte{NamespaceVersionZero}, tooLongID...),
+			wantErr: true,
+		},
+		{
+			name: "unsupported id: invalid prefix",
+			bytes: append([]byte{NamespaceVersionZero}, invalidPrefixID...),
+			wantErr: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			got, err := From(tc.bytes)
+			if tc.wantErr {
+				assert.Error(t, err)
+				return
+			}
+			assert.NoError(t, err)
+			assert.Equal(t, tc.want, got)
+		})
+	}
+}
+
+func TestBytes(t *testing.T) {
+	namespace, err := New(NamespaceVersionZero, validID)
+	assert.NoError(t, err)
+
+	want := append([]byte{NamespaceVersionZero}, validID...)
+	got := namespace.Bytes()
+
+	assert.Equal(t, want, got)
+}
diff --git a/libs/namespace/random_blob.go b/libs/namespace/random_blob.go
new file mode 100644
index 00000000000..376534b8cf2
--- /dev/null
+++ b/libs/namespace/random_blob.go
@@ -0,0 +1,28 @@
+package namespace
+
+import (
+	tmrand "github.com/tendermint/tendermint/libs/rand"
+)
+
+func RandomBlobNamespaceID() []byte {
+	return tmrand.Bytes(NamespaceVersionZeroIDSize)
+}
+
+func RandomBlobNamespace() Namespace {
+	for {
+		id := RandomBlobNamespaceID()
+		namespace := MustNewV0(id)
+		err := namespace.ValidateBlobNamespace()
+		if err != nil {
+			continue
+		}
+		return namespace
+	}
+}
+
+func RandomBlobNamespaces(count int) (namespaces []Namespace) {
+	for i := 0; i < count; i++ {
+		namespaces = append(namespaces, RandomBlobNamespace())
+	}
+	return namespaces
+}
diff --git a/libs/namespace/random_namespace.go b/libs/namespace/random_namespace.go
new file mode 100644
index 00000000000..79e52a6118e
--- /dev/null
+++ b/libs/namespace/random_namespace.go
@@ -0,0 +1,18 @@
+package namespace
+
+import tmrand "github.com/tendermint/tendermint/libs/rand"
+
+func RandomNamespace() Namespace {
+	for {
+		id := RandomVersionZeroID()
+		namespace, err := New(NamespaceVersionZero, id)
+		if err != nil {
+			continue
+		}
+		return namespace
+	}
+}
+
+func RandomVersionZeroID() []byte {
+	return append(NamespaceVersionZeroPrefix, tmrand.Bytes(NamespaceVersionZeroIDSize)...)
+} diff --git a/libs/shares/compact_shares_test.go b/libs/shares/compact_shares_test.go new file mode 100644 index 00000000000..45ae135e71a --- /dev/null +++ b/libs/shares/compact_shares_test.go @@ -0,0 +1,216 @@ +package shares + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + coretypes "github.com/tendermint/tendermint/types" + + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" + "github.com/rollkit/rollkit/libs/testfactory" +) + +func SplitTxs(txs coretypes.Txs) (txShares []Share, err error) { + txWriter := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + + for _, tx := range txs { + err = txWriter.WriteTx(tx) + if err != nil { + return nil, err + } + } + + txShares, _, err = txWriter.Export(0) + if err != nil { + return nil, err + } + + return txShares, nil +} + +func TestCompactShareSplitter(t *testing.T) { + // note that this test is mainly for debugging purposes, the main round trip + // tests occur in TestMerge and Test_processCompactShares + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + txs := testfactory.GenerateRandomTxs(33, 200) + for _, tx := range txs { + err := css.WriteTx(tx) + require.NoError(t, err) + } + shares, _, err := css.Export(0) + require.NoError(t, err) + + rawResTxs, err := parseCompactShares(shares, appconsts.SupportedShareVersions) + resTxs := TxsFromBytes(rawResTxs) + require.NoError(t, err) + + assert.Equal(t, txs, resTxs) +} + +func TestFuzz_processCompactShares(t *testing.T) { + t.Skip() + // run random shares through processCompactShares for a minute + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + for { + select { + case <-ctx.Done(): + return + default: + Test_processCompactShares(t) + } + } +} + +func Test_processCompactShares(t *testing.T) { + // exactTxShareSize is the length of tx that will fit exactly into a single + // share, accounting for the tx length delimiter prepended to + // each tx. Note that the length delimiter can be 1 to 10 bytes (varint) but + // this test assumes it is 1 byte. 
+	const exactTxShareSize = appconsts.FirstCompactShareContentSize - 1
+
+	type test struct {
+		name string
+		txSize int
+		txCount int
+	}
+
+	// each test is run twice, once using txSize as an exact size, and again
+	// using it as a cap for randomly sized txs
+	tests := []test{
+		{"single small tx", appconsts.ContinuationCompactShareContentSize / 8, 1},
+		{"many small txs", appconsts.ContinuationCompactShareContentSize / 8, 10},
+		{"single big tx", appconsts.ContinuationCompactShareContentSize * 4, 1},
+		{"many big txs", appconsts.ContinuationCompactShareContentSize * 4, 10},
+		{"single exact size tx", exactTxShareSize, 1},
+		{"many exact size txs", exactTxShareSize, 100},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+
+		// run the tests with identically sized txs
+		t.Run(fmt.Sprintf("%s identically sized", tc.name), func(t *testing.T) {
+			txs := testfactory.GenerateRandomTxs(tc.txCount, tc.txSize)
+
+			shares, err := SplitTxs(txs)
+			require.NoError(t, err)
+
+			parsedTxs, err := parseCompactShares(shares, appconsts.SupportedShareVersions)
+			if err != nil {
+				t.Error(err)
+			}
+
+			// check that the data parsed is identical
+			for i := 0; i < len(txs); i++ {
+				assert.Equal(t, []byte(txs[i]), parsedTxs[i])
+			}
+		})
+
+		// run the same tests using randomly sized txs with caps of tc.txSize
+		t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) {
+			txs := testfactory.GenerateRandomlySizedTxs(tc.txCount, tc.txSize)
+
+			txShares, err := SplitTxs(txs)
+			require.NoError(t, err)
+			parsedTxs, err := parseCompactShares(txShares, appconsts.SupportedShareVersions)
+			if err != nil {
+				t.Error(err)
+			}
+
+			// check that the data parsed is identical to the original
+			for i := 0; i < len(txs); i++ {
+				assert.Equal(t, []byte(txs[i]), parsedTxs[i])
+			}
+		})
+	}
+}
+
+func TestCompactShareContainsInfoByte(t *testing.T) {
+	css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero)
+	txs := testfactory.GenerateRandomTxs(1, appconsts.ContinuationCompactShareContentSize/4)
+
+	for _, tx := range txs {
+		err := css.WriteTx(tx)
+		require.NoError(t, err)
+	}
+
+	shares, _, err := css.Export(0)
+	require.NoError(t, err)
+	assert.Condition(t, func() bool { return len(shares) == 1 })
+
+	infoByte := shares[0].data[appconsts.NamespaceSize : appconsts.NamespaceSize+appconsts.ShareInfoBytes][0]
+
+	isSequenceStart := true
+	want, err := NewInfoByte(appconsts.ShareVersionZero, isSequenceStart)
+
+	require.NoError(t, err)
+	assert.Equal(t, byte(want), infoByte)
+}
+
+func TestContiguousCompactShareContainsInfoByte(t *testing.T) {
+	css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero)
+	txs := testfactory.GenerateRandomTxs(1, appconsts.ContinuationCompactShareContentSize*4)
+
+	for _, tx := range txs {
+		err := css.WriteTx(tx)
+		require.NoError(t, err)
+	}
+
+	shares, _, err := css.Export(0)
+	require.NoError(t, err)
+	assert.Condition(t, func() bool { return len(shares) > 1 })
+
+	infoByte := shares[1].data[appconsts.NamespaceSize : appconsts.NamespaceSize+appconsts.ShareInfoBytes][0]
+
+	isSequenceStart := false
+	want, err := NewInfoByte(appconsts.ShareVersionZero, isSequenceStart)
+
+	require.NoError(t, err)
+	assert.Equal(t, byte(want), infoByte)
+}
+
+func Test_parseCompactSharesErrors(t *testing.T) {
+	type testCase struct {
+		name string
+		shares []Share
+	}
+
+	txs := testfactory.GenerateRandomTxs(2, appconsts.ContinuationCompactShareContentSize*4)
+	txShares, err := SplitTxs(txs)
+	require.NoError(t, err)
+	rawShares := ToBytes(txShares)
+
+	unsupportedShareVersion := 5
+	infoByte, _ := NewInfoByte(uint8(unsupportedShareVersion), true)
+	shareWithUnsupportedShareVersionBytes := rawShares[0]
+	shareWithUnsupportedShareVersionBytes[appconsts.NamespaceSize] = byte(infoByte)
+
+	shareWithUnsupportedShareVersion, err := NewShare(shareWithUnsupportedShareVersionBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	testCases := []testCase{
+		{
+			"share with start indicator false",
+			txShares[1:], // set the first share to the second share which has the start indicator set to false
+		},
+		{
+			"share with unsupported share version",
+			[]Share{*shareWithUnsupportedShareVersion},
+		},
+	}
+
+	for _, tt := range testCases {
+		t.Run(tt.name, func(t *testing.T) {
+			_, err := parseCompactShares(tt.shares, appconsts.SupportedShareVersions)
+			assert.Error(t, err)
+		})
+	}
+}
diff --git a/libs/shares/doc.go b/libs/shares/doc.go
new file mode 100644
index 00000000000..2b5353fa573
--- /dev/null
+++ b/libs/shares/doc.go
@@ -0,0 +1,73 @@
+// Package shares provides primitives for splitting block data into shares and
+// parsing shares back into block data.
+//
+// # Compact vs. Sparse
+//
+// There are two types of shares:
+// 1. Compact
+// 2. Sparse
+//
+// Compact shares can contain data from one or more units (transactions or
+// intermediate state roots). Sparse shares can contain data from zero or one
+// blob. Compact shares and sparse shares are encoded differently. The
+// motivation behind the distinction is that transactions and intermediate state
+// roots are expected to have small lengths so they are encoded in compact
+// shares to minimize the number of shares needed to store them. On the other
+// hand, blobs are expected to be larger and have the desideratum that clients
+// should be able to create proofs of blob inclusion. This desideratum is
+// infeasible if client A's blob is encoded into a share with another client B's
+// blob that is unknown to A. It follows that client A's blob is encoded into a
+// share such that the contents can be determined by client A without any
+// additional information. See [message layout rational] or
+// [adr-006-non-interactive-defaults] for more details.
+//
+// # Universal Prefix
+//
+// Both types of shares have a universal prefix. The first byte of a share
+// contains the namespace version. The next 32 bytes contain the namespace ID.
+// The next byte contains an [InfoByte] that contains the
+// share version and a sequence start indicator. If the sequence start indicator
+// is `1` (i.e. this is the first share of a sequence) then the next 4 bytes
+// contain a big endian uint32 of the sequence length.
+//
+// For the first share of a sequence:
+//
+// | namespace_version | namespace_id | info_byte | sequence_length | sequence_data |
+// | 1 byte | 32 bytes | 1 byte | 4 bytes | remaining bytes of share |
+//
+// For a continuation share of a sequence:
+//
+// | namespace_version | namespace_id | info_byte | sequence_data |
+// | 1 byte | 32 bytes | 1 byte | remaining bytes of share |
+//
+// The remaining bytes depend on the share type.
+//
+// # Compact Share Schema
+//
+// The four bytes after the universal prefix are reserved for
+// the location in the share of the first unit of data that starts in this
+// share.
+// +// For the first compact share: +// +// | namespace_version | namespace_id | info_byte | sequence_length | location_of_first_unit | transactions or intermediate state roots | +// | 1 byte | 32 bytes | 1 byte | 4 bytes | 4 bytes | remaining bytes of share | +// +// For continuation compact share: +// +// | namespace_version | namespace_id | info_byte | location_of_first_unit | transactions or intermediate state roots | +// | 1 byte | 32 bytes | 1 byte | 4 bytes | remaining bytes of share | +// +// Notes +// - All shares in a reserved namespace belong to one sequence. +// - Each unit (transaction or intermediate state root) in data is prefixed with a varint of the length of the unit. +// +// # Sparse Share Schema +// +// The remaining bytes contain blob data. +// +// [message layout rational]: https://celestiaorg.github.io/celestia-specs/latest/rationale/message_block_layout.html#message-layout-rationale +// [adr-006-non-interactive-defaults]: https://github.com/celestiaorg/celestia-app/pull/673 +// +// [namespace.ID]: https://github.com/celestiaorg/nmt/blob/master/namespace/id.go +package shares diff --git a/libs/shares/info_byte.go b/libs/shares/info_byte.go new file mode 100644 index 00000000000..a57e295b7cb --- /dev/null +++ b/libs/shares/info_byte.go @@ -0,0 +1,43 @@ +package shares + +import ( + "fmt" + + "github.com/rollkit/rollkit/libs/appconsts" +) + +// InfoByte is a byte with the following structure: the first 7 bits are +// reserved for version information in big endian form (initially `0000000`). +// The last bit is a "sequence start indicator", that is `1` if this is the +// first share of a sequence and `0` if this is a continuation share. +type InfoByte byte + +func NewInfoByte(version uint8, isSequenceStart bool) (InfoByte, error) { + if version > appconsts.MaxShareVersion { + return 0, fmt.Errorf("version %d must be less than or equal to %d", version, appconsts.MaxShareVersion) + } + + prefix := version << 1 + if isSequenceStart { + return InfoByte(prefix + 1), nil + } + return InfoByte(prefix), nil +} + +// Version returns the version encoded in this InfoByte. Version is +// expected to be between 0 and appconsts.MaxShareVersion (inclusive). +func (i InfoByte) Version() uint8 { + version := uint8(i) >> 1 + return version +} + +// IsSequenceStart returns whether this share is the start of a sequence. 
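// To make the InfoByte layout concrete: the share version occupies the upper
// seven bits and the sequence start indicator is the lowest bit, so version 1
// with the start indicator set packs to 0b00000011. A small standalone sketch
// of the same packing and unpacking rule (mirroring NewInfoByte and
// ParseInfoByte):

package main

import "fmt"

func main() {
	version := uint8(1)
	isSequenceStart := true

	// pack: shift the version into the upper seven bits, then set the last bit
	b := version << 1
	if isSequenceStart {
		b |= 1
	}
	fmt.Printf("%08b\n", b) // 00000011

	// unpack: the mirror image of the packing above
	fmt.Println(b>>1, b&1 == 1) // 1 true
}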
+func (i InfoByte) IsSequenceStart() bool { + return uint(i)%2 == 1 +} + +func ParseInfoByte(i byte) (InfoByte, error) { + isSequenceStart := i%2 == 1 + version := uint8(i) >> 1 + return NewInfoByte(version, isSequenceStart) +} diff --git a/libs/shares/info_byte_test.go b/libs/shares/info_byte_test.go new file mode 100644 index 00000000000..ff02a548ade --- /dev/null +++ b/libs/shares/info_byte_test.go @@ -0,0 +1,103 @@ +package shares + +import "testing" + +func TestInfoByte(t *testing.T) { + blobStart := true + notBlobStart := false + + type testCase struct { + version uint8 + isSequenceStart bool + } + tests := []testCase{ + {0, blobStart}, + {1, blobStart}, + {2, blobStart}, + {127, blobStart}, + + {0, notBlobStart}, + {1, notBlobStart}, + {2, notBlobStart}, + {127, notBlobStart}, + } + + for _, test := range tests { + irb, err := NewInfoByte(test.version, test.isSequenceStart) + if err != nil { + t.Errorf("got %v want no error", err) + } + if got := irb.Version(); got != test.version { + t.Errorf("got version %v want %v", got, test.version) + } + if got := irb.IsSequenceStart(); got != test.isSequenceStart { + t.Errorf("got IsSequenceStart %v want %v", got, test.isSequenceStart) + } + } +} + +func TestInfoByteErrors(t *testing.T) { + blobStart := true + notBlobStart := false + + type testCase struct { + version uint8 + isSequenceStart bool + } + + tests := []testCase{ + {128, notBlobStart}, + {255, notBlobStart}, + {128, blobStart}, + {255, blobStart}, + } + + for _, test := range tests { + _, err := NewInfoByte(test.version, false) + if err == nil { + t.Errorf("got nil but want error when version > 127") + } + } +} + +func FuzzNewInfoByte(f *testing.F) { + f.Fuzz(func(t *testing.T, version uint8, isSequenceStart bool) { + if version > 127 { + t.Skip() + } + _, err := NewInfoByte(version, isSequenceStart) + if err != nil { + t.Errorf("got nil but want error when version > 127") + } + }) +} + +func TestParseInfoByte(t *testing.T) { + type testCase struct { + b byte + wantVersion uint8 + wantisSequenceStart bool + } + + tests := []testCase{ + {0b00000000, 0, false}, + {0b00000001, 0, true}, + {0b00000010, 1, false}, + {0b00000011, 1, true}, + {0b00000101, 2, true}, + {0b11111111, 127, true}, + } + + for _, test := range tests { + got, err := ParseInfoByte(test.b) + if err != nil { + t.Errorf("got %v want no error", err) + } + if got.Version() != test.wantVersion { + t.Errorf("got version %v want %v", got.Version(), test.wantVersion) + } + if got.IsSequenceStart() != test.wantisSequenceStart { + t.Errorf("got IsSequenceStart %v want %v", got.IsSequenceStart(), test.wantisSequenceStart) + } + } +} diff --git a/libs/shares/parse_compact_shares.go b/libs/shares/parse_compact_shares.go new file mode 100644 index 00000000000..32f959012f2 --- /dev/null +++ b/libs/shares/parse_compact_shares.go @@ -0,0 +1,83 @@ +package shares + +import "errors" + +// parseCompactShares returns data (transactions or intermediate state roots +// based on the contents of rawShares and supportedShareVersions. If rawShares +// contains a share with a version that isn't present in supportedShareVersions, +// an error is returned. The returned data [][]byte does not have namespaces, +// info bytes, data length delimiter, or unit length delimiters and are ready to +// be unmarshalled. 
+func parseCompactShares(shares []Share, supportedShareVersions []uint8) (data [][]byte, err error) { + if len(shares) == 0 { + return nil, nil + } + + seqStart, err := shares[0].IsSequenceStart() + if err != nil { + return nil, err + } + if !seqStart { + return nil, errors.New("first share is not the start of a sequence") + } + + err = validateShareVersions(shares, supportedShareVersions) + if err != nil { + return nil, err + } + + rawData, err := extractRawData(shares) + if err != nil { + return nil, err + } + + data, err = parseRawData(rawData) + if err != nil { + return nil, err + } + + return data, nil +} + +// validateShareVersions returns an error if the shares contain a share with an +// unsupported share version. Returns nil if all shares contain supported share +// versions. +func validateShareVersions(shares []Share, supportedShareVersions []uint8) error { + for i := 0; i < len(shares); i++ { + if err := shares[i].DoesSupportVersions(supportedShareVersions); err != nil { + return err + } + } + return nil +} + +// parseRawData returns the units (transactions, PFB transactions, intermediate +// state roots) contained in raw data by parsing the unit length delimiter +// prefixed to each unit. +func parseRawData(rawData []byte) (units [][]byte, err error) { + units = make([][]byte, 0) + for { + actualData, unitLen, err := ParseDelimiter(rawData) + if err != nil { + return nil, err + } + if unitLen == 0 { + return units, nil + } + rawData = actualData[unitLen:] + units = append(units, actualData[:unitLen]) + } +} + +// extractRawData returns the raw data contained in the shares. The raw data does +// not contain the namespace ID, info byte, sequence length, or reserved bytes. +func extractRawData(shares []Share) (rawData []byte, err error) { + for i := 0; i < len(shares); i++ { + raw, err := shares[i].RawData() + if err != nil { + return nil, err + } + rawData = append(rawData, raw...) + } + return rawData, nil +} diff --git a/libs/shares/reserved_bytes.go b/libs/shares/reserved_bytes.go new file mode 100644 index 00000000000..f5b1b0efd11 --- /dev/null +++ b/libs/shares/reserved_bytes.go @@ -0,0 +1,33 @@ +package shares + +import ( + "encoding/binary" + "fmt" + + "github.com/rollkit/rollkit/libs/appconsts" +) + +// NewReservedBytes returns a byte slice of length +// appconsts.CompactShareReservedBytes that contains the byteIndex of the first +// unit that starts in a compact share. +func NewReservedBytes(byteIndex uint32) ([]byte, error) { + if byteIndex >= appconsts.ShareSize { + return []byte{}, fmt.Errorf("byte index %d must be less than share size %d", byteIndex, appconsts.ShareSize) + } + reservedBytes := make([]byte, appconsts.CompactShareReservedBytes) + binary.BigEndian.PutUint32(reservedBytes, byteIndex) + return reservedBytes, nil +} + +// ParseReservedBytes parses a byte slice of length +// appconsts.CompactShareReservedBytes into a byteIndex. 
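// For illustration, the reserved bytes are nothing more than a 4-byte
// big-endian index into the share. A minimal round trip with encoding/binary,
// using byte index 511 (the last valid index of a 512-byte share), which
// matches the {0, 0, 1, 255} test vector below:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	reserved := make([]byte, 4) // CompactShareReservedBytes
	binary.BigEndian.PutUint32(reserved, 511)
	fmt.Println(reserved) // [0 0 1 255]

	fmt.Println(binary.BigEndian.Uint32(reserved)) // 511
}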
+func ParseReservedBytes(reservedBytes []byte) (uint32, error) { + if len(reservedBytes) != appconsts.CompactShareReservedBytes { + return 0, fmt.Errorf("reserved bytes must be of length %d", appconsts.CompactShareReservedBytes) + } + byteIndex := binary.BigEndian.Uint32(reservedBytes) + if appconsts.ShareSize <= byteIndex { + return 0, fmt.Errorf("byteIndex must be less than share size %d", appconsts.ShareSize) + } + return byteIndex, nil +} diff --git a/libs/shares/reserved_bytes_test.go b/libs/shares/reserved_bytes_test.go new file mode 100644 index 00000000000..1723b358ae4 --- /dev/null +++ b/libs/shares/reserved_bytes_test.go @@ -0,0 +1,84 @@ +package shares + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseReservedBytes(t *testing.T) { + type testCase struct { + name string + input []byte + want uint32 + expectErr bool + } + testCases := []testCase{ + {"byte index of 0", []byte{0, 0, 0, 0}, 0, false}, + {"byte index of 2", []byte{0, 0, 0, 2}, 2, false}, + {"byte index of 4", []byte{0, 0, 0, 4}, 4, false}, + {"byte index of 8", []byte{0, 0, 0, 8}, 8, false}, + {"byte index of 16", []byte{0, 0, 0, 16}, 16, false}, + {"byte index of 32", []byte{0, 0, 0, 32}, 32, false}, + {"byte index of 64", []byte{0, 0, 0, 64}, 64, false}, + {"byte index of 128", []byte{0, 0, 0, 128}, 128, false}, + {"byte index of 256", []byte{0, 0, 1, 0}, 256, false}, + {"byte index of 511", []byte{0, 0, 1, 255}, 511, false}, + + // error cases + {"empty", []byte{}, 0, true}, + {"too few reserved bytes", []byte{1}, 0, true}, + {"another case of too few reserved bytes", []byte{3, 3, 3}, 0, true}, + {"too many bytes", []byte{0, 0, 0, 0, 0}, 0, true}, + {"too high of a byte index", []byte{0, 0, 3, 232}, 0, true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := ParseReservedBytes(tc.input) + if tc.expectErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestNewReservedBytes(t *testing.T) { + type testCase struct { + name string + input uint32 + want []byte + expectErr bool + } + testCases := []testCase{ + {"byte index of 0", 0, []byte{0, 0, 0, 0}, false}, + {"byte index of 2", 2, []byte{0, 0, 0, 2}, false}, + {"byte index of 4", 4, []byte{0, 0, 0, 4}, false}, + {"byte index of 8", 8, []byte{0, 0, 0, 8}, false}, + {"byte index of 16", 16, []byte{0, 0, 0, 16}, false}, + {"byte index of 32", 32, []byte{0, 0, 0, 32}, false}, + {"byte index of 64", 64, []byte{0, 0, 0, 64}, false}, + {"byte index of 128", 128, []byte{0, 0, 0, 128}, false}, + {"byte index of 256", 256, []byte{0, 0, 1, 0}, false}, + {"byte index of 511", 511, []byte{0, 0, 1, 255}, false}, + + // error cases + {"byte index of 512 is equal to share size", 512, []byte{}, true}, + {"byte index of 1000 is greater than share size", 1000, []byte{}, true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := NewReservedBytes(tc.input) + if tc.expectErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/libs/shares/share_builder.go b/libs/shares/share_builder.go new file mode 100644 index 00000000000..02affdffead --- /dev/null +++ b/libs/shares/share_builder.go @@ -0,0 +1,227 @@ +package shares + +import ( + "encoding/binary" + "errors" + + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" +) + +type Builder struct { + namespace appns.Namespace + 
shareVersion uint8 + isFirstShare bool + isCompactShare bool + rawShareData []byte +} + +func NewEmptyBuilder() *Builder { + return &Builder{ + rawShareData: make([]byte, 0, appconsts.ShareSize), + } +} + +// Init() needs to be called right after this method +func NewBuilder(ns appns.Namespace, shareVersion uint8, isFirstShare bool) *Builder { + return &Builder{ + namespace: ns, + shareVersion: shareVersion, + isFirstShare: isFirstShare, + isCompactShare: isCompactShare(ns), + } +} + +func (b *Builder) Init() (*Builder, error) { + if b.isCompactShare { + if err := b.prepareCompactShare(); err != nil { + return nil, err + } + } else { + if err := b.prepareSparseShare(); err != nil { + return nil, err + } + } + + return b, nil +} + +func (b *Builder) AvailableBytes() int { + return appconsts.ShareSize - len(b.rawShareData) +} + +func (b *Builder) ImportRawShare(rawBytes []byte) *Builder { + b.rawShareData = rawBytes + return b +} + +func (b *Builder) AddData(rawData []byte) (rawDataLeftOver []byte) { + // find the len left in the pending share + pendingLeft := appconsts.ShareSize - len(b.rawShareData) + + // if we can simply add the tx to the share without creating a new + // pending share, do so and return + if len(rawData) <= pendingLeft { + b.rawShareData = append(b.rawShareData, rawData...) + return nil + } + + // if we can only add a portion of the rawData to the pending share, + // then we add it and add the pending share to the finalized shares. + chunk := rawData[:pendingLeft] + b.rawShareData = append(b.rawShareData, chunk...) + + // We need to finish this share and start a new one + // so we return the leftover to be written into a new share + return rawData[pendingLeft:] +} + +func (b *Builder) Build() (*Share, error) { + return NewShare(b.rawShareData) +} + +// IsEmptyShare returns true if no data has been written to the share +func (b *Builder) IsEmptyShare() bool { + expectedLen := appconsts.NamespaceSize + appconsts.ShareInfoBytes + if b.isCompactShare { + expectedLen += appconsts.CompactShareReservedBytes + } + if b.isFirstShare { + expectedLen += appconsts.SequenceLenBytes + } + return len(b.rawShareData) == expectedLen +} + +func (b *Builder) ZeroPadIfNecessary() (bytesOfPadding int) { + b.rawShareData, bytesOfPadding = zeroPadIfNecessary(b.rawShareData, appconsts.ShareSize) + return bytesOfPadding +} + +// isEmptyReservedBytes returns true if the reserved bytes are empty. +func (b *Builder) isEmptyReservedBytes() (bool, error) { + indexOfReservedBytes := b.indexOfReservedBytes() + reservedBytes, err := ParseReservedBytes(b.rawShareData[indexOfReservedBytes : indexOfReservedBytes+appconsts.CompactShareReservedBytes]) + if err != nil { + return false, err + } + return reservedBytes == 0, nil +} + +// indexOfReservedBytes returns the index of the reserved bytes in the share. +func (b *Builder) indexOfReservedBytes() int { + if b.isFirstShare { + // if the share is the first share, the reserved bytes follow the namespace, info byte, and sequence length + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + appconsts.SequenceLenBytes + } + // if the share is not the first share, the reserved bytes follow the namespace and info byte + return appconsts.NamespaceSize + appconsts.ShareInfoBytes +} + +// indexOfInfoBytes returns the index of the InfoBytes. 
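// As a sanity check on the offsets used by these index helpers: with a 33-byte
// namespace, a 1-byte info byte, and a 4-byte sequence length, the info byte
// sits at index 33, and the reserved bytes start at index 38 in the first
// compact share or index 34 in a continuation compact share. A tiny sketch of
// that arithmetic with plain literals:

package main

import "fmt"

func main() {
	const (
		namespaceSize = 33 // appconsts.NamespaceSize
		infoBytes     = 1  // appconsts.ShareInfoBytes
		seqLenBytes   = 4  // appconsts.SequenceLenBytes
	)
	fmt.Println(namespaceSize)                           // 33: index of the info byte
	fmt.Println(namespaceSize + infoBytes + seqLenBytes) // 38: reserved bytes index in the first compact share
	fmt.Println(namespaceSize + infoBytes)               // 34: reserved bytes index in a continuation compact share
}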
+func (b *Builder) indexOfInfoBytes() int { + // the info byte is immediately after the namespace + return appconsts.NamespaceSize +} + +// MaybeWriteReservedBytes will be a no-op if the reserved bytes +// have already been populated. If the reserved bytes are empty, it will write +// the location of the next unit of data to the reserved bytes. +func (b *Builder) MaybeWriteReservedBytes() error { + if !b.isCompactShare { + return errors.New("this is not a compact share") + } + + empty, err := b.isEmptyReservedBytes() + if err != nil { + return err + } + if !empty { + return nil + } + + byteIndexOfNextUnit := len(b.rawShareData) + reservedBytes, err := NewReservedBytes(uint32(byteIndexOfNextUnit)) + if err != nil { + return err + } + + indexOfReservedBytes := b.indexOfReservedBytes() + // overwrite the reserved bytes of the pending share + for i := 0; i < appconsts.CompactShareReservedBytes; i++ { + b.rawShareData[indexOfReservedBytes+i] = reservedBytes[i] + } + return nil +} + +// writeSequenceLen writes the sequence length to the first share. +func (b *Builder) WriteSequenceLen(sequenceLen uint32) error { + if b == nil { + return errors.New("the builder object is not initialized (is nil)") + } + if !b.isFirstShare { + return errors.New("not the first share") + } + sequenceLenBuf := make([]byte, appconsts.SequenceLenBytes) + binary.BigEndian.PutUint32(sequenceLenBuf, sequenceLen) + + for i := 0; i < appconsts.SequenceLenBytes; i++ { + b.rawShareData[appconsts.NamespaceSize+appconsts.ShareInfoBytes+i] = sequenceLenBuf[i] + } + + return nil +} + +// FlipSequenceStart flips the sequence start indicator of the share provided +func (b *Builder) FlipSequenceStart() { + infoByteIndex := b.indexOfInfoBytes() + + // the sequence start indicator is the last bit of the info byte so flip the + // last bit + b.rawShareData[infoByteIndex] = b.rawShareData[infoByteIndex] ^ 0x01 +} + +func (b *Builder) prepareCompactShare() error { + shareData := make([]byte, 0, appconsts.ShareSize) + infoByte, err := NewInfoByte(b.shareVersion, b.isFirstShare) + if err != nil { + return err + } + placeholderSequenceLen := make([]byte, appconsts.SequenceLenBytes) + placeholderReservedBytes := make([]byte, appconsts.CompactShareReservedBytes) + + shareData = append(shareData, b.namespace.Bytes()...) + shareData = append(shareData, byte(infoByte)) + + if b.isFirstShare { + shareData = append(shareData, placeholderSequenceLen...) + } + + shareData = append(shareData, placeholderReservedBytes...) + + b.rawShareData = shareData + + return nil +} + +func (b *Builder) prepareSparseShare() error { + shareData := make([]byte, 0, appconsts.ShareSize) + infoByte, err := NewInfoByte(b.shareVersion, b.isFirstShare) + if err != nil { + return err + } + placeholderSequenceLen := make([]byte, appconsts.SequenceLenBytes) + + shareData = append(shareData, b.namespace.Bytes()...) + shareData = append(shareData, byte(infoByte)) + + if b.isFirstShare { + shareData = append(shareData, placeholderSequenceLen...) 
+ } + + b.rawShareData = shareData + return nil +} + +func isCompactShare(ns appns.Namespace) bool { + return ns.IsTx() || ns.IsPayForBlob() +} diff --git a/libs/shares/share_builder_test.go b/libs/shares/share_builder_test.go new file mode 100644 index 00000000000..6187637bdf7 --- /dev/null +++ b/libs/shares/share_builder_test.go @@ -0,0 +1,321 @@ +package shares + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" +) + +func TestShareBuilderIsEmptyShare(t *testing.T) { + type testCase struct { + name string + builder *Builder + data []byte // input data + want bool + } + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + + testCases := []testCase{ + { + name: "first compact share empty", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, true), + data: nil, + want: true, + }, + { + name: "first compact share not empty", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, true), + data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + want: false, + }, + { + name: "first sparse share empty", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, true), + data: nil, + want: true, + }, + { + name: "first sparse share not empty", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, true), + data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + want: false, + }, + { + name: "continues compact share empty", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, false), + data: nil, + want: true, + }, + { + name: "continues compact share not empty", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, false), + data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + want: false, + }, + { + name: "continues sparse share not empty", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, false), + data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + want: false, + }, + { + name: "continues sparse share empty", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, false), + data: nil, + want: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := tc.builder.Init() + require.NoError(t, err) + tc.builder.AddData(tc.data) + assert.Equal(t, tc.want, tc.builder.IsEmptyShare()) + }) + } +} + +func TestShareBuilderWriteSequenceLen(t *testing.T) { + type testCase struct { + name string + builder *Builder + wantLen uint32 + wantErr bool + } + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + + testCases := []testCase{ + { + name: "first share", + builder: NewBuilder(ns1, 1, true), + wantLen: 10, + wantErr: false, + }, + { + name: "first share with long sequence", + builder: NewBuilder(ns1, 1, true), + wantLen: 323, + wantErr: false, + }, + { + name: "continuation sparse share", + builder: NewBuilder(ns1, 1, false), + wantLen: 10, + wantErr: true, + }, + { + name: "compact share", + builder: NewBuilder(appns.TxNamespace, 1, true), + wantLen: 10, + wantErr: false, + }, + { + name: "continuation compact share", + builder: NewBuilder(ns1, 1, false), + wantLen: 10, + wantErr: true, + }, + { + name: "nil builder", + builder: &Builder{}, + wantLen: 10, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := tc.builder.Init() + require.NoError(t, err) + if err := tc.builder.WriteSequenceLen(tc.wantLen); tc.wantErr { 
+ assert.Error(t, err) + return + } + + tc.builder.ZeroPadIfNecessary() + share, err := tc.builder.Build() + require.NoError(t, err) + + len, err := share.SequenceLen() + require.NoError(t, err) + + assert.Equal(t, tc.wantLen, len) + }) + } +} + +func TestShareBuilderAddData(t *testing.T) { + type testCase struct { + name string + builder *Builder + data []byte // input data + want []byte + } + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + + testCases := []testCase{ + { + name: "small share", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, true), + data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + want: nil, + }, + { + name: "exact fit first compact share", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, true), + data: bytes.Repeat([]byte{1}, appconsts.ShareSize-appconsts.NamespaceSize-appconsts.ShareInfoBytes-appconsts.CompactShareReservedBytes-appconsts.SequenceLenBytes), + want: nil, + }, + { + name: "exact fit first sparse share", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, true), + data: bytes.Repeat([]byte{1}, appconsts.ShareSize-appconsts.NamespaceSize-appconsts.SequenceLenBytes-1 /*1 = info byte*/), + want: nil, + }, + { + name: "exact fit continues compact share", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, false), + data: bytes.Repeat([]byte{1}, appconsts.ShareSize-appconsts.NamespaceSize-appconsts.CompactShareReservedBytes-1 /*1 = info byte*/), + want: nil, + }, + { + name: "exact fit continues sparse share", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, false), + data: bytes.Repeat([]byte{1}, appconsts.ShareSize-appconsts.NamespaceSize-1 /*1 = info byte*/), + want: nil, + }, + { + name: "oversize first compact share", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, true), + data: bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +appconsts.ShareSize-appconsts.NamespaceSize-appconsts.CompactShareReservedBytes-appconsts.SequenceLenBytes-1 /*1 = info byte*/), + want: []byte{1}, + }, + { + name: "oversize first sparse share", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, true), + data: bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +appconsts.ShareSize-appconsts.NamespaceSize-appconsts.SequenceLenBytes-1 /*1 = info byte*/), + want: []byte{1}, + }, + { + name: "oversize continues compact share", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, false), + data: bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +appconsts.ShareSize-appconsts.NamespaceSize-appconsts.CompactShareReservedBytes-1 /*1 = info byte*/), + want: []byte{1}, + }, + { + name: "oversize continues sparse share", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, false), + data: bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +appconsts.ShareSize-appconsts.NamespaceSize-1 /*1 = info byte*/), + want: []byte{1}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := tc.builder.Init() + require.NoError(t, err) + + got := tc.builder.AddData(tc.data) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestShareBuilderImportRawData(t *testing.T) { + type testCase struct { + name string + shareBytes []byte + want []byte + wantErr bool + } + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + + firstSparseShare := append(ns1.Bytes(), []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) 
+ + continuationSparseShare := append(ns1.Bytes(), []byte{ + 0, // info byte + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + + firstCompactShare := append(appns.TxNamespace.Bytes(), []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + 0, 0, 0, 15, // reserved bytes + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + + continuationCompactShare := append(appns.TxNamespace.Bytes(), []byte{ + 0, // info byte + 0, 0, 0, 0, // reserved bytes + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + + oversizedImport := append( + append( + ns1.Bytes(), + []byte{ + 0, // info byte + 0, 0, 0, 0, // reserved bytes + }...), bytes.Repeat([]byte{1}, 513)...) // data + + testCases := []testCase{ + { + name: "first sparse share", + shareBytes: firstSparseShare, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "continuation sparse share", + shareBytes: continuationSparseShare, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "first compact share", + shareBytes: firstCompactShare, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "continuation compact share", + shareBytes: continuationCompactShare, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "oversized import", + shareBytes: oversizedImport, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + b := NewEmptyBuilder().ImportRawShare(tc.shareBytes) + b.ZeroPadIfNecessary() + builtShare, err := b.Build() + if tc.wantErr { + assert.Error(t, err) + return + } + + rawData, err := builtShare.RawData() + if tc.wantErr { + assert.Error(t, err) + return + } + // Since rawData has padding, we need to use contains + if !bytes.Contains(rawData, tc.want) { + t.Errorf(fmt.Sprintf("%#v does not contain %#v", rawData, tc.want)) + } + }) + } +} diff --git a/libs/shares/share_sequence.go b/libs/shares/share_sequence.go new file mode 100644 index 00000000000..cb9ac2e5009 --- /dev/null +++ b/libs/shares/share_sequence.go @@ -0,0 +1,84 @@ +package shares + +import ( + "fmt" + + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" +) + +// ShareSequence represents a contiguous sequence of shares that are part of the +// same namespace and blob. For compact shares, one share sequence exists per +// reserved namespace. For sparse shares, one share sequence exists per blob. +type ShareSequence struct { + Namespace appns.Namespace + Shares []Share +} + +// RawData returns the raw share data of this share sequence. The raw data does +// not contain the namespace ID, info byte, sequence length, or reserved bytes. +func (s ShareSequence) RawData() (data []byte, err error) { + for _, share := range s.Shares { + raw, err := share.RawData() + if err != nil { + return []byte{}, err + } + data = append(data, raw...) + } + + sequenceLen, err := s.SequenceLen() + if err != nil { + return []byte{}, err + } + // trim any padding that may have been added to the last share + return data[:sequenceLen], nil +} + +func (s ShareSequence) SequenceLen() (uint32, error) { + if len(s.Shares) == 0 { + return 0, fmt.Errorf("invalid sequence length because share sequence %v has no shares", s) + } + firstShare := s.Shares[0] + return firstShare.SequenceLen() +} + +// CompactSharesNeeded returns the number of compact shares needed to store a +// sequence of length sequenceLen. The parameter sequenceLen is the number +// of bytes of transactions or intermediate state roots in a sequence. 
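// As a worked example of the share-counting helpers below: with the derived
// sizes FirstCompactShareContentSize = 470 and
// ContinuationCompactShareContentSize = 474, a 1000-byte sequence needs three
// compact shares (470 + 474 = 944 < 1000, then 944 + 474 = 1418 >= 1000). A
// small standalone sketch that reproduces the same loop with plain integers:

package main

import "fmt"

func main() {
	const (
		first        = 470 // FirstCompactShareContentSize
		continuation = 474 // ContinuationCompactShareContentSize
	)
	sequenceLen := 1000

	shares := 1
	available := first
	for available < sequenceLen {
		available += continuation
		shares++
	}
	fmt.Println(shares) // 3
}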
+func CompactSharesNeeded(sequenceLen int) (sharesNeeded int) { + if sequenceLen == 0 { + return 0 + } + + if sequenceLen < appconsts.FirstCompactShareContentSize { + return 1 + } + + bytesAvailable := appconsts.FirstCompactShareContentSize + sharesNeeded++ + for bytesAvailable < sequenceLen { + bytesAvailable += appconsts.ContinuationCompactShareContentSize + sharesNeeded++ + } + return sharesNeeded +} + +// SparseSharesNeeded returns the number of shares needed to store a sequence of +// length sequenceLen. +func SparseSharesNeeded(sequenceLen uint32) (sharesNeeded int) { + if sequenceLen == 0 { + return 0 + } + + if sequenceLen < appconsts.FirstSparseShareContentSize { + return 1 + } + + bytesAvailable := appconsts.FirstSparseShareContentSize + sharesNeeded++ + for uint32(bytesAvailable) < sequenceLen { + bytesAvailable += appconsts.ContinuationSparseShareContentSize + sharesNeeded++ + } + return sharesNeeded +} diff --git a/libs/shares/share_sequence_test.go b/libs/shares/share_sequence_test.go new file mode 100644 index 00000000000..869433f2892 --- /dev/null +++ b/libs/shares/share_sequence_test.go @@ -0,0 +1,138 @@ +package shares + +import ( + "bytes" + "encoding/binary" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" +) + +func TestShareSequenceRawData(t *testing.T) { + type testCase struct { + name string + shareSequence ShareSequence + want []byte + wantErr bool + } + blobNamespace := appns.RandomBlobNamespace() + + testCases := []testCase{ + { + name: "empty share sequence", + shareSequence: ShareSequence{ + Namespace: appns.TxNamespace, + Shares: []Share{}, + }, + want: []byte{}, + wantErr: false, + }, + { + name: "one empty share", + shareSequence: ShareSequence{ + Namespace: appns.TxNamespace, + Shares: []Share{ + shareWithData(blobNamespace, true, 0, []byte{}), + }, + }, + want: []byte{}, + wantErr: false, + }, + { + name: "one share with one byte", + shareSequence: ShareSequence{ + Namespace: appns.TxNamespace, + Shares: []Share{ + shareWithData(blobNamespace, true, 1, []byte{0x0f}), + }, + }, + want: []byte{0xf}, + wantErr: false, + }, + { + name: "removes padding from last share", + shareSequence: ShareSequence{ + Namespace: appns.TxNamespace, + Shares: []Share{ + shareWithData(blobNamespace, true, appconsts.FirstSparseShareContentSize+1, bytes.Repeat([]byte{0xf}, appconsts.FirstSparseShareContentSize)), + shareWithData(blobNamespace, false, 0, []byte{0x0f}), + }, + }, + want: bytes.Repeat([]byte{0xf}, appconsts.FirstSparseShareContentSize+1), + wantErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := tc.shareSequence.RawData() + if tc.wantErr { + assert.Error(t, err) + return + } + assert.Equal(t, tc.want, got) + }) + } +} + +func Test_compactSharesNeeded(t *testing.T) { + type testCase struct { + sequenceLen int + want int + } + testCases := []testCase{ + {0, 0}, + {1, 1}, + {2, 1}, + {appconsts.FirstCompactShareContentSize, 1}, + {appconsts.FirstCompactShareContentSize + 1, 2}, + {appconsts.FirstCompactShareContentSize + appconsts.ContinuationCompactShareContentSize, 2}, + {appconsts.FirstCompactShareContentSize + appconsts.ContinuationCompactShareContentSize*100, 101}, + } + for _, tc := range testCases { + got := CompactSharesNeeded(tc.sequenceLen) + assert.Equal(t, tc.want, got) + } +} + +func Test_sparseSharesNeeded(t *testing.T) { + type testCase struct { + sequenceLen uint32 + want 
int + } + testCases := []testCase{ + {0, 0}, + {1, 1}, + {2, 1}, + {appconsts.FirstSparseShareContentSize, 1}, + {appconsts.FirstSparseShareContentSize + 1, 2}, + {appconsts.FirstSparseShareContentSize + appconsts.ContinuationSparseShareContentSize, 2}, + {appconsts.FirstSparseShareContentSize + appconsts.ContinuationCompactShareContentSize*2, 3}, + {appconsts.FirstSparseShareContentSize + appconsts.ContinuationCompactShareContentSize*99, 100}, + {1000, 3}, + {10000, 21}, + {100000, 210}, + } + for _, tc := range testCases { + got := SparseSharesNeeded(tc.sequenceLen) + assert.Equal(t, tc.want, got) + } +} + +func shareWithData(namespace appns.Namespace, isSequenceStart bool, sequenceLen uint32, data []byte) (rawShare Share) { + infoByte, _ := NewInfoByte(appconsts.ShareVersionZero, isSequenceStart) + rawShareBytes := make([]byte, 0, appconsts.ShareSize) + rawShareBytes = append(rawShareBytes, namespace.Bytes()...) + rawShareBytes = append(rawShareBytes, byte(infoByte)) + if isSequenceStart { + sequenceLenBuf := make([]byte, appconsts.SequenceLenBytes) + binary.BigEndian.PutUint32(sequenceLenBuf, sequenceLen) + rawShareBytes = append(rawShareBytes, sequenceLenBuf...) + } + rawShareBytes = append(rawShareBytes, data...) + + return padShare(Share{data: rawShareBytes}) +} diff --git a/libs/shares/shares.go b/libs/shares/shares.go new file mode 100644 index 00000000000..a8eccc067ae --- /dev/null +++ b/libs/shares/shares.go @@ -0,0 +1,213 @@ +package shares + +import ( + "bytes" + "encoding/binary" + "fmt" + + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" +) + +// Share contains the raw share data (including namespace ID). +type Share struct { + data []byte +} + +func (s *Share) Namespace() (appns.Namespace, error) { + if len(s.data) < appns.NamespaceSize { + panic(fmt.Sprintf("share %s is too short to contain a namespace", s)) + } + return appns.From(s.data[:appns.NamespaceSize]) +} + +func (s *Share) InfoByte() (InfoByte, error) { + if len(s.data) < appns.NamespaceSize+appconsts.ShareInfoBytes { + return 0, fmt.Errorf("share %s is too short to contain an info byte", s) + } + // the info byte is the first byte after the namespace + unparsed := s.data[appns.NamespaceSize] + return ParseInfoByte(unparsed) +} + +func NewShare(data []byte) (*Share, error) { + if err := validateSize(data); err != nil { + return nil, err + } + return &Share{data}, nil +} + +func (s *Share) Validate() error { + return validateSize(s.data) +} + +func validateSize(data []byte) error { + if len(data) != appconsts.ShareSize { + return fmt.Errorf("share data must be %d bytes, got %d", appconsts.ShareSize, len(data)) + } + return nil +} + +func (s *Share) Len() int { + return len(s.data) +} + +func (s *Share) Version() (uint8, error) { + infoByte, err := s.InfoByte() + if err != nil { + return 0, err + } + return infoByte.Version(), nil +} + +func (s *Share) DoesSupportVersions(supportedShareVersions []uint8) error { + ver, err := s.Version() + if err != nil { + return err + } + if !bytes.Contains(supportedShareVersions, []byte{ver}) { + return fmt.Errorf("unsupported share version %v is not present in the list of supported share versions %v", ver, supportedShareVersions) + } + return nil +} + +// IsSequenceStart returns true if this is the first share in a sequence. 
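+// The sequence start bit lives in the info byte immediately after the
+// namespace, so a share whose info byte is 1 (version 0, start bit set)
+// reports true, while an info byte of 0 reports false. A rough usage sketch:
+//
+//	isStart, err := share.IsSequenceStart()
+//	if err != nil {
+//		// the share is too short to contain an info byte
+//	}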
+func (s *Share) IsSequenceStart() (bool, error) { + infoByte, err := s.InfoByte() + if err != nil { + return false, err + } + return infoByte.IsSequenceStart(), nil +} + +// IsCompactShare returns true if this is a compact share. +func (s Share) IsCompactShare() (bool, error) { + ns, err := s.Namespace() + if err != nil { + return false, err + } + isCompact := ns.IsTx() || ns.IsPayForBlob() + return isCompact, nil +} + +// SequenceLen returns the sequence length of this *share and optionally an +// error. It returns 0, nil if this is a continuation share (i.e. doesn't +// contain a sequence length). +func (s *Share) SequenceLen() (sequenceLen uint32, err error) { + isSequenceStart, err := s.IsSequenceStart() + if err != nil { + return 0, err + } + if !isSequenceStart { + return 0, nil + } + + start := appconsts.NamespaceSize + appconsts.ShareInfoBytes + end := start + appconsts.SequenceLenBytes + if len(s.data) < end { + return 0, fmt.Errorf("share %s with length %d is too short to contain a sequence length", + s, len(s.data)) + } + return binary.BigEndian.Uint32(s.data[start:end]), nil +} + +// IsPadding returns whether this *share is padding or not. +func (s *Share) IsPadding() (bool, error) { + isNamespacePadding, err := s.isNamespacePadding() + if err != nil { + return false, err + } + isTailPadding, err := s.isTailPadding() + if err != nil { + return false, err + } + isReservedPadding, err := s.isReservedPadding() + if err != nil { + return false, err + } + return isNamespacePadding || isTailPadding || isReservedPadding, nil +} + +func (s *Share) isNamespacePadding() (bool, error) { + isSequenceStart, err := s.IsSequenceStart() + if err != nil { + return false, err + } + sequenceLen, err := s.SequenceLen() + if err != nil { + return false, err + } + + return isSequenceStart && sequenceLen == 0, nil +} + +func (s *Share) isTailPadding() (bool, error) { + ns, err := s.Namespace() + if err != nil { + return false, err + } + return ns.IsTailPadding(), nil +} + +func (s *Share) isReservedPadding() (bool, error) { + ns, err := s.Namespace() + if err != nil { + return false, err + } + return ns.IsReservedPadding(), nil +} + +func (s *Share) ToBytes() []byte { + return s.data +} + +// RawData returns the raw share data. The raw share data does not contain the +// namespace ID, info byte, sequence length, or reserved bytes. 
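+// Concretely, the payload of a first sparse share starts at byte
+// NamespaceSize + ShareInfoBytes + SequenceLenBytes, a continuation sparse
+// share's payload starts right after the info byte, and compact shares
+// additionally skip CompactShareReservedBytes (see rawDataStartIndex below).
+// A rough usage sketch:
+//
+//	payload, err := share.RawData()
+//	if err != nil {
+//		// the share is shorter than its expected header
+//	}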
+func (s *Share) RawData() (rawData []byte, err error) { + if len(s.data) < s.rawDataStartIndex() { + return rawData, fmt.Errorf("share %s is too short to contain raw data", s) + } + + return s.data[s.rawDataStartIndex():], nil +} + +func (s *Share) rawDataStartIndex() int { + isStart, err := s.IsSequenceStart() + if err != nil { + panic(err) + } + isCompact, err := s.IsCompactShare() + if err != nil { + panic(err) + } + if isStart && isCompact { + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + appconsts.SequenceLenBytes + appconsts.CompactShareReservedBytes + } else if isStart && !isCompact { + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + appconsts.SequenceLenBytes + } else if !isStart && isCompact { + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + appconsts.CompactShareReservedBytes + } else if !isStart && !isCompact { + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + } else { + panic(fmt.Sprintf("unable to determine the rawDataStartIndex for share %s", s.data)) + } +} + +func ToBytes(shares []Share) (bytes [][]byte) { + bytes = make([][]byte, len(shares)) + for i, share := range shares { + bytes[i] = []byte(share.data) + } + return bytes +} + +func FromBytes(bytes [][]byte) (shares []Share, err error) { + for _, b := range bytes { + share, err := NewShare(b) + if err != nil { + return nil, err + } + shares = append(shares, *share) + } + return shares, nil +} diff --git a/libs/shares/shares_test.go b/libs/shares/shares_test.go new file mode 100644 index 00000000000..412c2d57e57 --- /dev/null +++ b/libs/shares/shares_test.go @@ -0,0 +1,323 @@ +package shares + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" +) + +// // TestPadFirstIndexedBlob ensures that we are adding padding to the first share +// // instead of calculating the value. +// func TestPadFirstIndexedBlob(t *testing.T) { +// tx := tmrand.Bytes(300) +// blob := tmrand.Bytes(300) +// index := 100 +// indexedTx, err := coretypes.MarshalIndexWrapper(tx, 100) +// require.NoError(t, err) + +// bd := coretypes.Data{ +// Txs: []coretypes.Tx{indexedTx}, +// Blobs: []coretypes.Blob{ +// { +// NamespaceVersion: appns.RandomBlobNamespace().Version, +// NamespaceID: appns.RandomBlobNamespace().ID, +// Data: blob, +// ShareVersion: appconsts.ShareVersionZero, +// }, +// }, +// SquareSize: 64, +// } + +// shares, err := Split(bd, true) +// require.NoError(t, err) + +// resShare, err := shares[index].RawData() +// require.NoError(t, err) + +// require.True(t, bytes.Contains(resShare, blob)) +// } + +func TestSequenceLen(t *testing.T) { + type testCase struct { + name string + share Share + wantLen uint32 + wantErr bool + } + sparseNamespaceID := bytes.Repeat([]byte{1}, appconsts.NamespaceSize) + firstShare := append(sparseNamespaceID, + []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + firstShareWithLongSequence := append(sparseNamespaceID, + []byte{ + 1, // info byte + 0, 0, 1, 67, // sequence len + }...) + continuationShare := append(sparseNamespaceID, + []byte{ + 0, // info byte + }...) + compactShare := append(appns.TxNamespace.Bytes(), + []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + }...) + noInfoByte := appns.TxNamespace.Bytes() + noSequenceLen := append(appns.TxNamespace.Bytes(), + []byte{ + 1, // info byte + }...) 
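+	// The shares above are built by hand as: namespace bytes, a 1-byte info
+	// byte, and (for sequence starts) a 4-byte big-endian sequence length.
+	// For example, the length bytes 0, 0, 1, 67 decode to 1*256 + 67 = 323.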
+ testCases := []testCase{ + { + name: "first share", + share: Share{data: firstShare}, + wantLen: 10, + wantErr: false, + }, + { + name: "first share with long sequence", + share: Share{data: firstShareWithLongSequence}, + wantLen: 323, + wantErr: false, + }, + { + name: "continuation share", + share: Share{data: continuationShare}, + wantLen: 0, + wantErr: false, + }, + { + name: "compact share", + share: Share{data: compactShare}, + wantLen: 10, + wantErr: false, + }, + { + name: "no info byte returns error", + share: Share{data: noInfoByte}, + wantLen: 0, + wantErr: true, + }, + { + name: "no sequence len returns error", + share: Share{data: noSequenceLen}, + wantLen: 0, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + len, err := tc.share.SequenceLen() + + if tc.wantErr { + assert.Error(t, err) + return + } + if tc.wantLen != len { + t.Errorf("want %d, got %d", tc.wantLen, len) + } + }) + } +} + +func TestRawData(t *testing.T) { + type testCase struct { + name string + share Share + want []byte + wantErr bool + } + sparseNamespaceID := appns.MustNewV0(bytes.Repeat([]byte{0x1}, appns.NamespaceVersionZeroIDSize)) + firstSparseShare := append( + sparseNamespaceID.Bytes(), + []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + continuationSparseShare := append( + sparseNamespaceID.Bytes(), + []byte{ + 0, // info byte + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + firstCompactShare := append(appns.TxNamespace.Bytes(), + []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + 0, 0, 0, 15, // reserved bytes + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + continuationCompactShare := append(appns.TxNamespace.Bytes(), + []byte{ + 0, // info byte + 0, 0, 0, 0, // reserved bytes + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + noSequenceLen := append(appns.TxNamespace.Bytes(), + []byte{ + 1, // info byte + }...) + notEnoughSequenceLenBytes := append(appns.TxNamespace.Bytes(), + []byte{ + 1, // info byte + 0, 0, 10, // sequence len + }...) 
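+	// The compact shares above carry 4 reserved bytes after the sequence
+	// length (first share) or after the info byte (continuation share);
+	// sparse shares have no reserved bytes, so their raw data starts earlier.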
+ testCases := []testCase{ + { + name: "first sparse share", + share: Share{data: firstSparseShare}, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "continuation sparse share", + share: Share{data: continuationSparseShare}, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "first compact share", + share: Share{data: firstCompactShare}, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "continuation compact share", + share: Share{data: continuationCompactShare}, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "no sequence len returns error", + share: Share{data: noSequenceLen}, + wantErr: true, + }, + { + name: "not enough sequence len bytes returns error", + share: Share{data: notEnoughSequenceLenBytes}, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rawData, err := tc.share.RawData() + if tc.wantErr { + assert.Error(t, err) + return + } + assert.Equal(t, tc.want, rawData) + }) + } +} + +func TestIsCompactShare(t *testing.T) { + type testCase struct { + name string + share Share + want bool + } + + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + txShare, _ := zeroPadIfNecessary(appns.TxNamespace.Bytes(), appconsts.ShareSize) + pfbTxShare, _ := zeroPadIfNecessary(appns.PayForBlobNamespace.Bytes(), appconsts.ShareSize) + blobShare, _ := zeroPadIfNecessary(ns1.Bytes(), appconsts.ShareSize) + + testCases := []testCase{ + { + name: "tx share", + share: Share{data: txShare}, + want: true, + }, + { + name: "pfb tx share", + share: Share{data: pfbTxShare}, + want: true, + }, + { + name: "blob share", + share: Share{data: blobShare}, + want: false, + }, + } + + for _, tc := range testCases { + got, err := tc.share.IsCompactShare() + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + } +} + +// func TestIsPadding(t *testing.T) { +// type testCase struct { +// name string +// share Share +// want bool +// wantErr bool +// } +// emptyShare := Share{} +// blobShare, _ := zeroPadIfNecessary( +// append( +// ns1.Bytes(), +// []byte{ +// 1, // info byte +// 0, 0, 0, 1, // sequence len +// 0xff, // data +// }..., +// ), +// appconsts.ShareSize) + +// nsPadding, err := NamespacePaddingShare(ns1) +// require.NoError(t, err) + +// tailPadding, err := TailPaddingShare() +// require.NoError(t, err) + +// reservedPaddingShare, err := ReservedPaddingShare() +// require.NoError(t, err) + +// testCases := []testCase{ +// { +// name: "empty share", +// share: emptyShare, +// wantErr: true, +// }, +// { +// name: "blob share", +// share: Share{data: blobShare}, +// want: false, +// }, +// { +// name: "namespace padding", +// share: nsPadding, +// want: true, +// }, +// { +// name: "tail padding", +// share: tailPadding, +// want: true, +// }, +// { +// name: "reserved padding", +// share: reservedPaddingShare, +// want: true, +// }, +// } + +// for _, tc := range testCases { +// t.Run(tc.name, func(t *testing.T) { +// got, err := tc.share.IsPadding() +// if tc.wantErr { +// assert.Error(t, err) +// return +// } +// require.NoError(t, err) +// assert.Equal(t, tc.want, got) +// }) +// } +// } diff --git a/libs/shares/split_compact_shares.go b/libs/shares/split_compact_shares.go new file mode 100644 index 00000000000..41b80e5eae5 --- /dev/null +++ b/libs/shares/split_compact_shares.go @@ -0,0 +1,233 @@ +package shares + +import ( + "encoding/binary" + "fmt" + + coretypes "github.com/tendermint/tendermint/types" + + 
"github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" +) + +type ShareRange struct { + // Start is the index of the first share occupied by this range. + Start int + // End is the index of the last share occupied by this range. + End int +} + +// CompactShareSplitter will write raw data compactly across a progressively +// increasing set of shares. It is used to lazily split block data such as +// transactions or intermediate state roots into shares. +type CompactShareSplitter struct { + shares []Share + // pendingShare Share + shareBuilder *Builder + namespace appns.Namespace + done bool + shareVersion uint8 + // shareRanges is a map from a transaction key to the range of shares it + // occupies. The range assumes this compact share splitter is the only + // thing in the data square (e.g. the range for the first tx starts at index + // 0). + shareRanges map[coretypes.TxKey]ShareRange +} + +// NewCompactShareSplitter returns a CompactShareSplitter using the provided +// namespace and shareVersion. +func NewCompactShareSplitter(ns appns.Namespace, shareVersion uint8) *CompactShareSplitter { + sb, err := NewBuilder(ns, shareVersion, true).Init() + if err != nil { + panic(err) + } + + return &CompactShareSplitter{ + shares: []Share{}, + namespace: ns, + shareVersion: shareVersion, + shareRanges: map[coretypes.TxKey]ShareRange{}, + shareBuilder: sb, + } +} + +// WriteTx adds the delimited data for the provided tx to the underlying compact +// share splitter. +func (css *CompactShareSplitter) WriteTx(tx coretypes.Tx) error { + rawData, err := MarshalDelimitedTx(tx) + if err != nil { + return fmt.Errorf("included Tx in mem-pool that can not be encoded %v", tx) + } + + startShare := len(css.shares) + + if err := css.write(rawData); err != nil { + return err + } + endShare := css.Count() - 1 + + css.shareRanges[tx.Key()] = ShareRange{ + Start: startShare, + End: endShare, + } + return nil +} + +// write adds the delimited data to the underlying compact shares. +func (css *CompactShareSplitter) write(rawData []byte) error { + if css.done { + // remove the last element + if !css.shareBuilder.IsEmptyShare() { + css.shares = css.shares[:len(css.shares)-1] + } + css.done = false + } + + if err := css.shareBuilder.MaybeWriteReservedBytes(); err != nil { + return err + } + + for { + rawDataLeftOver := css.shareBuilder.AddData(rawData) + if rawDataLeftOver == nil { + break + } + if err := css.stackPending(); err != nil { + return err + } + + rawData = rawDataLeftOver + } + + if css.shareBuilder.AvailableBytes() == 0 { + if err := css.stackPending(); err != nil { + return err + } + } + return nil +} + +// stackPending will build & add the pending share to accumulated shares +func (css *CompactShareSplitter) stackPending() error { + pendingShare, err := css.shareBuilder.Build() + if err != nil { + return err + } + css.shares = append(css.shares, *pendingShare) + + // Now we need to create a new builder + css.shareBuilder, err = NewBuilder(css.namespace, css.shareVersion, false).Init() + return err +} + +// Export finalizes and returns the underlying compact shares and a map of +// shareRanges. All share ranges in the map of shareRanges will be offset (i.e. +// incremented) by the shareRangeOffset provided. shareRangeOffset should be 0 +// for the first compact share sequence in the data square (transactions) but +// should be some non-zero number for subsequent compact share sequences (e.g. +// pfb txs). 
+func (css *CompactShareSplitter) Export(shareRangeOffset int) ([]Share, map[coretypes.TxKey]ShareRange, error) { + // apply the shareRangeOffset to all share ranges + shareRanges := make(map[coretypes.TxKey]ShareRange, len(css.shareRanges)) + + if css.isEmpty() { + return []Share{}, shareRanges, nil + } + + for k, v := range css.shareRanges { + shareRanges[k] = ShareRange{ + Start: v.Start + shareRangeOffset, + End: v.End + shareRangeOffset, + } + } + + // in case Export is called multiple times + if css.done { + return css.shares, shareRanges, nil + } + + var bytesOfPadding int + // add the pending share to the current shares before returning + if !css.shareBuilder.IsEmptyShare() { + bytesOfPadding = css.shareBuilder.ZeroPadIfNecessary() + if err := css.stackPending(); err != nil { + return []Share{}, shareRanges, err + } + } + + sequenceLen := css.sequenceLen(bytesOfPadding) + if err := css.writeSequenceLen(sequenceLen); err != nil { + return []Share{}, shareRanges, err + } + css.done = true + return css.shares, shareRanges, nil +} + +// writeSequenceLen writes the sequence length to the first share. +func (css *CompactShareSplitter) writeSequenceLen(sequenceLen uint32) error { + if css.isEmpty() { + return nil + } + + // We may find a more efficient way to write seqLen + b, err := NewBuilder(css.namespace, css.shareVersion, true).Init() + if err != nil { + return err + } + b.ImportRawShare(css.shares[0].ToBytes()) + if err := b.WriteSequenceLen(sequenceLen); err != nil { + return err + } + + firstShare, err := b.Build() + if err != nil { + return err + } + + // replace existing first share with new first share + css.shares[0] = *firstShare + + return nil +} + +// sequenceLen returns the total length in bytes of all units (transactions or +// intermediate state roots) written to this splitter. sequenceLen does not +// include the number of bytes occupied by the namespace ID, the share info +// byte, or the reserved bytes. sequenceLen does include the unit length +// delimiter prefixed to each unit. +func (css *CompactShareSplitter) sequenceLen(bytesOfPadding int) uint32 { + if len(css.shares) == 0 { + return 0 + } + if len(css.shares) == 1 { + return uint32(appconsts.FirstCompactShareContentSize) - uint32(bytesOfPadding) + } + + continuationSharesCount := len(css.shares) - 1 + continuationSharesSequenceLen := continuationSharesCount * appconsts.ContinuationCompactShareContentSize + return uint32(appconsts.FirstCompactShareContentSize + continuationSharesSequenceLen - bytesOfPadding) +} + +// isEmpty returns whether this compact share splitter is empty. +func (css *CompactShareSplitter) isEmpty() bool { + return len(css.shares) == 0 && css.shareBuilder.IsEmptyShare() +} + +// Count returns the number of shares that would be made if `Export` was invoked +// on this compact share splitter. +func (css *CompactShareSplitter) Count() (shareCount int) { + if !css.shareBuilder.IsEmptyShare() && !css.done { + // pending share is non-empty, so it will be zero padded and added to shares during export + return len(css.shares) + 1 + } + return len(css.shares) +} + +// MarshalDelimitedTx prefixes a transaction with the length of the transaction +// encoded as a varint. 
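+// For example, a 300-byte transaction is returned as 302 bytes: the 2-byte
+// uvarint encoding of 300 followed by the transaction itself.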
+func MarshalDelimitedTx(tx coretypes.Tx) ([]byte, error) { + lenBuf := make([]byte, binary.MaxVarintLen64) + length := uint64(len(tx)) + n := binary.PutUvarint(lenBuf, length) + return append(lenBuf[:n], tx...), nil +} diff --git a/libs/shares/split_compact_shares_test.go b/libs/shares/split_compact_shares_test.go new file mode 100644 index 00000000000..2bb77a4c004 --- /dev/null +++ b/libs/shares/split_compact_shares_test.go @@ -0,0 +1,394 @@ +package shares + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + coretypes "github.com/tendermint/tendermint/types" + + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" +) + +// fillShare returns a share filled with filler so that the share length +// is equal to appconsts.ShareSize. +func fillShare(share Share, filler byte) (paddedShare Share) { + return Share{data: append(share.data, bytes.Repeat([]byte{filler}, appconsts.ShareSize-len(share.data))...)} +} + +// padShare returns a share padded with trailing zeros. +func padShare(share Share) (paddedShare Share) { + return fillShare(share, 0) +} + +func TestCount(t *testing.T) { + type testCase struct { + transactions []coretypes.Tx + wantShareCount int + } + testCases := []testCase{ + {transactions: []coretypes.Tx{}, wantShareCount: 0}, + {transactions: []coretypes.Tx{[]byte{0}}, wantShareCount: 1}, + {transactions: []coretypes.Tx{bytes.Repeat([]byte{1}, 100)}, wantShareCount: 1}, + // Test with 1 byte over 1 share + {transactions: []coretypes.Tx{bytes.Repeat([]byte{1}, rawTxSize(appconsts.FirstCompactShareContentSize+1))}, wantShareCount: 2}, + {transactions: []coretypes.Tx{generateTx(1)}, wantShareCount: 1}, + {transactions: []coretypes.Tx{generateTx(2)}, wantShareCount: 2}, + {transactions: []coretypes.Tx{generateTx(20)}, wantShareCount: 20}, + } + for _, tc := range testCases { + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + for _, transaction := range tc.transactions { + err := css.WriteTx(transaction) + require.NoError(t, err) + } + got := css.Count() + if got != tc.wantShareCount { + t.Errorf("count got %d want %d", got, tc.wantShareCount) + } + } +} + +// generateTx generates a transaction that occupies exactly numShares number of +// shares. 
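+// For example, generateTx(2) returns a tx whose length-delimited encoding
+// fills exactly FirstCompactShareContentSize +
+// ContinuationCompactShareContentSize bytes (rawTxSize subtracts the varint
+// delimiter that WriteTx prepends).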
+func generateTx(numShares int) coretypes.Tx { + if numShares == 0 { + return coretypes.Tx{} + } + if numShares == 1 { + return bytes.Repeat([]byte{1}, rawTxSize(appconsts.FirstCompactShareContentSize)) + } + return bytes.Repeat([]byte{2}, rawTxSize(appconsts.FirstCompactShareContentSize+(numShares-1)*appconsts.ContinuationCompactShareContentSize)) +} + +func TestExport_write(t *testing.T) { + type testCase struct { + name string + want []Share + writeBytes [][]byte + } + + oneShare, _ := zeroPadIfNecessary( + append( + appns.TxNamespace.Bytes(), + []byte{ + 0x1, // info byte + 0x0, 0x0, 0x0, 0x1, // sequence len + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + 0xf, // data + }..., + ), + appconsts.ShareSize) + + firstShare := fillShare(Share{data: append( + appns.TxNamespace.Bytes(), + []byte{ + 0x1, // info byte + 0x0, 0x0, 0x2, 0x0, // sequence len + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + }..., + )}, 0xf) + + continuationShare, _ := zeroPadIfNecessary( + append( + appns.TxNamespace.Bytes(), + append( + []byte{ + 0x0, // info byte + 0x0, 0x0, 0x0, 0x0, // reserved bytes + }, bytes.Repeat([]byte{0xf}, appconsts.NamespaceSize+appconsts.ShareInfoBytes+appconsts.SequenceLenBytes+appconsts.CompactShareReservedBytes)..., // data + )..., + ), + appconsts.ShareSize) + + testCases := []testCase{ + { + name: "empty", + want: []Share{}, + }, + { + name: "one share with small sequence len", + want: []Share{ + {data: oneShare}, + }, + writeBytes: [][]byte{{0xf}}, + }, + { + name: "two shares with big sequence len", + want: []Share{ + firstShare, + {data: continuationShare}, + }, + writeBytes: [][]byte{bytes.Repeat([]byte{0xf}, 512)}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + for _, bytes := range tc.writeBytes { + err := css.write(bytes) + require.NoError(t, err) + } + got, _, err := css.Export(0) + require.NoError(t, err) + assert.Equal(t, tc.want, got) + + shares, _, err := css.Export(0) + require.NoError(t, err) + assert.Equal(t, got, shares) + assert.Len(t, got, css.Count()) + }) + } +} + +func TestWriteAndExportIdempotence(t *testing.T) { + type testCase struct { + name string + txs []coretypes.Tx + wantLen int + } + testCases := []testCase{ + { + name: "one tx that occupies exactly one share", + txs: []coretypes.Tx{generateTx(1)}, + wantLen: 1, + }, + { + name: "one tx that occupies exactly two shares", + txs: []coretypes.Tx{generateTx(2)}, + wantLen: 2, + }, + { + name: "one tx that occupies exactly three shares", + txs: []coretypes.Tx{generateTx(3)}, + wantLen: 3, + }, + { + name: "two txs that occupy exactly two shares", + txs: []coretypes.Tx{ + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.FirstCompactShareContentSize)), + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)), + }, + wantLen: 2, + }, + { + name: "three txs that occupy exactly three shares", + txs: []coretypes.Tx{ + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.FirstCompactShareContentSize)), + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)), + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)), + }, + wantLen: 3, + }, + { + name: "four txs that occupy three full shares and one partial share", + txs: []coretypes.Tx{ + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.FirstCompactShareContentSize)), + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)), + bytes.Repeat([]byte{0xf}, 
rawTxSize(appconsts.ContinuationCompactShareContentSize)), + []byte{0xf}, + }, + wantLen: 4, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + + for _, tx := range tc.txs { + err := css.WriteTx(tx) + require.NoError(t, err) + } + + assert.Equal(t, tc.wantLen, css.Count()) + shares, _, err := css.Export(0) + require.NoError(t, err) + assert.Equal(t, tc.wantLen, len(shares)) + }) + } +} + +func TestExport(t *testing.T) { + type testCase struct { + name string + txs []coretypes.Tx + want map[coretypes.TxKey]ShareRange + shareRangeOffset int + } + + txOne := coretypes.Tx{0x1} + txTwo := coretypes.Tx(bytes.Repeat([]byte{2}, 600)) + txThree := coretypes.Tx(bytes.Repeat([]byte{3}, 1000)) + exactlyOneShare := coretypes.Tx(bytes.Repeat([]byte{4}, rawTxSize(appconsts.FirstCompactShareContentSize))) + exactlyTwoShares := coretypes.Tx(bytes.Repeat([]byte{5}, rawTxSize(appconsts.FirstCompactShareContentSize+appconsts.ContinuationCompactShareContentSize))) + + testCases := []testCase{ + { + name: "empty", + txs: []coretypes.Tx{}, + want: map[coretypes.TxKey]ShareRange{}, + }, + { + name: "txOne occupies shares 0 to 0", + txs: []coretypes.Tx{ + txOne, + }, + want: map[coretypes.TxKey]ShareRange{ + txOne.Key(): {0, 0}, + }, + }, + { + name: "txTwo occupies shares 0 to 1", + txs: []coretypes.Tx{ + txTwo, + }, + want: map[coretypes.TxKey]ShareRange{ + txTwo.Key(): {0, 1}, + }, + }, + { + name: "txThree occupies shares 0 to 2", + txs: []coretypes.Tx{ + txThree, + }, + want: map[coretypes.TxKey]ShareRange{ + txThree.Key(): {0, 2}, + }, + }, + { + name: "txOne occupies shares 0 to 0, txTwo occupies shares 0 to 1, txThree occupies shares 1 to 3", + txs: []coretypes.Tx{ + txOne, + txTwo, + txThree, + }, + want: map[coretypes.TxKey]ShareRange{ + txOne.Key(): {0, 0}, + txTwo.Key(): {0, 1}, + txThree.Key(): {1, 3}, + }, + }, + + { + name: "exactly one share occupies shares 0 to 0", + txs: []coretypes.Tx{ + exactlyOneShare, + }, + want: map[coretypes.TxKey]ShareRange{ + exactlyOneShare.Key(): {0, 0}, + }, + }, + { + name: "exactly two shares occupies shares 0 to 1", + txs: []coretypes.Tx{ + exactlyTwoShares, + }, + want: map[coretypes.TxKey]ShareRange{ + exactlyTwoShares.Key(): {0, 1}, + }, + }, + { + name: "two shares followed by one share", + txs: []coretypes.Tx{ + exactlyTwoShares, + exactlyOneShare, + }, + want: map[coretypes.TxKey]ShareRange{ + exactlyTwoShares.Key(): {0, 1}, + exactlyOneShare.Key(): {2, 2}, + }, + }, + { + name: "one share followed by two shares", + txs: []coretypes.Tx{ + exactlyOneShare, + exactlyTwoShares, + }, + want: map[coretypes.TxKey]ShareRange{ + exactlyOneShare.Key(): {0, 0}, + exactlyTwoShares.Key(): {1, 2}, + }, + }, + { + name: "one share followed by two shares offset by 10", + txs: []coretypes.Tx{ + exactlyOneShare, + exactlyTwoShares, + }, + want: map[coretypes.TxKey]ShareRange{ + exactlyOneShare.Key(): {10, 10}, + exactlyTwoShares.Key(): {11, 12}, + }, + shareRangeOffset: 10, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + + for _, tx := range tc.txs { + err := css.WriteTx(tx) + require.NoError(t, err) + } + + _, got, err := css.Export(tc.shareRangeOffset) + require.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestWriteAfterExport(t *testing.T) { + a := bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.FirstCompactShareContentSize)) + b 
:= bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize*2)) + c := bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)) + d := []byte{0xf} + + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + shares, _, err := css.Export(0) + require.NoError(t, err) + assert.Equal(t, 0, len(shares)) + + err = css.WriteTx(a) + require.NoError(t, err) + + shares, _, err = css.Export(0) + require.NoError(t, err) + assert.Equal(t, 1, len(shares)) + + err = css.WriteTx(b) + require.NoError(t, err) + + shares, _, err = css.Export(0) + require.NoError(t, err) + assert.Equal(t, 3, len(shares)) + + err = css.WriteTx(c) + require.NoError(t, err) + + shares, _, err = css.Export(0) + require.NoError(t, err) + assert.Equal(t, 4, len(shares)) + + err = css.WriteTx(d) + require.NoError(t, err) + + shares, _, err = css.Export(0) + require.NoError(t, err) + assert.Equal(t, 5, len(shares)) + + shares, _, err = css.Export(0) + require.NoError(t, err) + assert.Equal(t, 5, len(shares)) +} + +// rawTxSize returns the raw tx size that can be used to construct a +// tx of desiredSize bytes. This function is useful in tests to account for +// the length delimiter that is prefixed to a tx. +func rawTxSize(desiredSize int) int { + return desiredSize - DelimLen(uint64(desiredSize)) +} diff --git a/libs/shares/testdata/sample-block.json b/libs/shares/testdata/sample-block.json new file mode 100755 index 00000000000..9041fcf72d7 --- /dev/null +++ b/libs/shares/testdata/sample-block.json @@ -0,0 +1,62 @@ +{ + "header": { + "version": { + "block": 11 + }, + "chain_id": "private", + "height": 4, + "time": "2023-04-11T20:52:54.923092Z", + "last_block_id": { + "hash": "TLPtmfe84IKmiOU8K45MwPawkYq9XAx8KIYdQltNNXs=", + "part_set_header": { + "total": 1, + "hash": "7HS0mrbP9UNbYk2YMkA8oigEvHEErzs3rC0i1RkloUY=" + } + }, + "last_commit_hash": "DTjLVpzuBBOdujbAcw8nnQ4ACGxXwt7JJ0u4FiBGcGk=", + "data_hash": "ecqvIuCjhlW76pE31tEJcRhohEODd3lo9YBNaza8HGg=", + "validators_hash": "T46pWC4iIg0DjiF1DpCCv4UVlqcP2piZ7+/Is/cLRPo=", + "next_validators_hash": "T46pWC4iIg0DjiF1DpCCv4UVlqcP2piZ7+/Is/cLRPo=", + "consensus_hash": "BICRvH3cKD93v7+R1zxE2ljD34qcvIZ0Bdi389qtoi8=", + "app_hash": "hYTcK/JXCrBCS9C6gfDzYdBnx2dV3cTty21r1a+AbPA=", + "last_results_hash": "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=", + "evidence_hash": "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=", + "proposer_address": "8TeEky8GnBxKq/c2jmFemTVQCy0=" + }, + "data": { + "txs": [ + "CsUCCqQBCqEBCiAvY2VsZXN0aWEuYmxvYi52MS5Nc2dQYXlGb3JCbG9icxJ9Ci9jZWxlc3RpYTFuY3Y3bGh4NDRndnR5NmdmOGMzZjVoeTJneWx2NGRhOW4yZ3ZmMxIhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADh/+2uFQ/iYjVRGgKSGiIgmcwTeaaECDCWTmNHehI5AtEkDFkxYs8YT/63si4foiVCAQASWgpOCkYKHy9jb3Ntb3MuY3J5cHRvLnNlY3AyNTZrMS5QdWJLZXkSIwohA1ja+jKt+kagxAbHlZx4wzHALPC0YPqCn1DlHH6FHSpAEgQKAggBEggQgIDpg7HeFhpAc2aRvS1W7s5XPCEuekN8Tbt5i33u4pZYSIfb7rQuD4dp+H7FRePT2MXdNYmFx5IZtizLchyJ/UG+0EdySwNnvhIBBBoESU5EWA==" + ], + "blobs": [ + { + "namespace_id": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAOH/7a4VD+JiNVE=", + "data": 
"xLsIsDwF/TuEkH+SxQGtwa0pHDY13lPBOXQHiGkssqGycX2jdMpAudsH9yfGiFbk3vwkISoNjR3PbOxS+t8+6LHmGdg88O6r1OILoxxL0mbt9N3+4ixaJl9JtbrJCBDzt2xF+dzegAs8NYdhjGp/VPwuBAVjNtuNsH+6qu+r7aK1WtoIFcQ0/DPLMI3r7WXU9ZRIflOnRAEcwuCdpRoIF7/aEiwJC7z22XN3j8wDnLJTv6xJ+hMAdTiwwFoh+PmlvwFl0pcB5EXjgSNNZT0KFEhhZl4AO3Y8xALTq2aRVOIY+2M3e791BAflL8TelrAwFT2XcGKpuQugAY3PF5N57rjTJyKx8d5leHU5kZ+ZQRCo1XPzNZMVZqgK6ay0DVPFuF4E3AStgtSQQBtm/irWxtHBJyEe7hc+PG5rP8FkDoTcPyIJtL659jR8VFSBhpsVKfAoOa2V56h6NxGWFb3hQzSedPeOKq3pOqBwWTAAmpO65mK4m2hjwexl1fhUXdqNlyEpSvWp6M0K7pwgSYlmz37ugmh5ZKcpjiwsTYX197Cqvx8bMK8ZVqIIRIBQe77i8kaBSOte5G3GmKLGfQM/eh+HyNkLQP5iBlq4gBgrC6m5eLnYXUmMYkJIQkHfxK6IKrRYmTaWwZ1lB7dsQLkigbJAjbQLsR8mswmfeQCegAbPagoFewNizrhqVDNZL5Bgm2vmEO65pwUHQ06QGELHL+/VosBRVgyoxWQ1ZCmORwN4X+5bHeglUjKPs1AH7Fb4bWJ6vu6CNztU+DqO/lv7S7iBhaIklb2mpEmLNra211/QMaBkPqrQazj5ydaXwn6ObFDUqdXh6+ZdQSjZheKhTAQ3idaC2yE+yOOMm4CqQbiJu0JTV4LGtzn2QJb/h3EXtZfGvlhnD3sLmc9ImFdb5KSHvQFHitV4n1fCDYhgbnVONz9dJmtpIKJN+S4w/Fc7qCOMKw/mza7hwASnKwXNwAOTgM6ckUW/LXLVLS5EmKMccjKrQHqX3V9FpFyGSNDhW8Rslr/Hp/iZKphrfJxQ08V7swQYe3iafnjEdStS1E9hAD19EdL+ioYqkLPNIlYPkFUzovvHktCqmA+b9X1wX/7blmkakuwsN2FTkea3HGs1BNLcZ6eN1zzhx1Ehd0mzHt+jNpMC+jUb3doqz/fGfzRDJNX3UxB4dSGlfePqhzbscCzYVle1eyZ4pyYDW6hyDStYp8SZGV2sMyJN/44uHferxPYREHj6M41KwRyanJoH3dBzUz8PibIcL+QFeZqPghh+Syv0cw8YD7DyDSLzKFuw79rFiSxVhXdFTYsKie5c0W2B1Pez15x94E0wkdRLroX7+TJDutPrWb9aIiX0QmI7jWJaWdA55aPF9NdEKsIn3AxX9KM0bZA78BVuuxZvWo1UvkQW3X70NvjcoDb7jcca2JrGnTRA0shrb3d7qoDD6O5pOLxk99dhBE9UF/4in4uPoRo5uJ811tpTL2a969U/CU3fyXVFQZAUIICx1+QeBO+R7zoHAXhpbfJslia1VNYDdSsD+9XcqoXOCyrt/5Xay0nAe6nnCtnIrhJZUxXUh+lGKfvUbVXmwSCo+tnmnnxN6IvOthxDvsMAd/P5bMunQQ4dI6pHgnvyxqaDBwbfVsFTLBba0FBA6DXUrm73X+SGcbXmx/Q4ERLF98zCEqmeJMQDmq7szdwT39ShqInidENTYEG+xWy6oVexfQmYyR881Owv0W+Qp7BNVEW5ePlmOTLCXcPWuolpIBHF9r6S5rhjpBHEvj743SPhbfb+IEfodCsLXGGyp/WZsw8exvL1ky7jz5nDPHu9ipck+FnmCyFOaMExUcnNQVbvfzmCZohDIchXY/O9BQU/clQAOORbgQBj7cSNKwgcOhTCpbYKh3j+PL3TpW3rRXpLe4nr2itD5dusp+BIXWquZS7EiBmmY1DtttIQThw34pfxrffFQbLg6oUD0fkd6JL3PXItr+98OGEtmx9oJ/jVsC2583YLLheq4ZEASPdm65B8IHsit4tKCs5DcydaXhFTJem5vW2UNdR7lOF5F50veXA7r2ay55aSb8pw4ZueptBLDIuwR2pnyF8+6X5Amb1vR20UKOhA/rvuskhN/9b0KpqzUo//Z4xaV+hgOx5FDbS1uKSDG0Aat+XeR7pxARe+p1GAAX9X7YezCH5mAW+UzsHmHzXyYhXxnC7BjLnirxEF7pMPukaHN6XVVym14aV1beHHUrz1R52XLZZVlneTtb5CaJ748LC6oLSqBnE6mZFJnSh2acxdHpNL3LYisJkUIhmZxyLzQhli+xOgYjy3k27Vy/u00qEa8YZ7OmYPVCgd8sYDsmSnO1wCd8nt9QP/nSxOzUFU7pJTyJvZEKV4ROOR9AIL/bRF3juYWeh6BgssC4c1ClUfA4qX1588FxQictwGVqfABLk8tkzr02FEKddR6+FxmpZ9oi+OKWZ1bWvdPsz4rln15ILi6llfxM3A7Kxc8LPDaiYIQ7U8Tv0Zrlw+mhjGbqBbdapKAVvIspTHPxqfPVNDHFgimCUWTsPZhznfhW4kJHK0cuhK9dnzdAfDgHnHHeqY+2MTTwbYwhJrawEOOjAgUvn43z1KLyqi04Q0XTEm039TY/Alw/vRvYmigzOXtzQeT+QUqfd4nxhNpH8hCkO7mXxVuBiloa4Lk6a5Jny0NY7efrGDVqAjFs3T/E17sKBkDgFvYg5nQH6IiAZ5ECYvmjkzEPhDvgqmCGuqmmVb6moX7om0kMoR97gj0Mjn6j+kBluQ9IfODlGUn4v10t8hMVRbHnPjqBFx4srH9g+IRHcOoneFj1EF6YtYYIOD+GPDJ/UJRLdwF/KppWnJhOiX5brpvMzZfXaJ6QukTP9cAA8l5YvlnwGY/aBS72EJpBuiWkGg2L7TvQpGm2HHxWzfEiT8v+cg6QONJ1lSf+BTd0GAW54xlBOMd8oP7hxOVOcFMECyUD5h7dHzU4WztB6thofRyfGUV8mSo6jXdZWsqNBMEHDNi6aJpAbgszlnkXpgbtN+bflKFhvsWvh1G8NRzLt000f66VXhWDVx9D0rakAYuAfgnKf4iRuw/m9FbUTC5RXpMqjc0iy/oTLMjad0TGUqjuvwR11/zIMehJqIBDAqgZX2A9v2K0Naoj1ZeRp9M8skC2jmp66dMJcISv0alz+pe4cqB5Xoen9aa4A3PUH+v07xvr17Q/0w2nMeo+shfJGI+OPtn+hl72FLDAMVPMVx0F5G4ypvToszYPufIb3R6GoGTSs+mkgSkw8gulBh9A5VHqvtqe85g0kFQkp5f4Q1l+wurcHotWQntIrur1yvekENGPD1tBAN8sIG9zoFqB/23z9XMFDLgi1FmbkeXEI0WmWmfhqQZ4ZZelZRKXXDgSKERbtcgYbtaDKlOxVKxJqxHXYkiyhljJKiGhCqvaMI3crbbWSBK57jlSZ5i5Om+b7gqkf0H9A+wjwPSEDwoR1CM4yeUvAlhamaOM8W4q7bBOuWJvHD/j+nHe353Sh+oi0ycqhkLzajqacueZdTyk+hvk
U0rZ1TJBm/xmqhVC+Hc967z7QzZs34wV1dR/F8DIUPxvE+M4fw5DMqLTRpl+PkswZXVAw5FYFC6D8WL19IbflXUhTJ8qiVFgeDfXdW/VIMjsLfUpSiTM3t2S8zaEABcGmkNqrzWbAmEIr91EE6X+AOsO2VBv9u+2YhmZRGSPe0svxhdussSfKxeMtCWAEbek6pC0HpuQU5zc2SqdBhsraX0TImkHQJtoaMQEWKdurvfmP1fnvqP0bAx5Yxjmz5cSGk/wcpAhN4Qc5J16MWxkmGM+v6/4N3Kc2JqK7DH4o7faK5Pkod+sRdiZvRVoEi/tTrB0n4bzPkSFc59eCFgsnZajF/3zQS7hRyO3xos4FJmhdgbD6F8KZmOrYGfr1yYyUYToC2hFGSSWGNcXIRAojqvByBjSktPaxEqmc0u+5Mma0F8TzQHcs/AtZV2NrPlwDsOo5ITBMVTUTEezak7hDJCQXcgm4VanDY+qvE8DjkXQiMm5lUpLwpJcbCRRGYpcM4HzsgOV9pmofeanywTkTf8JapiH47aawB2b9QrR/GZ/eYRMQyvkeDRkwW3mdN4GNYueeKtyiTpDXo77/LgwLfcxcFi22sRT36zD5QhEs4yFH0yolWR6wxXyqLC/17bMegw1KAWP3Nevb+ay/lo8/22SM/K7JPn/9y019rB56YIl8TbNWKEU308hTc/b1PXTY961wdwsk4Z58F4XUmkyuQjjYwqgDB/+6mKlcjwPD+AXcIcwVfHolhX0M7THb8mK7bfxrANEYaNolsKAlh7WD0/dw7lOV3S0TWVaCkeZgUy62JjbJhC9l/7pjzAqmCqd99PGupei9yx7fJdrdJ//7VmzsTi32Hzuldjl2lKcSe/BdEWFChe0eosLY9sumppZbzuYhRHZRC7A==" + } + ], + "square_size": 4, + "hash": "ecqvIuCjhlW76pE31tEJcRhohEODd3lo9YBNaza8HGg=" + }, + "evidence": { + "evidence": [ + + ] + }, + "last_commit": { + "height": 3, + "block_id": { + "hash": "TLPtmfe84IKmiOU8K45MwPawkYq9XAx8KIYdQltNNXs=", + "part_set_header": { + "total": 1, + "hash": "7HS0mrbP9UNbYk2YMkA8oigEvHEErzs3rC0i1RkloUY=" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "8TeEky8GnBxKq/c2jmFemTVQCy0=", + "timestamp": "2023-04-11T20:52:54.923092Z", + "signature": "1Z3NmDMQU1B5dhSkxdv7xQNF0/n/q3dn+qlHlVBGcXEI3AZ1z4Kua9f+vKOwP3RacsEuGggD90rEbhpN6IqmBA==" + } + ] + } +} diff --git a/libs/shares/utils.go b/libs/shares/utils.go new file mode 100644 index 00000000000..90aeb5dcdd8 --- /dev/null +++ b/libs/shares/utils.go @@ -0,0 +1,98 @@ +package shares + +import ( + "bytes" + "encoding/binary" + + coretypes "github.com/tendermint/tendermint/types" +) + +// DelimLen calculates the length of the delimiter for a given unit size +func DelimLen(size uint64) int { + lenBuf := make([]byte, binary.MaxVarintLen64) + return binary.PutUvarint(lenBuf, size) +} + +// func isPowerOf2(v uint64) bool { +// return v&(v-1) == 0 && v != 0 +// } + +// func BlobsFromProto(blobs []core.Blob) ([]coretypes.Blob, error) { +// result := make([]coretypes.Blob, len(blobs)) +// for i, blob := range blobs { +// if blob.ShareVersion > math.MaxUint8 { +// return nil, fmt.Errorf("share version %d is too large to be a uint8", blob.ShareVersion) +// } +// result[i] = coretypes.Blob{ +// NamespaceID: blob.NamespaceId, +// Data: blob.Data, +// ShareVersion: uint8(blob.ShareVersion), +// } +// } +// return result, nil +// } + +func TxsToBytes(txs coretypes.Txs) [][]byte { + e := make([][]byte, len(txs)) + for i, tx := range txs { + e[i] = []byte(tx) + } + return e +} + +func TxsFromBytes(txs [][]byte) coretypes.Txs { + e := make(coretypes.Txs, len(txs)) + for i, tx := range txs { + e[i] = coretypes.Tx(tx) + } + return e +} + +// zeroPadIfNecessary pads the share with trailing zero bytes if the provided +// share has fewer bytes than width. Returns the share unmodified if the +// len(share) is greater than or equal to width. +func zeroPadIfNecessary(share []byte, width int) (padded []byte, bytesOfPadding int) { + oldLen := len(share) + if oldLen >= width { + return share, 0 + } + + missingBytes := width - oldLen + padByte := []byte{0} + padding := bytes.Repeat(padByte, missingBytes) + share = append(share, padding...) + return share, missingBytes +} + +// ParseDelimiter attempts to parse a varint length delimiter from the input +// provided. 
It returns the input without the len delimiter bytes, the length +// parsed from the varint optionally an error. Unit length delimiters are used +// in compact shares where units (i.e. a transaction) are prefixed with a length +// delimiter that is encoded as a varint. Input should not contain the namespace +// ID or info byte of a share. +func ParseDelimiter(input []byte) (inputWithoutLenDelimiter []byte, unitLen uint64, err error) { + if len(input) == 0 { + return input, 0, nil + } + + l := binary.MaxVarintLen64 + if len(input) < binary.MaxVarintLen64 { + l = len(input) + } + + delimiter, _ := zeroPadIfNecessary(input[:l], binary.MaxVarintLen64) + + // read the length of the data + r := bytes.NewBuffer(delimiter) + dataLen, err := binary.ReadUvarint(r) + if err != nil { + return nil, 0, err + } + + // calculate the number of bytes used by the delimiter + lenBuf := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(lenBuf, dataLen) + + // return the input without the length delimiter + return input[n:], dataLen, nil +} diff --git a/libs/shares/utils_test.go b/libs/shares/utils_test.go new file mode 100644 index 00000000000..09eb2a113bc --- /dev/null +++ b/libs/shares/utils_test.go @@ -0,0 +1,69 @@ +package shares + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/rollkit/rollkit/libs/testfactory" +) + +// func FuzzBlobSharesUsed(f *testing.F) { +// f.Add(uint32(1)) +// f.Fuzz(func(t *testing.T, a uint32) { +// if a < 1 { +// t.Skip() +// } +// ml := SparseSharesNeeded(a) +// blob := testfactory.GenerateRandomBlob(int(a)) +// rawShares, err := SplitBlobs(0, nil, []types.Blob{blob}, false) +// require.NoError(t, err) +// require.Equal(t, len(rawShares), ml) +// }) +// } + +func Test_zeroPadIfNecessary(t *testing.T) { + type args struct { + share []byte + width int + } + tests := []struct { + name string + args args + wantPadded []byte + wantBytesOfPadding int + }{ + {"pad", args{[]byte{1, 2, 3}, 6}, []byte{1, 2, 3, 0, 0, 0}, 3}, + {"not necessary (equal to shareSize)", args{[]byte{1, 2, 3}, 3}, []byte{1, 2, 3}, 0}, + {"not necessary (greater shareSize)", args{[]byte{1, 2, 3}, 2}, []byte{1, 2, 3}, 0}, + } + for _, tt := range tests { + tt := tt // stupid scopelint :-/ + t.Run(tt.name, func(t *testing.T) { + gotPadded, gotBytesOfPadding := zeroPadIfNecessary(tt.args.share, tt.args.width) + if !reflect.DeepEqual(gotPadded, tt.wantPadded) { + t.Errorf("zeroPadIfNecessary gotPadded %v, wantPadded %v", gotPadded, tt.wantPadded) + } + if gotBytesOfPadding != tt.wantBytesOfPadding { + t.Errorf("zeroPadIfNecessary gotBytesOfPadding %v, wantBytesOfPadding %v", gotBytesOfPadding, tt.wantBytesOfPadding) + } + }) + } +} + +func TestParseDelimiter(t *testing.T) { + for i := uint64(0); i < 100; i++ { + tx := testfactory.GenerateRandomTxs(1, int(i))[0] + input, err := MarshalDelimitedTx(tx) + if err != nil { + panic(err) + } + res, txLen, err := ParseDelimiter(input) + if err != nil { + panic(err) + } + assert.Equal(t, i, txLen) + assert.Equal(t, []byte(tx), res) + } +} diff --git a/libs/testfactory/txs.go b/libs/testfactory/txs.go new file mode 100644 index 00000000000..50da0937c25 --- /dev/null +++ b/libs/testfactory/txs.go @@ -0,0 +1,32 @@ +package testfactory + +import ( + mrand "math/rand" + + "github.com/tendermint/tendermint/types" +) + +func GenerateRandomlySizedTxs(count, maxSize int) types.Txs { + txs := make(types.Txs, count) + for i := 0; i < count; i++ { + size := mrand.Intn(maxSize) + if size == 0 { + size = 1 + } + 
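+		// size is now at least 1, so the generated tx below is never empty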
txs[i] = GenerateRandomTxs(1, size)[0] + } + return txs +} + +func GenerateRandomTxs(count, size int) types.Txs { + txs := make(types.Txs, count) + for i := 0; i < count; i++ { + tx := make([]byte, size) + _, err := mrand.Read(tx) + if err != nil { + panic(err) + } + txs[i] = tx + } + return txs +}
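+
+// A rough end-to-end sketch of how the pieces above compose (illustrative
+// only; assumes the shares, appconsts, and namespace packages from this
+// change):
+//
+//	txs := testfactory.GenerateRandomTxs(3, 100)
+//	css := shares.NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero)
+//	for _, tx := range txs {
+//		if err := css.WriteTx(tx); err != nil {
+//			panic(err)
+//		}
+//	}
+//	txShares, ranges, _ := css.Export(0)
+//	// txShares holds 512-byte shares and ranges maps each tx key to the
+//	// share indices it occupies; ParseDelimiter can then recover the
+//	// length-delimited txs from the raw share data.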