diff --git a/api/apihttp/apihttp_test.go b/api/apihttp/apihttp_test.go index 0ee49d41e..aa4df0c35 100644 --- a/api/apihttp/apihttp_test.go +++ b/api/apihttp/apihttp_test.go @@ -28,9 +28,8 @@ import ( "github.com/bbva/qed/balloon" "github.com/bbva/qed/balloon/history" - historynav "github.com/bbva/qed/balloon/history/navigation" + "github.com/bbva/qed/balloon/history/navigation" "github.com/bbva/qed/balloon/hyper" - hypernav "github.com/bbva/qed/balloon/hyper/navigation" "github.com/bbva/qed/hashing" "github.com/bbva/qed/protocol" "github.com/bbva/qed/raftwal" @@ -57,8 +56,8 @@ func (b fakeRaftBalloon) Join(nodeID, addr string, metadata map[string]string) e func (b fakeRaftBalloon) QueryDigestMembership(keyDigest hashing.Digest, version uint64) (*balloon.MembershipProof, error) { return &balloon.MembershipProof{ Exists: true, - HyperProof: hyper.NewQueryProof([]byte{0x0}, []byte{0x0}, hypernav.AuditPath{}, nil), - HistoryProof: history.NewMembershipProof(0, 0, historynav.AuditPath{}, nil), + HyperProof: hyper.NewQueryProof([]byte{0x0}, []byte{0x0}, hyper.AuditPath{}, nil), + HistoryProof: history.NewMembershipProof(0, 0, navigation.AuditPath{}, nil), CurrentVersion: 1, QueryVersion: 1, ActualVersion: 2, @@ -71,8 +70,8 @@ func (b fakeRaftBalloon) QueryMembership(event []byte, version uint64) (*balloon hasher := hashing.NewFakeXorHasher() return &balloon.MembershipProof{ Exists: true, - HyperProof: hyper.NewQueryProof([]byte{0x0}, []byte{0x0}, hypernav.AuditPath{}, nil), - HistoryProof: history.NewMembershipProof(0, 0, historynav.AuditPath{}, nil), + HyperProof: hyper.NewQueryProof([]byte{0x0}, []byte{0x0}, hyper.AuditPath{}, nil), + HistoryProof: history.NewMembershipProof(0, 0, navigation.AuditPath{}, nil), CurrentVersion: 1, QueryVersion: 1, ActualVersion: 2, @@ -86,7 +85,7 @@ func (b fakeRaftBalloon) QueryConsistency(start, end uint64) (*balloon.Increment ip := balloon.IncrementalProof{ Start: 2, End: 8, - AuditPath: historynav.AuditPath{pathKey: hashing.Digest{0x00}}, + AuditPath: navigation.AuditPath{pathKey: hashing.Digest{0x00}}, Hasher: hashing.NewFakeXorHasher(), } return &ip, nil diff --git a/balloon/hyper/batch.go b/balloon/hyper/batch.go new file mode 100644 index 000000000..11ce50f4b --- /dev/null +++ b/balloon/hyper/batch.go @@ -0,0 +1,116 @@ +/* + Copyright 2018 Banco Bilbao Vizcaya Argentaria, S.A. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package hyper + +import ( + "fmt" + "strings" + + "github.com/bbva/qed/hashing" +) + +type batchNode struct { + batch [][]byte + nodeSize int // in bytes +} + +func newEmptyBatchNode(nodeSize int) *batchNode { + return &batchNode{ + nodeSize: nodeSize, + batch: make([][]byte, 31, 31), + } +} + +func newBatchNode(nodeSize int, batch [][]byte) *batchNode { + return &batchNode{ + nodeSize: nodeSize, + batch: batch, + } +} + +func (b batchNode) String() string { + var strs []string + for i, n := range b.batch { + strs = append(strs, fmt.Sprintf("[%d - %#x]", i, n)) + } + return strings.Join(strs, "\n") +} + +func (b batchNode) HasLeafAt(i int8) bool { + return len(b.batch[i]) > 0 && b.batch[i][b.nodeSize] == byte(1) +} + +func (b batchNode) AddHashAt(i int8, value []byte) { + b.batch[i] = append(value, byte(0)) +} + +func (b batchNode) AddLeafAt(i int8, hash hashing.Digest, key, value []byte) { + b.batch[i] = append(hash, byte(1)) + b.batch[2*i+1] = append(key, byte(2)) + b.batch[2*i+2] = append(value, byte(2)) +} + +func (b batchNode) GetLeafKVAt(i int8) ([]byte, []byte) { + return b.batch[2*i+1][:b.nodeSize], b.batch[2*i+2][:b.nodeSize] +} + +func (b batchNode) HasElementAt(i int8) bool { + return len(b.batch[i]) > 0 +} + +func (b batchNode) GetElementAt(i int8) []byte { + return b.batch[i][:b.nodeSize] +} + +func (b batchNode) ResetElementAt(i int8) { + b.batch[i] = nil +} + +func (b batchNode) Serialize() []byte { + serialized := make([]byte, 4) + for i := uint16(0); i < 31; i++ { + if len(b.batch[i]) != 0 { + bitSet(serialized, i) + serialized = append(serialized, b.batch[i]...) + } + } + return serialized +} + +func parseBatch(nodeSize int, value []byte) [][]byte { + batch := make([][]byte, 31, 31) // 31 nodes (including the root) + bitmap := value[:4] // the first 4 bytes define the bitmap + size := nodeSize + 1 + + j := 0 + for i := 0; i < 31; i++ { + if bitIsSet(bitmap, i) { + batch[i] = value[4+size*j : 4+size*(j+1)] + j++ + } + } + + return batch +} + +func parseBatchNode(nodeSize int, value []byte) *batchNode { + return newBatchNode(nodeSize, parseBatch(nodeSize, value)) +} + +func bitIsSet(bits []byte, i int) bool { + return bits[i/8]&(1<= index splitIndex := sort.Search(len(l), func(i int) bool { return bytes.Compare(l[i].Index, index) >= 0 @@ -60,13 +59,13 @@ func (l Leaves) Split(index []byte) (left, right Leaves) { return l[:splitIndex], l[splitIndex:] } -type TraverseBatch func(pos navigation.Position, leaves Leaves, batch *BatchNode, iBatch int8, ops *OperationsStack) +type traverseBatch func(pos position, leaves leaves, batch *batchNode, iBatch int8, ops *operationsStack) -func PruneToInsert(index []byte, value []byte, cacheHeightLimit uint16, batches BatchLoader) *OperationsStack { +func pruneToInsert(index []byte, value []byte, cacheHeightLimit uint16, batches batchLoader) *operationsStack { - var traverse, traverseThroughCache, traverseAfterCache TraverseBatch + var traverse, traverseThroughCache, traverseAfterCache traverseBatch - traverse = func(pos navigation.Position, leaves Leaves, batch *BatchNode, iBatch int8, ops *OperationsStack) { + traverse = func(pos position, leaves leaves, batch *batchNode, iBatch int8, ops *operationsStack) { if batch == nil { batch = batches.Load(pos) } @@ -77,7 +76,7 @@ func PruneToInsert(index []byte, value []byte, cacheHeightLimit uint16, batches } } - traverseThroughCache = func(pos navigation.Position, leaves Leaves, batch *BatchNode, iBatch int8, ops *OperationsStack) { + traverseThroughCache = func(pos position, 
leaves leaves, batch *batchNode, iBatch int8, ops *operationsStack) { if len(leaves) == 0 { // discarded branch if batch.HasElementAt(iBatch) { @@ -91,26 +90,26 @@ func PruneToInsert(index []byte, value []byte, cacheHeightLimit uint16, batches // at the end of a batch tree if iBatch > 0 && pos.Height%4 == 0 { traverse(pos, leaves, nil, 0, ops) - ops.Push(updateBatchNode(pos, iBatch, batch)) + ops.Push(updatebatchNode(pos, iBatch, batch)) return } // on an internal node with more than one leaf rightPos := pos.Right() - leftLeaves, rightLeaves := leaves.Split(rightPos.Index) + leftleaves, rightleaves := leaves.Split(rightPos.Index) - traverseThroughCache(pos.Left(), leftLeaves, batch, 2*iBatch+1, ops) - traverseThroughCache(rightPos, rightLeaves, batch, 2*iBatch+2, ops) + traverseThroughCache(pos.Left(), leftleaves, batch, 2*iBatch+1, ops) + traverseThroughCache(rightPos, rightleaves, batch, 2*iBatch+2, ops) - ops.PushAll(innerHash(pos), updateBatchNode(pos, iBatch, batch)) + ops.PushAll(innerHash(pos), updatebatchNode(pos, iBatch, batch)) if iBatch == 0 { // it's the root of the batch tree ops.Push(putInCache(pos, batch)) } } - traverseAfterCache = func(pos navigation.Position, leaves Leaves, batch *BatchNode, iBatch int8, ops *OperationsStack) { + traverseAfterCache = func(pos position, leaves leaves, batch *batchNode, iBatch int8, ops *operationsStack) { if len(leaves) == 0 { // discarded branch if batch.HasElementAt(iBatch) { @@ -128,12 +127,12 @@ func PruneToInsert(index []byte, value []byte, cacheHeightLimit uint16, batches panic("Oops, something went wrong. We cannot have more than one leaf at the end of the main tree") } // create or update the leaf with a new shortcut - newBatch := NewEmptyBatchNode(len(pos.Index)) + newBatch := newEmptyBatchNode(len(pos.Index)) ops.PushAll( leafHash(pos, leaves[0].Value), updateBatchShortcut(pos, 0, newBatch, leaves[0].Index, leaves[0].Value), mutateBatch(pos, newBatch), - updateBatchNode(pos, iBatch, batch), + updatebatchNode(pos, iBatch, batch), ) return } @@ -143,22 +142,22 @@ func PruneToInsert(index []byte, value []byte, cacheHeightLimit uint16, batches if len(leaves) > 1 { // with more than one leaf to insert -> it's impossible to be a shortcut leaf traverse(pos, leaves, nil, 0, ops) - ops.Push(updateBatchNode(pos, iBatch, batch)) + ops.Push(updatebatchNode(pos, iBatch, batch)) return } // with only one leaf to insert -> continue traversing if batch.HasElementAt(iBatch) { traverse(pos, leaves, nil, 0, ops) - ops.Push(updateBatchNode(pos, iBatch, batch)) + ops.Push(updatebatchNode(pos, iBatch, batch)) return } // nil value (no previous node stored) so create a new shortcut batch - newBatch := NewEmptyBatchNode(len(pos.Index)) + newBatch := newEmptyBatchNode(len(pos.Index)) ops.PushAll( leafHash(pos, leaves[0].Value), updateBatchShortcut(pos, 0, newBatch, leaves[0].Index, leaves[0].Value), mutateBatch(pos, newBatch), - updateBatchNode(pos, iBatch, batch), + updatebatchNode(pos, iBatch, batch), ) return } @@ -182,7 +181,7 @@ func PruneToInsert(index []byte, value []byte, cacheHeightLimit uint16, batches if batch.HasLeafAt(iBatch) { // push down leaf key, value := batch.GetLeafKVAt(iBatch) - leaves = leaves.InsertSorted(Leaf{key, value}) + leaves = leaves.InsertSorted(leaf{key, value}) batch.ResetElementAt(iBatch) batch.ResetElementAt(2*iBatch + 1) batch.ResetElementAt(2*iBatch + 2) @@ -193,23 +192,23 @@ func PruneToInsert(index []byte, value []byte, cacheHeightLimit uint16, batches // on an internal node with more than one leaf rightPos := 
pos.Right() - leftLeaves, rightLeaves := leaves.Split(rightPos.Index) + leftleaves, rightleaves := leaves.Split(rightPos.Index) - traverseAfterCache(pos.Left(), leftLeaves, batch, 2*iBatch+1, ops) - traverseAfterCache(rightPos, rightLeaves, batch, 2*iBatch+2, ops) + traverseAfterCache(pos.Left(), leftleaves, batch, 2*iBatch+1, ops) + traverseAfterCache(rightPos, rightleaves, batch, 2*iBatch+2, ops) - ops.PushAll(innerHash(pos), updateBatchNode(pos, iBatch, batch)) + ops.PushAll(innerHash(pos), updatebatchNode(pos, iBatch, batch)) if iBatch == 0 { // at root node -> mutate batch ops.Push(mutateBatch(pos, batch)) } } - ops := NewOperationsStack() + ops := newOperationsStack() version := util.AddPaddingToBytes(value, len(index)) version = version[len(version)-len(index):] // TODO GET RID OF THIS: used only to pass tests - leaves := make(Leaves, 0) - leaves = leaves.InsertSorted(Leaf{index, version}) - traverse(navigation.NewRootPosition(uint16(len(index))), leaves, nil, 0, ops) + leaves := make(leaves, 0) + leaves = leaves.InsertSorted(leaf{index, version}) + traverse(newRootPosition(uint16(len(index))), leaves, nil, 0, ops) return ops } diff --git a/balloon/hyper/pruning/insert_test.go b/balloon/hyper/insert_test.go similarity index 77% rename from balloon/hyper/pruning/insert_test.go rename to balloon/hyper/insert_test.go index 83255ee9a..c4c591d39 100644 --- a/balloon/hyper/pruning/insert_test.go +++ b/balloon/hyper/insert_test.go @@ -14,13 +14,12 @@ limitations under the License. */ -package pruning +package hyper import ( "testing" "github.com/bbva/qed/balloon/cache" - "github.com/bbva/qed/balloon/hyper/navigation" "github.com/bbva/qed/hashing" "github.com/bbva/qed/storage" "github.com/stretchr/testify/assert" @@ -42,23 +41,23 @@ func TestPruneToInsert(t *testing.T) { cachedBatches: map[string][]byte{}, storedBatches: map[string][]byte{}, expectedOps: []op{ - {PutInCacheCode, pos(0, 8)}, - {UpdateBatchNodeCode, pos(0, 8)}, - {InnerHashCode, pos(0, 8)}, - {GetDefaultHashCode, pos(128, 7)}, - {UpdateBatchNodeCode, pos(0, 7)}, - {InnerHashCode, pos(0, 7)}, - {GetDefaultHashCode, pos(64, 6)}, - {UpdateBatchNodeCode, pos(0, 6)}, - {InnerHashCode, pos(0, 6)}, - {GetDefaultHashCode, pos(32, 5)}, - {UpdateBatchNodeCode, pos(0, 5)}, - {InnerHashCode, pos(0, 5)}, - {GetDefaultHashCode, pos(16, 4)}, - {UpdateBatchNodeCode, pos(0, 4)}, - {MutateBatchCode, pos(0, 4)}, - {UpdateBatchShortcutCode, pos(0, 4)}, - {LeafHashCode, pos(0, 4)}, + {putInCacheCode, pos(0, 8)}, + {updateBatchNodeCode, pos(0, 8)}, + {innerHashCode, pos(0, 8)}, + {getDefaultHashCode, pos(128, 7)}, + {updateBatchNodeCode, pos(0, 7)}, + {innerHashCode, pos(0, 7)}, + {getDefaultHashCode, pos(64, 6)}, + {updateBatchNodeCode, pos(0, 6)}, + {innerHashCode, pos(0, 6)}, + {getDefaultHashCode, pos(32, 5)}, + {updateBatchNodeCode, pos(0, 5)}, + {innerHashCode, pos(0, 5)}, + {getDefaultHashCode, pos(16, 4)}, + {updateBatchNodeCode, pos(0, 4)}, + {mutateBatchCode, pos(0, 4)}, + {updateBatchShortcutCode, pos(0, 4)}, + {leafHashCode, pos(0, 4)}, }, }, { @@ -84,23 +83,23 @@ func TestPruneToInsert(t *testing.T) { }, }, expectedOps: []op{ - {PutInCacheCode, pos(0, 8)}, - {UpdateBatchNodeCode, pos(0, 8)}, - {InnerHashCode, pos(0, 8)}, - {GetDefaultHashCode, pos(128, 7)}, - {UpdateBatchNodeCode, pos(0, 7)}, - {InnerHashCode, pos(0, 7)}, - {GetDefaultHashCode, pos(64, 6)}, - {UpdateBatchNodeCode, pos(0, 6)}, - {InnerHashCode, pos(0, 6)}, - {GetDefaultHashCode, pos(32, 5)}, - {UpdateBatchNodeCode, pos(0, 5)}, - 
{InnerHashCode, pos(0, 5)}, - {GetDefaultHashCode, pos(16, 4)}, - {UpdateBatchNodeCode, pos(0, 4)}, - {MutateBatchCode, pos(0, 4)}, - {UpdateBatchShortcutCode, pos(0, 4)}, - {LeafHashCode, pos(0, 4)}, + {putInCacheCode, pos(0, 8)}, + {updateBatchNodeCode, pos(0, 8)}, + {innerHashCode, pos(0, 8)}, + {getDefaultHashCode, pos(128, 7)}, + {updateBatchNodeCode, pos(0, 7)}, + {innerHashCode, pos(0, 7)}, + {getDefaultHashCode, pos(64, 6)}, + {updateBatchNodeCode, pos(0, 6)}, + {innerHashCode, pos(0, 6)}, + {getDefaultHashCode, pos(32, 5)}, + {updateBatchNodeCode, pos(0, 5)}, + {innerHashCode, pos(0, 5)}, + {getDefaultHashCode, pos(16, 4)}, + {updateBatchNodeCode, pos(0, 4)}, + {mutateBatchCode, pos(0, 4)}, + {updateBatchShortcutCode, pos(0, 4)}, + {leafHashCode, pos(0, 4)}, }, }, { @@ -127,40 +126,40 @@ func TestPruneToInsert(t *testing.T) { }, }, expectedOps: []op{ - {PutInCacheCode, pos(0, 8)}, - {UpdateBatchNodeCode, pos(0, 8)}, - {InnerHashCode, pos(0, 8)}, - {GetDefaultHashCode, pos(128, 7)}, - {UpdateBatchNodeCode, pos(0, 7)}, - {InnerHashCode, pos(0, 7)}, - {GetDefaultHashCode, pos(64, 6)}, - {UpdateBatchNodeCode, pos(0, 6)}, - {InnerHashCode, pos(0, 6)}, - {GetDefaultHashCode, pos(32, 5)}, - {UpdateBatchNodeCode, pos(0, 5)}, - {InnerHashCode, pos(0, 5)}, - {GetDefaultHashCode, pos(16, 4)}, - {UpdateBatchNodeCode, pos(0, 4)}, - {MutateBatchCode, pos(0, 4)}, // reset previous shortcut - {UpdateBatchNodeCode, pos(0, 4)}, - {InnerHashCode, pos(0, 4)}, - {GetDefaultHashCode, pos(8, 3)}, - {UpdateBatchNodeCode, pos(0, 3)}, - {InnerHashCode, pos(0, 3)}, - {GetDefaultHashCode, pos(4, 2)}, - {UpdateBatchNodeCode, pos(0, 2)}, - {InnerHashCode, pos(0, 2)}, - {GetDefaultHashCode, pos(2, 1)}, - {UpdateBatchNodeCode, pos(0, 1)}, - {InnerHashCode, pos(0, 1)}, - {UpdateBatchNodeCode, pos(1, 0)}, - {MutateBatchCode, pos(1, 0)}, // new batch - {UpdateBatchShortcutCode, pos(1, 0)}, - {LeafHashCode, pos(1, 0)}, - {UpdateBatchNodeCode, pos(0, 0)}, - {MutateBatchCode, pos(0, 0)}, // new batch - {UpdateBatchShortcutCode, pos(0, 0)}, - {LeafHashCode, pos(0, 0)}, + {putInCacheCode, pos(0, 8)}, + {updateBatchNodeCode, pos(0, 8)}, + {innerHashCode, pos(0, 8)}, + {getDefaultHashCode, pos(128, 7)}, + {updateBatchNodeCode, pos(0, 7)}, + {innerHashCode, pos(0, 7)}, + {getDefaultHashCode, pos(64, 6)}, + {updateBatchNodeCode, pos(0, 6)}, + {innerHashCode, pos(0, 6)}, + {getDefaultHashCode, pos(32, 5)}, + {updateBatchNodeCode, pos(0, 5)}, + {innerHashCode, pos(0, 5)}, + {getDefaultHashCode, pos(16, 4)}, + {updateBatchNodeCode, pos(0, 4)}, + {mutateBatchCode, pos(0, 4)}, // reset previous shortcut + {updateBatchNodeCode, pos(0, 4)}, + {innerHashCode, pos(0, 4)}, + {getDefaultHashCode, pos(8, 3)}, + {updateBatchNodeCode, pos(0, 3)}, + {innerHashCode, pos(0, 3)}, + {getDefaultHashCode, pos(4, 2)}, + {updateBatchNodeCode, pos(0, 2)}, + {innerHashCode, pos(0, 2)}, + {getDefaultHashCode, pos(2, 1)}, + {updateBatchNodeCode, pos(0, 1)}, + {innerHashCode, pos(0, 1)}, + {updateBatchNodeCode, pos(1, 0)}, + {mutateBatchCode, pos(1, 0)}, // new batch + {updateBatchShortcutCode, pos(1, 0)}, + {leafHashCode, pos(1, 0)}, + {updateBatchNodeCode, pos(0, 0)}, + {mutateBatchCode, pos(0, 0)}, // new batch + {updateBatchShortcutCode, pos(0, 0)}, + {leafHashCode, pos(0, 0)}, }, }, { @@ -187,27 +186,27 @@ func TestPruneToInsert(t *testing.T) { }, }, expectedOps: []op{ - {PutInCacheCode, pos(0, 8)}, - {UpdateBatchNodeCode, pos(0, 8)}, - {InnerHashCode, pos(0, 8)}, - {GetDefaultHashCode, pos(128, 7)}, - {UpdateBatchNodeCode, pos(0, 7)}, - 
{InnerHashCode, pos(0, 7)}, - {GetDefaultHashCode, pos(64, 6)}, - {UpdateBatchNodeCode, pos(0, 6)}, - {InnerHashCode, pos(0, 6)}, - {GetDefaultHashCode, pos(32, 5)}, - {UpdateBatchNodeCode, pos(0, 5)}, - {InnerHashCode, pos(0, 5)}, - {GetDefaultHashCode, pos(16, 4)}, - {UpdateBatchNodeCode, pos(0, 4)}, - {MutateBatchCode, pos(0, 4)}, // reset previous shortcut - {UpdateBatchNodeCode, pos(0, 4)}, - {InnerHashCode, pos(0, 4)}, - {UpdateBatchShortcutCode, pos(8, 3)}, - {LeafHashCode, pos(8, 3)}, - {UpdateBatchShortcutCode, pos(0, 3)}, - {LeafHashCode, pos(0, 3)}, + {putInCacheCode, pos(0, 8)}, + {updateBatchNodeCode, pos(0, 8)}, + {innerHashCode, pos(0, 8)}, + {getDefaultHashCode, pos(128, 7)}, + {updateBatchNodeCode, pos(0, 7)}, + {innerHashCode, pos(0, 7)}, + {getDefaultHashCode, pos(64, 6)}, + {updateBatchNodeCode, pos(0, 6)}, + {innerHashCode, pos(0, 6)}, + {getDefaultHashCode, pos(32, 5)}, + {updateBatchNodeCode, pos(0, 5)}, + {innerHashCode, pos(0, 5)}, + {getDefaultHashCode, pos(16, 4)}, + {updateBatchNodeCode, pos(0, 4)}, + {mutateBatchCode, pos(0, 4)}, // reset previous shortcut + {updateBatchNodeCode, pos(0, 4)}, + {innerHashCode, pos(0, 4)}, + {updateBatchShortcutCode, pos(8, 3)}, + {leafHashCode, pos(8, 3)}, + {updateBatchShortcutCode, pos(0, 3)}, + {leafHashCode, pos(0, 3)}, }, }, { @@ -239,30 +238,30 @@ func TestPruneToInsert(t *testing.T) { }, }, expectedOps: []op{ - {PutInCacheCode, pos(0, 8)}, - {UpdateBatchNodeCode, pos(0, 8)}, - {InnerHashCode, pos(0, 8)}, - {GetDefaultHashCode, pos(128, 7)}, - {UpdateBatchNodeCode, pos(0, 7)}, - {InnerHashCode, pos(0, 7)}, - {GetDefaultHashCode, pos(64, 6)}, - {UpdateBatchNodeCode, pos(0, 6)}, - {InnerHashCode, pos(0, 6)}, - {GetDefaultHashCode, pos(32, 5)}, - {UpdateBatchNodeCode, pos(0, 5)}, - {InnerHashCode, pos(0, 5)}, - {GetDefaultHashCode, pos(16, 4)}, - {UpdateBatchNodeCode, pos(0, 4)}, - {MutateBatchCode, pos(0, 4)}, - {UpdateBatchNodeCode, pos(0, 4)}, - {InnerHashCode, pos(0, 4)}, - {UpdateBatchNodeCode, pos(8, 3)}, - {InnerHashCode, pos(8, 3)}, - {UpdateBatchShortcutCode, pos(12, 2)}, - {LeafHashCode, pos(12, 2)}, - {UpdateBatchShortcutCode, pos(8, 2)}, - {LeafHashCode, pos(8, 2)}, - {GetProvidedHashCode, pos(0, 3)}, + {putInCacheCode, pos(0, 8)}, + {updateBatchNodeCode, pos(0, 8)}, + {innerHashCode, pos(0, 8)}, + {getDefaultHashCode, pos(128, 7)}, + {updateBatchNodeCode, pos(0, 7)}, + {innerHashCode, pos(0, 7)}, + {getDefaultHashCode, pos(64, 6)}, + {updateBatchNodeCode, pos(0, 6)}, + {innerHashCode, pos(0, 6)}, + {getDefaultHashCode, pos(32, 5)}, + {updateBatchNodeCode, pos(0, 5)}, + {innerHashCode, pos(0, 5)}, + {getDefaultHashCode, pos(16, 4)}, + {updateBatchNodeCode, pos(0, 4)}, + {mutateBatchCode, pos(0, 4)}, + {updateBatchNodeCode, pos(0, 4)}, + {innerHashCode, pos(0, 4)}, + {updateBatchNodeCode, pos(8, 3)}, + {innerHashCode, pos(8, 3)}, + {updateBatchShortcutCode, pos(12, 2)}, + {leafHashCode, pos(12, 2)}, + {updateBatchShortcutCode, pos(8, 2)}, + {leafHashCode, pos(8, 2)}, + {getProvidedHashCode, pos(0, 3)}, }, }, { @@ -288,23 +287,23 @@ func TestPruneToInsert(t *testing.T) { }, }, expectedOps: []op{ - {PutInCacheCode, pos(0, 8)}, - {UpdateBatchNodeCode, pos(0, 8)}, - {InnerHashCode, pos(0, 8)}, - {UpdateBatchNodeCode, pos(128, 7)}, - {InnerHashCode, pos(128, 7)}, - {GetDefaultHashCode, pos(192, 6)}, - {UpdateBatchNodeCode, pos(128, 6)}, - {InnerHashCode, pos(128, 6)}, - {GetDefaultHashCode, pos(160, 5)}, - {UpdateBatchNodeCode, pos(128, 5)}, - {InnerHashCode, pos(128, 5)}, - {GetDefaultHashCode, pos(144, 4)}, - 
{UpdateBatchNodeCode, pos(128, 4)}, - {MutateBatchCode, pos(128, 4)}, - {UpdateBatchShortcutCode, pos(128, 4)}, - {LeafHashCode, pos(128, 4)}, - {GetProvidedHashCode, pos(0, 7)}, + {putInCacheCode, pos(0, 8)}, + {updateBatchNodeCode, pos(0, 8)}, + {innerHashCode, pos(0, 8)}, + {updateBatchNodeCode, pos(128, 7)}, + {innerHashCode, pos(128, 7)}, + {getDefaultHashCode, pos(192, 6)}, + {updateBatchNodeCode, pos(128, 6)}, + {innerHashCode, pos(128, 6)}, + {getDefaultHashCode, pos(160, 5)}, + {updateBatchNodeCode, pos(128, 5)}, + {innerHashCode, pos(128, 5)}, + {getDefaultHashCode, pos(144, 4)}, + {updateBatchNodeCode, pos(128, 4)}, + {mutateBatchCode, pos(128, 4)}, + {updateBatchShortcutCode, pos(128, 4)}, + {leafHashCode, pos(128, 4)}, + {getProvidedHashCode, pos(0, 7)}, }, }, } @@ -313,8 +312,8 @@ func TestPruneToInsert(t *testing.T) { cacheHeightLimit := batchLevels * 4 for i, c := range testCases { - loader := NewFakeBatchLoader(c.cachedBatches, c.storedBatches, cacheHeightLimit) - prunedOps := PruneToInsert(c.index, c.value, cacheHeightLimit, loader).List() + loader := newFakeBatchLoader(c.cachedBatches, c.storedBatches, cacheHeightLimit) + prunedOps := pruneToInsert(c.index, c.value, cacheHeightLimit, loader).List() require.Truef(t, len(c.expectedOps) == len(prunedOps), "The size of the pruned ops should match the expected for test case %d", i) for j := 0; j < len(prunedOps); j++ { assert.Equalf(t, c.expectedOps[j].Code, prunedOps[j].Code, "The pruned operation's code should match for test case %d", i) @@ -659,10 +658,10 @@ func TestInsertInterpretation(t *testing.T) { for i, c := range testCases { cache := cache.NewFakeCache([]byte{0x0}) - batches := NewFakeBatchLoader(c.cachedBatches, c.storedBatches, cacheHeightLimit) + batches := newFakeBatchLoader(c.cachedBatches, c.storedBatches, cacheHeightLimit) - ops := PruneToInsert(c.index, c.value, cacheHeightLimit, batches) - ctx := &Context{ + ops := pruneToInsert(c.index, c.value, cacheHeightLimit, batches) + ctx := &pruningContext{ Hasher: hashing.NewFakeXorHasher(), Cache: cache, DefaultHashes: defaultHashes, @@ -681,6 +680,6 @@ func TestInsertInterpretation(t *testing.T) { } type cachedElement struct { - Pos navigation.Position + Pos position Value []byte } diff --git a/balloon/hyper/pruning/loader.go b/balloon/hyper/loader.go similarity index 67% rename from balloon/hyper/pruning/loader.go rename to balloon/hyper/loader.go index 90034af67..13b349935 100644 --- a/balloon/hyper/pruning/loader.go +++ b/balloon/hyper/loader.go @@ -14,59 +14,58 @@ limitations under the License. 
*/ -package pruning +package hyper import ( "github.com/bbva/qed/log" "github.com/bbva/qed/balloon/cache" - "github.com/bbva/qed/balloon/hyper/navigation" "github.com/bbva/qed/storage" ) -type BatchLoader interface { - Load(pos navigation.Position) *BatchNode +type batchLoader interface { + Load(pos position) *batchNode } // TODO maybe use a function -type DefaultBatchLoader struct { +type defaultBatchLoader struct { cacheHeightLimit uint16 cache cache.Cache store storage.Store } -func NewDefaultBatchLoader(store storage.Store, cache cache.Cache, cacheHeightLimit uint16) *DefaultBatchLoader { - return &DefaultBatchLoader{ +func NewDefaultBatchLoader(store storage.Store, cache cache.Cache, cacheHeightLimit uint16) *defaultBatchLoader { + return &defaultBatchLoader{ cacheHeightLimit: cacheHeightLimit, cache: cache, store: store, } } -func (l DefaultBatchLoader) Load(pos navigation.Position) *BatchNode { +func (l defaultBatchLoader) Load(pos position) *batchNode { if pos.Height > l.cacheHeightLimit { return l.loadBatchFromCache(pos) } return l.loadBatchFromStore(pos) } -func (l DefaultBatchLoader) loadBatchFromCache(pos navigation.Position) *BatchNode { +func (l defaultBatchLoader) loadBatchFromCache(pos position) *batchNode { value, ok := l.cache.Get(pos.Bytes()) if !ok { - return NewEmptyBatchNode(len(pos.Index)) + return newEmptyBatchNode(len(pos.Index)) } - batch := ParseBatchNode(len(pos.Index), value) + batch := parseBatchNode(len(pos.Index), value) return batch } -func (l DefaultBatchLoader) loadBatchFromStore(pos navigation.Position) *BatchNode { +func (l defaultBatchLoader) loadBatchFromStore(pos position) *batchNode { kv, err := l.store.Get(storage.HyperCachePrefix, pos.Bytes()) if err != nil { if err == storage.ErrKeyNotFound { - return NewEmptyBatchNode(len(pos.Index)) + return newEmptyBatchNode(len(pos.Index)) } log.Fatalf("Oops, something went wrong. Unable to load batch: %v", err) } - batch := ParseBatchNode(len(pos.Index), kv.Value) + batch := parseBatchNode(len(pos.Index), kv.Value) return batch } diff --git a/balloon/hyper/navigation/audit.go b/balloon/hyper/navigation/audit.go deleted file mode 100644 index a544cb8b9..000000000 --- a/balloon/hyper/navigation/audit.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - Copyright 2018 Banco Bilbao Vizcaya Argentaria, S.A. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package navigation - -import ( - "github.com/bbva/qed/hashing" -) - -type AuditPath map[string]hashing.Digest - -func (p AuditPath) Get(pos Position) (hashing.Digest, bool) { - digest, ok := p[pos.StringId()] - return digest, ok -} - -func NewAuditPath() AuditPath { - return make(AuditPath, 0) -} diff --git a/balloon/hyper/navigation/test_util.go b/balloon/hyper/navigation/test_util.go deleted file mode 100644 index 730a8b0fa..000000000 --- a/balloon/hyper/navigation/test_util.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - Copyright 2018 Banco Bilbao Vizcaya Argentaria, S.A. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package navigation - -func pos(index uint8, height uint16) Position { - return NewPosition([]byte{byte(index)}, height) -} diff --git a/balloon/hyper/operation.go b/balloon/hyper/operation.go new file mode 100644 index 000000000..8c8f76f9a --- /dev/null +++ b/balloon/hyper/operation.go @@ -0,0 +1,230 @@ +/* + Copyright 2018 Banco Bilbao Vizcaya Argentaria, S.A. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package hyper + +import ( + "github.com/bbva/qed/balloon/cache" + "github.com/bbva/qed/hashing" + "github.com/bbva/qed/log" + "github.com/bbva/qed/storage" +) + +type pruningContext struct { + Hasher hashing.Hasher + Cache cache.ModifiableCache + DefaultHashes []hashing.Digest + Mutations []*storage.Mutation + AuditPath AuditPath +} + +type operationCode int + +const ( + leafHashCode operationCode = iota + innerHashCode + updateBatchNodeCode + updateBatchShortcutCode + getDefaultHashCode + getProvidedHashCode + putInCacheCode + mutateBatchCode + collectHashCode + getFromPathCode + useHashCode + noOpCode +) + +type interpreter func(ops *operationsStack, c *pruningContext) hashing.Digest + +type operation struct { + Code operationCode + Pos position + Interpret interpreter +} + +func leafHash(pos position, value []byte) *operation { + return &operation{ + Code: leafHashCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + return c.Hasher.Salted(pos.Bytes(), value) + }, + } +} + +func innerHash(pos position) *operation { + return &operation{ + Code: innerHashCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + leftHash := ops.Pop().Interpret(ops, c) + rightHash := ops.Pop().Interpret(ops, c) + return c.Hasher.Salted(pos.Bytes(), leftHash, rightHash) + }, + } +} + +func updatebatchNode(pos position, idx int8, batch *batchNode) *operation { + return &operation{ + Code: updateBatchNodeCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + hash := ops.Pop().Interpret(ops, c) + batch.AddHashAt(idx, hash) + return hash + }, + } +} + +func updateBatchShortcut(pos position, idx int8, batch *batchNode, key, value []byte) *operation { + return &operation{ + Code: updateBatchShortcutCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + hash := ops.Pop().Interpret(ops, c) + batch.AddLeafAt(idx, hash, key, value) + return hash + }, + } +} + +func getDefaultHash(pos position) 
*operation { + return &operation{ + Code: getDefaultHashCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + return c.DefaultHashes[pos.Height] + }, + } +} + +func getProvidedHash(pos position, idx int8, batch *batchNode) *operation { + return &operation{ + Code: getProvidedHashCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + return batch.GetElementAt(idx) + }, + } +} + +func putInCache(pos position, batch *batchNode) *operation { + return &operation{ + Code: putInCacheCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + hash := ops.Pop().Interpret(ops, c) + c.Cache.Put(pos.Bytes(), batch.Serialize()) + return hash + }, + } +} + +func mutateBatch(pos position, batch *batchNode) *operation { + return &operation{ + Code: mutateBatchCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + hash := ops.Pop().Interpret(ops, c) + c.Mutations = append(c.Mutations, storage.NewMutation(storage.HyperCachePrefix, pos.Bytes(), batch.Serialize())) + return hash + }, + } +} + +func collectHash(pos position) *operation { + return &operation{ + Code: collectHashCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + hash := ops.Pop().Interpret(ops, c) + c.AuditPath[pos.StringId()] = hash + return hash + }, + } +} + +func getFromPath(pos position) *operation { + return &operation{ + Code: getFromPathCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + hash, ok := c.AuditPath.Get(pos) + if !ok { + log.Fatalf("Oops, something went wrong. Invalid position in audit path") + } + return hash + }, + } +} + +func useHash(pos position, digest []byte) *operation { + return &operation{ + Code: useHashCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + return digest + }, + } +} + +func noOp(pos position) *operation { + return &operation{ + Code: noOpCode, + Pos: pos, + Interpret: func(ops *operationsStack, c *pruningContext) hashing.Digest { + return nil + }, + } +} + +type operationsStack []*operation + +func newOperationsStack() *operationsStack { + return new(operationsStack) +} + +func (s *operationsStack) Len() int { + return len(*s) +} + +func (s operationsStack) Peek() (op *operation) { + return s[len(s)-1] +} + +func (s *operationsStack) Pop() (op *operation) { + i := s.Len() - 1 + op = (*s)[i] + *s = (*s)[:i] + return +} + +func (s *operationsStack) Push(op *operation) { + *s = append(*s, op) +} + +func (s *operationsStack) PushAll(ops ...*operation) { + *s = append(*s, ops...) +} + +func (s *operationsStack) List() []*operation { + l := make([]*operation, 0) + for s.Len() > 0 { + l = append(l, s.Pop()) + } + return l +} diff --git a/balloon/hyper/navigation/position.go b/balloon/hyper/position.go similarity index 69% rename from balloon/hyper/navigation/position.go rename to balloon/hyper/position.go index 0767ed0b4..e95a6482b 100644 --- a/balloon/hyper/navigation/position.go +++ b/balloon/hyper/position.go @@ -14,7 +14,7 @@ limitations under the License. 
*/ -package navigation +package hyper import ( "fmt" @@ -22,7 +22,7 @@ import ( "github.com/bbva/qed/util" ) -type Position struct { +type position struct { Index []byte Height uint16 @@ -30,10 +30,10 @@ type Position struct { numBits uint16 } -func NewPosition(index []byte, height uint16) Position { +func newPosition(index []byte, height uint16) position { // Size of the index plus 2 bytes for the height b := append(util.Uint16AsBytes(height), index[:len(index)]...) - return Position{ + return position{ Index: index, Height: height, serialized: b, // memoized @@ -41,48 +41,48 @@ func NewPosition(index []byte, height uint16) Position { } } -func NewRootPosition(indexNumBytes uint16) Position { - return NewPosition(make([]byte, indexNumBytes), indexNumBytes*8) +func newRootPosition(indexNumBytes uint16) position { + return newPosition(make([]byte, indexNumBytes), indexNumBytes*8) } -func (p Position) Bytes() []byte { +func (p position) Bytes() []byte { return p.serialized } -func (p Position) String() string { +func (p position) String() string { return fmt.Sprintf("Pos(%d, %d)", p.Index, p.Height) } -func (p Position) StringId() string { +func (p position) StringId() string { return fmt.Sprintf("%#x|%d", p.Index, p.Height) } -func (p Position) Left() Position { +func (p position) Left() position { if p.IsLeaf() { return p } - return NewPosition(p.Index, p.Height-1) + return newPosition(p.Index, p.Height-1) } -func (p Position) Right() Position { +func (p position) Right() position { if p.IsLeaf() { return p } - return NewPosition(p.splitBase(), p.Height-1) + return newPosition(p.splitBase(), p.Height-1) } -func (p Position) IsLeaf() bool { +func (p position) IsLeaf() bool { return p.Height == 0 } -func (p Position) FirstDescendant() Position { +func (p position) FirstDescendant() position { if p.IsLeaf() { return p } - return NewPosition(p.Index, 0) + return newPosition(p.Index, 0) } -func (p Position) LastDescendant() Position { +func (p position) LastDescendant() position { if p.IsLeaf() { return p } @@ -91,10 +91,10 @@ func (p Position) LastDescendant() Position { for bit := p.numBits - p.Height; bit < p.numBits; bit++ { bitSet(index, bit) } - return NewPosition(index, 0) + return newPosition(index, 0) } -func (p Position) splitBase() []byte { +func (p position) splitBase() []byte { splitBit := p.numBits - p.Height split := make([]byte, p.numBits/8) copy(split, p.Index) diff --git a/balloon/hyper/navigation/position_test.go b/balloon/hyper/position_test.go similarity index 81% rename from balloon/hyper/navigation/position_test.go rename to balloon/hyper/position_test.go index 75aea893a..d0e359fce 100644 --- a/balloon/hyper/navigation/position_test.go +++ b/balloon/hyper/position_test.go @@ -14,7 +14,7 @@ limitations under the License. 
*/ -package navigation +package hyper import ( "testing" @@ -26,18 +26,18 @@ func TestRoot(t *testing.T) { testCases := []struct { indexNumBytes uint16 - expectedPos Position + expectedPos position }{ - {1, NewPosition(make([]byte, 1), 8)}, - {2, NewPosition(make([]byte, 2), 16)}, - {4, NewPosition(make([]byte, 4), 32)}, - {8, NewPosition(make([]byte, 8), 64)}, - {16, NewPosition(make([]byte, 16), 128)}, - {32, NewPosition(make([]byte, 32), 256)}, + {1, newPosition(make([]byte, 1), 8)}, + {2, newPosition(make([]byte, 2), 16)}, + {4, newPosition(make([]byte, 4), 32)}, + {8, newPosition(make([]byte, 8), 64)}, + {16, newPosition(make([]byte, 16), 128)}, + {32, newPosition(make([]byte, 32), 256)}, } for i, c := range testCases { - rootPos := NewRootPosition(c.indexNumBytes) + rootPos := newRootPosition(c.indexNumBytes) require.Equalf(t, c.expectedPos, rootPos, "The root position should match in test case %d", i) } @@ -46,8 +46,8 @@ func TestRoot(t *testing.T) { func TestIsLeaf(t *testing.T) { testCases := []struct { - position Position - ok bool + pos position + ok bool }{ {pos(0, 0), true}, {pos(0, 1), false}, @@ -56,7 +56,7 @@ func TestIsLeaf(t *testing.T) { } for i, c := range testCases { - result := c.position.IsLeaf() + result := c.pos.IsLeaf() require.Equalf(t, c.ok, result, "The leaf checking should match for test case %d", i) } @@ -65,8 +65,8 @@ func TestIsLeaf(t *testing.T) { func TestLeft(t *testing.T) { testCases := []struct { - position Position - expectedLeft Position + pos position + expectedLeft position }{ {pos(0, 0), pos(0, 0)}, {pos(0, 0), pos(0, 0)}, @@ -86,7 +86,7 @@ func TestLeft(t *testing.T) { } for i, c := range testCases { - left := c.position.Left() + left := c.pos.Left() require.Equalf(t, c.expectedLeft, left, "The left positions should match for test case %d", i) } } @@ -94,8 +94,8 @@ func TestLeft(t *testing.T) { func TestRight(t *testing.T) { testCases := []struct { - position Position - expectedRight Position + pos position + expectedRight position }{ {pos(0, 0), pos(0, 0)}, {pos(0, 0), pos(0, 0)}, @@ -115,7 +115,7 @@ func TestRight(t *testing.T) { } for i, c := range testCases { - right := c.position.Right() + right := c.pos.Right() require.Equalf(t, c.expectedRight, right, "The right positions should match for test case %d", i) } } @@ -123,8 +123,8 @@ func TestRight(t *testing.T) { func TestFirstDescendant(t *testing.T) { testCases := []struct { - position Position - expectedPos Position + pos position + expectedPos position }{ {pos(0, 0), pos(0, 0)}, {pos(1, 0), pos(1, 0)}, @@ -135,7 +135,7 @@ func TestFirstDescendant(t *testing.T) { } for i, c := range testCases { - first := c.position.FirstDescendant() + first := c.pos.FirstDescendant() require.Equalf(t, c.expectedPos, first, "The first descentant position should match for test case %d", i) } @@ -144,8 +144,8 @@ func TestFirstDescendant(t *testing.T) { func TestLastDescendant(t *testing.T) { testCases := []struct { - position Position - expectedPos Position + pos position + expectedPos position }{ {pos(0, 0), pos(0, 0)}, {pos(1, 0), pos(1, 0)}, @@ -156,7 +156,7 @@ func TestLastDescendant(t *testing.T) { } for i, c := range testCases { - last := c.position.LastDescendant() + last := c.pos.LastDescendant() require.Equalf(t, c.expectedPos, last, "The first descentant position should match for test case %d", i) } diff --git a/balloon/hyper/proof.go b/balloon/hyper/proof.go index 281e4db83..bf8587593 100644 --- a/balloon/hyper/proof.go +++ b/balloon/hyper/proof.go @@ -19,19 +19,28 @@ package hyper import ( 
"bytes" - "github.com/bbva/qed/balloon/hyper/navigation" - "github.com/bbva/qed/balloon/hyper/pruning" "github.com/bbva/qed/hashing" "github.com/bbva/qed/log" ) +type AuditPath map[string]hashing.Digest + +func (p AuditPath) Get(pos position) (hashing.Digest, bool) { + digest, ok := p[pos.StringId()] + return digest, ok +} + +func NewAuditPath() AuditPath { + return make(AuditPath, 0) +} + type QueryProof struct { - AuditPath navigation.AuditPath + AuditPath AuditPath Key, Value []byte hasher hashing.Hasher } -func NewQueryProof(key, value []byte, auditPath navigation.AuditPath, hasher hashing.Hasher) *QueryProof { +func NewQueryProof(key, value []byte, auditPath AuditPath, hasher hashing.Hasher) *QueryProof { return &QueryProof{ Key: key, Value: value, @@ -53,8 +62,8 @@ func (p QueryProof) Verify(key []byte, expectedRootHash hashing.Digest) (valid b } // build a stack of operations and then interpret it to recompute the root hash - ops := pruning.PruneToVerify(key, p.Value, p.hasher.Len()-uint16(len(p.AuditPath))) - ctx := &pruning.Context{ + ops := pruneToVerify(key, p.Value, p.hasher.Len()-uint16(len(p.AuditPath))) + ctx := &pruningContext{ Hasher: p.hasher, AuditPath: p.AuditPath, } diff --git a/balloon/hyper/proof_test.go b/balloon/hyper/proof_test.go index 2296efead..7113e37ba 100644 --- a/balloon/hyper/proof_test.go +++ b/balloon/hyper/proof_test.go @@ -21,15 +21,13 @@ import ( "github.com/bbva/qed/hashing" "github.com/stretchr/testify/assert" - - "github.com/bbva/qed/balloon/hyper/navigation" ) func TestProofVerify(t *testing.T) { testCases := []struct { key, value []byte - auditPath navigation.AuditPath + auditPath AuditPath rootHash hashing.Digest verifyResult bool }{ @@ -37,7 +35,7 @@ func TestProofVerify(t *testing.T) { // verify key=0 with empty audit path key: []byte{0}, value: []byte{0}, - auditPath: navigation.AuditPath{}, + auditPath: AuditPath{}, rootHash: hashing.Digest{0x0}, verifyResult: false, }, @@ -45,7 +43,7 @@ func TestProofVerify(t *testing.T) { // verify key=0 with empty audit path key: []byte{0}, value: []byte{0}, - auditPath: navigation.AuditPath{ + auditPath: AuditPath{ "0x80|7": hashing.Digest{0x0}, "0x40|6": hashing.Digest{0x0}, "0x20|5": hashing.Digest{0x0}, @@ -58,7 +56,7 @@ func TestProofVerify(t *testing.T) { // verify key=0 with empty audit path key: []byte{0}, value: []byte{0}, - auditPath: navigation.AuditPath{ + auditPath: AuditPath{ "0x80|7": hashing.Digest{0x0}, "0x40|6": hashing.Digest{0x0}, "0x20|5": hashing.Digest{0x0}, diff --git a/balloon/hyper/pruning/batch.go b/balloon/hyper/pruning/batch.go deleted file mode 100644 index bb07cb42a..000000000 --- a/balloon/hyper/pruning/batch.go +++ /dev/null @@ -1,120 +0,0 @@ -/* - Copyright 2018 Banco Bilbao Vizcaya Argentaria, S.A. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package pruning - -import ( - "fmt" - "strings" - - "github.com/bbva/qed/hashing" -) - -type BatchNode struct { - Batch [][]byte - nodeSize int // in bytes -} - -func NewEmptyBatchNode(nodeSize int) *BatchNode { - return &BatchNode{ - nodeSize: nodeSize, - Batch: make([][]byte, 31, 31), - } -} - -func NewBatchNode(nodeSize int, batch [][]byte) *BatchNode { - return &BatchNode{ - nodeSize: nodeSize, - Batch: batch, - } -} - -func (b BatchNode) String() string { - var strs []string - for i, n := range b.Batch { - strs = append(strs, fmt.Sprintf("[%d - %#x]", i, n)) - } - return strings.Join(strs, "\n") -} - -func (b BatchNode) HasLeafAt(i int8) bool { - return len(b.Batch[i]) > 0 && b.Batch[i][b.nodeSize] == byte(1) -} - -func (b BatchNode) AddHashAt(i int8, value []byte) { - b.Batch[i] = append(value, byte(0)) -} - -func (b BatchNode) AddLeafAt(i int8, hash hashing.Digest, key, value []byte) { - b.Batch[i] = append(hash, byte(1)) - b.Batch[2*i+1] = append(key, byte(2)) - b.Batch[2*i+2] = append(value, byte(2)) -} - -func (b BatchNode) GetLeafKVAt(i int8) ([]byte, []byte) { - return b.Batch[2*i+1][:b.nodeSize], b.Batch[2*i+2][:b.nodeSize] -} - -func (b BatchNode) HasElementAt(i int8) bool { - return len(b.Batch[i]) > 0 -} - -func (b BatchNode) GetElementAt(i int8) []byte { - return b.Batch[i][:b.nodeSize] -} - -func (b BatchNode) ResetElementAt(i int8) { - b.Batch[i] = nil -} - -func (b BatchNode) Serialize() []byte { - serialized := make([]byte, 4) - for i := 0; i < 31; i++ { - if len(b.Batch[i]) != 0 { - bitSet(serialized, i) - serialized = append(serialized, b.Batch[i]...) - } - } - return serialized -} - -func ParseBatch(nodeSize int, value []byte) [][]byte { - batch := make([][]byte, 31, 31) // 31 nodes (including the root) - bitmap := value[:4] // the first 4 bytes define the bitmap - size := nodeSize + 1 - - j := 0 - for i := 0; i < 31; i++ { - if bitIsSet(bitmap, i) { - batch[i] = value[4+size*j : 4+size*(j+1)] - j++ - } - } - - return batch -} - -func ParseBatchNode(nodeSize int, value []byte) *BatchNode { - return NewBatchNode(nodeSize, ParseBatch(nodeSize, value)) -} - -func bitIsSet(bits []byte, i int) bool { - return bits[i/8]&(1< 0 { - l = append(l, s.Pop()) - } - return l -} diff --git a/balloon/hyper/pruning/rebuild.go b/balloon/hyper/rebuild.go similarity index 67% rename from balloon/hyper/pruning/rebuild.go rename to balloon/hyper/rebuild.go index c337913b4..170e819f1 100644 --- a/balloon/hyper/pruning/rebuild.go +++ b/balloon/hyper/rebuild.go @@ -14,21 +14,19 @@ limitations under the License. 
*/ -package pruning +package hyper import ( "bytes" - - "github.com/bbva/qed/balloon/hyper/navigation" ) -func PruneToRebuild(index, serializedBatch []byte, cacheHeightLimit uint16, batches BatchLoader) *OperationsStack { +func pruneToRebuild(index, serializedBatch []byte, cacheHeightLimit uint16, batches batchLoader) *operationsStack { - persistedBatch := ParseBatchNode(len(index), serializedBatch) + persistedBatch := parseBatchNode(len(index), serializedBatch) - var traverse, discardBranch func(pos navigation.Position, batch *BatchNode, iBatch int8, ops *OperationsStack) + var traverse, discardBranch func(pos position, batch *batchNode, iBatch int8, ops *operationsStack) - discardBranch = func(pos navigation.Position, batch *BatchNode, iBatch int8, ops *OperationsStack) { + discardBranch = func(pos position, batch *batchNode, iBatch int8, ops *operationsStack) { if batch.HasElementAt(iBatch) { ops.Push(getProvidedHash(pos, iBatch, batch)) @@ -37,12 +35,12 @@ func PruneToRebuild(index, serializedBatch []byte, cacheHeightLimit uint16, batc } } - traverse = func(pos navigation.Position, batch *BatchNode, iBatch int8, ops *OperationsStack) { + traverse = func(pos position, batch *batchNode, iBatch int8, ops *operationsStack) { // we don't need to check the length of the leaves because we // always have to descend to the cache height limit if pos.Height == cacheHeightLimit { - ops.PushAll(useHash(pos, persistedBatch.GetElementAt(0)), updateBatchNode(pos, iBatch, batch)) + ops.PushAll(useHash(pos, persistedBatch.GetElementAt(0)), updatebatchNode(pos, iBatch, batch)) return } @@ -53,7 +51,7 @@ func PruneToRebuild(index, serializedBatch []byte, cacheHeightLimit uint16, batc // at the end of a batch tree if iBatch > 0 && pos.Height%4 == 0 { traverse(pos, nil, 0, ops) - ops.Push(updateBatchNode(pos, iBatch, batch)) + ops.Push(updatebatchNode(pos, iBatch, batch)) return } @@ -67,15 +65,15 @@ func PruneToRebuild(index, serializedBatch []byte, cacheHeightLimit uint16, batc traverse(rightPos, batch, 2*iBatch+2, ops) } - ops.PushAll(innerHash(pos), updateBatchNode(pos, iBatch, batch)) + ops.PushAll(innerHash(pos), updatebatchNode(pos, iBatch, batch)) if iBatch == 0 { // it's the root of the batch tree ops.Push(putInCache(pos, batch)) } } - ops := NewOperationsStack() - traverse(navigation.NewRootPosition(uint16(len(index))), nil, 0, ops) + ops := newOperationsStack() + traverse(newRootPosition(uint16(len(index))), nil, 0, ops) return ops } diff --git a/balloon/hyper/pruning/rebuild_test.go b/balloon/hyper/rebuild_test.go similarity index 97% rename from balloon/hyper/pruning/rebuild_test.go rename to balloon/hyper/rebuild_test.go index ac398ddd6..f256ea3c6 100644 --- a/balloon/hyper/pruning/rebuild_test.go +++ b/balloon/hyper/rebuild_test.go @@ -14,7 +14,7 @@ limitations under the License. */ -package pruning +package hyper import "testing" diff --git a/balloon/hyper/pruning/search.go b/balloon/hyper/search.go similarity index 76% rename from balloon/hyper/pruning/search.go rename to balloon/hyper/search.go index 3deeab016..9b06a5139 100644 --- a/balloon/hyper/pruning/search.go +++ b/balloon/hyper/search.go @@ -14,26 +14,24 @@ limitations under the License. 
*/ -package pruning +package hyper import ( "bytes" - - "github.com/bbva/qed/balloon/hyper/navigation" ) -func PruneToFind(index []byte, batches BatchLoader) *OperationsStack { +func pruneToFind(index []byte, batches batchLoader) *operationsStack { - var traverse, traverseBatch, discardBranch func(pos navigation.Position, batch *BatchNode, iBatch int8, ops *OperationsStack) + var traverse, traverseBatch, discardBranch func(pos position, batch *batchNode, iBatch int8, ops *operationsStack) - traverse = func(pos navigation.Position, batch *BatchNode, iBatch int8, ops *OperationsStack) { + traverse = func(pos position, batch *batchNode, iBatch int8, ops *operationsStack) { if batch == nil { batch = batches.Load(pos) } traverseBatch(pos, batch, iBatch, ops) } - discardBranch = func(pos navigation.Position, batch *BatchNode, iBatch int8, ops *OperationsStack) { + discardBranch = func(pos position, batch *batchNode, iBatch int8, ops *operationsStack) { if batch.HasElementAt(iBatch) { ops.PushAll(getProvidedHash(pos, iBatch, batch), collectHash(pos)) } else { @@ -41,7 +39,7 @@ func PruneToFind(index []byte, batches BatchLoader) *OperationsStack { } } - traverseBatch = func(pos navigation.Position, batch *BatchNode, iBatch int8, ops *OperationsStack) { + traverseBatch = func(pos position, batch *batchNode, iBatch int8, ops *operationsStack) { // We found a nil value. That means there is no previous node stored on the current // path so we stop traversing because the index does no exist in the tree. @@ -79,8 +77,8 @@ func PruneToFind(index []byte, batches BatchLoader) *OperationsStack { ops.Push(innerHash(pos)) } - ops := NewOperationsStack() - root := navigation.NewRootPosition(uint16(len(index))) + ops := newOperationsStack() + root := newRootPosition(uint16(len(index))) traverse(root, nil, 0, ops) if ops.Len() == 0 { ops.Push(noOp(root)) diff --git a/balloon/hyper/pruning/search_test.go b/balloon/hyper/search_test.go similarity index 79% rename from balloon/hyper/pruning/search_test.go rename to balloon/hyper/search_test.go index bb4b08d6c..67ef93ca2 100644 --- a/balloon/hyper/pruning/search_test.go +++ b/balloon/hyper/search_test.go @@ -14,13 +14,12 @@ limitations under the License. 
*/ -package pruning +package hyper import ( "testing" "github.com/bbva/qed/balloon/cache" - "github.com/bbva/qed/balloon/hyper/navigation" "github.com/bbva/qed/hashing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -40,7 +39,7 @@ func TestPruneToFind(t *testing.T) { cachedBatches: map[string][]byte{}, storedBatches: map[string][]byte{}, expectedOps: []op{ - {NoOpCode, pos(0, 8)}, // empty audit path + {noOpCode, pos(0, 8)}, // empty audit path }, }, { @@ -65,19 +64,19 @@ func TestPruneToFind(t *testing.T) { }, }, expectedOps: []op{ - {InnerHashCode, pos(0, 8)}, - {CollectHashCode, pos(128, 7)}, - {GetDefaultHashCode, pos(128, 7)}, - {InnerHashCode, pos(0, 7)}, - {CollectHashCode, pos(64, 6)}, - {GetDefaultHashCode, pos(64, 6)}, - {InnerHashCode, pos(0, 6)}, - {CollectHashCode, pos(32, 5)}, - {GetDefaultHashCode, pos(32, 5)}, - {InnerHashCode, pos(0, 5)}, - {CollectHashCode, pos(16, 4)}, - {GetDefaultHashCode, pos(16, 4)}, - {GetProvidedHashCode, pos(0, 4)}, // we stop traversing at the shortcut (index=0) + {innerHashCode, pos(0, 8)}, + {collectHashCode, pos(128, 7)}, + {getDefaultHashCode, pos(128, 7)}, + {innerHashCode, pos(0, 7)}, + {collectHashCode, pos(64, 6)}, + {getDefaultHashCode, pos(64, 6)}, + {innerHashCode, pos(0, 6)}, + {collectHashCode, pos(32, 5)}, + {getDefaultHashCode, pos(32, 5)}, + {innerHashCode, pos(0, 5)}, + {collectHashCode, pos(16, 4)}, + {getDefaultHashCode, pos(16, 4)}, + {getProvidedHashCode, pos(0, 4)}, // we stop traversing at the shortcut (index=0) }, }, { @@ -103,19 +102,19 @@ func TestPruneToFind(t *testing.T) { }, }, expectedOps: []op{ - {InnerHashCode, pos(0, 8)}, - {CollectHashCode, pos(128, 7)}, - {GetDefaultHashCode, pos(128, 7)}, - {InnerHashCode, pos(0, 7)}, - {CollectHashCode, pos(64, 6)}, - {GetDefaultHashCode, pos(64, 6)}, - {InnerHashCode, pos(0, 6)}, - {CollectHashCode, pos(32, 5)}, - {GetDefaultHashCode, pos(32, 5)}, - {InnerHashCode, pos(0, 5)}, - {CollectHashCode, pos(16, 4)}, - {GetDefaultHashCode, pos(16, 4)}, - {GetProvidedHashCode, pos(0, 4)}, // stop at the position of the shorcut (index=0) + {innerHashCode, pos(0, 8)}, + {collectHashCode, pos(128, 7)}, + {getDefaultHashCode, pos(128, 7)}, + {innerHashCode, pos(0, 7)}, + {collectHashCode, pos(64, 6)}, + {getDefaultHashCode, pos(64, 6)}, + {innerHashCode, pos(0, 6)}, + {collectHashCode, pos(32, 5)}, + {getDefaultHashCode, pos(32, 5)}, + {innerHashCode, pos(0, 5)}, + {collectHashCode, pos(16, 4)}, + {getDefaultHashCode, pos(16, 4)}, + {getProvidedHashCode, pos(0, 4)}, // stop at the position of the shorcut (index=0) }, }, { @@ -156,31 +155,31 @@ func TestPruneToFind(t *testing.T) { }, }, expectedOps: []op{ - {InnerHashCode, pos(0, 8)}, - {CollectHashCode, pos(128, 7)}, - {GetDefaultHashCode, pos(128, 7)}, - {InnerHashCode, pos(0, 7)}, - {CollectHashCode, pos(64, 6)}, - {GetDefaultHashCode, pos(64, 6)}, - {InnerHashCode, pos(0, 6)}, - {CollectHashCode, pos(32, 5)}, - {GetDefaultHashCode, pos(32, 5)}, - {InnerHashCode, pos(0, 5)}, - {CollectHashCode, pos(16, 4)}, - {GetDefaultHashCode, pos(16, 4)}, - {InnerHashCode, pos(0, 4)}, - {CollectHashCode, pos(8, 3)}, - {GetDefaultHashCode, pos(8, 3)}, - {InnerHashCode, pos(0, 3)}, - {CollectHashCode, pos(4, 2)}, - {GetDefaultHashCode, pos(4, 2)}, - {InnerHashCode, pos(0, 2)}, - {CollectHashCode, pos(2, 1)}, - {GetDefaultHashCode, pos(2, 1)}, - {InnerHashCode, pos(0, 1)}, - {GetProvidedHashCode, pos(1, 0)}, // shortcut found but not collected - {CollectHashCode, pos(0, 0)}, - 
{GetProvidedHashCode, pos(0, 0)}, // we take the hash of the index=0 position from the batch + {innerHashCode, pos(0, 8)}, + {collectHashCode, pos(128, 7)}, + {getDefaultHashCode, pos(128, 7)}, + {innerHashCode, pos(0, 7)}, + {collectHashCode, pos(64, 6)}, + {getDefaultHashCode, pos(64, 6)}, + {innerHashCode, pos(0, 6)}, + {collectHashCode, pos(32, 5)}, + {getDefaultHashCode, pos(32, 5)}, + {innerHashCode, pos(0, 5)}, + {collectHashCode, pos(16, 4)}, + {getDefaultHashCode, pos(16, 4)}, + {innerHashCode, pos(0, 4)}, + {collectHashCode, pos(8, 3)}, + {getDefaultHashCode, pos(8, 3)}, + {innerHashCode, pos(0, 3)}, + {collectHashCode, pos(4, 2)}, + {getDefaultHashCode, pos(4, 2)}, + {innerHashCode, pos(0, 2)}, + {collectHashCode, pos(2, 1)}, + {getDefaultHashCode, pos(2, 1)}, + {innerHashCode, pos(0, 1)}, + {getProvidedHashCode, pos(1, 0)}, // shortcut found but not collected + {collectHashCode, pos(0, 0)}, + {getProvidedHashCode, pos(0, 0)}, // we take the hash of the index=0 position from the batch }, }, { @@ -206,19 +205,19 @@ func TestPruneToFind(t *testing.T) { }, }, expectedOps: []op{ - {InnerHashCode, pos(0, 8)}, - {CollectHashCode, pos(128, 7)}, - {GetDefaultHashCode, pos(128, 7)}, - {InnerHashCode, pos(0, 7)}, - {CollectHashCode, pos(64, 6)}, - {GetDefaultHashCode, pos(64, 6)}, - {InnerHashCode, pos(0, 6)}, - {CollectHashCode, pos(32, 5)}, - {GetDefaultHashCode, pos(32, 5)}, - {InnerHashCode, pos(0, 5)}, - {CollectHashCode, pos(16, 4)}, - {GetDefaultHashCode, pos(16, 4)}, - {GetProvidedHashCode, pos(0, 4)}, // stop at the position of the shorcut (index=0) + {innerHashCode, pos(0, 8)}, + {collectHashCode, pos(128, 7)}, + {getDefaultHashCode, pos(128, 7)}, + {innerHashCode, pos(0, 7)}, + {collectHashCode, pos(64, 6)}, + {getDefaultHashCode, pos(64, 6)}, + {innerHashCode, pos(0, 6)}, + {collectHashCode, pos(32, 5)}, + {getDefaultHashCode, pos(32, 5)}, + {innerHashCode, pos(0, 5)}, + {collectHashCode, pos(16, 4)}, + {getDefaultHashCode, pos(16, 4)}, + {getProvidedHashCode, pos(0, 4)}, // stop at the position of the shorcut (index=0) }, }, { @@ -251,25 +250,25 @@ func TestPruneToFind(t *testing.T) { }, }, expectedOps: []op{ - {InnerHashCode, pos(0, 8)}, - {CollectHashCode, pos(128, 7)}, - {GetDefaultHashCode, pos(128, 7)}, - {InnerHashCode, pos(0, 7)}, - {CollectHashCode, pos(64, 6)}, - {GetDefaultHashCode, pos(64, 6)}, - {InnerHashCode, pos(0, 6)}, - {CollectHashCode, pos(32, 5)}, - {GetDefaultHashCode, pos(32, 5)}, - {InnerHashCode, pos(0, 5)}, - {CollectHashCode, pos(16, 4)}, - {GetDefaultHashCode, pos(16, 4)}, - {InnerHashCode, pos(0, 4)}, - {InnerHashCode, pos(8, 3)}, - {GetProvidedHashCode, pos(12, 2)}, // found shortcut index=12 - {CollectHashCode, pos(8, 2)}, - {GetProvidedHashCode, pos(8, 2)}, // shortcut index=8 - {CollectHashCode, pos(0, 3)}, - {GetProvidedHashCode, pos(0, 3)}, // shortcut index=0 + {innerHashCode, pos(0, 8)}, + {collectHashCode, pos(128, 7)}, + {getDefaultHashCode, pos(128, 7)}, + {innerHashCode, pos(0, 7)}, + {collectHashCode, pos(64, 6)}, + {getDefaultHashCode, pos(64, 6)}, + {innerHashCode, pos(0, 6)}, + {collectHashCode, pos(32, 5)}, + {getDefaultHashCode, pos(32, 5)}, + {innerHashCode, pos(0, 5)}, + {collectHashCode, pos(16, 4)}, + {getDefaultHashCode, pos(16, 4)}, + {innerHashCode, pos(0, 4)}, + {innerHashCode, pos(8, 3)}, + {getProvidedHashCode, pos(12, 2)}, // found shortcut index=12 + {collectHashCode, pos(8, 2)}, + {getProvidedHashCode, pos(8, 2)}, // shortcut index=8 + {collectHashCode, pos(0, 3)}, + {getProvidedHashCode, pos(0, 3)}, // shortcut 
index=0 }, }, { @@ -294,10 +293,10 @@ func TestPruneToFind(t *testing.T) { }, }, expectedOps: []op{ - {InnerHashCode, pos(0, 8)}, - {NoOpCode, pos(128, 7)}, // not found - {CollectHashCode, pos(0, 7)}, - {GetProvidedHashCode, pos(0, 7)}, // we discard the previous path updated by the previous insertion + {innerHashCode, pos(0, 8)}, + {noOpCode, pos(128, 7)}, // not found + {collectHashCode, pos(0, 7)}, + {getProvidedHashCode, pos(0, 7)}, // we discard the previous path updated by the previous insertion }, }, } @@ -306,8 +305,8 @@ func TestPruneToFind(t *testing.T) { cacheHeightLimit := batchLevels * 4 for i, c := range testCases { - loader := NewFakeBatchLoader(c.cachedBatches, c.storedBatches, cacheHeightLimit) - prunedOps := PruneToFind(c.index, loader).List() + loader := newFakeBatchLoader(c.cachedBatches, c.storedBatches, cacheHeightLimit) + prunedOps := pruneToFind(c.index, loader).List() require.Truef(t, len(c.expectedOps) == len(prunedOps), "The size of the pruned ops should match the expected for test case %d", i) for j := 0; j < len(prunedOps); j++ { assert.Equalf(t, c.expectedOps[j].Code, prunedOps[j].Code, "The pruned operation's code should match for test case %d", i) @@ -322,14 +321,14 @@ func TestSearchInterpretation(t *testing.T) { index []byte cachedBatches map[string][]byte storedBatches map[string][]byte - expectedAuditPath navigation.AuditPath + expectedAuditPath AuditPath }{ { // search for index=0 on empty tree index: []byte{0}, cachedBatches: map[string][]byte{}, storedBatches: map[string][]byte{}, - expectedAuditPath: navigation.AuditPath{}, + expectedAuditPath: AuditPath{}, }, { // search for index=0 on tree with only one leaf @@ -352,7 +351,7 @@ func TestSearchInterpretation(t *testing.T) { 0x00, 0x02, // iBatch 2 -> value=0x00 }, }, - expectedAuditPath: navigation.AuditPath{ + expectedAuditPath: AuditPath{ pos(128, 7).StringId(): []byte{0x0}, pos(64, 6).StringId(): []byte{0x0}, pos(32, 5).StringId(): []byte{0x0}, @@ -381,7 +380,7 @@ func TestSearchInterpretation(t *testing.T) { 0x00, 0x02, // iBatch 2 -> value=0x00 }, }, - expectedAuditPath: navigation.AuditPath{ + expectedAuditPath: AuditPath{ pos(128, 7).StringId(): []byte{0x0}, pos(64, 6).StringId(): []byte{0x0}, pos(32, 5).StringId(): []byte{0x0}, @@ -425,7 +424,7 @@ func TestSearchInterpretation(t *testing.T) { 0x01, 0x00, // iBatch 16 -> hash=0x01 }, }, - expectedAuditPath: navigation.AuditPath{ + expectedAuditPath: AuditPath{ pos(128, 7).StringId(): []byte{0x0}, pos(64, 6).StringId(): []byte{0x0}, pos(32, 5).StringId(): []byte{0x0}, @@ -458,7 +457,7 @@ func TestSearchInterpretation(t *testing.T) { 0x00, 0x02, // iBatch 2 -> value=0x00 }, }, - expectedAuditPath: navigation.AuditPath{ + expectedAuditPath: AuditPath{ pos(128, 7).StringId(): []byte{0x0}, pos(64, 6).StringId(): []byte{0x0}, pos(32, 5).StringId(): []byte{0x0}, @@ -494,7 +493,7 @@ func TestSearchInterpretation(t *testing.T) { 0x0c, 0x02, // iBatch 14 -> value=0x0c }, }, - expectedAuditPath: navigation.AuditPath{ + expectedAuditPath: AuditPath{ pos(128, 7).StringId(): []byte{0x0}, pos(64, 6).StringId(): []byte{0x0}, pos(32, 5).StringId(): []byte{0x0}, @@ -524,7 +523,7 @@ func TestSearchInterpretation(t *testing.T) { 0x00, 0x02, // iBatch 2 -> value=0x00 }, }, - expectedAuditPath: navigation.AuditPath{ + expectedAuditPath: AuditPath{ pos(0, 7).StringId(): []byte{0x0}, }, }, @@ -536,14 +535,14 @@ func TestSearchInterpretation(t *testing.T) { for i, c := range testCases { cache := cache.NewFakeCache([]byte{0x0}) - batches := 
NewFakeBatchLoader(c.cachedBatches, c.storedBatches, cacheHeightLimit) + batches := newFakeBatchLoader(c.cachedBatches, c.storedBatches, cacheHeightLimit) - ops := PruneToFind(c.index, batches) - ctx := &Context{ + ops := pruneToFind(c.index, batches) + ctx := &pruningContext{ Hasher: hashing.NewFakeXorHasher(), Cache: cache, DefaultHashes: defaultHashes, - AuditPath: navigation.NewAuditPath(), + AuditPath: NewAuditPath(), } ops.Pop().Interpret(ops, ctx) diff --git a/balloon/hyper/pruning/test_util.go b/balloon/hyper/test_util.go similarity index 53% rename from balloon/hyper/pruning/test_util.go rename to balloon/hyper/test_util.go index 488bfa89d..0ef9837f0 100644 --- a/balloon/hyper/pruning/test_util.go +++ b/balloon/hyper/test_util.go @@ -14,53 +14,49 @@ limitations under the License. */ -package pruning +package hyper -import ( - "github.com/bbva/qed/balloon/hyper/navigation" -) - -func pos(index byte, height uint16) navigation.Position { - return navigation.NewPosition([]byte{index}, height) +func pos(index byte, height uint16) position { + return newPosition([]byte{index}, height) } type op struct { - Code OperationCode - Pos navigation.Position + Code operationCode + Pos position } -type FakeBatchLoader struct { +type fakeBatchLoader struct { cacheHeightLimit uint16 - cached map[string]*BatchNode - stored map[string]*BatchNode + cached map[string]*batchNode + stored map[string]*batchNode } -func NewFakeBatchLoader(cached map[string][]byte, stored map[string][]byte, cacheHeightLimit uint16) *FakeBatchLoader { - loader := &FakeBatchLoader{ +func newFakeBatchLoader(cached map[string][]byte, stored map[string][]byte, cacheHeightLimit uint16) *fakeBatchLoader { + loader := &fakeBatchLoader{ cacheHeightLimit: cacheHeightLimit, - cached: make(map[string]*BatchNode, 0), - stored: make(map[string]*BatchNode, 0), + cached: make(map[string]*batchNode, 0), + stored: make(map[string]*batchNode, 0), } for k, v := range cached { - loader.cached[k] = ParseBatchNode(1, v) + loader.cached[k] = parseBatchNode(1, v) } for k, v := range stored { - loader.stored[k] = ParseBatchNode(1, v) + loader.stored[k] = parseBatchNode(1, v) } return loader } -func (l *FakeBatchLoader) Load(pos navigation.Position) *BatchNode { +func (l *fakeBatchLoader) Load(pos position) *batchNode { if pos.Height > l.cacheHeightLimit { batch, ok := l.cached[pos.StringId()] if !ok { - return NewEmptyBatchNode(len(pos.Index)) + return newEmptyBatchNode(len(pos.Index)) } return batch } batch, ok := l.stored[pos.StringId()] if !ok { - return NewEmptyBatchNode(len(pos.Index)) + return newEmptyBatchNode(len(pos.Index)) } return batch } diff --git a/balloon/hyper/tree.go b/balloon/hyper/tree.go index 5d9f78420..7400af915 100644 --- a/balloon/hyper/tree.go +++ b/balloon/hyper/tree.go @@ -19,11 +19,9 @@ package hyper import ( "sync" - "github.com/bbva/qed/balloon/hyper/navigation" "github.com/bbva/qed/log" "github.com/bbva/qed/balloon/cache" - "github.com/bbva/qed/balloon/hyper/pruning" "github.com/bbva/qed/hashing" "github.com/bbva/qed/storage" "github.com/bbva/qed/util" @@ -41,7 +39,7 @@ type HyperTree struct { hasher hashing.Hasher cacheHeightLimit uint16 defaultHashes []hashing.Digest - batchLoader pruning.BatchLoader + batchLoader batchLoader sync.RWMutex } @@ -59,7 +57,7 @@ func NewHyperTree(hasherF func() hashing.Hasher, store storage.Store, cache cach hasher: hasher, cacheHeightLimit: cacheHeightLimit, defaultHashes: make([]hashing.Digest, numBits), - batchLoader: 
pruning.NewDefaultBatchLoader(store, cache, cacheHeightLimit), + batchLoader: NewDefaultBatchLoader(store, cache, cacheHeightLimit), } tree.defaultHashes[0] = tree.hasher.Do([]byte{0x0}, []byte{0x0}) @@ -82,8 +80,8 @@ func (t *HyperTree) Add(eventDigest hashing.Digest, version uint64) (hashing.Dig versionAsBytes := util.Uint64AsBytes(version) // build a stack of operations and then interpret it to generate the root hash - ops := pruning.PruneToInsert(eventDigest, versionAsBytes, t.cacheHeightLimit, t.batchLoader) - ctx := &pruning.Context{ + ops := pruneToInsert(eventDigest, versionAsBytes, t.cacheHeightLimit, t.batchLoader) + ctx := &pruningContext{ Hasher: t.hasher, Cache: t.cache, DefaultHashes: t.defaultHashes, @@ -108,12 +106,12 @@ func (t *HyperTree) QueryMembership(eventDigest hashing.Digest, version []byte) //log.Debugf("Proving membership for index %d with version %d", eventDigest, version) // build a stack of operations and then interpret it to generate the audit path - ops := pruning.PruneToFind(eventDigest, t.batchLoader) - ctx := &pruning.Context{ + ops := pruneToFind(eventDigest, t.batchLoader) + ctx := &pruningContext{ Hasher: t.hasher, Cache: t.cache, DefaultHashes: t.defaultHashes, - AuditPath: make(navigation.AuditPath, 0), + AuditPath: make(AuditPath, 0), } ops.Pop().Interpret(ops, ctx) @@ -140,8 +138,8 @@ func (t *HyperTree) RebuildCache() { // insert every node into cache for _, node := range nodes { - ops := pruning.PruneToRebuild(node.Key[2:], node.Value, t.cacheHeightLimit, t.batchLoader) - ctx := &pruning.Context{ + ops := pruneToRebuild(node.Key[2:], node.Value, t.cacheHeightLimit, t.batchLoader) + ctx := &pruningContext{ Hasher: t.hasher, Cache: t.cache, DefaultHashes: t.defaultHashes, diff --git a/balloon/hyper/tree_test.go b/balloon/hyper/tree_test.go index c608c30c2..926fb4957 100644 --- a/balloon/hyper/tree_test.go +++ b/balloon/hyper/tree_test.go @@ -21,7 +21,6 @@ import ( "testing" "github.com/bbva/qed/balloon/cache" - "github.com/bbva/qed/balloon/hyper/navigation" "github.com/bbva/qed/hashing" "github.com/bbva/qed/log" "github.com/bbva/qed/storage" @@ -72,13 +71,13 @@ func TestProveMembership(t *testing.T) { testCases := []struct { addedKeys map[uint64]hashing.Digest - expectedAuditPath navigation.AuditPath + expectedAuditPath AuditPath }{ { addedKeys: map[uint64]hashing.Digest{ uint64(0): {0x0}, }, - expectedAuditPath: navigation.AuditPath{ + expectedAuditPath: AuditPath{ "0x80|7": hashing.Digest{0x0}, "0x40|6": hashing.Digest{0x0}, "0x20|5": hashing.Digest{0x0}, @@ -91,7 +90,7 @@ func TestProveMembership(t *testing.T) { uint64(1): {0x1}, uint64(2): {0x2}, }, - expectedAuditPath: navigation.AuditPath{ + expectedAuditPath: AuditPath{ "0x80|7": hashing.Digest{0x0}, "0x40|6": hashing.Digest{0x0}, "0x20|5": hashing.Digest{0x0}, diff --git a/balloon/hyper/pruning/verify.go b/balloon/hyper/verify.go similarity index 76% rename from balloon/hyper/pruning/verify.go rename to balloon/hyper/verify.go index c72dc82d1..f9f592330 100644 --- a/balloon/hyper/pruning/verify.go +++ b/balloon/hyper/verify.go @@ -14,23 +14,22 @@ limitations under the License. 
*/ -package pruning +package hyper import ( "bytes" - "github.com/bbva/qed/balloon/hyper/navigation" "github.com/bbva/qed/util" ) -func PruneToVerify(index, value []byte, auditPathHeight uint16) *OperationsStack { +func pruneToVerify(index, value []byte, auditPathHeight uint16) *operationsStack { version := util.AddPaddingToBytes(value, len(index)) version = version[len(version)-len(index):] // TODO GET RID OF THIS: used only to pass tests - var traverse func(pos navigation.Position, ops *OperationsStack) + var traverse func(pos position, ops *operationsStack) - traverse = func(pos navigation.Position, ops *OperationsStack) { + traverse = func(pos position, ops *operationsStack) { if pos.Height <= auditPathHeight { ops.Push(leafHash(pos, version)) @@ -50,8 +49,8 @@ func PruneToVerify(index, value []byte, auditPathHeight uint16) *OperationsStack } - ops := NewOperationsStack() - traverse(navigation.NewRootPosition(uint16(len(index))), ops) + ops := newOperationsStack() + traverse(newRootPosition(uint16(len(index))), ops) return ops } diff --git a/balloon/hyper/pruning/verify_test.go b/balloon/hyper/verify_test.go similarity index 77% rename from balloon/hyper/pruning/verify_test.go rename to balloon/hyper/verify_test.go index 603dfb864..a6327bba5 100644 --- a/balloon/hyper/pruning/verify_test.go +++ b/balloon/hyper/verify_test.go @@ -14,12 +14,11 @@ limitations under the License. */ -package pruning +package hyper import ( "testing" - "github.com/bbva/qed/balloon/hyper/navigation" "github.com/bbva/qed/hashing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -29,44 +28,44 @@ func TestPruneToVerify(t *testing.T) { testCases := []struct { index, value []byte - auditPath navigation.AuditPath + auditPath AuditPath expectedOps []op }{ { // verify index=0 with empty audit path index: []byte{0}, value: []byte{0}, - auditPath: navigation.AuditPath{}, + auditPath: AuditPath{}, expectedOps: []op{ - {LeafHashCode, pos(0, 8)}, + {leafHashCode, pos(0, 8)}, }, }, { // verify index=0 index: []byte{0}, value: []byte{0}, - auditPath: navigation.AuditPath{ + auditPath: AuditPath{ pos(128, 7).StringId(): []byte{0x0}, pos(64, 6).StringId(): []byte{0x0}, pos(32, 5).StringId(): []byte{0x0}, pos(16, 4).StringId(): []byte{0x0}, }, expectedOps: []op{ - {InnerHashCode, pos(0, 8)}, - {GetFromPathCode, pos(128, 7)}, - {InnerHashCode, pos(0, 7)}, - {GetFromPathCode, pos(64, 6)}, - {InnerHashCode, pos(0, 6)}, - {GetFromPathCode, pos(32, 5)}, - {InnerHashCode, pos(0, 5)}, - {GetFromPathCode, pos(16, 4)}, - {LeafHashCode, pos(0, 4)}, + {innerHashCode, pos(0, 8)}, + {getFromPathCode, pos(128, 7)}, + {innerHashCode, pos(0, 7)}, + {getFromPathCode, pos(64, 6)}, + {innerHashCode, pos(0, 6)}, + {getFromPathCode, pos(32, 5)}, + {innerHashCode, pos(0, 5)}, + {getFromPathCode, pos(16, 4)}, + {leafHashCode, pos(0, 4)}, }, }, } for i, c := range testCases { - prunedOps := PruneToVerify(c.index, c.value, uint16(8-len(c.auditPath))).List() + prunedOps := pruneToVerify(c.index, c.value, uint16(8-len(c.auditPath))).List() require.Truef(t, len(c.expectedOps) == len(prunedOps), "The size of the pruned ops should match the expected for test case %d", i) for j := 0; j < len(prunedOps); j++ { assert.Equalf(t, c.expectedOps[j].Code, prunedOps[j].Code, "The pruned operation's code should match for test case %d", i) @@ -79,21 +78,21 @@ func TestVerifyInterpretation(t *testing.T) { testCases := []struct { index, value []byte - auditPath navigation.AuditPath + auditPath 
AuditPath expectedRootHash hashing.Digest }{ { // verify index=0 with empty audit path index: []byte{0}, value: []byte{0}, - auditPath: navigation.AuditPath{}, + auditPath: AuditPath{}, expectedRootHash: []byte{0}, }, { // verify index=0 index: []byte{0}, value: []byte{0}, - auditPath: navigation.AuditPath{ + auditPath: AuditPath{ pos(128, 7).StringId(): []byte{0x0}, pos(64, 6).StringId(): []byte{0x1}, pos(32, 5).StringId(): []byte{0x2}, @@ -105,8 +104,8 @@ func TestVerifyInterpretation(t *testing.T) { for i, c := range testCases { - ops := PruneToVerify(c.index, c.value, uint16(8-len(c.auditPath))) - ctx := &Context{ + ops := pruneToVerify(c.index, c.value, uint16(8-len(c.auditPath))) + ctx := &pruningContext{ Hasher: hashing.NewFakeXorHasher(), Cache: nil, DefaultHashes: nil, diff --git a/balloon/test_util.go b/balloon/test_util.go index 9e5d6f17a..1bd7e6ead 100644 --- a/balloon/test_util.go +++ b/balloon/test_util.go @@ -18,22 +18,21 @@ package balloon import ( "github.com/bbva/qed/balloon/history" - historynav "github.com/bbva/qed/balloon/history/navigation" + "github.com/bbva/qed/balloon/history/navigation" "github.com/bbva/qed/balloon/hyper" - "github.com/bbva/qed/balloon/hyper/navigation" "github.com/bbva/qed/hashing" ) func NewFakeQueryProof(shouldVerify bool, value []byte, hasher hashing.Hasher) *hyper.QueryProof { if shouldVerify { - return hyper.NewQueryProof([]byte{0}, value, navigation.AuditPath{"128|7": value}, hasher) + return hyper.NewQueryProof([]byte{0}, value, hyper.AuditPath{"128|7": value}, hasher) } - return hyper.NewQueryProof([]byte{0}, []byte{0}, navigation.AuditPath{}, hasher) + return hyper.NewQueryProof([]byte{0}, []byte{0}, hyper.AuditPath{}, hasher) } func NewFakeMembershipProof(shouldVerify bool, hasher hashing.Hasher) *history.MembershipProof { if shouldVerify { - return history.NewMembershipProof(0, 0, historynav.AuditPath{}, hasher) + return history.NewMembershipProof(0, 0, navigation.AuditPath{}, hasher) } - return history.NewMembershipProof(1, 1, historynav.AuditPath{}, hasher) + return history.NewMembershipProof(1, 1, navigation.AuditPath{}, hasher) } diff --git a/protocol/protocol.go b/protocol/protocol.go index d7ed6e6cc..aaea66271 100644 --- a/protocol/protocol.go +++ b/protocol/protocol.go @@ -22,7 +22,7 @@ import ( "github.com/bbva/qed/balloon" "github.com/bbva/qed/balloon/history" - historynav "github.com/bbva/qed/balloon/history/navigation" + "github.com/bbva/qed/balloon/history/navigation" "github.com/bbva/qed/balloon/hyper" "github.com/bbva/qed/gossip/member" "github.com/bbva/qed/hashing" @@ -145,7 +145,7 @@ func ToBalloonProof(mr *MembershipResult, hasherF func() hashing.Hasher) *balloo historyProof := history.NewMembershipProof( mr.ActualVersion, mr.QueryVersion, - historynav.ParseAuditPath(mr.History), + navigation.ParseAuditPath(mr.History), hasherF(), ) @@ -179,5 +179,5 @@ func ToIncrementalResponse(proof *balloon.IncrementalProof) *IncrementalResponse } func ToIncrementalProof(ir *IncrementalResponse, hasher hashing.Hasher) *balloon.IncrementalProof { - return balloon.NewIncrementalProof(ir.Start, ir.End, historynav.ParseAuditPath(ir.AuditPath), hasher) + return balloon.NewIncrementalProof(ir.Start, ir.End, navigation.ParseAuditPath(ir.AuditPath), hasher) }
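For reference, a minimal usage sketch of the consolidated API that this diff leaves behind: hyper.AuditPath and hyper.NewQueryProof are now addressed directly on the hyper package, so callers no longer import balloon/hyper/navigation or balloon/hyper/pruning. Only those identifiers and hashing.NewFakeXorHasher come from the patch above; the standalone main wrapper and the printed message are illustrative assumptions, and the import paths are spelled exactly as they appear in this diff.

package main

import (
	"fmt"

	"github.com/bbva/qed/balloon/hyper"
	"github.com/bbva/qed/hashing"
)

func main() {
	// Pre-refactor callers imported balloon/hyper/navigation for AuditPath and
	// balloon/hyper/pruning for the pruning helpers; after this change both
	// live in the hyper package itself.
	hasher := hashing.NewFakeXorHasher()
	proof := hyper.NewQueryProof([]byte{0x0}, []byte{0x0}, hyper.AuditPath{}, hasher)
	fmt.Printf("built a %T carrying a %T\n", proof, hyper.AuditPath{})
}

The same collapse does not apply on the history side of the diff, where only the historynav alias is dropped and the plain balloon/history/navigation import remains; it is the hyper subpackages that are folded into their parent here.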