chore: rename dag to bServ
vgonkivs committed May 23, 2022
1 parent 3f00e54 commit 82530f9
Showing 20 changed files with 137 additions and 137 deletions.
das/daser_test.go: 18 additions & 18 deletions
@@ -23,10 +23,10 @@ var timeout = time.Second * 15
 // the DASer checkpoint is updated to network head.
 func TestDASerLifecycle(t *testing.T) {
 	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	dag := mdutils.Bserv()
+	bServ := mdutils.Bserv()
 
 	// 15 headers from the past and 15 future headers
-	mockGet, shareServ, sub := createDASerSubcomponents(t, dag, 15, 15)
+	mockGet, shareServ, sub := createDASerSubcomponents(t, bServ, 15, 15)
 
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
 	t.Cleanup(cancel)
@@ -61,10 +61,10 @@ func TestDASerLifecycle(t *testing.T) {
 
 func TestDASer_Restart(t *testing.T) {
 	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	dag := mdutils.Bserv()
+	bServ := mdutils.Bserv()
 
 	// 15 headers from the past and 15 future headers
-	mockGet, shareServ, sub := createDASerSubcomponents(t, dag, 15, 15)
+	mockGet, shareServ, sub := createDASerSubcomponents(t, bServ, 15, 15)
 
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
 	t.Cleanup(cancel)
@@ -85,10 +85,10 @@ func TestDASer_Restart(t *testing.T) {
 	require.NoError(t, err)
 
 	// reset mockGet, generate 15 "past" headers, building off chain head which is 30
-	mockGet.generateHeaders(t, dag, 30, 45)
+	mockGet.generateHeaders(t, bServ, 30, 45)
 	mockGet.doneCh = make(chan struct{})
 	// reset dummy subscriber
-	mockGet.fillSubWithHeaders(t, sub, dag, 45, 60)
+	mockGet.fillSubWithHeaders(t, sub, bServ, 45, 60)
 	// manually set mockGet head to trigger stop at 45
 	mockGet.head = int64(45)
 
@@ -124,9 +124,9 @@ func TestDASer_Restart(t *testing.T) {
 
 func TestDASer_catchUp(t *testing.T) {
 	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	dag := mdutils.Bserv()
+	bServ := mdutils.Bserv()
 
-	mockGet, shareServ, _ := createDASerSubcomponents(t, dag, 5, 0)
+	mockGet, shareServ, _ := createDASerSubcomponents(t, bServ, 5, 0)
 
 	ctx, cancel := context.WithCancel(context.Background())
 	t.Cleanup(cancel)
@@ -165,9 +165,9 @@ func TestDASer_catchUp(t *testing.T) {
 // difference of 1
 func TestDASer_catchUp_oneHeader(t *testing.T) {
 	ds := ds_sync.MutexWrap(datastore.NewMapDatastore())
-	dag := mdutils.Bserv()
+	bServ := mdutils.Bserv()
 
-	mockGet, shareServ, _ := createDASerSubcomponents(t, dag, 6, 0)
+	mockGet, shareServ, _ := createDASerSubcomponents(t, bServ, 6, 0)
 	daser := NewDASer(shareServ, nil, mockGet, ds)
 
 	// store checkpoint
@@ -213,21 +213,21 @@ func TestDASer_catchUp_oneHeader(t *testing.T) {
 // mockGetter, share.Service, and mock header.Subscriber.
 func createDASerSubcomponents(
 	t *testing.T,
-	dag blockservice.BlockService,
+	bServ blockservice.BlockService,
 	numGetter,
 	numSub int,
 ) (*mockGetter, *share.Service, *header.DummySubscriber) {
-	shareServ := share.NewService(dag, share.NewLightAvailability(dag))
+	shareServ := share.NewService(bServ, share.NewLightAvailability(bServ))
 
 	mockGet := &mockGetter{
 		headers: make(map[int64]*header.ExtendedHeader),
 		doneCh:  make(chan struct{}),
 	}
 
-	mockGet.generateHeaders(t, dag, 0, numGetter)
+	mockGet.generateHeaders(t, bServ, 0, numGetter)
 
 	sub := new(header.DummySubscriber)
-	mockGet.fillSubWithHeaders(t, sub, dag, numGetter, numGetter+numSub)
+	mockGet.fillSubWithHeaders(t, sub, bServ, numGetter, numGetter+numSub)
 
 	return mockGet, shareServ, sub
 }
@@ -236,15 +236,15 @@ func createDASerSubcomponents(
 func (m *mockGetter) fillSubWithHeaders(
 	t *testing.T,
 	sub *header.DummySubscriber,
-	dag blockservice.BlockService,
+	bServ blockservice.BlockService,
 	startHeight,
 	endHeight int,
 ) {
 	sub.Headers = make([]*header.ExtendedHeader, endHeight-startHeight)
 
 	index := 0
 	for i := startHeight; i < endHeight; i++ {
-		dah := share.RandFillDAG(t, 16, dag)
+		dah := share.RandFillDAG(t, 16, bServ)
 
 		randHeader := header.RandExtendedHeader(t)
 		randHeader.DataHash = dah.Hash()
@@ -266,9 +266,9 @@ type mockGetter struct {
 	headers map[int64]*header.ExtendedHeader
 }
 
-func (m *mockGetter) generateHeaders(t *testing.T, dag blockservice.BlockService, startHeight, endHeight int) {
+func (m *mockGetter) generateHeaders(t *testing.T, bServ blockservice.BlockService, startHeight, endHeight int) {
 	for i := startHeight; i < endHeight; i++ {
-		dah := share.RandFillDAG(t, 16, dag)
+		dah := share.RandFillDAG(t, 16, bServ)
 
 		randHeader := header.RandExtendedHeader(t)
 		randHeader.DataHash = dah.Hash()
fraud/bad_encoding_test.go: 3 additions & 3 deletions
@@ -14,15 +14,15 @@ import (
 )
 
 func TestFraudProofValidation(t *testing.T) {
-	dag := mdutils.Bserv()
+	bServ := mdutils.Bserv()
 	eds := ipld.RandEDS(t, 2)
 
 	shares := ipld.ExtractEDS(eds)
 	copy(shares[3][8:], shares[4][8:])
-	eds, err := ipld.ImportShares(context.Background(), shares, dag)
+	eds, err := ipld.ImportShares(context.Background(), shares, bServ)
 	require.NoError(t, err)
 	da := da.NewDataAvailabilityHeader(eds)
-	r := ipld.NewRetriever(dag)
+	r := ipld.NewRetriever(bServ)
 	_, err = r.Retrieve(context.Background(), &da)
 	var errByz *ipld.ErrByzantine
 	require.True(t, errors.As(err, &errByz))
header/core/exchange.go: 2 additions & 2 deletions
@@ -22,10 +22,10 @@ type Exchange struct {
 	construct  header.ConstructFn
 }
 
-func NewExchange(fetcher *core.BlockFetcher, dag blockservice.BlockService, construct header.ConstructFn) *Exchange {
+func NewExchange(fetcher *core.BlockFetcher, bServ blockservice.BlockService, construct header.ConstructFn) *Exchange {
 	return &Exchange{
 		fetcher:    fetcher,
-		shareStore: dag,
+		shareStore: bServ,
 		construct:  construct,
 	}
 }
header/core/listener.go: 4 additions & 4 deletions
@@ -22,21 +22,21 @@ import (
 type Listener struct {
 	bcast     header.Broadcaster
 	fetcher   *core.BlockFetcher
-	dag       blockservice.BlockService
+	bServ     blockservice.BlockService
 	construct header.ConstructFn
 	cancel    context.CancelFunc
 }
 
 func NewListener(
 	bcast header.Broadcaster,
 	fetcher *core.BlockFetcher,
-	dag blockservice.BlockService,
+	bServ blockservice.BlockService,
 	construct header.ConstructFn,
 ) *Listener {
 	return &Listener{
 		bcast:     bcast,
 		fetcher:   fetcher,
-		dag:       dag,
+		bServ:     bServ,
 		construct: construct,
 	}
 }
@@ -89,7 +89,7 @@ func (cl *Listener) listen(ctx context.Context, sub <-chan *types.Block) {
 			return
 		}
 
-		eh, err := cl.construct(ctx, b, comm, vals, cl.dag)
+		eh, err := cl.construct(ctx, b, comm, vals, cl.bServ)
 		if err != nil {
 			log.Errorw("listener: making extended header", "err", err)
 			return
header/header.go: 2 additions & 2 deletions
@@ -43,12 +43,12 @@ func MakeExtendedHeader(
 	b *core.Block,
 	comm *core.Commit,
 	vals *core.ValidatorSet,
-	dag blockservice.BlockService,
+	bServ blockservice.BlockService,
 ) (*ExtendedHeader, error) {
 	var dah DataAvailabilityHeader
 	if len(b.Txs) > 0 {
 		namespacedShares, _ := b.Data.ComputeShares()
-		extended, err := ipld.AddShares(ctx, namespacedShares.RawShares(), dag)
+		extended, err := ipld.AddShares(ctx, namespacedShares.RawShares(), bServ)
 		if err != nil {
 			return nil, err
 		}
ipld/add.go: 2 additions & 2 deletions
@@ -15,7 +15,7 @@ import (
 	"github.com/tendermint/tendermint/pkg/wrapper"
 )
 
-// AddShares erasures and extends shares to IPLD DAG using the provided ipld.NodeAdder.
+// AddShares erasures and extends shares to blockservice.BlockService using the provided ipld.NodeAdder.
 func AddShares(
 	ctx context.Context,
 	shares []Share,
@@ -41,7 +41,7 @@
 	return eds, batchAdder.Commit()
 }
 
-// ImportShares imports flattend chunks of data into Extended Data square and saves it in IPLD DAG
+// ImportShares imports flattend chunks of data into Extended Data square and saves it in blockservice.BlockService
 func ImportShares(
 	ctx context.Context,
 	shares [][]byte,
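
For context on the API touched above: a minimal sketch, not part of this commit, of how ImportShares round-trips a square through an in-memory BlockService. It is modeled on TestFraudProofValidation above; the import paths and the ipld.RandEDS/ipld.ExtractEDS helpers are assumptions taken from this repository's tests.

package ipld_test

import (
	"context"
	"testing"

	mdutils "github.com/ipfs/go-merkledag/test"
	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-node/ipld"
)

// TestImportSharesRoundTrip is a hypothetical example: it flattens a random
// extended data square and re-imports the shares into an in-memory
// blockservice.BlockService.
func TestImportSharesRoundTrip(t *testing.T) {
	bServ := mdutils.Bserv() // in-memory BlockService, same helper as the tests above

	eds := ipld.RandEDS(t, 2)      // random extended data square (test helper)
	shares := ipld.ExtractEDS(eds) // flatten the square into raw shares

	// ImportShares rebuilds the square from the flattened shares and stores
	// its nodes in the given BlockService.
	imported, err := ipld.ImportShares(context.Background(), shares, bServ)
	require.NoError(t, err)
	require.Equal(t, eds.Width(), imported.Width())
}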
ipld/get.go: 18 additions & 18 deletions
@@ -16,12 +16,12 @@
 // GetShare fetches and returns the data for leaf `leafIndex` of root `rootCid`.
 func GetShare(
 	ctx context.Context,
-	dag blockservice.BlockGetter,
+	bGetter blockservice.BlockGetter,
 	rootCid cid.Cid,
 	leafIndex int,
 	totalLeafs int, // this corresponds to the extended square width
 ) (Share, error) {
-	nd, err := GetLeaf(ctx, dag, rootCid, leafIndex, totalLeafs)
+	nd, err := GetLeaf(ctx, bGetter, rootCid, leafIndex, totalLeafs)
 	if err != nil {
 		return nil, err
 	}
@@ -31,9 +31,9 @@
 
 // GetLeaf fetches and returns the raw leaf.
 // It walks down the IPLD NMT tree until it finds the requested one.
-func GetLeaf(ctx context.Context, dag blockservice.BlockGetter, root cid.Cid, leaf, total int) (ipld.Node, error) {
+func GetLeaf(ctx context.Context, bGetter blockservice.BlockGetter, root cid.Cid, leaf, total int) (ipld.Node, error) {
 	// request the node
-	nd, err := plugin.Get(ctx, dag, root)
+	nd, err := plugin.Get(ctx, bGetter, root)
 	if err != nil {
 		return nil, err
 	}
@@ -42,7 +42,7 @@ func GetLeaf(ctx context.Context, dag blockservice.BlockGetter, root cid.Cid, leaf, total int) (ipld.Node, error) {
 	lnks := nd.Links()
 	if len(lnks) == 1 {
 		// in case there is only one we reached tree's bottom, so finally request the leaf.
-		return plugin.Get(ctx, dag, lnks[0].Cid)
+		return plugin.Get(ctx, bGetter, lnks[0].Cid)
 	}
 
 	// route walk to appropriate children
@@ -54,14 +54,14 @@ func GetLeaf(ctx context.Context, dag blockservice.BlockGetter, root cid.Cid, leaf, total int) (ipld.Node, error) {
 	}
 
 	// recursively walk down through selected children
-	return GetLeaf(ctx, dag, root, leaf, total)
+	return GetLeaf(ctx, bGetter, root, leaf, total)
 }
 
 // GetProofsForShares fetches Merkle proofs for the given shares
 // and returns the result as an array of ShareWithProof.
 func GetProofsForShares(
 	ctx context.Context,
-	dag blockservice.BlockGetter,
+	bGetter blockservice.BlockGetter,
 	root cid.Cid,
 	shares [][]byte,
 ) ([]*ShareWithProof, error) {
@@ -71,11 +71,11 @@ func GetProofsForShares(
 			proof := make([]cid.Cid, 0)
 			// TODO(@vgonkivs): Combine GetLeafData and GetProof in one function as the are traversing the same tree.
 			// Add options that will control what data will be fetched.
-			s, err := GetLeaf(ctx, dag, root, index, len(shares))
+			s, err := GetLeaf(ctx, bGetter, root, index, len(shares))
 			if err != nil {
 				return nil, err
 			}
-			proof, err = GetProof(ctx, dag, root, proof, index, len(shares))
+			proof, err = GetProof(ctx, bGetter, root, proof, index, len(shares))
 			if err != nil {
 				return nil, err
 			}
@@ -90,13 +90,13 @@ func GetProofsForShares(
 // It walks down the IPLD NMT tree until it reaches the leaf and returns collected proof
 func GetProof(
 	ctx context.Context,
-	dag blockservice.BlockGetter,
+	bGetter blockservice.BlockGetter,
 	root cid.Cid,
 	proof []cid.Cid,
 	leaf, total int,
 ) ([]cid.Cid, error) {
 	// request the node
-	nd, err := plugin.Get(ctx, dag, root)
+	nd, err := plugin.Get(ctx, bGetter, root)
 	if err != nil {
 		return nil, err
 	}
@@ -115,26 +115,26 @@
 		proof = append(proof, lnks[1].Cid)
 	} else {
 		root, leaf = lnks[1].Cid, leaf-total // otherwise go down the second
-		proof, err = GetProof(ctx, dag, root, proof, leaf, total)
+		proof, err = GetProof(ctx, bGetter, root, proof, leaf, total)
 		if err != nil {
 			return nil, err
 		}
 		return append(proof, lnks[0].Cid), nil
 	}
 
 	// recursively walk down through selected children
-	return GetProof(ctx, dag, root, proof, leaf, total)
+	return GetProof(ctx, bGetter, root, proof, leaf, total)
 }
 
 // GetSharesByNamespace returns all the shares from the given root
 // with the given namespace.ID.
 func GetSharesByNamespace(
 	ctx context.Context,
-	dag blockservice.BlockGetter,
+	bGetter blockservice.BlockGetter,
 	root cid.Cid,
 	nID namespace.ID,
 ) ([]Share, error) {
-	leaves, err := GetLeavesByNamespace(ctx, dag, root, nID)
+	leaves, err := GetLeavesByNamespace(ctx, bGetter, root, nID)
 	if err != nil {
 		return nil, err
 	}
@@ -151,7 +151,7 @@
 // If nothing is found it returns both data and err as nil.
 func GetLeavesByNamespace(
 	ctx context.Context,
-	dag blockservice.BlockGetter,
+	bGetter blockservice.BlockGetter,
 	root cid.Cid,
 	nID namespace.ID,
 ) ([]ipld.Node, error) {
@@ -164,7 +164,7 @@
 		return nil, nil
 	}
 	// request the node
-	nd, err := plugin.Get(ctx, dag, root)
+	nd, err := plugin.Get(ctx, bGetter, root)
 	if err != nil {
 		return nil, err
 	}
@@ -177,7 +177,7 @@
 	// if there are some links, then traverse them
 	var out []ipld.Node
 	for _, lnk := range nd.Links() {
-		nds, err := GetLeavesByNamespace(ctx, dag, lnk.Cid, nID)
+		nds, err := GetLeavesByNamespace(ctx, bGetter, lnk.Cid, nID)
 		if err != nil {
 			return out, err
 		}
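
The read path renamed above can be exercised the same way. The sketch below is hypothetical: it assumes the signatures shown in this diff, da.NewDataAvailabilityHeader as used in the fraud test above, and a plugin.MustCidFromNamespacedSha256 helper (name assumed) for turning a row root into the CID that GetShare expects.

package ipld_test

import (
	"context"
	"testing"

	mdutils "github.com/ipfs/go-merkledag/test"
	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-app/pkg/da" // import path assumed
	"github.com/celestiaorg/celestia-node/ipld"
	"github.com/celestiaorg/celestia-node/ipld/plugin"
)

// TestGetShareSketch is a hypothetical example: it stores a random square,
// then fetches a single leaf back through GetShare. A BlockService satisfies
// the blockservice.BlockGetter interface that GetShare now takes.
func TestGetShareSketch(t *testing.T) {
	ctx := context.Background()
	bServ := mdutils.Bserv()

	eds, err := ipld.ImportShares(ctx, ipld.ExtractEDS(ipld.RandEDS(t, 2)), bServ)
	require.NoError(t, err)

	dah := da.NewDataAvailabilityHeader(eds)
	width := int(eds.Width()) // extended square width == number of leaves per row

	// Row root to CID conversion; helper name and RowsRoots field are assumptions.
	rootCid := plugin.MustCidFromNamespacedSha256(dah.RowsRoots[0])

	share, err := ipld.GetShare(ctx, bServ, rootCid, 0, width)
	require.NoError(t, err)
	require.NotNil(t, share)
}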
ipld/get_shares.go: 3 additions & 3 deletions
@@ -49,7 +49,7 @@ var pool = workerpool.New(NumWorkersLimit)
 // tree, so it's not suitable for anything else besides that. Parts on the
 // implementation that rely on this property are explicitly tagged with
 // (bin-tree-feat).
-func GetShares(ctx context.Context, dag blockservice.BlockGetter, root cid.Cid, shares int, put func(int, Share)) {
+func GetShares(ctx context.Context, bGetter blockservice.BlockGetter, root cid.Cid, shares int, put func(int, Share)) {
 	// job is not used anywhere else, so can be kept here
 	type job struct {
 		id cid.Cid
@@ -72,7 +72,7 @@ func GetShares(ctx context.Context, dag blockservice.BlockGetter, root cid.Cid, shares int, put func(int, Share)) {
 		// processing of each other
 		pool.Submit(func() {
 			defer wg.Done()
-			nd, err := plugin.Get(ctx, dag, j.id)
+			nd, err := plugin.Get(ctx, bGetter, j.id)
 			if err != nil {
 				// we don't really care about errors here
 				// just fetch as much as possible
@@ -83,7 +83,7 @@ func GetShares(ctx context.Context, dag blockservice.BlockGetter, root cid.Cid, shares int, put func(int, Share)) {
 			if len(lnks) == 1 { // so we are almost there
 				// the reason why the comment on 'total' is lying, as each
 				// leaf has its own additional leaf(hack) so get it
-				nd, err := plugin.Get(ctx, dag, lnks[0].Cid)
+				nd, err := plugin.Get(ctx, bGetter, lnks[0].Cid)
 				if err != nil {
 					// again, we don't care
 					return
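
The put callback contract described in GetShares' doc comment can be sketched as follows, under the same assumptions as the previous example. Because leaves are fetched on worker-pool goroutines, the sketch counts callback invocations on a channel rather than assuming GetShares blocks until every leaf has arrived.

package ipld_test

import (
	"context"
	"testing"

	mdutils "github.com/ipfs/go-merkledag/test"
	"github.com/stretchr/testify/require"

	"github.com/celestiaorg/celestia-app/pkg/da" // import path assumed
	"github.com/celestiaorg/celestia-node/ipld"
	"github.com/celestiaorg/celestia-node/ipld/plugin"
)

// TestGetSharesSketch is a hypothetical example: it retrieves one row of a
// stored square through the put callback, one invocation per leaf.
func TestGetSharesSketch(t *testing.T) {
	ctx := context.Background()
	bServ := mdutils.Bserv()

	eds, err := ipld.ImportShares(ctx, ipld.ExtractEDS(ipld.RandEDS(t, 2)), bServ)
	require.NoError(t, err)

	dah := da.NewDataAvailabilityHeader(eds)
	width := int(eds.Width())
	rootCid := plugin.MustCidFromNamespacedSha256(dah.RowsRoots[0]) // assumed helper

	row := make([]ipld.Share, width)
	done := make(chan struct{}, width)
	ipld.GetShares(ctx, bServ, rootCid, width, func(i int, sh ipld.Share) {
		row[i] = sh // callbacks may fire from pool workers
		done <- struct{}{}
	})
	for i := 0; i < width; i++ {
		<-done // wait for every leaf before reading the row
	}
	require.NotNil(t, row[0])
}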
(Diffs for the remaining 12 changed files were not loaded.)
