This repository has been archived by the owner on Feb 1, 2023. It is now read-only.
Use shared peer task queue with Graphsync #119
Merged
Changes from all commits (2 commits)
One file was deleted in this PR; its contents are not shown here.
```diff
@@ -8,6 +8,9 @@ import (
 
 	bsmsg "github.com/ipfs/go-bitswap/message"
 	wl "github.com/ipfs/go-bitswap/wantlist"
+	cid "github.com/ipfs/go-cid"
+	"github.com/ipfs/go-peertaskqueue"
+	"github.com/ipfs/go-peertaskqueue/peertask"
 
 	blocks "github.com/ipfs/go-block-format"
 	bstore "github.com/ipfs/go-ipfs-blockstore"
```
```diff
@@ -73,7 +76,7 @@ type Engine struct {
 	// peerRequestQueue is a priority queue of requests received from peers.
 	// Requests are popped from the queue, packaged up, and placed in the
 	// outbox.
-	peerRequestQueue *prq
+	peerRequestQueue *peertaskqueue.PeerTaskQueue
 
 	// FIXME it's a bit odd for the client and the worker to both share memory
 	// (both modify the peerRequestQueue) and also to communicate over the
```
```diff
@@ -100,7 +103,7 @@ func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine {
 	e := &Engine{
 		ledgerMap:        make(map[peer.ID]*ledger),
 		bs:               bs,
-		peerRequestQueue: newPRQ(),
+		peerRequestQueue: peertaskqueue.New(),
 		outbox:           make(chan (<-chan *Envelope), outboxChanBuffer),
 		workSignal:       make(chan struct{}, 1),
 		ticker:           time.NewTicker(time.Millisecond * 100),
```
```diff
@@ -159,23 +162,23 @@ func (e *Engine) taskWorker(ctx context.Context) {
 // context is cancelled before the next Envelope can be created.
 func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
 	for {
-		nextTask := e.peerRequestQueue.Pop()
+		nextTask := e.peerRequestQueue.PopBlock()
 		for nextTask == nil {
 			select {
 			case <-ctx.Done():
 				return nil, ctx.Err()
 			case <-e.workSignal:
-				nextTask = e.peerRequestQueue.Pop()
+				nextTask = e.peerRequestQueue.PopBlock()
 			case <-e.ticker.C:
-				e.peerRequestQueue.thawRound()
-				nextTask = e.peerRequestQueue.Pop()
+				e.peerRequestQueue.ThawRound()
+				nextTask = e.peerRequestQueue.PopBlock()
 			}
 		}
 
 		// with a task in hand, we're ready to prepare the envelope...
 		msg := bsmsg.New(true)
-		for _, entry := range nextTask.Entries {
-			block, err := e.bs.Get(entry.Cid)
+		for _, entry := range nextTask.Tasks {
+			block, err := e.bs.Get(entry.Identifier.(cid.Cid))
 			if err != nil {
 				log.Errorf("tried to execute a task and errored fetching block: %s", err)
 				continue
```
```diff
@@ -186,15 +189,15 @@ func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
 		if msg.Empty() {
 			// If we don't have the block, don't hold that against the peer
 			// make sure to update that the task has been 'completed'
-			nextTask.Done(nextTask.Entries)
+			nextTask.Done(nextTask.Tasks)
 			continue
 		}
 
 		return &Envelope{
 			Peer:    nextTask.Target,
 			Message: msg,
 			Sent: func() {
-				nextTask.Done(nextTask.Entries)
+				nextTask.Done(nextTask.Tasks)
 				select {
 				case e.workSignal <- struct{}{}:
 					// work completing may mean that our queue will provide new
```
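For context on the new API surface used above: the engine pushes a block of tasks for a peer, pops the highest-priority block, and calls Done so the queue can update its accounting. Here is a minimal standalone sketch of that lifecycle. The import paths (which have changed across libp2p releases), the placeholder peer ID, and the string identifiers (bitswap uses cid.Cid) are assumptions, not part of this PR:

```go
package main

import (
	"fmt"

	"github.com/ipfs/go-peertaskqueue"
	"github.com/ipfs/go-peertaskqueue/peertask"
	peer "github.com/libp2p/go-libp2p-core/peer"
)

func main() {
	ptq := peertaskqueue.New()
	p := peer.ID("example-peer") // placeholder; normally a real libp2p peer ID

	// Queue a block of tasks for this peer. Identifier is an opaque
	// interface value; bitswap stores a cid.Cid in it.
	ptq.PushBlock(p,
		peertask.Task{Identifier: "block-a", Priority: 2},
		peertask.Task{Identifier: "block-b", Priority: 1},
	)

	// Pop the next block of tasks; PopBlock returns nil when the queue
	// is empty, which is why the engine's loop above waits on workSignal.
	next := ptq.PopBlock()
	if next == nil {
		return
	}
	for _, t := range next.Tasks {
		fmt.Println(next.Target, t.Identifier)
	}

	// Signal completion, mirroring nextTask.Done(nextTask.Tasks) above.
	next.Done(next.Tasks)
}
```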
```diff
@@ -246,7 +249,7 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) {
 	}
 
 	var msgSize int
-	var activeEntries []wl.Entry
+	var activeEntries []peertask.Task
 	for _, entry := range m.Wantlist() {
 		if entry.Cancel {
 			log.Debugf("%s cancel %s", p, entry.Cid)
```
```diff
@@ -265,17 +268,17 @@ func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) {
 				// we have the block
 				newWorkExists = true
 				if msgSize+blockSize > maxMessageSize {
-					e.peerRequestQueue.Push(p, activeEntries...)
-					activeEntries = []wl.Entry{}
+					e.peerRequestQueue.PushBlock(p, activeEntries...)
+					activeEntries = []peertask.Task{}
 					msgSize = 0
 				}
-				activeEntries = append(activeEntries, entry.Entry)
+				activeEntries = append(activeEntries, peertask.Task{Identifier: entry.Cid, Priority: entry.Priority})
 				msgSize += blockSize
 			}
 		}
 	}
 	if len(activeEntries) > 0 {
-		e.peerRequestQueue.Push(p, activeEntries...)
+		e.peerRequestQueue.PushBlock(p, activeEntries...)
 	}
 	for _, block := range m.Blocks() {
 		log.Debugf("got block %s %d bytes", block, len(block.RawData()))
```
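The batching logic in this hunk, isolated: accumulate tasks until adding the next block would exceed the message size cap, flush the batch as one PushBlock, and flush any remainder at the end. A simplified sketch of that pattern follows; the cap and block sizes here are illustrative, and the helper names are hypothetical:

```go
package main

import "fmt"

const maxMessageSize = 512 * 1024 // illustrative cap, in bytes

// task pairs an identifier with the size of the block it refers to.
type task struct {
	id   string
	size int
}

// batch groups tasks so no batch's total block size exceeds the cap,
// mirroring how MessageReceived flushes activeEntries above.
func batch(tasks []task) [][]task {
	var out [][]task
	var active []task
	msgSize := 0
	for _, t := range tasks {
		if msgSize+t.size > maxMessageSize && len(active) > 0 {
			out = append(out, active)
			active = nil
			msgSize = 0
		}
		active = append(active, t)
		msgSize += t.size
	}
	if len(active) > 0 {
		out = append(out, active)
	}
	return out
}

func main() {
	tasks := []task{{"a", 300 << 10}, {"b", 300 << 10}, {"c", 100 << 10}}
	for i, b := range batch(tasks) {
		fmt.Printf("batch %d: %d task(s)\n", i+1, len(b))
	}
}
```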
```diff
@@ -289,7 +292,10 @@ func (e *Engine) addBlock(block blocks.Block) {
 	for _, l := range e.ledgerMap {
 		l.lk.Lock()
 		if entry, ok := l.WantListContains(block.Cid()); ok {
-			e.peerRequestQueue.Push(l.Partner, entry)
+			e.peerRequestQueue.PushBlock(l.Partner, peertask.Task{
+				Identifier: entry.Cid,
+				Priority:   entry.Priority,
+			})
 			work = true
 		}
 		l.lk.Unlock()
```

Reviewer: I'm a bit worried about allocations here. This used to be a bit of a hot spot until we made entries and CIDs by-value. This is a great abstraction, so I'm not going to block on that, but we need to keep it in mind.

Author: Yeah, it's still technically by-value -- just a new value that makes a new allocation :) I will keep an eye on it as this goes into production.
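The allocation concern comes from Task.Identifier being an empty interface: the cid.Cid is still copied by value, but storing it in an interface field boxes it onto the heap, costing roughly one allocation per task. A minimal sketch of the effect, using a stand-in struct rather than a real cid.Cid:

```go
package main

import (
	"fmt"
	"testing"
)

// cidLike is a stand-in for cid.Cid: a small struct passed by value.
type cidLike struct{ str string }

// Package-level sinks keep the compiler from optimizing the writes away.
var (
	sinkValue cidLike
	sinkIface interface{}
)

func main() {
	c := cidLike{str: "QmExample"}

	// Plain struct copy: no heap allocation.
	byValue := testing.AllocsPerRun(1000, func() { sinkValue = c })

	// Storing the same struct in an interface{} (as Task.Identifier does)
	// boxes it onto the heap: typically one allocation per store.
	boxed := testing.AllocsPerRun(1000, func() { sinkIface = c })

	fmt.Printf("struct copy: %v allocs/op, interface store: %v allocs/op\n", byValue, boxed)
}
```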
Reviewer: How does graphsync use freeze/thaw?
Author: Graphsync does support cancellation, and long term it makes sense that if a request gets cancelled, we probably ought to wait and see if that peer is going to send other cancel requests, especially if we move to a context where requests are being broadcast.

However, I think it makes sense to have an option to turn freezing off, since at least in graphsync's current implementation it does not make a lot of sense. I will probably go ahead and implement that.
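What such an option could look like: a functional option on the queue constructor that disables freezing entirely. This is a sketch of the idea only; the IgnoreFreezing name and signature are assumptions, not an API this PR adds:

```go
package main

import "github.com/ipfs/go-peertaskqueue"

func main() {
	// Default behavior: a cancel freezes the peer until a later
	// ThawRound call (driven by the engine's ticker) unfreezes it.
	_ = peertaskqueue.New()

	// Sketch: disable freezing for consumers like graphsync that don't
	// benefit from it. (IgnoreFreezing is an assumed option name,
	// written as a functional option in the usual Go style.)
	_ = peertaskqueue.New(peertaskqueue.IgnoreFreezing(true))
}
```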