From 579eda7e4110dd71f14891adf58464f790346e13 Mon Sep 17 00:00:00 2001 From: Dirk McCormick Date: Thu, 21 Feb 2019 09:18:47 -0500 Subject: [PATCH] chore: callbacks -> async / await --- package.json | 13 +- src/index.js | 523 ++++++------- src/network.js | 163 ++-- src/peer-queue.js | 52 +- src/private.js | 649 ++++++++-------- src/providers.js | 228 +++--- src/query.js | 206 +++-- src/random-walk.js | 131 ++-- src/routing.js | 55 +- src/rpc/handlers/add-provider.js | 11 +- src/rpc/handlers/find-node.js | 37 +- src/rpc/handlers/get-providers.js | 73 +- src/rpc/handlers/get-value.js | 43 +- src/rpc/handlers/index.js | 2 +- src/rpc/handlers/ping.js | 7 +- src/rpc/handlers/put-value.js | 31 +- src/rpc/index.js | 34 +- src/utils.js | 105 ++- src/worker-queue.js | 118 +++ test/kad-dht.spec.js | 951 +++++++++--------------- test/kad-utils.spec.js | 129 ++-- test/limited-peer-list.spec.js | 10 +- test/message.spec.js | 108 ++- test/network.spec.js | 168 ++--- test/peer-list.spec.js | 10 +- test/peer-queue.spec.js | 52 +- test/providers.spec.js | 188 ++--- test/query.spec.js | 157 ++-- test/routing.spec.js | 140 ++-- test/rpc/handlers/add-provider.spec.js | 83 +-- test/rpc/handlers/find-node.spec.js | 77 +- test/rpc/handlers/get-providers.spec.js | 96 +-- test/rpc/handlers/get-value.spec.js | 124 ++- test/rpc/handlers/ping.spec.js | 32 +- test/rpc/handlers/put-value.spec.js | 63 +- test/rpc/index.spec.js | 47 +- test/utils/create-disjoint-tracks.js | 118 ++- test/utils/create-peer-info.js | 11 +- test/utils/create-values.js | 13 +- test/utils/test-dht.js | 82 +- test/worker-queue.spec.js | 127 ++++ 41 files changed, 2451 insertions(+), 2816 deletions(-) create mode 100644 src/worker-queue.js create mode 100644 test/worker-queue.spec.js diff --git a/package.json b/package.json index 914439e9..0ff5c510 100644 --- a/package.json +++ b/package.json @@ -45,14 +45,15 @@ "err-code": "^1.1.2", "hashlru": "^2.3.0", "heap": "~0.2.6", - "interface-datastore": "~0.6.0", + 
"interface-datastore": "ipfs/interface-datastore#refactor/async-iterators", "k-bucket": "^5.0.0", - "libp2p-crypto": "~0.16.0", - "libp2p-record": "~0.6.2", + "libp2p-crypto": "libp2p/js-libp2p-crypto#feat/async-await", + "libp2p-record": "dirkmc/js-libp2p-record#feat/async-await", "multihashes": "~0.4.14", - "multihashing-async": "~0.5.2", - "peer-id": "~0.12.2", - "peer-info": "~0.15.1", + "multihashing-async": "multiformats/js-multihashing-async#feat/async-iterators", + "p-queue": "^3.1.0", + "peer-id": "libp2p/js-peer-id#feat/async-await", + "peer-info": "libp2p/js-peer-info#feat/async-await", "priorityqueue": "~0.2.1", "protons": "^1.0.1", "pull-length-prefixed": "^1.3.1", diff --git a/src/index.js b/src/index.js index dc334b28..07f0c46d 100644 --- a/src/index.js +++ b/src/index.js @@ -3,9 +3,6 @@ const { EventEmitter } = require('events') const libp2pRecord = require('libp2p-record') const MemoryStore = require('interface-datastore').MemoryDatastore -const waterfall = require('async/waterfall') -const each = require('async/each') -const timeout = require('async/timeout') const PeerId = require('peer-id') const PeerInfo = require('peer-info') const crypto = require('libp2p-crypto') @@ -37,7 +34,7 @@ class KadDHT extends EventEmitter { * @param {number} options.kBucketSize k-bucket size (default 20) * @param {Datastore} options.datastore datastore (default MemoryDatastore) * @param {boolean} options.enabledDiscovery enable dht discovery (default true) - * @param {object} options.validators validators object with namespace as keys and function(key, record, callback) + * @param {object} options.validators validators object with namespace as keys and function(key, record) * @param {object} options.selectors selectors object with namespace as keys and function(key, records) */ constructor (sw, options) { @@ -132,35 +129,27 @@ class KadDHT extends EventEmitter { /** * Start listening to incoming connections. 
* - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} */ - start (callback) { + async start () { this._running = true - this.network.start((err) => { - if (err) { - return callback(err) - } + await this.network.start() - // Start random walk if enabled - this.randomWalkEnabled && this.randomWalk.start() - callback() - }) + // Start random walk if enabled + this.randomWalkEnabled && this.randomWalk.start() } /** * Stop accepting incoming connections and sending outgoing * messages. * - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} */ - stop (callback) { + async stop () { this._running = false - this.randomWalk.stop(() => { // guarantee that random walk is stopped if it was started - this.providers.stop() - this.network.stop(callback) - }) + await this.randomWalk.stop() // guarantee that random walk is stopped if it was started + this.providers.stop() + return this.network.stop() } /** @@ -177,26 +166,25 @@ class KadDHT extends EventEmitter { } /** - * Store the given key/value pair in the DHT. + * Store the given key/value pair in the DHT. 
* * @param {Buffer} key * @param {Buffer} value - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} */ - put (key, value, callback) { + put (key, value) { this._log('PutValue %b', key) - waterfall([ - (cb) => utils.createPutRecord(key, value, cb), - (rec, cb) => waterfall([ - (cb) => this._putLocal(key, rec, cb), - (cb) => this.getClosestPeers(key, cb), - (peers, cb) => each(peers, (peer, cb) => { - this._putValueToPeer(key, rec, peer, cb) - }, cb) - ], cb) - ], callback) + const rec = utils.createPutRecord(key, value) + return Promise.all([ + this._putLocal(key, rec), + (async () => { + const peers = await this.getClosestPeers(key) + return Promise.all(peers.map((peer) => { + return this._putValueToPeer(key, rec, peer) + })) + })() + ]) } /** @@ -205,25 +193,17 @@ class KadDHT extends EventEmitter { * * @param {Buffer} key * @param {Object} options - get options - * @param {number} options.timeout - optional timeout (default: 60000) - * @param {function(Error, Buffer)} callback - * @returns {void} + * @param {number} options.timeout - optional timeout in ms (default: 60000) + * @returns {Promise} */ - get (key, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } else { - options = options || {} - } - + get (key, options = {}) { if (!options.maxTimeout && !options.timeout) { options.timeout = c.minute // default } else if (options.maxTimeout && !options.timeout) { // TODO this will be deprecated in a next release options.timeout = options.maxTimeout } - this._get(key, options, callback) + return this._get(key, options) } /** @@ -233,17 +213,9 @@ class KadDHT extends EventEmitter { * @param {number} nvals * @param {Object} options - get options * @param {number} options.timeout - optional timeout (default: 60000) - * @param {function(Error, Array<{from: PeerId, val: Buffer}>)} callback - * @returns {void} + * @returns {Promise>} */ - getMany (key, nvals, options, callback) { - if (typeof 
options === 'function') { - callback = options - options = {} - } else { - options = options || {} - } - + async getMany (key, nvals, options = {}) { if (!options.maxTimeout && !options.timeout) { options.timeout = c.minute // default } else if (options.maxTimeout && !options.timeout) { // TODO this will be deprecated in a next release @@ -253,145 +225,133 @@ class KadDHT extends EventEmitter { this._log('getMany %b (%s)', key, nvals) let vals = [] - this._getLocal(key, (err, localRec) => { - if (err && nvals === 0) { - return callback(err) - } + let localRec, err + try { + localRec = await this._getLocal(key) + } catch (e) { + err = e + } - if (err == null) { - vals.push({ - val: localRec.value, - from: this.peerInfo.id - }) - } + if (err && nvals === 0) { + throw err + } - if (nvals <= 1) { - return callback(null, vals) - } + if (!err) { + vals.push({ + val: localRec.value, + from: this.peerInfo.id + }) + } + + if (nvals <= 1) { + return vals + } + + const paths = [] + const id = await utils.convertBuffer(key) + + const rtp = this.routingTable.closestPeers(id, c.ALPHA) - const paths = [] - waterfall([ - (cb) => utils.convertBuffer(key, cb), - (id, cb) => { - const rtp = this.routingTable.closestPeers(id, c.ALPHA) + this._log('peers in rt: %d', rtp.length) + if (rtp.length === 0) { + const errMsg = 'Failed to lookup key! No peers from routing table!' - this._log('peers in rt: %d', rtp.length) - if (rtp.length === 0) { - const errMsg = 'Failed to lookup key! No peers from routing table!' 
+ this._log.error(errMsg) + throw errcode(errMsg, 'ERR_NO_PEERS_IN_ROUTING_TABLE') + } - this._log.error(errMsg) - return cb(errcode(new Error(errMsg), 'ERR_NO_PEERS_IN_ROUTING_TABLE')) + // we have peers, lets do the actual query to them + const query = new Query(this, key, (pathIndex, numPaths) => { + // This function body runs once per disjoint path + const pathSize = utils.pathSize(nvals - vals.length, numPaths) + const pathVals = [] + paths.push(pathVals) + + // Here we return the query function to use on this particular disjoint path + return async (peer) => { + let valueOrPeers + try { + valueOrPeers = await this._getValueOrPeers(peer, key) + } catch (err) { + // If we have an invalid record we just want to continue and fetch a new one. + if (err.code !== 'ERR_INVALID_RECORD') { + throw err } + } + let { record, peers } = valueOrPeers || {} - // we have peers, lets do the actual query to them - const query = new Query(this, key, (pathIndex, numPaths) => { - // This function body runs once per disjoint path - const pathSize = utils.pathSize(nvals - vals.length, numPaths) - const pathVals = [] - paths.push(pathVals) - - // Here we return the query function to use on this particular disjoint path - return (peer, cb) => { - this._getValueOrPeers(peer, key, (err, rec, peers) => { - if (err) { - // If we have an invalid record we just want to continue and fetch a new one. 
- if (!(err.code === 'ERR_INVALID_RECORD')) { - return cb(err) - } - } - - const res = { closerPeers: peers } - - if ((rec && rec.value) || (err && err.code === 'ERR_INVALID_RECORD')) { - pathVals.push({ - val: rec && rec.value, - from: peer - }) - } - - // enough is enough - if (pathVals.length >= pathSize) { - res.success = true - } - - cb(null, res) - }) - } - }) + const res = { closerPeers: peers } - // run our query - timeout((cb) => query.run(rtp, cb), options.timeout)(cb) + if ((record && record.value) || (err && err.code === 'ERR_INVALID_RECORD')) { + pathVals.push({ + val: record && record.value, + from: peer + }) } - ], (err) => { - // combine vals from each path - vals = [].concat.apply(vals, paths).slice(0, nvals) - if (err && vals.length === 0) { - return callback(err) + // enough is enough + if (pathVals.length >= pathSize) { + res.success = true } - callback(null, vals) - }) + return res + } }) + + // run our query + await query.run(rtp, options.timeout) + + // combine vals from each path + vals = [].concat.apply(vals, paths).slice(0, nvals) + + if (err && vals.length === 0) { + throw err + } + + return vals } /** * Kademlia 'node lookup' operation. * * @param {Buffer} key - * @param {function(Error, Array)} callback - * @returns {void} + * @returns {Promise>} */ - getClosestPeers (key, callback) { + async getClosestPeers (key) { this._log('getClosestPeers to %b', key) - utils.convertBuffer(key, (err, id) => { - if (err) { - return callback(err) - } - - const tablePeers = this.routingTable.closestPeers(id, c.ALPHA) - - const q = new Query(this, key, () => { - // There is no distinction between the disjoint paths, - // so there are no per-path variables in this scope. - // Just return the actual query function. 
- return (peer, callback) => { - waterfall([ - (cb) => this._closerPeersSingle(key, peer, cb), - (closer, cb) => { - cb(null, { - closerPeers: closer - }) - } - ], callback) + const id = await utils.convertBuffer(key) + + const tablePeers = this.routingTable.closestPeers(id, c.ALPHA) + + const q = new Query(this, key, () => { + // There is no distinction between the disjoint paths, + // so there are no per-path variables in this scope. + // Just return the actual query function. + return async (peer) => { + const closer = await this._closerPeersSingle(key, peer) + return { + closerPeers: closer } - }) + } + }) - q.run(tablePeers, (err, res) => { - if (err) { - return callback(err) - } + const res = await q.run(tablePeers) - if (!res || !res.finalSet) { - return callback(null, []) - } + if (!res || !res.finalSet) { + return [] + } - waterfall([ - (cb) => utils.sortClosestPeers(Array.from(res.finalSet), id, cb), - (sorted, cb) => cb(null, sorted.slice(0, c.K)) - ], callback) - }) - }) + const sorted = await utils.sortClosestPeers(Array.from(res.finalSet), id) + return sorted.slice(0, c.K) } /** * Get the public key for the given peer id. 
* * @param {PeerId} peer - * @param {function(Error, PubKey)} callback - * @returns {void} + * @returns {Promise} */ - getPublicKey (peer, callback) { + async getPublicKey (peer) { this._log('getPublicKey %s', peer.toB58String()) // local check let info @@ -400,34 +360,32 @@ class KadDHT extends EventEmitter { if (info && info.id.pubKey) { this._log('getPublicKey: found local copy') - return callback(null, info.id.pubKey) + return info.id.pubKey } } else { info = this.peerBook.put(new PeerInfo(peer)) } - // try the node directly - this._getPublicKeyFromNode(peer, (err, pk) => { - if (!err) { - info.id = new PeerId(peer.id, null, pk) - this.peerBook.put(info) - return callback(null, pk) - } + // try the node directly + try { + const pk = await this._getPublicKeyFromNode(peer) + info.id = new PeerId(peer.id, null, pk) + this.peerBook.put(info) + + return pk + } catch (err) { + this._log('getPublicKey: could not get public key from node: %s', err) + } - // dht directly - const pkKey = utils.keyForPublicKey(peer) - this.get(pkKey, (err, value) => { - if (err) { - return callback(err) - } + // dht directly + const pkKey = utils.keyForPublicKey(peer) + const value = await this.get(pkKey) - const pk = crypto.unmarshalPublicKey(value) - info.id = new PeerId(peer, null, pk) - this.peerBook.put(info) + const pk = crypto.unmarshalPublicKey(value) + info.id = new PeerId(peer, null, pk) + this.peerBook.put(info) - callback(null, pk) - }) - }) + return pk } /** @@ -435,20 +393,17 @@ class KadDHT extends EventEmitter { * Returns the `PeerInfo` for it, if found, otherwise `undefined`. 
* * @param {PeerId} peer - * @param {function(Error, PeerInfo)} callback - * @returns {void} + * @returns {Promise} */ - findPeerLocal (peer, callback) { + async findPeerLocal (peer) { this._log('findPeerLocal %s', peer.toB58String()) - this.routingTable.find(peer, (err, p) => { - if (err) { - return callback(err) - } - if (!p || !this.peerBook.has(p)) { - return callback() - } - callback(null, this.peerBook.get(p)) - }) + const p = await this.routingTable.find(peer) + + if (!p || !this.peerBook.has(p)) { + return + } + + return this.peerBook.get(p) } // ----------- Content Routing @@ -457,25 +412,22 @@ class KadDHT extends EventEmitter { * Announce to the network that we can provide given key's value. * * @param {CID} key - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} */ - provide (key, callback) { + async provide (key) { this._log('provide: %s', key.toBaseEncodedString()) - waterfall([ - (cb) => this.providers.addProvider(key, this.peerInfo.id, cb), - (cb) => this.getClosestPeers(key.buffer, cb), - (peers, cb) => { - const msg = new Message(Message.TYPES.ADD_PROVIDER, key.buffer, 0) - msg.providerPeers = peers.map((p) => new PeerInfo(p)) - - each(peers, (peer, cb) => { - this._log('putProvider %s to %s', key.toBaseEncodedString(), peer.toB58String()) - this.network.sendMessage(peer, msg, cb) - }, cb) - } - ], (err) => callback(err)) + await this.providers.addProvider(key, this.peerInfo.id) + + const peers = await this.getClosestPeers(key.buffer) + + const msg = new Message(Message.TYPES.ADD_PROVIDER, key.buffer, 0) + msg.providerPeers = peers.map((p) => new PeerInfo(p)) + + return Promise.all(peers.map((peer) => { + this._log('putProvider %s to %s', key.toBaseEncodedString(), peer.toB58String()) + return this.network.sendMessage(peer, msg) + })) } /** @@ -485,17 +437,9 @@ class KadDHT extends EventEmitter { * @param {Object} options - findProviders options * @param {number} options.timeout - how long the query should maximally 
run, in milliseconds (default: 60000) * @param {number} options.maxNumProviders - maximum number of providers to find - * @param {function(Error, Array)} callback - * @returns {void} + * @returns {Promise>} */ - findProviders (key, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } else { - options = options || {} - } - + findProviders (key, options = {}) { if (!options.maxTimeout && !options.timeout) { options.timeout = c.minute // default } else if (options.maxTimeout && !options.timeout) { // TODO this will be deprecated in a next release @@ -505,7 +449,7 @@ class KadDHT extends EventEmitter { options.maxNumProviders = options.maxNumProviders || c.K this._log('findProviders %s', key.toBaseEncodedString()) - this._findNProviders(key, options.timeout, options.maxNumProviders, callback) + return this._findNProviders(key, options.timeout, options.maxNumProviders) } // ----------- Peer Routing @@ -516,17 +460,9 @@ class KadDHT extends EventEmitter { * @param {PeerId} id * @param {Object} options - findPeer options * @param {number} options.timeout - how long the query should maximally run, in milliseconds (default: 60000) - * @param {function(Error, PeerInfo)} callback - * @returns {void} + * @returns {Promise} */ - findPeer (id, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } else { - options = options || {} - } - + async findPeer (id, options = {}) { if (!options.maxTimeout && !options.timeout) { options.timeout = c.minute // default } else if (options.maxTimeout && !options.timeout) { // TODO this will be deprecated in a next release @@ -535,80 +471,69 @@ class KadDHT extends EventEmitter { this._log('findPeer %s', id.toB58String()) - this.findPeerLocal(id, (err, pi) => { - if (err) { - return callback(err) - } + const pi = await this.findPeerLocal(id) - // already got it - if (pi != null) { - this._log('found local') - return callback(null, pi) - } + // already 
got it + if (pi != null) { + this._log('found local') + return pi + } - waterfall([ - (cb) => utils.convertPeerId(id, cb), - (key, cb) => { - const peers = this.routingTable.closestPeers(key, c.ALPHA) + const key = await utils.convertPeerId(id) - if (peers.length === 0) { - return cb(errcode(new Error('Peer lookup failed'), 'ERR_LOOKUP_FAILED')) - } + const peers = this.routingTable.closestPeers(key, c.ALPHA) - // sanity check - const match = peers.find((p) => p.isEqual(id)) - if (match && this.peerBook.has(id)) { - this._log('found in peerbook') - return cb(null, this.peerBook.get(id)) - } + if (peers.length === 0) { + throw errcode('Peer lookup failed', 'ERR_LOOKUP_FAILED') + } - // query the network - const query = new Query(this, id.id, () => { - // There is no distinction between the disjoint paths, - // so there are no per-path variables in this scope. - // Just return the actual query function. - return (peer, cb) => { - waterfall([ - (cb) => this._findPeerSingle(peer, id, cb), - (msg, cb) => { - const match = msg.closerPeers.find((p) => p.id.isEqual(id)) - - // found it - if (match) { - return cb(null, { - peer: match, - success: true - }) - } - - cb(null, { - closerPeers: msg.closerPeers - }) - } - ], cb) - } - }) + // sanity check + const match = peers.find((p) => p.isEqual(id)) + if (match && this.peerBook.has(id)) { + this._log('found in peerbook') + return this.peerBook.get(id) + } - timeout((cb) => { - query.run(peers, cb) - }, options.timeout)(cb) - }, - (result, cb) => { - let success = false - result.paths.forEach((result) => { - if (result.success) { - success = true - this.peerBook.put(result.peer) - } - }) - this._log('findPeer %s: %s', id.toB58String(), success) - if (!success) { - return cb(errcode(new Error('No peer found'), 'ERR_NOT_FOUND')) + // query the network + const query = new Query(this, id.id, () => { + // There is no distinction between the disjoint paths, + // so there are no per-path variables in this scope. 
+ // Just return the actual query function. + return async (peer) => { + const msg = await this._findPeerSingle(peer, id) + + const match = msg.closerPeers.find((p) => p.id.isEqual(id)) + + // found it + if (match) { + return { + peer: match, + success: true } - cb(null, this.peerBook.get(id)) } - ], callback) + + return { + closerPeers: msg.closerPeers + } + } + }) + + const result = await query.run(peers, options.timeout) + + let success = false + result.paths.forEach((res) => { + if (res.success) { + success = true + this.peerBook.put(res.peer) + } }) + + this._log('findPeer %s: %s', id.toB58String(), success) + if (!success) { + throw errcode('No peer found', 'ERR_NOT_FOUND') + } + + return this.peerBook.get(id) } _peerDiscovered (peerInfo) { diff --git a/src/network.js b/src/network.js index 29781ec8..e18219d3 100644 --- a/src/network.js +++ b/src/network.js @@ -1,9 +1,7 @@ 'use strict' const pull = require('pull-stream') -const timeout = require('async/timeout') const lp = require('pull-length-prefixed') -const setImmediate = require('async/setImmediate') const errcode = require('err-code') @@ -33,19 +31,16 @@ class Network { /** * Start the network. * - * @param {function(Error)} callback * @returns {void} */ - start (callback) { - const cb = (err) => setImmediate(() => callback(err)) - + start () { if (this._running) { - return cb(errcode(new Error('Network is already running'), 'ERR_NETWORK_ALREADY_RUNNING')) + throw errcode('Network is already running', 'ERR_NETWORK_ALREADY_RUNNING') } // TODO add a way to check if switch has started or not if (!this.dht.isStarted) { - return cb(errcode(new Error('Can not start network'), 'ERR_CANNOT_START_NETWORK')) + throw errcode('Cannot start network', 'ERR_CANNOT_START_NETWORK') } this._running = true @@ -55,27 +50,21 @@ class Network { // handle new connections this.dht.switch.on('peer-mux-established', this._onPeerConnected) - - cb() } /** * Stop all network activity. 
* - * @param {function(Error)} callback * @returns {void} */ - stop (callback) { - const cb = (err) => setImmediate(() => callback(err)) - + stop () { if (!this.dht.isStarted && !this.isStarted) { - return cb(errcode(new Error('Network is already stopped'), 'ERR_NETWORK_ALREADY_STOPPED')) + throw errcode('Network is already stopped', 'ERR_NETWORK_ALREADY_STOPPED') } this._running = false this.dht.switch.removeListener('peer-mux-established', this._onPeerConnected) this.dht.switch.unhandle(c.PROTOCOL_DHT) - cb() } /** @@ -105,27 +94,28 @@ class Network { * @private */ _onPeerConnected (peer) { + const peerId = peer.id.toB58String() if (!this.isConnected) { - return this._log.error('Network is offline') + return this._log.error('Received connection from %s but network is offline', peerId) } - this.dht.switch.dial(peer, c.PROTOCOL_DHT, (err, conn) => { + this.dht.switch.dial(peer, c.PROTOCOL_DHT, async (err, conn) => { if (err) { - return this._log('%s does not support protocol: %s', peer.id.toB58String(), c.PROTOCOL_DHT) + return this._log('%s does not support protocol: %s', peerId, c.PROTOCOL_DHT) } // TODO: conn.close() pull(pull.empty(), conn) - this.dht._add(peer, (err) => { - if (err) { - return this._log.error('Failed to add to the routing table', err) - } + try { + await this.dht._add(peer) + } catch (err) { + return this._log.error(`Failed to add ${peerId} to the routing table`, err) + } - this.dht._peerDiscovered(peer) + this.dht._peerDiscovered(peer) - this._log('added to the routing table: %s', peer.id.toB58String()) - }) + this._log('added to the routing table: %s', peerId) }) } @@ -134,22 +124,23 @@ class Network { * * @param {PeerId} to - The peer that should receive a message * @param {Message} msg - The message to send. 
- * @param {function(Error, Message)} callback - * @returns {void} + * @returns {Promise} */ - sendRequest (to, msg, callback) { + sendRequest (to, msg) { // TODO: record latency if (!this.isConnected) { - return callback(errcode(new Error('Network is offline'), 'ERR_NETWORK_OFFLINE')) + throw errcode('Network is offline', 'ERR_NETWORK_OFFLINE') } - this._log('sending to: %s', to.toB58String()) - this.dht.switch.dial(to, c.PROTOCOL_DHT, (err, conn) => { - if (err) { - return callback(err) - } + this._log('sending request to: %s', to.toB58String()) + return new Promise((resolve, reject) => { + this.dht.switch.dial(to, c.PROTOCOL_DHT, (err, conn) => { + if (err) { + return reject(err) + } - this._writeReadMessage(conn, msg.serialize(), callback) + resolve(this._writeReadMessage(conn, msg.serialize())) + }) }) } @@ -158,22 +149,23 @@ class Network { * * @param {PeerId} to * @param {Message} msg - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} */ - sendMessage (to, msg, callback) { + sendMessage (to, msg) { if (!this.isConnected) { - return setImmediate(() => callback(errcode(new Error('Network is offline'), 'ERR_NETWORK_OFFLINE'))) + throw errcode('Network is offline', 'ERR_NETWORK_OFFLINE') } - this._log('sending to: %s', to.toB58String()) + this._log('sending message to: %s', to.toB58String()) - this.dht.switch.dial(to, c.PROTOCOL_DHT, (err, conn) => { - if (err) { - return callback(err) - } + return new Promise((resolve, reject) => { + this.dht.switch.dial(to, c.PROTOCOL_DHT, (err, conn) => { + if (err) { + return reject(err) + } - this._writeMessage(conn, msg.serialize(), callback) + resolve(this._writeMessage(conn, msg.serialize())) + }) }) } @@ -184,15 +176,15 @@ class Network { * * @param {Connection} conn - the connection to use * @param {Buffer} msg - the message to send - * @param {function(Error, Message)} callback - * @returns {void} + * @returns {Promise} * @private */ - _writeReadMessage (conn, msg, callback) { - timeout( 
- writeReadMessage, - this.readMessageTimeout - )(conn, msg, callback) + _writeReadMessage (conn, msg) { + return utils.promiseTimeout( + writeReadMessage(conn, msg), + this.readMessageTimeout, + `Send/Receive message timed out in ${this.readMessageTimeout}ms` + ) } /** @@ -200,45 +192,48 @@ class Network { * * @param {Connection} conn - the connection to use * @param {Buffer} msg - the message to send - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * @private */ - _writeMessage (conn, msg, callback) { + _writeMessage (conn, msg) { + return new Promise((resolve, reject) => { + pull( + pull.values([msg]), + lp.encode(), + conn, + pull.onEnd((err) => err ? reject(err) : resolve()) + ) + }) + } +} + +function writeReadMessage (conn, msg) { + return new Promise((resolve, reject) => { pull( pull.values([msg]), lp.encode(), conn, - pull.onEnd(callback) - ) - } -} - -function writeReadMessage (conn, msg, callback) { - pull( - pull.values([msg]), - lp.encode(), - conn, - pull.filter((msg) => msg.length < c.maxMessageSize), - lp.decode(), - pull.collect((err, res) => { - if (err) { - return callback(err) - } - if (res.length === 0) { - return callback(errcode(new Error('No message received'), 'ERR_NO_MESSAGE_RECEIVED')) - } + pull.filter((msg) => msg.length < c.maxMessageSize), + lp.decode(), + pull.collect((err, res) => { + if (err) { + return reject(err) + } + if (res.length === 0) { + return reject(errcode('No message received', 'ERR_NO_MESSAGE_RECEIVED')) + } - let response - try { - response = Message.deserialize(res[0]) - } catch (err) { - return callback(errcode(err, 'ERR_FAILED_DESERIALIZE_RESPONSE')) - } + let response + try { + response = Message.deserialize(res[0]) + } catch (err) { + return reject(errcode(err, 'ERR_FAILED_DESERIALIZE_RESPONSE')) + } - callback(null, response) - }) - ) + resolve(response) + }) + ) + }) } module.exports = Network diff --git a/src/peer-queue.js b/src/peer-queue.js index b94773ec..8cd3d4b0 100644 --- 
a/src/peer-queue.js +++ b/src/peer-queue.js @@ -17,34 +17,22 @@ class PeerQueue { * Create from a given peer id. * * @param {PeerId} id - * @param {function(Error, PeerQueue)} callback - * @returns {void} + * @returns {Promise} */ - static fromPeerId (id, callback) { - utils.convertPeerId(id, (err, key) => { - if (err) { - return callback(err) - } - - callback(null, new PeerQueue(key)) - }) + static async fromPeerId (id) { + const key = await utils.convertPeerId(id) + return new PeerQueue(key) } /** * Create from a given buffer. * - * @param {Buffer} key - * @param {function(Error, PeerQueue)} callback - * @returns {void} + * @param {Buffer} buff + * @returns {Promise} */ - static fromKey (key, callback) { - utils.convertBuffer(key, (err, key) => { - if (err) { - return callback(err) - } - - callback(null, new PeerQueue(key)) - }) + static async fromKey (buff) { + const key = await utils.convertBuffer(buff) + return new PeerQueue(key) } /** @@ -62,24 +50,18 @@ class PeerQueue { * Add a new PeerId to the queue. 
* * @param {PeerId} id - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} */ - enqueue (id, callback) { + async enqueue (id) { log('enqueue %s', id.toB58String()) - utils.convertPeerId(id, (err, key) => { - if (err) { - return callback(err) - } + const key = await utils.convertPeerId(id) - const el = { - id: id, - distance: distance(this.from, key) - } + const el = { + id: id, + distance: distance(this.from, key) + } - this.heap.push(el) - callback() - }) + this.heap.push(el) } /** diff --git a/src/private.js b/src/private.js index 6ee75663..c5a3e573 100644 --- a/src/private.js +++ b/src/private.js @@ -2,9 +2,6 @@ const PeerId = require('peer-id') const libp2pRecord = require('libp2p-record') -const waterfall = require('async/waterfall') -const each = require('async/each') -const timeout = require('async/timeout') const PeerInfo = require('peer-info') const errcode = require('err-code') @@ -23,173 +20,134 @@ module.exports = (dht) => ({ * the message. * * @param {Message} msg - * @param {function(Error, Array)} callback - * @returns {undefined} + * @returns {Promise>} * @private */ - _nearestPeersToQuery (msg, callback) { - utils.convertBuffer(msg.key, (err, key) => { - if (err) { - return callback(err) - } - let ids - try { - ids = dht.routingTable.closestPeers(key, dht.ncp) - } catch (err) { - return callback(err) - } + async _nearestPeersToQuery (msg) { + const key = await utils.convertBuffer(msg.key) - callback(null, ids.map((p) => { - if (dht.peerBook.has(p)) { - return dht.peerBook.get(p) - } else { - return dht.peerBook.put(new PeerInfo(p)) - } - })) + const ids = dht.routingTable.closestPeers(key, dht.ncp) + return ids.map((p) => { + if (dht.peerBook.has(p)) { + return dht.peerBook.get(p) + } + return dht.peerBook.put(new PeerInfo(p)) }) }, + /** * Get the nearest peers to the given query, but iff closer * than self. 
* * @param {Message} msg * @param {PeerInfo} peer - * @param {function(Error, Array)} callback - * @returns {undefined} + * @returns {Promise>} * @private */ - _betterPeersToQuery (msg, peer, callback) { + async _betterPeersToQuery (msg, peer) { dht._log('betterPeersToQuery') - dht._nearestPeersToQuery(msg, (err, closer) => { - if (err) { - return callback(err) - } - - const filtered = closer.filter((closer) => { - if (dht._isSelf(closer.id)) { - // Should bail, not sure - dht._log.error('trying to return self as closer') - return false - } + const closer = await dht._nearestPeersToQuery(msg) - return !closer.id.isEqual(peer.id) - }) + return closer.filter((closer) => { + if (dht._isSelf(closer.id)) { + // Should bail, not sure + dht._log.error('trying to return self as closer') + return false + } - callback(null, filtered) + return !closer.id.isEqual(peer.id) }) }, + /** - * Try to fetch a given record by from the local datastore. + * Try to fetch a given record from the local datastore. * Returns the record iff it is still valid, meaning * - it was either authored by this node, or * - it was receceived less than `MAX_RECORD_AGE` ago. * * @param {Buffer} key - * @param {function(Error, Record)} callback - * @returns {undefined} + * @returns {Promise} * - *@private + * @private */ - _checkLocalDatastore (key, callback) { + async _checkLocalDatastore (key) { dht._log('checkLocalDatastore: %b', key) const dsKey = utils.bufferToKey(key) - // 2. fetch value from ds - dht.datastore.has(dsKey, (err, exists) => { - if (err) { - return callback(err) - } - if (!exists) { - return callback() - } + // Fetch value from ds + const exists = await dht.datastore.has(dsKey) + if (!exists) { + return undefined + } - dht.datastore.get(dsKey, (err, res) => { - if (err) { - return callback(err) - } + const rawRecord = await dht.datastore.get(dsKey) - const rawRecord = res + // Create record from the returned bytes + const record = Record.deserialize(rawRecord) - // 4. 
create record from the returned bytes - let record - try { - record = Record.deserialize(rawRecord) - } catch (err) { - return callback(err) - } + if (!record) { + throw errcode('Invalid record', 'ERR_INVALID_RECORD') + } - if (!record) { - return callback(errcode(new Error('Invalid record'), 'ERR_INVALID_RECORD')) - } + // Check validity: compare time received with max record age + if (record.timeReceived == null || + utils.now() - record.timeReceived > c.MAX_RECORD_AGE) { + // If record is bad delete it and return + await dht.datastore.delete(dsKey) + return undefined + } - // 5. check validity - - // compare recvtime with maxrecordage - if (record.timeReceived == null || - utils.now() - record.timeReceived > c.MAX_RECORD_AGE) { - // 6. if: record is bad delete it and return - return dht.datastore.delete(dsKey, callback) - } - - // else: return good record - callback(null, record) - }) - }) + // Record is valid + return record }, + /** * Add the peer to the routing table and update it in the peerbook. * * @param {PeerInfo} peer - * @param {function(Error)} callback - * @returns {undefined} + * @returns {Promise} * * @private */ - _add (peer, callback) { + _add (peer) { peer = dht.peerBook.put(peer) - dht.routingTable.add(peer.id, callback) + return dht.routingTable.add(peer.id) }, + /** * Verify a record without searching the DHT. + * Returns a Promise that will reject if the record is invalid. 
* * @param {Record} record - * @param {function(Error)} callback - * @returns {undefined} + * @returns {Promise} * * @private */ - _verifyRecordLocally (record, callback) { + _verifyRecordLocally (record) { dht._log('verifyRecordLocally') - libp2pRecord.validator.verifyRecord( + return libp2pRecord.validator.verifyRecord( dht.validators, - record, - callback + record ) }, + /** * Find close peers for a given peer * * @param {Buffer} key * @param {PeerId} peer - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _closerPeersSingle (key, peer, callback) { + async _closerPeersSingle (key, peer) { dht._log('_closerPeersSingle %b from %s', key, peer.toB58String()) - dht._findPeerSingle(peer, new PeerId(key), (err, msg) => { - if (err) { - return callback(err) - } - - const out = msg.closerPeers - .filter((pInfo) => !dht._isSelf(pInfo.id)) - .map((pInfo) => dht.peerBook.put(pInfo)) - - callback(null, out) - }) + const msg = await dht._findPeerSingle(peer, new PeerId(key)) + return msg.closerPeers + .filter((pInfo) => !dht._isSelf(pInfo.id)) + .map((pInfo) => dht.peerBook.put(pInfo)) }, + /** * Is the given peer id the peer id? * @@ -201,160 +159,212 @@ module.exports = (dht) => ({ _isSelf (other) { return other && dht.peerInfo.id.id.equals(other.id) }, + /** * Ask peer `peer` if they know where the peer with id `target` is. * * @param {PeerId} peer * @param {PeerId} target - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _findPeerSingle (peer, target, callback) { + _findPeerSingle (peer, target) { dht._log('_findPeerSingle %s', peer.toB58String()) const msg = new Message(Message.TYPES.FIND_NODE, target.id, 0) - dht.network.sendRequest(peer, msg, callback) + return dht.network.sendRequest(peer, msg) }, + /** * Store the given key/value pair at the peer `target`. 
* * @param {Buffer} key * @param {Buffer} rec - encoded record * @param {PeerId} target - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _putValueToPeer (key, rec, target, callback) { + async _putValueToPeer (key, rec, target) { const msg = new Message(Message.TYPES.PUT_VALUE, key, 0) msg.record = rec - dht.network.sendRequest(target, msg, (err, resp) => { - if (err) { - return callback(err) - } - - if (!resp.record.value.equals(Record.deserialize(rec).value)) { - return callback(errcode(new Error('value not put correctly'), 'ERR_PUT_VALUE_INVALID')) - } + const resp = await dht.network.sendRequest(target, msg) - callback() - }) + if (!resp.record.value.equals(Record.deserialize(rec).value)) { + throw errcode('value not put correctly', 'ERR_PUT_VALUE_INVALID') + } }, + /** * Store the given key/value pair locally, in the datastore. * @param {Buffer} key * @param {Buffer} rec - encoded record - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _putLocal (key, rec, callback) { - dht.datastore.put(utils.bufferToKey(key), rec, callback) + _putLocal (key, rec) { + return dht.datastore.put(utils.bufferToKey(key), rec) }, + /** - * Get the value to the given key. + * Get the value for the given key. 
* * @param {Buffer} key * @param {Object} options - get options * @param {number} options.timeout - optional timeout (default: 60000) - * @param {function(Error, Record)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _get (key, options, callback) { + async _get (key, options) { + // waterfall([ + // (cb) => dht.getMany(key, 16, options, cb), + // (vals, cb) => { + // const recs = vals.map((v) => v.val) + // let i = 0 + + // try { + // i = libp2pRecord.selection.bestRecord(dht.selectors, key, recs) + // } catch (err) { + // // Assume the first record if no selector available + // if (err.code !== 'ERR_NO_SELECTOR_FUNCTION_FOR_RECORD_KEY') { + // return cb(err) + // } + // } + + // const best = recs[i] + // dht._log('GetValue %b %s', key, best) + + // if (!best) { + // return cb(errcode(new Error('best value was not found'), 'ERR_NOT_FOUND')) + // } + + // // Send out correction record + // waterfall([ + // (cb) => utils.createPutRecord(key, best, cb), + // (fixupRec, cb) => each(vals, (v, cb) => { + // // no need to do anything + // if (v.val.equals(best)) { + // return cb() + // } + + // // correct ourself + // if (dht._isSelf(v.from)) { + // return dht._putLocal(key, fixupRec, (err) => { + // if (err) { + // dht._log.error('Failed error correcting self', err) + // } + // cb() + // }) + // } + + // // send correction + // dht._putValueToPeer(key, fixupRec, v.from, (err) => { + // if (err) { + // dht._log.error('Failed error correcting entry', err) + // } + // cb() + // }) + // }, cb) + // ], (err) => cb(err, err ? 
null : best)) + // } + // ], callback) + dht._log('_get %b', key) - waterfall([ - (cb) => dht.getMany(key, 16, options, cb), - (vals, cb) => { - const recs = vals.map((v) => v.val) - let i = 0 + const vals = await dht.getMany(key, 16, options) - try { - i = libp2pRecord.selection.bestRecord(dht.selectors, key, recs) - } catch (err) { - // Assume the first record if no selector available - if (err.code !== 'ERR_NO_SELECTOR_FUNCTION_FOR_RECORD_KEY') { - return cb(err) - } - } + const recs = vals.map((v) => v.val) + let i = 0 + + try { + i = libp2pRecord.selection.bestRecord(dht.selectors, key, recs) + } catch (err) { + // Assume the first record if no selector available + if (err.code !== 'ERR_NO_SELECTOR_FUNCTION_FOR_RECORD_KEY') { + throw err + } + } - const best = recs[i] - dht._log('GetValue %b %s', key, best) + const best = recs[i] + dht._log('GetValue %b %s', key, best) - if (!best) { - return cb(errcode(new Error('best value was not found'), 'ERR_NOT_FOUND')) + if (!best) { + throw errcode('best value was not found', 'ERR_NOT_FOUND') + } + + await this._sendCorrectionRecord(key, vals, best) + + return best + }, + + /** + * Send the best record found to any peers that have an out of date record. 
+ * + * @param {Buffer} key + * @param {Array} vals - values retrieved from the DHT + * @param {Object} best - the best record that was found + * @returns {Promise} + * + * @private + */ + async _sendCorrectionRecord (key, vals, best) { + const fixupRec = utils.createPutRecord(key, best) + + return Promise.all(vals.map((v) => { + // no need to do anything + if (v.val.equals(best)) { + return + } + + // correct ourself + if (dht._isSelf(v.from)) { + try { + return dht._putLocal(key, fixupRec) + } catch (err) { + dht._log.error('Failed error correcting self', err) + return } + } - // Send out correction record - waterfall([ - (cb) => utils.createPutRecord(key, best, cb), - (fixupRec, cb) => each(vals, (v, cb) => { - // no need to do anything - if (v.val.equals(best)) { - return cb() - } - - // correct ourself - if (dht._isSelf(v.from)) { - return dht._putLocal(key, fixupRec, (err) => { - if (err) { - dht._log.error('Failed error correcting self', err) - } - cb() - }) - } - - // send correction - dht._putValueToPeer(key, fixupRec, v.from, (err) => { - if (err) { - dht._log.error('Failed error correcting entry', err) - } - cb() - }) - }, cb) - ], (err) => cb(err, err ? null : best)) + // send correction + try { + return dht._putValueToPeer(key, fixupRec, v.from) + } catch (err) { + dht._log.error('Failed error correcting entry', err) } - ], callback) + })) }, + /** * Attempt to retrieve the value for the given key from * the local datastore. 
* * @param {Buffer} key - * @param {function(Error, Record)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _getLocal (key, callback) { + async _getLocal (key) { dht._log('getLocal %b', key) - waterfall([ - (cb) => dht.datastore.get(utils.bufferToKey(key), cb), - (raw, cb) => { - dht._log('found %b in local datastore', key) - let rec - try { - rec = Record.deserialize(raw) - } catch (err) { - return cb(err) - } + const raw = await dht.datastore.get(utils.bufferToKey(key)) - dht._verifyRecordLocally(rec, (err) => { - if (err) { - return cb(err) - } + dht._log('found %b in local datastore', key) + const rec = Record.deserialize(raw) - cb(null, rec) - }) - } - ], callback) + await dht._verifyRecordLocally(rec) + return rec }, + + /** + * Object containing a value or a list of closer peers. + * @typedef {Object} ValueOrPeers + * @property {Record} record - the record at the key + * @property {Array} peers - list of closer peers + */ + /** * Query a particular peer for the value for the given key. * It will either return the value or a list of closer peers. 
@@ -363,198 +373,187 @@ module.exports = (dht) => ({ * * @param {PeerId} peer * @param {Buffer} key - * @param {function(Error, Redcord, Array)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _getValueOrPeers (peer, key, callback) { - waterfall([ - (cb) => dht._getValueSingle(peer, key, cb), - (msg, cb) => { - const peers = msg.closerPeers - const record = msg.record - - if (record) { - // We have a record - return dht._verifyRecordOnline(record, (err) => { - if (err) { - const errMsg = 'invalid record received, discarded' - - dht._log(errMsg) - return cb(errcode(new Error(errMsg), 'ERR_INVALID_RECORD')) - } - - return cb(null, record, peers) - }) - } + async _getValueOrPeers (peer, key) { + const msg = await dht._getValueSingle(peer, key) - if (peers.length > 0) { - return cb(null, null, peers) - } + const peers = msg.closerPeers + const record = msg.record + + if (record) { + // We have a record + try { + await dht._verifyRecordOnline(record) + } catch (err) { + const errMsg = 'invalid record received, discarded' - cb(errcode(new Error('Not found'), 'ERR_NOT_FOUND')) + dht._log(errMsg) + throw errcode(errMsg, 'ERR_INVALID_RECORD') } - ], callback) + + return { record, peers } + } + + if (peers.length > 0) { + return { peers } + } + + throw errcode('Not found', 'ERR_NOT_FOUND') }, + /** * Get a value via rpc call for the given parameters. * * @param {PeerId} peer * @param {Buffer} key - * @param {function(Error, Message)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _getValueSingle (peer, key, callback) { + _getValueSingle (peer, key) { const msg = new Message(Message.TYPES.GET_VALUE, key, 0) - dht.network.sendRequest(peer, msg, callback) + return dht.network.sendRequest(peer, msg) }, + /** * Verify a record, fetching missing public keys from the network. * Calls back with an error if the record is invalid. 
* * @param {Record} record - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _verifyRecordOnline (record, callback) { - libp2pRecord.validator.verifyRecord(dht.validators, record, callback) + _verifyRecordOnline (record) { + return libp2pRecord.validator.verifyRecord(dht.validators, record) }, + /** * Get the public key directly from a node. * * @param {PeerId} peer - * @param {function(Error, PublicKey)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _getPublicKeyFromNode (peer, callback) { + async _getPublicKeyFromNode (peer) { const pkKey = utils.keyForPublicKey(peer) - waterfall([ - (cb) => dht._getValueSingle(peer, pkKey, cb), - (msg, cb) => { - if (!msg.record || !msg.record.value) { - return cb(errcode(new Error(`Node not responding with its public key: ${peer.toB58String()}`), 'ERR_INVALID_RECORD')) - } - PeerId.createFromPubKey(msg.record.value, cb) - }, - (recPeer, cb) => { - // compare hashes of the pub key - if (!recPeer.isEqual(peer)) { - return cb(errcode(new Error('public key does not match id'), 'ERR_PUBLIC_KEY_DOES_NOT_MATCH_ID')) - } + const msg = await dht._getValueSingle(peer, pkKey) - cb(null, recPeer.pubKey) - } - ], callback) + if (!msg.record || !msg.record.value) { + throw errcode(`Node not responding with its public key: ${peer.toB58String()}`, 'ERR_INVALID_RECORD') + } + + const recPeer = await PeerId.createFromPubKey(msg.record.value) + + // compare hashes of the pub key + if (!recPeer.isEqual(peer)) { + throw errcode('public key does not match id', 'ERR_PUBLIC_KEY_DOES_NOT_MATCH_ID') + } + + return recPeer.pubKey }, + /** * Search the dht for up to `n` providers of the given CID. * * @param {CID} key * @param {number} providerTimeout - How long the query should maximally run in milliseconds. 
* @param {number} n - * @param {function(Error, Array)} callback - * @returns {void} + * @returns {Promise>} * * @private */ - _findNProviders (key, providerTimeout, n, callback) { + async _findNProviders (key, providerTimeout, n) { let out = new LimitedPeerList(n) - dht.providers.getProviders(key, (err, provs) => { - if (err) { - return callback(err) + const provs = await dht.providers.getProviders(key) + + for (const id of provs) { + let info + if (dht.peerBook.has(id)) { + info = dht.peerBook.get(id) + } else { + info = dht.peerBook.put(new PeerInfo(id)) } + out.push(info) + } + + // All done + if (out.length >= n) { + return out.toArray() + } + + // need more, query the network + const paths = [] + const query = new Query(dht, key.buffer, (pathIndex, numPaths) => { + // This function body runs once per disjoint path + const pathSize = utils.pathSize(out.length - n, numPaths) + const pathProviders = new LimitedPeerList(pathSize) + paths.push(pathProviders) + + // Here we return the query function to use on this particular disjoint path + return async (peer) => { + const msg = await dht._findProvidersSingle(peer, key) + + const provs = msg.providerPeers + dht._log('(%s) found %s provider entries', dht.peerInfo.id.toB58String(), provs.length) + + for (const prov of provs) { + pathProviders.push(dht.peerBook.put(prov)) + } - provs.forEach((id) => { - let info - if (dht.peerBook.has(id)) { - info = dht.peerBook.get(id) - } else { - info = dht.peerBook.put(new PeerInfo(id)) + // hooray we have all that we want + if (pathProviders.length >= pathSize) { + return { success: true } } - out.push(info) - }) - // All done - if (out.length >= n) { - return callback(null, out.toArray()) + // it looks like we want some more + return { + closerPeers: msg.closerPeers + } } + }) - // need more, query the network - const paths = [] - const query = new Query(dht, key.buffer, (pathIndex, numPaths) => { - // This function body runs once per disjoint path - const pathSize = 
utils.pathSize(out.length - n, numPaths) - const pathProviders = new LimitedPeerList(pathSize) - paths.push(pathProviders) - - // Here we return the query function to use on this particular disjoint path - return (peer, cb) => { - waterfall([ - (cb) => dht._findProvidersSingle(peer, key, cb), - (msg, cb) => { - const provs = msg.providerPeers - dht._log('(%s) found %s provider entries', dht.peerInfo.id.toB58String(), provs.length) - - provs.forEach((prov) => { - pathProviders.push(dht.peerBook.put(prov)) - }) - - // hooray we have all that we want - if (pathProviders.length >= pathSize) { - return cb(null, { success: true }) - } - - // it looks like we want some more - cb(null, { - closerPeers: msg.closerPeers - }) - } - ], cb) - } - }) - - const peers = dht.routingTable.closestPeers(key.buffer, c.ALPHA) - - timeout((cb) => query.run(peers, cb), providerTimeout)((err) => { - // combine peers from each path - paths.forEach((path) => { - path.toArray().forEach((peer) => { - out.push(peer) - }) - }) - - if (err) { - if (err.code === 'ETIMEDOUT' && out.length > 0) { - return callback(null, out.toArray()) - } - return callback(err) - } + const peers = dht.routingTable.closestPeers(key.buffer, c.ALPHA) - callback(null, out.toArray()) - }) - }) + let err + try { + await query.run(peers, providerTimeout) + } catch (e) { + err = e + } + + // combine peers from each path + for (const path of paths) { + for (const peer of path.toArray()) { + out.push(peer) + } + } + + // Ignore timeout error if we have collected some records + if (err && (err.code !== 'ETIMEDOUT' || out.length === 0)) { + throw err + } + + return out.toArray() }, + /** * Check for providers from a single node. 
* * @param {PeerId} peer * @param {CID} key - * @param {function(Error, Message)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _findProvidersSingle (peer, key, callback) { + _findProvidersSingle (peer, key) { const msg = new Message(Message.TYPES.GET_PROVIDERS, key.buffer, 0) - dht.network.sendRequest(peer, msg, callback) + return dht.network.sendRequest(peer, msg) } }) diff --git a/src/providers.js b/src/providers.js index 85e79987..251c300e 100644 --- a/src/providers.js +++ b/src/providers.js @@ -2,11 +2,10 @@ const cache = require('hashlru') const varint = require('varint') -const each = require('async/each') -const pull = require('pull-stream') const CID = require('cids') const PeerId = require('peer-id') const Key = require('interface-datastore').Key +const Queue = require('p-queue') const c = require('./constants') const utils = require('./utils') @@ -56,6 +55,8 @@ class Providers { this.lruCacheSize = cacheSize || c.PROVIDERS_LRU_CACHE_SIZE this.providers = cache(this.lruCacheSize) + + this.syncQueue = new Queue({ concurrency: 1 }) } /** @@ -74,131 +75,116 @@ class Providers { * Check all providers if they are still valid, and if not * delete them. 
* - * @returns {undefined} + * @returns {Promise} * * @private */ _cleanup () { - this._getProviderCids((err, cids) => { - if (err) { + return this.syncQueue.add(async () => { + this._log('start cleanup') + let cids + try { + cids = await this._getProviderCids() + this._log('got %d cids', cids.length) + } catch (err) { return this._log.error('Failed to get cids', err) } - each(cids, (cid, cb) => { - this._getProvidersMap(cid, (err, provs) => { - if (err) { - return cb(err) - } - - provs.forEach((time, provider) => { + try { + await Promise.all(cids.map(async (cid) => { + const provs = await this._getProvidersMap(cid) + for (const [provider, time] of provs) { this._log('comparing: %s - %s > %s', Date.now(), time, this.provideValidity) if (Date.now() - time > this.provideValidity) { provs.delete(provider) } - }) + } if (provs.size === 0) { - return this._deleteProvidersMap(cid, cb) + return this._deleteProvidersMap(cid) } + })) + } catch (err) { + return this._log.error('Failed to cleanup', err) + } - cb() - }) - }, (err) => { - if (err) { - return this._log.error('Failed to cleanup', err) - } - - this._log('Cleanup successfull') - }) + this._log('Cleanup successful') }) } /** * Get a list of all cids that providers are known for. 
* - * @param {function(Error, Array)} callback - * @returns {undefined} + * @returns {Promise>} * * @private */ - _getProviderCids (callback) { - pull( - this.datastore.query({ prefix: c.PROVIDERS_KEY_PREFIX }), - pull.map((entry) => { - const parts = entry.key.toString().split('/') - if (parts.length !== 4) { - this._log.error('incorrectly formatted provider entry in datastore: %s', entry.key) - return - } - - let decoded - try { - decoded = utils.decodeBase32(parts[2]) - } catch (err) { - this._log.error('error decoding base32 provider key: %s', parts[2]) - return - } - - let cid - try { - cid = new CID(decoded) - } catch (err) { - this._log.error('error converting key to cid from datastore: %s', err.message) - } - - return cid - }), - pull.filter(Boolean), - pull.collect(callback) - ) + async _getProviderCids () { + const entries = [] + const it = this.datastore.query({ prefix: c.PROVIDERS_KEY_PREFIX }) + for await (const entry of it) { + entries.push(entry) + } + this._log('got %d entries', entries.length) + return entries.map((entry) => { + const parts = entry.key.toString().split('/') + if (parts.length !== 4) { + this._log.error('incorrectly formatted provider entry in datastore: %s', entry.key) + return + } + + let decoded + try { + decoded = utils.decodeBase32(parts[2]) + } catch (err) { + this._log.error('error decoding base32 provider key: %s', parts[2]) + return + } + + let cid + try { + cid = new CID(decoded) + } catch (err) { + this._log.error('error converting key to cid from datastore: %s', err.message) + } + + return cid + }).filter(Boolean) } /** * Get the currently known provider maps for a given CID. 
* * @param {CID} cid - * @param {function(Error, Map)} callback - * @returns {undefined} + * @returns {Promise>} * * @private */ - _getProvidersMap (cid, callback) { + _getProvidersMap (cid) { const provs = this.providers.get(makeProviderKey(cid)) - - if (!provs) { - return loadProviders(this.datastore, cid, callback) - } - - callback(null, provs) + return provs || loadProviders(this.datastore, cid) } /** * Completely remove a providers map entry for a given CID. * * @param {CID} cid - * @param {function(Error)} callback - * @returns {undefined} + * @returns {Promise} * * @private */ - _deleteProvidersMap (cid, callback) { + async _deleteProvidersMap (cid) { const dsKey = makeProviderKey(cid) this.providers.set(dsKey, null) const batch = this.datastore.batch() - - pull( - this.datastore.query({ - keysOnly: true, - prefix: dsKey - }), - pull.through((entry) => batch.delete(entry.key)), - pull.onEnd((err) => { - if (err) { - return callback(err) - } - batch.commit(callback) - }) - ) + const entries = this.datastore.query({ + keysOnly: true, + prefix: dsKey + }) + for await (const entry of entries) { + batch.delete(entry.key) + } + return batch.commit() } get cleanupInterval () { @@ -223,49 +209,34 @@ class Providers { * * @param {CID} cid * @param {PeerId} provider - * @param {function(Error)} callback - * @returns {undefined} + * @returns {Promise} */ - addProvider (cid, provider, callback) { - this._log('addProvider %s', cid.toBaseEncodedString()) - const dsKey = makeProviderKey(cid) - const provs = this.providers.get(dsKey) - - const next = (err, provs) => { - if (err) { - return callback(err) - } + addProvider (cid, provider) { + return this.syncQueue.add(async () => { + this._log('addProvider %s', cid.toBaseEncodedString()) + const provs = await this._getProvidersMap(cid) this._log('loaded %s provs', provs.size) const now = Date.now() provs.set(provider, now) + const dsKey = makeProviderKey(cid) this.providers.set(dsKey, provs) - 
writeProviderEntry(this.datastore, cid, provider, now, callback) - } - - if (!provs) { - loadProviders(this.datastore, cid, next) - } else { - next(null, provs) - } + return writeProviderEntry(this.datastore, cid, provider, now) + }) } /** * Get a list of providers for the given CID. * * @param {CID} cid - * @param {function(Error, Array)} callback - * @returns {undefined} + * @returns {Promise>} */ - getProviders (cid, callback) { - this._log('getProviders %s', cid.toBaseEncodedString()) - this._getProvidersMap(cid, (err, provs) => { - if (err) { - return callback(err) - } - - callback(null, Array.from(provs.keys())) + getProviders (cid) { + return this.syncQueue.add(async () => { + this._log('getProviders %s', cid.toBaseEncodedString()) + const provs = await this._getProvidersMap(cid) + return [...provs.keys()] }) } } @@ -289,19 +260,18 @@ function makeProviderKey (cid) { * @param {CID} cid * @param {PeerId} peer * @param {number} time - * @param {function(Error)} callback - * @returns {undefined} + * @returns {Promise} * * @private */ -function writeProviderEntry (store, cid, peer, time, callback) { +function writeProviderEntry (store, cid, peer, time) { const dsKey = [ makeProviderKey(cid), '/', utils.encodeBase32(peer.id) ].join('') - store.put(new Key(dsKey), Buffer.from(varint.encode(time)), callback) + return store.put(new Key(dsKey), Buffer.from(varint.encode(time))) } /** @@ -309,28 +279,20 @@ function writeProviderEntry (store, cid, peer, time, callback) { * * @param {Datastore} store * @param {CID} cid - * @param {function(Error, Map)} callback - * @returns {undefined} + * @returns {Promise>} * * @private */ -function loadProviders (store, cid, callback) { - pull( - store.query({ prefix: makeProviderKey(cid) }), - pull.map((entry) => { - const parts = entry.key.toString().split('/') - const lastPart = parts[parts.length - 1] - const rawPeerId = utils.decodeBase32(lastPart) - return [new PeerId(rawPeerId), readTime(entry.value)] - }), - 
pull.collect((err, res) => { - if (err) { - return callback(err) - } - - return callback(null, new Map(res)) - }) - ) +async function loadProviders (store, cid) { + const providers = new Map() + const it = store.query({ prefix: makeProviderKey(cid) }) + for await (const entry of it) { + const parts = entry.key.toString().split('/') + const lastPart = parts[parts.length - 1] + const rawPeerId = utils.decodeBase32(lastPart) + providers.set(new PeerId(rawPeerId), readTime(entry.value)) + } + return providers } function readTime (buf) { diff --git a/src/query.js b/src/query.js index a11cab40..6a37a29d 100644 --- a/src/query.js +++ b/src/query.js @@ -1,12 +1,10 @@ 'use strict' -const waterfall = require('async/waterfall') -const each = require('async/each') -const queue = require('async/queue') const mh = require('multihashes') const c = require('./constants') const PeerQueue = require('./peer-queue') +const WorkerQueue = require('./worker-queue') const utils = require('./utils') /** @@ -26,7 +24,7 @@ class Query { * Query function. * @typedef {queryFunc} function * @param {PeerId} next - Peer to query - * @param {function(Error, Object)} callback - Query result callback + * @returns {Object} */ /** @@ -46,14 +44,21 @@ class Query { this._log = utils.logger(this.dht.peerInfo.id, 'query:' + mh.toB58String(key)) } + /** + * Run result. + * @typedef {Object} RunResult + * @property {Set} finalSet - peers that were queried + * @property {Array} paths - array of states per disjoint path + */ + /** * Run this query, start with the given list of peers first. * * @param {Array} peers - * @param {function(Error, Object)} callback - * @returns {void} + * @param {number} [timeout] - timeout in ms. If undefined, runs forever. 
+ * @returns {Promise} */ - run (peers, callback) { + async run (peers, timeout) { const run = { peersSeen: new Set(), errors: [], @@ -62,7 +67,7 @@ class Query { if (peers.length === 0) { this._log.error('Running query with no peers') - return callback() + return } // create correct number of paths @@ -85,100 +90,88 @@ class Query { } }) - each(run.paths, (path, cb) => { - waterfall([ - (cb) => PeerQueue.fromKey(this.key, cb), - (q, cb) => { - path.peersToQuery = q - each(path.peers, (p, cb) => addPeerToQuery(p, this.dht, path, cb), cb) - }, - (cb) => workerQueue(this, path, cb) - ], cb) - }, (err, results) => { - this._log('query:done') - if (err) { - return callback(err) + // Set up a worker queue for each path + const workers = await Promise.all(run.paths.map(async (path) => { + path.peersToQuery = await PeerQueue.fromKey(this.key) + await Promise.all(path.peers.map((p) => addPeerToQuery(p, this.dht, path))) + return workerQueue(this, path) + })) + + // Run the workers with a timeout + try { + await utils.promiseTimeout( + Promise.all(workers.map(w => w.onComplete())), + timeout, + `Query for key ${this.key} timed out in ${timeout}ms` + ) + } catch (err) { + // There was an error, so stop all the workers + for (const worker of workers) { + worker.stop() } + this._log(err.message) + throw err + } - if (run.errors.length === run.peersSeen.size) { - return callback(run.errors[0]) - } + this._log('query:done') - run.res = { - finalSet: run.peersSeen, - paths: [] - } + if (run.errors.length === run.peersSeen.size) { + throw run.errors[0] + } - run.paths.forEach((path) => { - if (path.res && path.res.success) { - run.res.paths.push(path.res) - } - }) + run.res = { + finalSet: run.peersSeen, + paths: [] + } - callback(null, run.res) + run.paths.forEach((path) => { + if (path.res && path.res.success) { + run.res.paths.push(path.res) + } }) + + return run.res } } /** - * Use the queue from async to keep `concurrency` amount items running + * Use the queue to keep 
`concurrency` amount items running * per path. * * @param {Query} query * @param {Object} path - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * @private */ -function workerQueue (query, path, callback) { - let killed = false - const q = queue((next, cb) => { +function workerQueue (query, path) { + const processPeer = async (queue, peer) => { query._log('queue:work') - execQuery(next, query, path, (err, done) => { - // Ignore after kill - if (killed) { - return cb() - } - query._log('queue:work:done', err, done) - if (err) { - return cb(err) - } - if (done) { - q.kill() - killed = true - return callback() - } - cb() - }) - }, query.concurrency) - const fill = () => { - query._log('queue:fill') - while (q.length() < query.concurrency && - path.peersToQuery.length > 0) { - q.push(path.peersToQuery.dequeue()) + let done, err + try { + done = await execQuery(peer, query, path) + } catch (e) { + query._log.error('queue', e) + err = e } - } - fill() + // Ignore tasks that finish after we're already done + if (!queue.running) { + return true + } - // callback handling - q.error = (err) => { - query._log.error('queue', err) - callback(err) - } + query._log('queue:work:done', err, done) - q.drain = () => { - query._log('queue:drain') - callback() - } + if (err) { + throw err + } - q.unsaturated = () => { - query._log('queue:unsatured') - fill() + return done } - q.buffer = 0 + return new WorkerQueue(path.peersToQuery, processPeer, { + concurrency: query.concurrency + }) } /** @@ -187,32 +180,32 @@ function workerQueue (query, path, callback) { * @param {PeerId} next * @param {Query} query * @param {Object} path - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * @private */ -function execQuery (next, query, path, callback) { - path.query(next, (err, res) => { - if (err) { - path.run.errors.push(err) - callback() - } else if (res.success) { - path.res = res - callback(null, true) - } else if (res.closerPeers && 
res.closerPeers.length > 0) { - each(res.closerPeers, (closer, cb) => { - // don't add ourselves - if (query.dht._isSelf(closer.id)) { - return cb() - } - closer = query.dht.peerBook.put(closer) - query.dht._peerDiscovered(closer) - addPeerToQuery(closer.id, query.dht, path, cb) - }, callback) - } else { - callback() - } - }) +async function execQuery (next, query, path) { + let res + try { + res = await path.query(next) + } catch (err) { + path.run.errors.push(err) + return + } + if (res.success) { + path.res = res + return true + } + if (res.closerPeers && res.closerPeers.length > 0) { + await Promise.all(res.closerPeers.map((closer) => { + // don't add ourselves + if (query.dht._isSelf(closer.id)) { + return + } + closer = query.dht.peerBook.put(closer) + query.dht._peerDiscovered(closer) + return addPeerToQuery(closer.id, query.dht, path) + })) + } } /** @@ -221,22 +214,21 @@ function execQuery (next, query, path, callback) { * @param {PeerId} next * @param {DHT} dht * @param {Object} path - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * @private */ -function addPeerToQuery (next, dht, path, callback) { +function addPeerToQuery (next, dht, path) { const run = path.run if (dht._isSelf(next)) { - return callback() + return } if (run.peersSeen.has(next)) { - return callback() + return } run.peersSeen.add(next) - path.peersToQuery.enqueue(next, callback) + return path.peersToQuery.enqueue(next) } module.exports = Query diff --git a/src/random-walk.js b/src/random-walk.js index 9a91f695..bee0a031 100644 --- a/src/random-walk.js +++ b/src/random-walk.js @@ -1,13 +1,11 @@ 'use strict' -const times = require('async/times') const crypto = require('libp2p-crypto') -const waterfall = require('async/waterfall') -const timeout = require('async/timeout') const multihashing = require('multihashing-async') const PeerId = require('peer-id') const assert = require('assert') const c = require('./constants') +const utils = require('./utils') const 
errcode = require('err-code') @@ -26,66 +24,64 @@ class RandomWalk { * @param {number} [queries=1] - how many queries to run per period * @param {number} [period=300000] - how often to run the the random-walk process, in milliseconds (5min) * @param {number} [timeout=10000] - how long to wait for the the random-walk query to run, in milliseconds (10s) - * @returns {void} + * @returns {undefined} */ start (queries, period, timeout) { if (queries == null) { queries = 1 } if (period == null) { period = 5 * c.minute } if (timeout == null) { timeout = 10 * c.second } // Don't run twice - if (this._running) { return } + if (this._runningHandle) { return } // Create running handle const runningHandle = { _onCancel: null, _timeoutId: null, - runPeriodically: (fn, period) => { - runningHandle._timeoutId = setTimeout(() => { + runPeriodically: () => { + runningHandle._timeoutId = setTimeout(async () => { runningHandle._timeoutId = null - fn((nextPeriod) => { - // Was walk cancelled while fn was being called? - if (runningHandle._onCancel) { - return runningHandle._onCancel() - } - // Schedule next - runningHandle.runPeriodically(fn, nextPeriod) - }) + await this._walk(queries, timeout) + + // Was walk cancelled while fn was being called? 
+ if (runningHandle._onCancel) { + return runningHandle._onCancel() + } + + // Schedule next + runningHandle.runPeriodically() }, period) }, - cancel: (cb) => { - // Not currently running, can callback immediately + cancel: () => { + // Not currently running, can return immediately if (runningHandle._timeoutId) { clearTimeout(runningHandle._timeoutId) - return cb() + return } // Wait to finish and then call callback - runningHandle._onCancel = cb + return new Promise((resolve) => { + runningHandle._onCancel = resolve + }) } } // Start runner - runningHandle.runPeriodically((done) => { - this._walk(queries, timeout, () => done(period)) - }, period) + runningHandle.runPeriodically() this._runningHandle = runningHandle } /** * Stop the random-walk process. - * @param {function(Error)} callback * - * @returns {void} + * @returns {Promise} */ - stop (callback) { + stop () { const runningHandle = this._runningHandle - if (!runningHandle) { - return callback() + if (runningHandle) { + this._runningHandle = null + return runningHandle.cancel() } - - this._runningHandle = null - runningHandle.cancel(callback) } /** @@ -93,74 +89,67 @@ class RandomWalk { * * @param {number} queries * @param {number} walkTimeout - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _walk (queries, walkTimeout, callback) { + async _walk (queries, walkTimeout) { this._kadDHT._log('random-walk:start') - times(queries, (i, cb) => { - waterfall([ - (cb) => this._randomPeerId(cb), - (id, cb) => timeout((cb) => { - this._query(id, cb) - }, walkTimeout)(cb) - ], (err) => { - if (err) { - this._kadDHT._log.error('random-walk:error', err) - return callback(err) - } - - this._kadDHT._log('random-walk:done') - callback(null) - }) - }) + for (let i = 0; i < queries && this._runningHandle; i++) { + try { + const id = await this._randomPeerId() + await utils.promiseTimeout( + this._query(id), + walkTimeout, + `Random walk for id ${id} timed out in 
${walkTimeout}ms` + ) + } catch (err) { + this._kadDHT._log.error('random-walk:error', err) + throw err + } + this._kadDHT._log('random-walk:done') + } } /** * The query run during a random walk request. * * @param {PeerId} id - * @param {function(Error)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _query (id, callback) { + async _query (id) { this._kadDHT._log('random-walk:query:%s', id.toB58String()) - this._kadDHT.findPeer(id, (err, peer) => { + let peer + try { + peer = await this._kadDHT.findPeer(id) + } catch (err) { + // expected case, we asked for random stuff after all if (err.code === 'ERR_NOT_FOUND') { - // expected case, we asked for random stuff after all - return callback() - } - if (err) { - return callback(err) + return } - this._kadDHT._log('random-walk:query:found', err, peer) + throw err + } + + this._kadDHT._log('random-walk:query:found', null, peer) - // wait what, there was something found? Lucky day! - callback(errcode(new Error(`random-walk: ACTUALLY FOUND PEER: ${peer}, ${id.toB58String()}`), 'ERR_FOUND_RANDOM_PEER')) - }) + // wait what, there was something found? Lucky day! + throw errcode(`random-walk: ACTUALLY FOUND PEER: ${peer}, ${id.toB58String()}`, 'ERR_FOUND_RANDOM_PEER') } /** * Generate a random peer id for random-walk purposes. 
* - * @param {function(Error, PeerId)} callback - * @returns {void} + * @returns {Promise} * * @private */ - _randomPeerId (callback) { - multihashing(crypto.randomBytes(16), 'sha2-256', (err, digest) => { - if (err) { - return callback(err) - } - callback(null, new PeerId(digest)) - }) + async _randomPeerId () { + const digest = await multihashing(crypto.randomBytes(16), 'sha2-256') + return new PeerId(digest) } } diff --git a/src/routing.js b/src/routing.js index 38e656e2..0f6b9a86 100644 --- a/src/routing.js +++ b/src/routing.js @@ -17,11 +17,7 @@ class RoutingTable { this.self = self this._onPing = this._onPing.bind(this) - utils.convertPeerId(self, (err, selfKey) => { - if (err) { - throw err - } - + utils.convertPeerId(self).then((selfKey) => { this.kb = new KBucket({ localNodeId: selfKey, numberOfNodesPerKBucket: kBucketSize, @@ -72,22 +68,13 @@ class RoutingTable { * Find a specific peer by id. * * @param {PeerId} peer - * @param {function(Error, PeerId)} callback - * @returns {void} + * @returns {Promise} */ - find (peer, callback) { - utils.convertPeerId(peer, (err, key) => { - if (err) { - return callback(err) - } - const closest = this.closestPeer(key) - - if (closest && closest.isEqual(peer)) { - return callback(null, closest) - } - - callback() - }) + async find (peer) { + const key = await utils.convertPeerId(peer) + const closest = this.closestPeer(key) + + return closest && closest.isEqual(peer) ? closest : undefined } /** @@ -119,34 +106,22 @@ class RoutingTable { * Add or update the routing table with the given peer. * * @param {PeerId} peer - * @param {function(Error)} callback - * @returns {undefined} + * @returns {Promise} */ - add (peer, callback) { - utils.convertPeerId(peer, (err, id) => { - if (err) { - return callback(err) - } - this.kb.add({ id: id, peer: peer }) - callback() - }) + async add (peer) { + const id = await utils.convertPeerId(peer) + this.kb.add({ id: id, peer: peer }) } /** * Remove a given peer from the table. 
* * @param {PeerId} peer - * @param {function(Error)} callback - * @returns {undefined} + * @returns {Promise} */ - remove (peer, callback) { - utils.convertPeerId(peer, (err, id) => { - if (err) { - return callback(err) - } - this.kb.remove(id) - callback() - }) + async remove (peer) { + const id = await utils.convertPeerId(peer) + this.kb.remove(id) } } diff --git a/src/rpc/handlers/add-provider.js b/src/rpc/handlers/add-provider.js index e80d2d70..6fdd128f 100644 --- a/src/rpc/handlers/add-provider.js +++ b/src/rpc/handlers/add-provider.js @@ -11,14 +11,13 @@ module.exports = (dht) => { * * @param {PeerInfo} peer * @param {Message} msg - * @param {function(Error, Message)} callback - * @returns {undefined} + * @returns {Promise} */ - return function addProvider (peer, msg, callback) { + return function addProvider (peer, msg) { log('start') if (!msg.key || msg.key.length === 0) { - return callback(errcode(new Error('Missing key'), 'ERR_MISSING_KEY')) + throw errcode('Missing key', 'ERR_MISSING_KEY') } let cid @@ -27,7 +26,7 @@ module.exports = (dht) => { } catch (err) { const errMsg = `Invalid CID: ${err.message}` - return callback(errcode(new Error(errMsg), 'ERR_INVALID_CID')) + throw errcode(errMsg, 'ERR_INVALID_CID') } msg.providerPeers.forEach((pi) => { @@ -49,6 +48,6 @@ module.exports = (dht) => { } }) - dht.providers.addProvider(cid, peer.id, callback) + return dht.providers.addProvider(cid, peer.id) } } diff --git a/src/rpc/handlers/find-node.js b/src/rpc/handlers/find-node.js index af2e286d..af0102bc 100644 --- a/src/rpc/handlers/find-node.js +++ b/src/rpc/handlers/find-node.js @@ -1,7 +1,5 @@ 'use strict' -const waterfall = require('async/waterfall') - const Message = require('../../message') const utils = require('../../utils') @@ -13,31 +11,26 @@ module.exports = (dht) => { * * @param {PeerInfo} peer * @param {Message} msg - * @param {function(Error, Message)} callback - * @returns {undefined} + * @returns {Promise} */ - return function findNode 
(peer, msg, callback) { + return async function findNode (peer, msg) { log('start') - waterfall([ - (cb) => { - if (msg.key.equals(dht.peerInfo.id.id)) { - return cb(null, [dht.peerInfo]) - } + let closer + if (msg.key.equals(dht.peerInfo.id.id)) { + closer = [dht.peerInfo] + } else { + closer = await dht._betterPeersToQuery(msg, peer) + } - dht._betterPeersToQuery(msg, peer, cb) - }, - (closer, cb) => { - const response = new Message(msg.type, Buffer.alloc(0), msg.clusterLevel) + const response = new Message(msg.type, Buffer.alloc(0), msg.clusterLevel) - if (closer.length > 0) { - response.closerPeers = closer - } else { - log('handle FindNode %s: could not find anything', peer.id.toB58String()) - } + if (closer.length > 0) { + response.closerPeers = closer + } else { + log('handle FindNode %s: could not find anything', peer.id.toB58String()) + } - cb(null, response) - } - ], callback) + return response } } diff --git a/src/rpc/handlers/get-providers.js b/src/rpc/handlers/get-providers.js index 4d067c87..429ca0be 100644 --- a/src/rpc/handlers/get-providers.js +++ b/src/rpc/handlers/get-providers.js @@ -1,7 +1,6 @@ 'use strict' const CID = require('cids') -const parallel = require('async/parallel') const PeerInfo = require('peer-info') const errcode = require('err-code') @@ -17,63 +16,55 @@ module.exports = (dht) => { * * @param {PeerInfo} peer * @param {Message} msg - * @param {function(Error, Message)} callback - * @returns {undefined} + * @returns {Promise} */ - return function getProviders (peer, msg, callback) { + return async function getProviders (peer, msg) { let cid try { cid = new CID(msg.key) } catch (err) { - return callback(errcode(new Error(`Invalid CID: ${err.message}`), 'ERR_INVALID_CID')) + throw errcode(`Invalid CID: ${err.message}`, 'ERR_INVALID_CID') } log('%s', cid.toBaseEncodedString()) const dsKey = utils.bufferToKey(cid.buffer) - parallel([ - (cb) => dht.datastore.has(dsKey, (err, exists) => { - if (err) { - log.error('Failed to check 
datastore existence', err) - return cb(null, false) - } - - cb(null, exists) + const res = await Promise.all([ + dht.datastore.has(dsKey).catch((err) => { + log.error('Failed to check datastore existence', err) + return false }), - (cb) => dht.providers.getProviders(cid, cb), - (cb) => dht._betterPeersToQuery(msg, peer, cb) - ], (err, res) => { - if (err) { - return callback(err) - } - const has = res[0] - const closer = res[2] - const providers = res[1].map((p) => { - if (dht.peerBook.has(p)) { - return dht.peerBook.get(p) - } - - return dht.peerBook.put(new PeerInfo(p)) - }) - - if (has) { - providers.push(dht.peerInfo) + dht.providers.getProviders(cid), + dht._betterPeersToQuery(msg, peer) + ]) + + const has = res[0] + const closer = res[2] + const providers = res[1].map((p) => { + if (dht.peerBook.has(p)) { + return dht.peerBook.get(p) } - const response = new Message(msg.type, msg.key, msg.clusterLevel) + return dht.peerBook.put(new PeerInfo(p)) + }) - if (providers.length > 0) { - response.providerPeers = providers - } + if (has) { + providers.push(dht.peerInfo) + } - if (closer.length > 0) { - response.closerPeers = closer - } + const response = new Message(msg.type, msg.key, msg.clusterLevel) - log('got %s providers %s closerPeers', providers.length, closer.length) + if (providers.length > 0) { + response.providerPeers = providers + } - callback(null, response) - }) + if (closer.length > 0) { + response.closerPeers = closer + } + + log('got %s providers %s closerPeers', providers.length, closer.length) + + return response } } diff --git a/src/rpc/handlers/get-value.js b/src/rpc/handlers/get-value.js index c87ff2ca..bafb4352 100644 --- a/src/rpc/handlers/get-value.js +++ b/src/rpc/handlers/get-value.js @@ -1,6 +1,5 @@ 'use strict' -const parallel = require('async/parallel') const Record = require('libp2p-record').Record const errcode = require('err-code') @@ -16,16 +15,15 @@ module.exports = (dht) => { * * @param {PeerInfo} peer * @param {Message} msg - * 
@param {function(Error, Message)} callback - * @returns {undefined} + * @returns {Promise} */ - return function getValue (peer, msg, callback) { + return async function getValue (peer, msg) { const key = msg.key log('key: %b', key) if (!key || key.length === 0) { - return callback(errcode(new Error('Invalid key'), 'ERR_INVALID_KEY')) + throw errcode('Invalid key', 'ERR_INVALID_KEY') } const response = new Message(Message.TYPES.GET_VALUE, key, msg.clusterLevel) @@ -44,32 +42,25 @@ module.exports = (dht) => { if (info && info.id.pubKey) { log('returning found public key') response.record = new Record(key, info.id.pubKey.bytes) - return callback(null, response) + return response } } - parallel([ - (cb) => dht._checkLocalDatastore(key, cb), - (cb) => dht._betterPeersToQuery(msg, peer, cb) - ], (err, res) => { - if (err) { - return callback(err) - } - - const record = res[0] - const closer = res[1] + const [record, closer] = await Promise.all([ + dht._checkLocalDatastore(key), + dht._betterPeersToQuery(msg, peer) + ]) - if (record) { - log('got record') - response.record = record - } + if (record) { + log('got record') + response.record = record + } - if (closer.length > 0) { - log('got closer %s', closer.length) - response.closerPeers = closer - } + if (closer.length > 0) { + log('got closer %s', closer.length) + response.closerPeers = closer + } - callback(null, response) - }) + return response } } diff --git a/src/rpc/handlers/index.js b/src/rpc/handlers/index.js index 5c0d909c..97f48bc8 100644 --- a/src/rpc/handlers/index.js +++ b/src/rpc/handlers/index.js @@ -17,7 +17,7 @@ module.exports = (dht) => { * * @param {number} type * - * @returns {function(PeerInfo, Message, function(Error, Message))} + * @returns {function(PeerInfo, Message)} * * @private */ diff --git a/src/rpc/handlers/ping.js b/src/rpc/handlers/ping.js index a3430393..dfb3c02b 100644 --- a/src/rpc/handlers/ping.js +++ b/src/rpc/handlers/ping.js @@ -10,11 +10,10 @@ module.exports = (dht) => { * * 
@param {PeerInfo} peer * @param {Message} msg - * @param {function(Error, Message)} callback - * @returns {undefined} + * @returns {Message} */ - return function ping (peer, msg, callback) { + return function ping (peer, msg) { log('from %s', peer.id.toB58String()) - callback(null, msg) + return msg } } diff --git a/src/rpc/handlers/put-value.js b/src/rpc/handlers/put-value.js index 46d0a742..015e848c 100644 --- a/src/rpc/handlers/put-value.js +++ b/src/rpc/handlers/put-value.js @@ -11,10 +11,9 @@ module.exports = (dht) => { * * @param {PeerInfo} peer * @param {Message} msg - * @param {function(Error, Message)} callback - * @returns {undefined} + * @returns {Promise} */ - return function putValue (peer, msg, callback) { + return async function putValue (peer, msg) { const key = msg.key log('key: %b', key) @@ -24,26 +23,22 @@ module.exports = (dht) => { const errMsg = `Empty record from: ${peer.id.toB58String()}` log.error(errMsg) - return callback(errcode(new Error(errMsg), 'ERR_EMPTY_RECORD')) + throw errcode(errMsg, 'ERR_EMPTY_RECORD') } - dht._verifyRecordLocally(record, (err) => { - if (err) { - log.error(err.message) - return callback(err) - } + try { + await dht._verifyRecordLocally(record) + } catch (err) { + log.error(err.message) + throw err + } - record.timeReceived = new Date() + record.timeReceived = new Date() - const key = utils.bufferToKey(record.key) + const k = utils.bufferToKey(record.key) - dht.datastore.put(key, record.serialize(), (err) => { - if (err) { - return callback(err) - } + await dht.datastore.put(k, record.serialize()) - callback(null, msg) - }) - }) + return msg } } diff --git a/src/rpc/index.js b/src/rpc/index.js index 1c5d43eb..b7717094 100644 --- a/src/rpc/index.js +++ b/src/rpc/index.js @@ -12,34 +12,34 @@ module.exports = (dht) => { const log = utils.logger(dht.peerInfo.id, 'rpc') const getMessageHandler = handlers(dht) + /** * Process incoming DHT messages. 
* * @param {PeerInfo} peer * @param {Message} msg - * @param {function(Error, Message)} callback - * @returns {void} + * @returns {Promise} * * @private */ - function handleMessage (peer, msg, callback) { + async function handleMessage (peer, msg) { // update the peer - dht._add(peer, (err) => { - if (err) { - log.error('Failed to update the kbucket store') - log.error(err) - } + try { + await dht._add(peer) + } catch (err) { + log.error('Failed to update the kbucket store') + log.error(err) + } - // get handler & exectue it - const handler = getMessageHandler(msg.type) + // get handler & execute it + const handler = getMessageHandler(msg.type) - if (!handler) { - log.error(`no handler found for message type: ${msg.type}`) - return callback() - } + if (!handler) { + log.error(`no handler found for message type: ${msg.type}`) + return + } - handler(peer, msg, callback) - }) + return handler(peer, msg) } /** @@ -75,7 +75,7 @@ module.exports = (dht) => { return msg }), pull.filter(Boolean), - pull.asyncMap((msg, cb) => handleMessage(peer, msg, cb)), + pull.asyncMap((msg, cb) => handleMessage(peer, msg).then(res => cb(null, res)).catch(cb)), // Not all handlers will return a response pull.filter(Boolean), pull.map((response) => { diff --git a/src/utils.js b/src/utils.js index 900cb139..1948bdc4 100644 --- a/src/utils.js +++ b/src/utils.js @@ -6,31 +6,28 @@ const mh = require('multihashes') const Key = require('interface-datastore').Key const base32 = require('base32.js') const distance = require('xor-distance') -const map = require('async/map') const Record = require('libp2p-record').Record -const setImmediate = require('async/setImmediate') const PeerId = require('peer-id') +const errcode = require('err-code') /** * Creates a DHT ID by hashing a given buffer. 
* * @param {Buffer} buf - * @param {function(Error, Buffer)} callback - * @returns {void} + * @returns {Promise} */ -exports.convertBuffer = (buf, callback) => { - multihashing.digest(buf, 'sha2-256', callback) +exports.convertBuffer = (buf) => { + return multihashing.digest(buf, 'sha2-256') } /** * Creates a DHT ID by hashing a Peer ID * * @param {PeerId} peer - * @param {function(Error, Buffer)} callback - * @returns {void} + * @returns {Promise} */ -exports.convertPeerId = (peer, callback) => { - multihashing.digest(peer.id, 'sha2-256', callback) +exports.convertPeerId = (peer) => { + return multihashing.digest(peer.id, 'sha2-256') } /** @@ -98,28 +95,17 @@ exports.decodeBase32 = (raw) => { * * @param {Array} peers * @param {Buffer} target - * @param {function(Error, )} callback - * @returns {void} + * @returns {Promise>} */ -exports.sortClosestPeers = (peers, target, callback) => { - map(peers, (peer, cb) => { - exports.convertPeerId(peer, (err, id) => { - if (err) { - return cb(err) - } - - cb(null, { - peer: peer, - distance: distance(id, target) - }) - }) - }, (err, distances) => { - if (err) { - return callback(err) +exports.sortClosestPeers = async (peers, target) => { + const distances = await Promise.all(peers.map(async (peer) => { + const id = await exports.convertPeerId(peer) + return { + peer: peer, + distance: distance(id, target) } - - callback(null, distances.sort(exports.xorCompare).map((d) => d.peer)) - }) + })) + return distances.sort(exports.xorCompare).map((d) => d.peer) } /** @@ -150,16 +136,12 @@ exports.pathSize = (resultsWanted, numPaths) => { * * @param {Buffer} key * @param {Buffer} value - * @param {function(Error, Buffer)} callback - * @returns {void} + * @returns {Buffer} */ -exports.createPutRecord = (key, value, callback) => { +exports.createPutRecord = (key, value) => { const timeReceived = new Date() const rec = new Record(key, value, timeReceived) - - setImmediate(() => { - callback(null, rec.serialize()) - }) + return 
rec.serialize() } /** @@ -190,3 +172,52 @@ exports.logger = (id, subsystem) => { return logger } + +/** + * Creates a Promise with a timeout + * + * @param {Promise} promise + * @param {number} timeout - timeout in ms. If undefined, there is no timeout. + * @param {number} [errMsg] - error message + * @returns {Promise} promise with a timeout + * + * @private + */ +exports.promiseTimeout = (promise, timeout, errMsg) => { + if (!timeout) { + return promise + } + return Promise.race([ + promise, + new Promise((resolve, reject) => setTimeout(() => { + reject(errcode(errMsg || 'Promise timed out', 'ETIMEDOUT')) + }, timeout)) + ]) +} + +/** + * Periodically retries the function call until it succeeds or the number of + * attempts exceeds times (in which case the Promise is rejected) + * + * @param {Object} options + * @param {number} options.times - maximum number of attempts to make + * @param {number} options.interval - interval between attempts in ms + * @param {function} fn - function to attempt to call + * @returns {Promise} + * + * @private + */ +exports.retry = async (options, fn) => { + for (let i = 0; i < options.times + 1; i++) { + try { + await fn() + return + } catch (err) { + if (i === options.times) { + throw err + } + } + + await new Promise((resolve) => setTimeout(resolve, options.interval)) + } +} diff --git a/src/worker-queue.js b/src/worker-queue.js new file mode 100644 index 00000000..9690f74c --- /dev/null +++ b/src/worker-queue.js @@ -0,0 +1,118 @@ +'use strict' + +const Queue = require('p-queue') +const EventEmitter = require('events') +const debug = require('debug') + +const log = debug('libp2p:dht:worker-queue') + +/** + * WorkerQueue is a queue that executes a set of tasks. + */ +class WorkerQueue extends EventEmitter { + /** + * Queue of items that can be dequeued. 
+ * + * @typedef {SourceQueue} Object + * @param {function} dequeue - the next item to be processed + * @param {number} length - the length of the queue + */ + + /** + * Processes a task. If the work is complete, this function should return + * true (or Promise), in which case no more tasks will be processed. + * + * @typedef {TaskProcessor} function + * @param {WorkerQueue} queue + * @param {Object} task - task to be processed + * @returns {Promise} true if the work is complete + */ + + /** + * Create with a given source queue and task processor. + * + * @param {SourceQueue} source + * @param {TaskProcessor} processTask - processes a task in the queue + * @param {Object} options - options + * @param {number} options.concurrency - concurrency of processing (default: 1) + */ + constructor (source, processTask, options = {}) { + super() + this.source = source + this.processTask = processTask + this.concurrency = options.concurrency || 1 + this.q = new Queue({ concurrency: this.concurrency }) + + this.running = true + this._hydrate() + } + + /** + * Stop the queue and wait for it to finish processing + * + * @returns {Promise} + */ + stop () { + this.q.clear() + this.running = false + const onComplete = this.onComplete() + this.emit('stopped') + return onComplete + } + + /** + * Wait for the queue to complete + * + * @returns {Promise} + */ + async onComplete () { + this._onCompletePromise = this._onCompletePromise || new Promise((resolve, reject) => { + this.once('stopped', resolve) + this.once('err', reject) + }) + return this._onCompletePromise.then(() => log('drain')) + } + + _hydrate () { + log('hydrate (%d in progress, %d waiting)', this.q.pending, this.q.size) + // Note: q.pending is the number of tasks that are currently running, so we + // want to keep adding items until this equals the concurrency limit + while (this.q.pending < this.concurrency && this.source.length > 0) { + const peer = this.source.dequeue() + + // Add tasks to the queue without waiting 
for them to finish + this._addTask(peer) + } + log('hydrated (%d in progress, %d waiting)', this.q.pending, this.q.size) + } + + async _addTask (task) { + const taskPromise = this.q.add(() => this.processTask(this, task)) + + // Wait for the task to finish + let done + try { + done = await taskPromise + } catch (err) { + this.emit('err', err) + return + } + + // If the task indicates that we're done, bail out + if (done) { + this.stop() + return + } + + // Hydrate the queue with more tasks + log('unsaturated (%d in progress, %d waiting)', this.q.pending, this.q.size) + this._hydrate() + + // If no more tasks were found then we're done + if (this.q.pending === 0) { + this.stop() + } + } +} + +module.exports = WorkerQueue diff --git a/test/kad-dht.spec.js b/test/kad-dht.spec.js index dfb6949f..9cd37c8a 100644 --- a/test/kad-dht.spec.js +++ b/test/kad-dht.spec.js @@ -6,13 +6,6 @@ chai.use(require('dirty-chai')) chai.use(require('chai-checkmark')) const expect = chai.expect const sinon = require('sinon') -const series = require('async/series') -const times = require('async/times') -const parallel = require('async/parallel') -const timeout = require('async/timeout') -const retry = require('async/retry') -const each = require('async/each') -const waterfall = require('async/waterfall') const random = require('lodash.random') const Record = require('libp2p-record').Record const PeerId = require('peer-id') @@ -33,74 +26,72 @@ const createValues = require('./utils/create-values') const TestDHT = require('./utils/test-dht') // connect two dhts -function connectNoSync (a, b, callback) { +function connectNoSync (a, b) { const publicPeerId = new PeerId(b.peerInfo.id.id, null, b.peerInfo.id.pubKey) const target = new PeerInfo(publicPeerId) target.multiaddrs = b.peerInfo.multiaddrs - a.switch.dial(target, callback) + return new Promise(resolve => a.switch.dial(target, resolve)) } -function find (a, b, cb) { - retry({ times: 50, interval: 100 }, (cb) => { - 
a.routingTable.find(b.peerInfo.id, (err, match) => { - if (err) { - return cb(err) - } - if (!match) { - return cb(new Error('not found')) - } +function find (a, b) { + return kadUtils.retry({ times: 50, interval: 100 }, async () => { + const match = await a.routingTable.find(b.peerInfo.id) + if (!match) { + throw new Error('not found') + } - try { - expect(a.peerBook.get(b.peerInfo).multiaddrs.toArray()[0].toString()) - .to.eql(b.peerInfo.multiaddrs.toArray()[0].toString()) - } catch (err) { - return cb(err) - } + expect(a.peerBook.get(b.peerInfo).multiaddrs.toArray()[0].toString()) + .to.eql(b.peerInfo.multiaddrs.toArray()[0].toString()) + }) +} - cb() - }) - }, cb) +function promiseMap (n, fn) { + return Promise.all([...new Array(n)].map((a, i) => fn(i))) +} + +async function withDHTs (n, fn) { + const tdht = new TestDHT() + const dhts = await tdht.spawn(n) + await fn(dhts) + await tdht.teardown() } // connect two dhts and wait for them to have each other // in their routing table -function connect (a, b, callback) { - series([ - (cb) => connectNoSync(a, b, cb), - (cb) => find(a, b, cb), - (cb) => find(b, a, cb) - ], (err) => callback(err)) +async function connect (a, b) { + await connectNoSync(a, b) + await find(a, b) + await find(b, a) } function bootstrap (dhts) { - dhts.forEach((dht) => { - // dht.randomWalk._walk(3, 10000, () => {}) // don't need to know when it finishes + for (const dht of dhts) { dht.randomWalk.start(1, 1000) // don't need to know when it finishes - }) + } } -function waitForWellFormedTables (dhts, minPeers, avgPeers, waitTimeout, callback) { - timeout((cb) => { - retry({ times: 50, interval: 200 }, (cb) => { - let totalPeers = 0 +function waitForWellFormedTables (dhts, minPeers, avgPeers, waitTimeout) { + return kadUtils.promiseTimeout(kadUtils.retry({ times: 50, interval: 200 }, () => { + let totalPeers = 0 - const ready = dhts.map((dht) => { - const rtlen = dht.routingTable.size - totalPeers += rtlen - if (minPeers > 0 && rtlen < 
minPeers) { - return false - } - const actualAvgPeers = totalPeers / dhts.length - if (avgPeers > 0 && actualAvgPeers < avgPeers) { - return false - } - return true - }) + const ready = dhts.map((dht) => { + const rtlen = dht.routingTable.size + totalPeers += rtlen + if (minPeers > 0 && rtlen < minPeers) { + return false + } + const actualAvgPeers = totalPeers / dhts.length + if (avgPeers > 0 && actualAvgPeers < avgPeers) { + return false + } + return true + }) - const done = ready.every(Boolean) - cb(done ? null : new Error('not done yet')) - }, cb) - }, waitTimeout)(callback) + const done = ready.every(Boolean) + if (!done) { + throw new Error('not done yet') + } + }), waitTimeout) } function countDiffPeers (a, b) { @@ -114,18 +105,16 @@ describe('KadDHT', () => { let peerInfos let values - before(function (done) { + before(async function () { this.timeout(10 * 1000) - parallel([ - (cb) => createPeerInfo(3, cb), - (cb) => createValues(20, cb) - ], (err, res) => { - expect(err).to.not.exist() - peerInfos = res[0] - values = res[1] - done() - }) + const res = await Promise.all([ + createPeerInfo(3), + createValues(20) + ]) + + peerInfos = res[0] + values = res[1] }) it('create', () => { @@ -149,7 +138,7 @@ describe('KadDHT', () => { const dht = new KadDHT(sw, { validators: { ipns: { - func: (key, record, cb) => cb() + func: (key, record) => {} } }, selectors: { @@ -164,7 +153,7 @@ describe('KadDHT', () => { expect(dht.selectors).to.have.property('ipns') }) - it('should be able to start and stop', function (done) { + it('should be able to start and stop', async function () { const sw = new Switch(peerInfos[0], new PeerBook()) sw.transport.add('tcp', new TCP()) sw.connection.addStreamMuxer(Mplex) @@ -177,25 +166,18 @@ describe('KadDHT', () => { sinon.spy(dht.network, 'stop') sinon.spy(dht.randomWalk, 'stop') - series([ - (cb) => dht.start(cb), - (cb) => { - expect(dht.network.start.calledOnce).to.equal(true) - expect(dht.randomWalk.start.calledOnce).to.equal(true) + 
await dht.start() - cb() - }, - (cb) => dht.stop(cb) - ], (err) => { - expect(err).to.not.exist() - expect(dht.network.stop.calledOnce).to.equal(true) - expect(dht.randomWalk.stop.calledOnce).to.equal(true) + expect(dht.network.start.calledOnce).to.equal(true) + expect(dht.randomWalk.start.calledOnce).to.equal(true) - done() - }) + await dht.stop() + + expect(dht.network.stop.calledOnce).to.equal(true) + expect(dht.randomWalk.stop.calledOnce).to.equal(true) }) - it('should be able to start with random-walk disabled', function (done) { + it('should be able to start with random-walk disabled', async function () { const sw = new Switch(peerInfos[0], new PeerBook()) sw.transport.add('tcp', new TCP()) sw.connection.addStreamMuxer(Mplex) @@ -208,64 +190,52 @@ describe('KadDHT', () => { sinon.spy(dht.network, 'stop') sinon.spy(dht.randomWalk, 'stop') - series([ - (cb) => dht.start(cb), - (cb) => { - expect(dht.network.start.calledOnce).to.equal(true) - expect(dht.randomWalk.start.calledOnce).to.equal(false) + await dht.start() - cb() - }, - (cb) => dht.stop(cb) - ], (err) => { - expect(err).to.not.exist() - expect(dht.network.stop.calledOnce).to.equal(true) - expect(dht.randomWalk.stop.calledOnce).to.equal(true) // Should be always disabled, as it can be started using the instance + expect(dht.network.start.calledOnce).to.equal(true) + expect(dht.randomWalk.start.calledOnce).to.equal(false) - done() - }) + await dht.stop() + + expect(dht.network.stop.calledOnce).to.equal(true) + // Should always be disabled, as it can be started using the instance + expect(dht.randomWalk.stop.calledOnce).to.equal(true) }) - it('should fail to start when already started', function (done) { + it('should fail to start when already started', async function () { const sw = new Switch(peerInfos[0], new PeerBook()) sw.transport.add('tcp', new TCP()) sw.connection.addStreamMuxer(Mplex) sw.connection.reuse() const dht = new KadDHT(sw, { enabledDiscovery: false }) - series([ - (cb) => 
dht.start(cb), - (cb) => dht.start(cb) - ], (err) => { - expect(err).to.exist() - done() - }) + await dht.start() + try { + await dht.start() + } catch (err) { + return + } + expect.fail('did not throw error') }) - it('should fail to stop when was not started', function (done) { + it('should fail to stop when was not started', async function () { const sw = new Switch(peerInfos[0], new PeerBook()) sw.transport.add('tcp', new TCP()) sw.connection.addStreamMuxer(Mplex) sw.connection.reuse() const dht = new KadDHT(sw, { enabledDiscovery: false }) - series([ - (cb) => dht.stop(cb) - ], (err) => { - expect(err).to.exist() - done() - }) + try { + await dht.stop() + } catch (err) { + return + } + expect.fail('did not throw error') }) - it('should emit a peer event when a peer is connected', function (done) { + it('should emit a peer event when a peer is connected', function () { this.timeout(10 * 1000) - const tdht = new TestDHT() - - tdht.spawn(2, (err, dhts) => { - expect(err).to.not.exist() - const dhtA = dhts[0] - const dhtB = dhts[1] - + return withDHTs(2, async ([dhtA, dhtB]) => { dhtA.on('peer', (peerInfo) => { expect(peerInfo).to.exist().mark() }) @@ -274,340 +244,231 @@ describe('KadDHT', () => { expect(peerInfo).to.exist().mark() }) - connect(dhtA, dhtB, (err) => { - expect(err).to.not.exist() - }) - }) + connect(dhtA, dhtB) - expect(2).checks(done) + return new Promise((resolve) => expect(2).checks(resolve)) + }) }) - it('put - get', function (done) { + it('put - get', function () { this.timeout(10 * 1000) - const tdht = new TestDHT() + return withDHTs(2, async ([dhtA, dhtB]) => { + await connect(dhtA, dhtB) - tdht.spawn(2, (err, dhts) => { - expect(err).to.not.exist() - const dhtA = dhts[0] - const dhtB = dhts[1] - - waterfall([ - (cb) => connect(dhtA, dhtB, cb), - (cb) => dhtA.put(Buffer.from('/v/hello'), Buffer.from('world'), cb), - (cb) => dhtB.get(Buffer.from('/v/hello'), { timeout: 1000 }, cb), - (res, cb) => { - expect(res).to.eql(Buffer.from('world')) - 
cb() - } - ], (err) => { - expect(err).to.not.exist() - tdht.teardown(done) - }) + await dhtA.put(Buffer.from('/v/hello'), Buffer.from('world')) + + const res = await dhtB.get(Buffer.from('/v/hello'), { timeout: 1000 }) + + expect(res).to.eql(Buffer.from('world')) }) }) - it('put - get using key with no prefix (no selector available)', function (done) { + it('put - get using key with no prefix (no selector available)', function () { this.timeout(10 * 1000) - const tdht = new TestDHT() + return withDHTs(2, async ([dhtA, dhtB]) => { + await connect(dhtA, dhtB) - tdht.spawn(2, (err, dhts) => { - expect(err).to.not.exist() - const dhtA = dhts[0] - const dhtB = dhts[1] - - waterfall([ - (cb) => connect(dhtA, dhtB, cb), - (cb) => dhtA.put(Buffer.from('hello'), Buffer.from('world'), cb), - (cb) => dhtB.get(Buffer.from('hello'), { timeout: 1000 }, cb), - (res, cb) => { - expect(res).to.eql(Buffer.from('world')) - cb() - } - ], (err) => { - expect(err).to.not.exist() - tdht.teardown(done) - }) + await dhtA.put(Buffer.from('hello'), Buffer.from('world')) + + const res = await dhtB.get(Buffer.from('hello'), { timeout: 1000 }) + + expect(res).to.eql(Buffer.from('world')) }) }) - it('put - get using key from provided validator and selector', function (done) { + it('put - get using key from provided validator and selector', async function () { this.timeout(10 * 1000) const tdht = new TestDHT() - tdht.spawn(2, { + const dhts = await tdht.spawn(2, { validators: { ipns: { - func: (key, record, cb) => cb() + func: (key, record) => {} } }, selectors: { ipns: (key, records) => 0 } - }, (err, dhts) => { - expect(err).to.not.exist() - const dhtA = dhts[0] - const dhtB = dhts[1] - - waterfall([ - (cb) => connect(dhtA, dhtB, cb), - (cb) => dhtA.put(Buffer.from('/ipns/hello'), Buffer.from('world'), cb), - (cb) => dhtB.get(Buffer.from('/ipns/hello'), { timeout: 1000 }, cb), - (res, cb) => { - expect(res).to.eql(Buffer.from('world')) - cb() - } - ], (err) => { - expect(err).to.not.exist() - 
tdht.teardown(done) - }) }) + + const dhtA = dhts[0] + const dhtB = dhts[1] + + await connect(dhtA, dhtB) + + await dhtA.put(Buffer.from('/ipns/hello'), Buffer.from('world')) + + const res = await dhtB.get(Buffer.from('/ipns/hello'), { timeout: 1000 }) + + expect(res).to.eql(Buffer.from('world')) + + await tdht.teardown() }) - it('put - get should fail if unrecognized key prefix in get', function (done) { + it('put - get should fail if unrecognized key prefix in get', function () { this.timeout(10 * 1000) - const tdht = new TestDHT() + return withDHTs(2, async ([dhtA, dhtB]) => { + await connect(dhtA, dhtB) - tdht.spawn(2, (err, dhts) => { - expect(err).to.not.exist() - const dhtA = dhts[0] - const dhtB = dhts[1] - - waterfall([ - (cb) => connect(dhtA, dhtB, cb), - (cb) => dhtA.put(Buffer.from('/v2/hello'), Buffer.from('world'), cb), - (cb) => dhtB.get(Buffer.from('/v2/hello'), { timeout: 1000 }, cb) - ], (err) => { - expect(err).to.exist() + await dhtA.put(Buffer.from('/v2/hello'), Buffer.from('world')) + + try { + await dhtB.get(Buffer.from('/v2/hello'), { timeout: 1000 }) + } catch (err) { expect(err.code).to.eql('ERR_UNRECOGNIZED_KEY_PREFIX') - tdht.teardown(done) - }) + return + } + expect.fail('did not throw error') }) }) - it('put - get with update', function (done) { + it('put - get with update', function () { this.timeout(20 * 1000) - const tdht = new TestDHT() + return withDHTs(2, async ([dhtA, dhtB]) => { + const dhtASpy = sinon.spy(dhtA, '_putValueToPeer') - tdht.spawn(2, (err, dhts) => { - expect(err).to.not.exist() - const dhtA = dhts[0] - const dhtB = dhts[1] + await dhtA.put(Buffer.from('/v/hello'), Buffer.from('worldA')) + await dhtB.put(Buffer.from('/v/hello'), Buffer.from('worldB')) + await connect(dhtA, dhtB) - const dhtASpy = sinon.spy(dhtA, '_putValueToPeer') + const results = [] + results.push(await dhtA.get(Buffer.from('/v/hello'), { timeout: 1000 })) + results.push(await dhtB.get(Buffer.from('/v/hello'), { timeout: 1000 })) - series([ - 
(cb) => dhtA.put(Buffer.from('/v/hello'), Buffer.from('worldA'), cb), - (cb) => dhtB.put(Buffer.from('/v/hello'), Buffer.from('worldB'), cb), - (cb) => connect(dhtA, dhtB, cb) - ], (err) => { - expect(err).to.not.exist() - - series([ - (cb) => dhtA.get(Buffer.from('/v/hello'), { timeout: 1000 }, cb), - (cb) => dhtB.get(Buffer.from('/v/hello'), { timeout: 1000 }, cb) - ], (err, results) => { - expect(err).to.not.exist() - results.forEach((res) => { - expect(res).to.eql(Buffer.from('worldA')) // first is selected - }) - expect(dhtASpy.callCount).to.eql(1) - expect(dhtASpy.getCall(0).args[2].isEqual(dhtB.peerInfo.id)).to.eql(true) // inform B - tdht.teardown(done) - }) - }) + for (const res of results) { + expect(res).to.eql(Buffer.from('worldA')) // first is selected + } + + expect(dhtASpy.callCount).to.eql(1) + expect(dhtASpy.getCall(0).args[2].isEqual(dhtB.peerInfo.id)).to.eql(true) // inform B }) }) - it('provides', function (done) { + it('provides', function () { this.timeout(20 * 1000) - const tdht = new TestDHT() - - tdht.spawn(4, (err, dhts) => { - expect(err).to.not.exist() + return withDHTs(4, async (dhts) => { const addrs = dhts.map((d) => d.peerInfo.multiaddrs.toArray()[0]) const ids = dhts.map((d) => d.peerInfo.id) - series([ - (cb) => connect(dhts[0], dhts[1], cb), - (cb) => connect(dhts[1], dhts[2], cb), - (cb) => connect(dhts[2], dhts[3], cb), - (cb) => each(values, (v, cb) => { - dhts[3].provide(v.cid, cb) - }, cb), - (cb) => { - let n = 0 - each(values, (v, cb) => { - n = (n + 1) % 3 - dhts[n].findProviders(v.cid, { timeout: 5000 }, (err, provs) => { - expect(err).to.not.exist() - expect(provs).to.have.length(1) - expect(provs[0].id.id).to.be.eql(ids[3].id) - expect( - provs[0].multiaddrs.toArray()[0].toString() - ).to.equal( - addrs[3].toString() - ) - cb() - }) - }, cb) - } - ], (err) => { - expect(err).to.not.exist() - tdht.teardown(done) - }) + await connect(dhts[0], dhts[1]) + await connect(dhts[1], dhts[2]) + await connect(dhts[2], dhts[3]) + 
await Promise.all(values.map((v) => dhts[3].provide(v.cid))) + + let n = 0 + await Promise.all(values.map(async (v) => { + n = (n + 1) % 3 + const provs = await dhts[n].findProviders(v.cid, { timeout: 5000 }) + + expect(provs).to.have.length(1) + expect(provs[0].id.id).to.be.eql(ids[3].id) + expect( + provs[0].multiaddrs.toArray()[0].toString() + ).to.equal( + addrs[3].toString() + ) + })) }) }) - it('find providers', function (done) { + it('find providers', function () { this.timeout(20 * 1000) + return withDHTs(3, async (dhts) => { + const val = values[0] - const val = values[0] - const tdht = new TestDHT() + await connect(dhts[0], dhts[1]) + await connect(dhts[1], dhts[2]) + await Promise.all(dhts.map((dht) => dht.provide(val.cid))) - tdht.spawn(3, (err, dhts) => { - expect(err).to.not.exist() + const res1 = await dhts[0].findProviders(val.cid, {}) + const res2 = await dhts[0].findProviders(val.cid, { maxNumProviders: 2 }) - series([ - (cb) => connect(dhts[0], dhts[1], cb), - (cb) => connect(dhts[1], dhts[2], cb), - (cb) => each(dhts, (dht, cb) => dht.provide(val.cid, cb), cb), - (cb) => dhts[0].findProviders(val.cid, {}, cb), - (cb) => dhts[0].findProviders(val.cid, { maxNumProviders: 2 }, cb) - ], (err, res) => { - expect(err).to.not.exist() + // find providers find all the 3 providers + expect(res1).to.exist() + expect(res1).to.have.length(3) - // find providers find all the 3 providers - expect(res[3]).to.exist() - expect(res[3]).to.have.length(3) - - // find providers limited to a maxium of 2 providers - expect(res[4]).to.exist() - expect(res[4]).to.have.length(2) - - done() - }) + // find providers limited to a maxium of 2 providers + expect(res2).to.exist() + expect(res2).to.have.length(2) }) }) - it('random-walk', function (done) { + it('random-walk', async function () { this.timeout(40 * 1000) const nDHTs = 20 const tdht = new TestDHT() // random walk disabled for a manual usage - tdht.spawn(nDHTs, { enabledDiscovery: false }, (err, dhts) => { - 
expect(err).to.not.exist() - - series([ - // ring connect - (cb) => times(nDHTs, (i, cb) => { - connect(dhts[i], dhts[(i + 1) % nDHTs], cb) - }, (err) => cb(err)), - (cb) => { - bootstrap(dhts) - waitForWellFormedTables(dhts, 7, 0, 20 * 1000, cb) - } - ], (err) => { - expect(err).to.not.exist() - tdht.teardown(done) - }) + const dhts = await tdht.spawn(nDHTs, { enabledDiscovery: false }) + + // ring connect + await promiseMap(nDHTs, (i) => { + return connect(dhts[i], dhts[(i + 1) % nDHTs]) }) + + await bootstrap(dhts) + + await waitForWellFormedTables(dhts, 7, 0, 20 * 1000) + + await tdht.teardown() }) - it('layered get', function (done) { + it('layered get', function () { this.timeout(40 * 1000) - const nDHTs = 4 - const tdht = new TestDHT() + return withDHTs(4, async (dhts) => { + await connect(dhts[0], dhts[1]) + await connect(dhts[1], dhts[2]) + await connect(dhts[2], dhts[3]) - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() - - waterfall([ - (cb) => connect(dhts[0], dhts[1], cb), - (cb) => connect(dhts[1], dhts[2], cb), - (cb) => connect(dhts[2], dhts[3], cb), - (cb) => dhts[3].put( - Buffer.from('/v/hello'), - Buffer.from('world'), - cb - ), - (cb) => dhts[0].get(Buffer.from('/v/hello'), { timeout: 1000 }, cb), - (res, cb) => { - expect(res).to.eql(Buffer.from('world')) - cb() - } - ], (err) => { - expect(err).to.not.exist() - tdht.teardown(done) - }) + await dhts[3].put( + Buffer.from('/v/hello'), + Buffer.from('world') + ) + + const res = await dhts[0].get(Buffer.from('/v/hello'), { timeout: 1000 }) + expect(res).to.eql(Buffer.from('world')) }) }) - it('findPeer', function (done) { + it('findPeer', async function () { this.timeout(40 * 1000) - const nDHTs = 4 - const tdht = new TestDHT() + return withDHTs(4, async (dhts) => { + const ids = dhts.map((d) => d.peerInfo.id) - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() + await connect(dhts[0], dhts[1]) + await connect(dhts[1], dhts[2]) + await connect(dhts[2], dhts[3]) - 
const ids = dhts.map((d) => d.peerInfo.id) + const res = await dhts[0].findPeer(ids[3], { timeout: 1000 }) - waterfall([ - (cb) => connect(dhts[0], dhts[1], cb), - (cb) => connect(dhts[1], dhts[2], cb), - (cb) => connect(dhts[2], dhts[3], cb), - (cb) => dhts[0].findPeer(ids[3], { timeout: 1000 }, cb), - (res, cb) => { - expect(res.id.isEqual(ids[3])).to.eql(true) - cb() - } - ], (err) => { - expect(err).to.not.exist() - tdht.teardown(done) - }) + expect(res.id.isEqual(ids[3])).to.eql(true) }) }) - it('connect by id to with address in the peerbook ', function (done) { + it('connect by id to peer with address in the peerbook ', function () { this.timeout(20 * 1000) - const nDHTs = 2 - const tdht = new TestDHT() - - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() - const dhtA = dhts[0] - const dhtB = dhts[1] - + return withDHTs(3, async ([dhtA, dhtB]) => { const peerA = dhtA.peerInfo const peerB = dhtB.peerInfo dhtA.peerBook.put(peerB) dhtB.peerBook.put(peerA) - parallel([ - (cb) => dhtA.switch.dial(peerB.id, cb), - (cb) => dhtB.switch.dial(peerA.id, cb) - ], (err) => { - expect(err).to.not.exist() - tdht.teardown(done) - }) + return Promise.all([ + new Promise((resolve) => dhtA.switch.dial(peerB.id, resolve)), + new Promise((resolve) => dhtB.switch.dial(peerA.id, resolve)) + ]) }) }) - it('find peer query', function (done) { + it('find peer query', function () { this.timeout(40 * 1000) - const nDHTs = 101 - const tdht = new TestDHT() - - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() - + return withDHTs(101, async (dhts) => { const ids = dhts.map((d) => d.peerInfo.id) const guy = dhts[0] @@ -615,139 +476,97 @@ describe('KadDHT', () => { const val = Buffer.from('foobar') const connected = {} // indexes in others that are reachable from guy - series([ - (cb) => times(20, (i, cb) => { - times(16, (j, cb) => { - const t = 20 + random(79) - connected[t] = true - connect(others[i], others[t], cb) - }, cb) - }, cb), - (cb) => times(20, (i, 
cb) => { - connected[i] = true - connect(guy, others[i], cb) - }, cb), - (cb) => kadUtils.convertBuffer(val, (err, rtval) => { - expect(err).to.not.exist() - const rtablePeers = guy.routingTable.closestPeers(rtval, c.ALPHA) - expect(rtablePeers).to.have.length(3) - - const netPeers = guy.peerBook.getAllArray().filter((p) => p.isConnected()) - expect(netPeers).to.have.length(20) - - const rtableSet = {} - rtablePeers.forEach((p) => { - rtableSet[p.toB58String()] = true - }) - - const connectedIds = ids.slice(1).filter((id, i) => connected[i]) - - series([ - (cb) => guy.getClosestPeers(val, cb), - (cb) => kadUtils.sortClosestPeers(connectedIds, rtval, cb) - ], (err, res) => { - expect(err).to.not.exist() - const out = res[0] - const actualClosest = res[1] - - expect(out.filter((p) => !rtableSet[p.toB58String()])) - .to.not.be.empty() - - expect(out).to.have.length(20) - const exp = actualClosest.slice(0, 20) - - kadUtils.sortClosestPeers(out, rtval, (err, got) => { - expect(err).to.not.exist() - expect(countDiffPeers(exp, got)).to.eql(0) - - cb() - }) - }) + await promiseMap(20, (i) => { + return promiseMap(16, (j) => { + const t = 20 + random(79) + connected[t] = true + return connect(others[i], others[t]) }) - ], (err) => { - expect(err).to.not.exist() - tdht.teardown(done) }) + + await promiseMap(20, (i) => { + connected[i] = true + return connect(guy, others[i]) + }) + + const rtval = await kadUtils.convertBuffer(val) + + const rtablePeers = guy.routingTable.closestPeers(rtval, c.ALPHA) + expect(rtablePeers).to.have.length(3) + + const netPeers = guy.peerBook.getAllArray().filter((p) => p.isConnected()) + expect(netPeers).to.have.length(20) + + const rtableSet = {} + rtablePeers.forEach((p) => { + rtableSet[p.toB58String()] = true + }) + + const connectedIds = ids.slice(1).filter((id, i) => connected[i]) + + const out = await guy.getClosestPeers(val) + const actualClosest = await kadUtils.sortClosestPeers(connectedIds, rtval) + + expect(out.filter((p) => 
!rtableSet[p.toB58String()])) + .to.not.be.empty() + + expect(out).to.have.length(20) + const exp = actualClosest.slice(0, 20) + + const got = await kadUtils.sortClosestPeers(out, rtval) + + expect(countDiffPeers(exp, got)).to.eql(0) }) }) - it('getClosestPeers', function (done) { + it('getClosestPeers', function () { this.timeout(40 * 1000) - const nDHTs = 30 - const tdht = new TestDHT() - - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() - - // ring connect - series([ - (cb) => times(dhts.length, (i, cb) => { - connect(dhts[i], dhts[(i + 1) % dhts.length], cb) - }, cb), - (cb) => dhts[1].getClosestPeers(Buffer.from('foo'), cb) - ], (err, res) => { - expect(err).to.not.exist() - expect(res[1]).to.have.length(c.K) - tdht.teardown(done) + return withDHTs(30, async (dhts) => { + await promiseMap(dhts.length, (i) => { + return connect(dhts[i], dhts[(i + 1) % dhts.length]) }) + + const res = await dhts[1].getClosestPeers(Buffer.from('foo')) + + expect(res).to.have.length(c.K) }) }) describe('getPublicKey', () => { - it('already known', function (done) { + it('already known', function () { this.timeout(20 * 1000) - const nDHTs = 2 - const tdht = new TestDHT() - - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() - + return withDHTs(2, async (dhts) => { const ids = dhts.map((d) => d.peerInfo.id) dhts[0].peerBook.put(dhts[1].peerInfo) - dhts[0].getPublicKey(ids[1], (err, key) => { - expect(err).to.not.exist() - expect(key).to.eql(dhts[1].peerInfo.id.pubKey) - tdht.teardown(done) - }) + const key = await dhts[0].getPublicKey(ids[1]) + + expect(key).to.eql(dhts[1].peerInfo.id.pubKey) }) }) - it('connected node', function (done) { + it('connected node', function () { this.timeout(30 * 1000) - const nDHTs = 2 - const tdht = new TestDHT() + return withDHTs(2, async (dhts) => { + const ids = dhts.map((d) => d.peerInfo.id) - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() + await connect(dhts[0], dhts[1]) - const ids = dhts.map((d) 
=> d.peerInfo.id) + // remove the pub key to be sure it is fetched + const p = dhts[0].peerBook.get(ids[1]) + p.id._pubKey = null + dhts[0].peerBook.put(p, true) + const key = await dhts[0].getPublicKey(ids[1]) - waterfall([ - (cb) => connect(dhts[0], dhts[1], cb), - (cb) => { - // remove the pub key to be sure it is fetched - const p = dhts[0].peerBook.get(ids[1]) - p.id._pubKey = null - dhts[0].peerBook.put(p, true) - dhts[0].getPublicKey(ids[1], cb) - }, - (key, cb) => { - expect(key.equals(dhts[1].peerInfo.id.pubKey)).to.eql(true) - cb() - } - ], (err) => { - expect(err).to.not.exist() - tdht.teardown(done) - }) + expect(key.equals(dhts[1].peerInfo.id.pubKey)).to.eql(true) }) }) }) - it('_nearestPeersToQuery', (done) => { + it('_nearestPeersToQuery', async () => { const sw = new Switch(peerInfos[0], new PeerBook()) sw.transport.add('tcp', new TCP()) sw.connection.addStreamMuxer(Mplex) @@ -755,17 +574,14 @@ describe('KadDHT', () => { const dht = new KadDHT(sw) dht.peerBook.put(peerInfos[1]) - series([ - (cb) => dht._add(peerInfos[1], cb), - (cb) => dht._nearestPeersToQuery({ key: 'hello' }, cb) - ], (err, res) => { - expect(err).to.not.exist() - expect(res[1]).to.be.eql([peerInfos[1]]) - done() - }) + + await dht._add(peerInfos[1]) + const res = await dht._nearestPeersToQuery({ key: 'hello' }) + + expect(res).to.be.eql([peerInfos[1]]) }) - it('_betterPeersToQuery', (done) => { + it('_betterPeersToQuery', async () => { const sw = new Switch(peerInfos[0], new PeerBook()) sw.transport.add('tcp', new TCP()) sw.connection.addStreamMuxer(Mplex) @@ -775,19 +591,16 @@ describe('KadDHT', () => { dht.peerBook.put(peerInfos[1]) dht.peerBook.put(peerInfos[2]) - series([ - (cb) => dht._add(peerInfos[1], cb), - (cb) => dht._add(peerInfos[2], cb), - (cb) => dht._betterPeersToQuery({ key: 'hello' }, peerInfos[1], cb) - ], (err, res) => { - expect(err).to.not.exist() - expect(res[2]).to.be.eql([peerInfos[2]]) - done() - }) + await dht._add(peerInfos[1]) + await 
dht._add(peerInfos[2]) + + const res = await dht._betterPeersToQuery({ key: 'hello' }, peerInfos[1]) + + expect(res).to.be.eql([peerInfos[2]]) }) describe('_checkLocalDatastore', () => { - it('allow a peer record from store if recent', (done) => { + it('allow a peer record from store if recent', async () => { const sw = new Switch(peerInfos[0], new PeerBook()) sw.transport.add('tcp', new TCP()) sw.connection.addStreamMuxer(Mplex) @@ -800,17 +613,14 @@ describe('KadDHT', () => { ) record.timeReceived = new Date() - waterfall([ - (cb) => dht._putLocal(record.key, record.serialize(), cb), - (cb) => dht._checkLocalDatastore(record.key, cb) - ], (err, rec) => { - expect(err).to.not.exist() - expect(rec).to.exist('Record should not have expired') - expect(rec.value.toString()).to.equal(record.value.toString()) - done() - }) + await dht._putLocal(record.key, record.serialize()) + const rec = await dht._checkLocalDatastore(record.key) + + expect(rec).to.exist('Record should not have expired') + expect(rec.value.toString()).to.equal(record.value.toString()) }) - it('delete entries received from peers that have expired', (done) => { + + it('delete entries received from peers that have expired', async () => { const sw = new Switch(peerInfos[0], new PeerBook()) sw.transport.add('tcp', new TCP()) sw.connection.addStreamMuxer(Mplex) @@ -826,29 +636,26 @@ describe('KadDHT', () => { record.timeReceived = received - waterfall([ - (cb) => dht._putLocal(record.key, record.serialize(), cb), - (cb) => dht.datastore.get(kadUtils.bufferToKey(record.key), cb), - (lookup, cb) => { - expect(lookup).to.exist('Record should be in the local datastore') - cb() - }, - (cb) => dht._checkLocalDatastore(record.key, cb) - ], (err, rec) => { - expect(err).to.not.exist() - expect(rec).to.not.exist('Record should have expired') - - dht.datastore.get(kadUtils.bufferToKey(record.key), (err, lookup) => { - expect(err).to.exist('Should throw error for not existing') - expect(lookup).to.not.exist('Record 
should be removed from datastore') - done() - }) - }) + await dht._putLocal(record.key, record.serialize()) + const lookup = await dht.datastore.get(kadUtils.bufferToKey(record.key)) + + expect(lookup).to.exist('Record should be in the local datastore') + + const rec = await dht._checkLocalDatastore(record.key) + + expect(rec).to.not.exist('Record should have expired') + + try { + await dht.datastore.get(kadUtils.bufferToKey(record.key)) + } catch (err) { + return + } + expect.fail('Should throw error for not existing') }) }) describe('_verifyRecordLocally', () => { - it('valid record', (done) => { + it('valid record', async () => { const sw = new Switch(peerInfos[0], new PeerBook()) sw.transport.add('tcp', new TCP()) sw.connection.addStreamMuxer(Mplex) @@ -862,114 +669,94 @@ describe('KadDHT', () => { Buffer.from('world') ) - waterfall([ - (cb) => cb(null, record.serialize()), - (enc, cb) => dht._verifyRecordLocally(Record.deserialize(enc), cb) - ], done) + const enc = await record.serialize() + await dht._verifyRecordLocally(Record.deserialize(enc)) }) }) describe('errors', () => { - it('get many should fail if only has one peer', function (done) { + it('get many should fail if only has one peer', function () { this.timeout(20 * 1000) - const nDHTs = 1 - const tdht = new TestDHT() - - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() - - dhts[0].getMany('/v/hello', 5, (err) => { - expect(err).to.exist() + return withDHTs(1, async (dhts) => { + try { + await dhts[0].getMany('/v/hello', 5) + } catch (err) { expect(err.code).to.be.eql('ERR_NO_PEERS_IN_ROUTING_TABLE') - tdht.teardown(done) - }) + return + } + expect.fail('did not throw error') }) }) - it('get should handle correctly an unexpected error', function (done) { + it('get should handle correctly an unexpected error', function () { this.timeout(20 * 1000) const errCode = 'ERR_INVALID_RECORD_FAKE' const error = errcode(new Error('fake error'), errCode) - const nDHTs = 2 - const tdht = new 
TestDHT() - - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() + return withDHTs(2, async ([dhtA, dhtB]) => { + const stub = sinon.stub(dhtA, '_getValueOrPeers').throws(error) - const dhtA = dhts[0] - const dhtB = dhts[1] - const stub = sinon.stub(dhtA, '_getValueOrPeers').callsArgWithAsync(2, error) + await connect(dhtA, dhtB) - waterfall([ - (cb) => connect(dhtA, dhtB, cb), - (cb) => dhtA.get(Buffer.from('/v/hello'), { timeout: 1000 }, cb) - ], (err) => { + try { + await dhtA.get(Buffer.from('/v/hello'), { timeout: 1000 }) + } catch (err) { expect(err).to.exist() expect(err.code).to.be.eql(errCode) stub.restore() - tdht.teardown(done) - }) + return + } + expect.fail('did not throw error') }) }) - it('get should handle correctly an invalid record error and return not found', function (done) { + it('get should handle correctly an invalid record error and return not found', function () { this.timeout(20 * 1000) const error = errcode(new Error('invalid record error'), 'ERR_INVALID_RECORD') - const nDHTs = 2 - const tdht = new TestDHT() + return withDHTs(2, async ([dhtA, dhtB]) => { + const stub = sinon.stub(dhtA, '_getValueOrPeers').throws(error) - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() + await connect(dhtA, dhtB) - const dhtA = dhts[0] - const dhtB = dhts[1] - const stub = sinon.stub(dhtA, '_getValueOrPeers').callsArgWithAsync(2, error) - - waterfall([ - (cb) => connect(dhtA, dhtB, cb), - (cb) => dhtA.get(Buffer.from('/v/hello'), cb) - ], (err) => { + try { + await dhtA.get(Buffer.from('/v/hello')) + } catch (err) { expect(err).to.exist() expect(err.code).to.be.eql('ERR_NOT_FOUND') stub.restore() - tdht.teardown(done) - }) + return + } + expect.fail('did not throw error') }) }) - it('findPeer should fail if no closest peers available', function (done) { + it('findPeer should fail if no closest peers available', async function () { this.timeout(40 * 1000) - const nDHTs = 4 - const tdht = new TestDHT() + return withDHTs(4, async 
(dhts) => { + const ids = dhts.map((d) => d.peerInfo.id) - tdht.spawn(nDHTs, (err, dhts) => { - expect(err).to.not.exist() + await connect(dhts[0], dhts[1]) + await connect(dhts[1], dhts[2]) + await connect(dhts[2], dhts[3]) - const ids = dhts.map((d) => d.peerInfo.id) + const stub = sinon.stub(dhts[0].routingTable, 'closestPeers').returns([]) - waterfall([ - (cb) => connect(dhts[0], dhts[1], cb), - (cb) => connect(dhts[1], dhts[2], cb), - (cb) => connect(dhts[2], dhts[3], cb) - ], (err) => { - expect(err).to.not.exist() - const stub = sinon.stub(dhts[0].routingTable, 'closestPeers').returns([]) - - dhts[0].findPeer(ids[3], { timeout: 1000 }, (err) => { - expect(err).to.exist() - expect(err.code).to.eql('ERR_LOOKUP_FAILED') - stub.restore() - tdht.teardown(done) - }) - }) + try { + await dhts[0].findPeer(ids[3], { timeout: 1000 }) + } catch (err) { + expect(err).to.exist() + expect(err.code).to.eql('ERR_LOOKUP_FAILED') + stub.restore() + return + } + expect.fail('did not throw error') }) }) }) diff --git a/test/kad-utils.spec.js b/test/kad-utils.spec.js index 59a8d81b..c0bafd1c 100644 --- a/test/kad-utils.spec.js +++ b/test/kad-utils.spec.js @@ -7,7 +7,6 @@ const expect = chai.expect const base32 = require('base32.js') const PeerId = require('peer-id') const distance = require('xor-distance') -const waterfall = require('async/waterfall') const utils = require('../src/utils') const createPeerInfo = require('./utils/create-peer-info') @@ -26,21 +25,18 @@ describe('kad utils', () => { }) describe('convertBuffer', () => { - it('returns the sha2-256 hash of the buffer', (done) => { + it('returns the sha2-256 hash of the buffer', async () => { const buf = Buffer.from('hello world') - utils.convertBuffer(buf, (err, digest) => { - expect(err).to.not.exist() + const digest = await utils.convertBuffer(buf) - expect(digest) - .to.eql(Buffer.from('b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9', 'hex')) - done() - }) + expect(digest) + 
.to.eql(Buffer.from('b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9', 'hex')) }) }) describe('sortClosestPeers', () => { - it('sorts a list of PeerInfos', (done) => { + it('sorts a list of PeerInfos', async () => { const rawIds = [ '11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31', '11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32', @@ -48,9 +44,7 @@ describe('kad utils', () => { '11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34' ] - const ids = rawIds.map((raw) => { - return new PeerId(Buffer.from(raw)) - }) + const ids = rawIds.map((raw) => new PeerId(Buffer.from(raw))) const input = [ ids[2], @@ -59,21 +53,17 @@ describe('kad utils', () => { ids[0] ] - waterfall([ - (cb) => utils.convertPeerId(ids[0], cb), - (id, cb) => utils.sortClosestPeers(input, id, cb), - (out, cb) => { - expect( - out.map((m) => m.toB58String()) - ).to.eql([ - ids[0], - ids[3], - ids[2], - ids[1] - ].map((m) => m.toB58String())) - done() - } - ], done) + const id = await utils.convertPeerId(ids[0]) + const out = await utils.sortClosestPeers(input, id) + + expect( + out.map((m) => m.toB58String()) + ).to.eql([ + ids[0], + ids[3], + ids[2], + ids[1] + ].map((m) => m.toB58String())) }) }) @@ -94,32 +84,83 @@ describe('kad utils', () => { }) describe('keyForPublicKey', () => { - it('works', (done) => { - createPeerInfo(1, (err, peers) => { - expect(err).to.not.exist() + it('works', async () => { + const peers = await createPeerInfo(1) - expect(utils.keyForPublicKey(peers[0].id)) - .to.eql(Buffer.concat([Buffer.from('/pk/'), peers[0].id.id])) - done() - }) + expect(utils.keyForPublicKey(peers[0].id)) + .to.eql(Buffer.concat([Buffer.from('/pk/'), peers[0].id.id])) }) }) describe('fromPublicKeyKey', () => { - it('round trips', function (done) { + it('round trips', async function () { this.timeout(40 * 1000) - createPeerInfo(50, (err, peers) => { - expect(err).to.not.exist() + const peers = await createPeerInfo(50) - peers.forEach((p, i) => { - const id = p.id - 
expect(utils.isPublicKeyKey(utils.keyForPublicKey(id))).to.eql(true) - expect(utils.fromPublicKeyKey(utils.keyForPublicKey(id)).id) - .to.eql(id.id) - }) - done() + peers.forEach((p, i) => { + const id = p.id + expect(utils.isPublicKeyKey(utils.keyForPublicKey(id))).to.eql(true) + expect(utils.fromPublicKeyKey(utils.keyForPublicKey(id)).id) + .to.eql(id.id) }) }) }) + + describe('promiseTimeout', () => { + it('does not throw if promise resolves within timeout', async function () { + await utils.promiseTimeout(new Promise((resolve) => { + setTimeout(resolve(), 100) + }), 200) + }) + + it('throws if promise does not resolve within timeout', async function () { + try { + await utils.promiseTimeout(new Promise((resolve) => { + setTimeout(resolve(), 200) + }), 100) + } catch (err) { + expect(err.message).to.eql('Promise timed out') + expect(err.code).to.eql('ETIMEDOUT') + } + }) + + it('throws with custom error message', async function () { + try { + await utils.promiseTimeout(new Promise((resolve) => { + setTimeout(resolve(), 200) + }), 100, 'hello') + } catch (err) { + expect(err.message).to.eql('hello') + expect(err.code).to.eql('ETIMEDOUT') + } + }) + }) + + describe('retry', () => { + it('does not throw if function completes successfully before attempts', async function () { + let waiting = true + utils.retry({ times: 2, interval: 100 }, () => { + if (waiting) throw new Error('fail') + }) + setTimeout(() => { + waiting = false + }, 150) + }) + + it('throws if function does not complete successfully before attempts', async function () { + const timeout = setTimeout(() => { + expect.fail('did not throw') + }, 400) + + try { + await utils.retry({ times: 2, interval: 100 }, () => { + throw new Error('fail') + }) + } catch (err) { + clearTimeout(timeout) + expect(err.message).to.eql('fail') + } + }) + }) }) diff --git a/test/limited-peer-list.spec.js b/test/limited-peer-list.spec.js index fbbf2210..60b987f4 100644 --- a/test/limited-peer-list.spec.js +++ 
b/test/limited-peer-list.spec.js @@ -12,16 +12,10 @@ const createPeerInfo = require('./utils/create-peer-info') describe('LimitedPeerList', () => { let peers - before(function (done) { + before(async function () { this.timeout(10 * 1000) - createPeerInfo(5, (err, p) => { - if (err) { - return done(err) - } - peers = p - done() - }) + peers = await createPeerInfo(5) }) it('basics', () => { diff --git a/test/message.spec.js b/test/message.spec.js index f38a3f33..ac1da02b 100644 --- a/test/message.spec.js +++ b/test/message.spec.js @@ -6,7 +6,6 @@ chai.use(require('dirty-chai')) const expect = chai.expect const PeerInfo = require('peer-info') const PeerId = require('peer-id') -const map = require('async/map') const range = require('lodash.range') const random = require('lodash.random') const Record = require('libp2p-record').Record @@ -27,62 +26,59 @@ describe('Message', () => { expect(msg).to.have.property('clusterLevel', 4) }) - it('serialize & deserialize', function (done) { + it('serialize & deserialize', async function () { this.timeout(10 * 1000) - map(range(5), (n, cb) => PeerId.create({ bits: 1024 }, cb), (err, peers) => { - expect(err).to.not.exist() - - const closer = peers.slice(0, 5).map((p) => { - const info = new PeerInfo(p) - const addr = `/ip4/198.176.1.${random(198)}/tcp/1234` - info.multiaddrs.add(addr) - info.multiaddrs.add(`/ip4/100.176.1.${random(198)}`) - info.connect(addr) - - return info - }) - - const provider = peers.slice(0, 5).map((p) => { - const info = new PeerInfo(p) - info.multiaddrs.add(`/ip4/98.176.1.${random(198)}/tcp/1234`) - info.multiaddrs.add(`/ip4/10.176.1.${random(198)}`) - - return info - }) - - const msg = new Message(Message.TYPES.GET_VALUE, Buffer.from('hello'), 5) - const record = new Record(Buffer.from('hello'), Buffer.from('world')) - - msg.closerPeers = closer - msg.providerPeers = provider - msg.record = record - - const enc = msg.serialize() - const dec = Message.deserialize(enc) - - 
expect(dec.type).to.be.eql(msg.type) - expect(dec.key).to.be.eql(msg.key) - expect(dec.clusterLevel).to.be.eql(msg.clusterLevel) - expect(dec.record.serialize()).to.be.eql(record.serialize()) - expect(dec.record.key).to.eql(Buffer.from('hello')) - - expect(dec.closerPeers).to.have.length(5) - dec.closerPeers.forEach((peer, i) => { - expect(peer.id.isEqual(msg.closerPeers[i].id)).to.eql(true) - expect(peer.multiaddrs.toArray()) - .to.eql(msg.closerPeers[i].multiaddrs.toArray()) - - expect(peer.isConnected()).to.eql(peer.multiaddrs.toArray()[0]) - }) - - expect(dec.providerPeers).to.have.length(5) - dec.providerPeers.forEach((peer, i) => { - expect(peer.id.isEqual(msg.providerPeers[i].id)).to.equal(true) - expect(peer.multiaddrs.toArray()) - .to.eql(msg.providerPeers[i].multiaddrs.toArray()) - }) - - done() + + const peers = await Promise.all([...new Array(5)].map(() => PeerId.create({ bits: 1024 }))) + + const closer = peers.slice(0, 5).map((p) => { + const info = new PeerInfo(p) + const addr = `/ip4/198.176.1.${random(198)}/tcp/1234` + info.multiaddrs.add(addr) + info.multiaddrs.add(`/ip4/100.176.1.${random(198)}`) + info.connect(addr) + + return info + }) + + const provider = peers.slice(0, 5).map((p) => { + const info = new PeerInfo(p) + info.multiaddrs.add(`/ip4/98.176.1.${random(198)}/tcp/1234`) + info.multiaddrs.add(`/ip4/10.176.1.${random(198)}`) + + return info + }) + + const msg = new Message(Message.TYPES.GET_VALUE, Buffer.from('hello'), 5) + const record = new Record(Buffer.from('hello'), Buffer.from('world')) + + msg.closerPeers = closer + msg.providerPeers = provider + msg.record = record + + const enc = msg.serialize() + const dec = Message.deserialize(enc) + + expect(dec.type).to.be.eql(msg.type) + expect(dec.key).to.be.eql(msg.key) + expect(dec.clusterLevel).to.be.eql(msg.clusterLevel) + expect(dec.record.serialize()).to.be.eql(record.serialize()) + expect(dec.record.key).to.eql(Buffer.from('hello')) + + expect(dec.closerPeers).to.have.length(5) + 
dec.closerPeers.forEach((peer, i) => { + expect(peer.id.isEqual(msg.closerPeers[i].id)).to.eql(true) + expect(peer.multiaddrs.toArray()) + .to.eql(msg.closerPeers[i].multiaddrs.toArray()) + + expect(peer.isConnected()).to.eql(peer.multiaddrs.toArray()[0]) + }) + + expect(dec.providerPeers).to.have.length(5) + dec.providerPeers.forEach((peer, i) => { + expect(peer.id.isEqual(msg.providerPeers[i].id)).to.equal(true) + expect(peer.multiaddrs.toArray()) + .to.eql(msg.providerPeers[i].multiaddrs.toArray()) }) }) diff --git a/test/network.spec.js b/test/network.spec.js index 2bd5806a..493ed8a5 100644 --- a/test/network.spec.js +++ b/test/network.spec.js @@ -7,7 +7,6 @@ const expect = chai.expect const Connection = require('interface-connection').Connection const pull = require('pull-stream') const lp = require('pull-length-prefixed') -const series = require('async/series') const PeerBook = require('peer-book') const Switch = require('libp2p-switch') const TCP = require('libp2p-tcp') @@ -22,111 +21,108 @@ describe('Network', () => { let dht let peerInfos - before(function (done) { + before(async function () { this.timeout(10 * 1000) - createPeerInfo(3, (err, result) => { - if (err) { - return done(err) - } - - peerInfos = result - const sw = new Switch(peerInfos[0], new PeerBook()) - sw.transport.add('tcp', new TCP()) - sw.connection.addStreamMuxer(Mplex) - sw.connection.reuse() - dht = new KadDHT(sw) - - series([ - (cb) => sw.start(cb), - (cb) => dht.start(cb) - ], done) - }) + const result = await createPeerInfo(3) + + peerInfos = result + const sw = new Switch(peerInfos[0], new PeerBook()) + sw.transport.add('tcp', new TCP()) + sw.connection.addStreamMuxer(Mplex) + sw.connection.reuse() + dht = new KadDHT(sw) + + await new Promise(resolve => sw.start(resolve)) + await dht.start() }) - after(function (done) { + after(async function () { this.timeout(10 * 1000) - series([ - (cb) => dht.stop(cb), - (cb) => dht.switch.stop(cb) - ], done) + + await dht.stop() + await new 
Promise(resolve => dht.switch.stop(resolve)) }) describe('sendRequest', () => { - it('send and response', (done) => { - let i = 0 - const finish = () => { - if (i++ === 1) { - done() + it('send and response', () => { + return new Promise(async (resolve) => { + let i = 0 + const finish = () => { + if (i++ === 1) { + resolve() + } } - } - - const msg = new Message(Message.TYPES.PING, Buffer.from('hello'), 0) - - // mock it - dht.switch.dial = (peer, protocol, callback) => { - expect(protocol).to.eql('/ipfs/kad/1.0.0') - const msg = new Message(Message.TYPES.FIND_NODE, Buffer.from('world'), 0) - - const rawConn = { - source: pull( - pull.values([msg.serialize()]), - lp.encode() - ), - sink: pull( - lp.decode(), - pull.collect((err, res) => { - expect(err).to.not.exist() - expect(Message.deserialize(res[0]).type).to.eql(Message.TYPES.PING) - finish() - }) - ) + + const msg = new Message(Message.TYPES.PING, Buffer.from('hello'), 0) + + // mock it + dht.switch.dial = (peer, protocol, callback) => { + expect(protocol).to.eql('/ipfs/kad/1.0.0') + const msg = new Message(Message.TYPES.FIND_NODE, Buffer.from('world'), 0) + + const rawConn = { + source: pull( + pull.values([msg.serialize()]), + lp.encode() + ), + sink: pull( + lp.decode(), + pull.collect((err, res) => { + expect(err).to.not.exist() + expect(Message.deserialize(res[0]).type).to.eql(Message.TYPES.PING) + finish() + }) + ) + } + const conn = new Connection(rawConn) + callback(null, conn) } - const conn = new Connection(rawConn) - callback(null, conn) - } - dht.network.sendRequest(peerInfos[0].id, msg, (err, response) => { - expect(err).to.not.exist() + const response = await dht.network.sendRequest(peerInfos[0].id, msg) + expect(response.type).to.eql(Message.TYPES.FIND_NODE) finish() }) }) - it('timeout on no message', (done) => { - let i = 0 - const finish = () => { - if (i++ === 1) { - done() + it('timeout on no message', () => { + return new Promise(async (resolve) => { + let i = 0 + const finish = () => { + 
if (i++ === 1) { + resolve() + } } - } - - const msg = new Message(Message.TYPES.PING, Buffer.from('hello'), 0) - - // mock it - dht.switch.dial = (peer, protocol, callback) => { - expect(protocol).to.eql('/ipfs/kad/1.0.0') - const rawConn = { - // hanging - source: (end, cb) => {}, - sink: pull( - lp.decode(), - pull.collect((err, res) => { - expect(err).to.not.exist() - expect(Message.deserialize(res[0]).type).to.eql(Message.TYPES.PING) - finish() - }) - ) + + const msg = new Message(Message.TYPES.PING, Buffer.from('hello'), 0) + + // mock it + dht.switch.dial = (peer, protocol, callback) => { + expect(protocol).to.eql('/ipfs/kad/1.0.0') + const rawConn = { + // hanging + source: (end, cb) => {}, + sink: pull( + lp.decode(), + pull.collect((err, res) => { + expect(err).to.not.exist() + expect(Message.deserialize(res[0]).type).to.eql(Message.TYPES.PING) + finish() + }) + ) + } + const conn = new Connection(rawConn) + callback(null, conn) } - const conn = new Connection(rawConn) - callback(null, conn) - } - dht.network.readMessageTimeout = 100 + dht.network.readMessageTimeout = 100 - dht.network.sendRequest(peerInfos[0].id, msg, (err, response) => { - expect(err).to.exist() - expect(err.message).to.match(/timed out/) + try { + await dht.network.sendRequest(peerInfos[0].id, msg) + } catch (err) { + expect(err.message).to.match(/timed out/) + } finish() }) diff --git a/test/peer-list.spec.js b/test/peer-list.spec.js index 8deb5c2c..87809304 100644 --- a/test/peer-list.spec.js +++ b/test/peer-list.spec.js @@ -12,14 +12,8 @@ const createPeerInfo = require('./utils/create-peer-info') describe('PeerList', () => { let peers - before((done) => { - createPeerInfo(3, (err, p) => { - if (err) { - return done(err) - } - peers = p - done() - }) + before(async () => { + peers = await createPeerInfo(3) }) it('basics', () => { diff --git a/test/peer-queue.spec.js b/test/peer-queue.spec.js index 1c5eebfa..9ec16354 100644 --- a/test/peer-queue.spec.js +++ b/test/peer-queue.spec.js 
@@ -5,12 +5,11 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect const PeerId = require('peer-id') -const series = require('async/series') const PeerQueue = require('../src/peer-queue') describe('PeerQueue', () => { - it('basics', (done) => { + it('basics', async () => { const p1 = new PeerId(Buffer.from('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31')) const p2 = new PeerId(Buffer.from('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32')) const p3 = new PeerId(Buffer.from('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33')) @@ -19,33 +18,26 @@ describe('PeerQueue', () => { const peer = new PeerId(Buffer.from('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31')) - PeerQueue.fromPeerId(peer, (err, pq) => { - expect(err).to.not.exist() - - series([ - (cb) => pq.enqueue(p3, cb), - (cb) => pq.enqueue(p1, cb), - (cb) => pq.enqueue(p2, cb), - (cb) => pq.enqueue(p4, cb), - (cb) => pq.enqueue(p5, cb), - (cb) => pq.enqueue(p1, cb) - ], (err) => { - expect(err).to.not.exist() - - expect([ - pq.dequeue(), - pq.dequeue(), - pq.dequeue(), - pq.dequeue(), - pq.dequeue(), - pq.dequeue() - ].map((m) => m.toB58String())).to.be.eql([ - p1, p1, p1, p4, p3, p2 - ].map((m) => m.toB58String())) - - expect(pq.length).to.be.eql(0) - done() - }) - }) + const pq = await PeerQueue.fromPeerId(peer) + + await pq.enqueue(p3) + await pq.enqueue(p1) + await pq.enqueue(p2) + await pq.enqueue(p4) + await pq.enqueue(p5) + await pq.enqueue(p1) + + expect([ + pq.dequeue(), + pq.dequeue(), + pq.dequeue(), + pq.dequeue(), + pq.dequeue(), + pq.dequeue() + ].map((m) => m.toB58String())).to.be.eql([ + p1, p1, p1, p4, p3, p2 + ].map((m) => m.toB58String())) + + expect(pq.length).to.be.eql(0) }) }) diff --git a/test/providers.spec.js b/test/providers.spec.js index 4f7324e6..a2f79e7e 100644 --- a/test/providers.spec.js +++ b/test/providers.spec.js @@ -5,15 +5,8 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect const Store = 
require('interface-datastore').MemoryDatastore -const parallel = require('async/parallel') -const waterfall = require('async/waterfall') const CID = require('cids') const multihashing = require('multihashing-async') -const map = require('async/map') -const timesSeries = require('async/timesSeries') -const each = require('async/each') -const eachSeries = require('async/eachSeries') -const range = require('lodash.range') const LevelStore = require('datastore-level') const path = require('path') const os = require('os') @@ -26,103 +19,72 @@ const createValues = require('./utils/create-values') describe('Providers', () => { let infos - before(function (done) { + before(async function () { this.timeout(10 * 1000) - createPeerInfo(3, (err, peers) => { - if (err) { - return done(err) - } - - infos = peers - done() - }) + infos = await createPeerInfo(3) }) - it('simple add and get of providers', (done) => { + it('simple add and get of providers', async () => { const providers = new Providers(new Store(), infos[2].id) const cid = new CID('QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n') - parallel([ - (cb) => providers.addProvider(cid, infos[0].id, cb), - (cb) => providers.addProvider(cid, infos[1].id, cb) - ], (err) => { - expect(err).to.not.exist() - providers.getProviders(cid, (err, provs) => { - expect(err).to.not.exist() - expect(provs).to.be.eql([infos[0].id, infos[1].id]) - providers.stop() - - done() - }) - }) + await Promise.all([ + providers.addProvider(cid, infos[0].id), + providers.addProvider(cid, infos[1].id) + ]) + + const provs = await providers.getProviders(cid) + + expect(provs).to.be.eql([infos[0].id, infos[1].id]) + providers.stop() }) - it('more providers than space in the lru cache', (done) => { + it('more providers than space in the lru cache', async () => { const providers = new Providers(new Store(), infos[2].id, 10) - waterfall([ - (cb) => map( - range(100), - (i, cb) => multihashing(Buffer.from(`hello ${i}`), 'sha2-256', cb), - cb - ), - (hashes, 
cb) => {
-        const cids = hashes.map((h) => new CID(h))
-
-        map(cids, (cid, cb) => {
-          providers.addProvider(cid, infos[0].id, cb)
-        }, (err) => cb(err, cids))
-      },
-      (cids, cb) => {
-        map(cids, (cid, cb) => {
-          providers.getProviders(cid, cb)
-        }, (err, provs) => {
-          expect(err).to.not.exist()
-          expect(provs).to.have.length(100)
-          provs.forEach((p) => {
-            expect(p[0].id).to.be.eql(infos[0].id.id)
-          })
-          providers.stop()
-          cb()
-        })
-      }
-    ], done)
+    const hashes = await Promise.all([...new Array(100)].map((_, i) => {
+      return multihashing(Buffer.from(`hello ${i}`), 'sha2-256')
+    }))
+
+    const cids = hashes.map((h) => new CID(h))
+
+    await Promise.all(cids.map(cid => providers.addProvider(cid, infos[0].id)))
+    const provs = await Promise.all(cids.map(cid => providers.getProviders(cid)))
+
+    expect(provs).to.have.length(100)
+    for (const p of provs) {
+      expect(p[0].id).to.be.eql(infos[0].id.id)
+    }
+    providers.stop()
   })
 
-  it('expires', (done) => {
+  it('expires', async () => {
     const providers = new Providers(new Store(), infos[2].id)
     providers.cleanupInterval = 100
     providers.provideValidity = 200
 
     const cid = new CID('QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n')
-    parallel([
-      (cb) => providers.addProvider(cid, infos[0].id, cb),
-      (cb) => providers.addProvider(cid, infos[1].id, cb)
-    ], (err) => {
-      expect(err).to.not.exist()
-
-      providers.getProviders(cid, (err, provs) => {
-        expect(err).to.not.exist()
-        expect(provs).to.have.length(2)
-        expect(provs[0].id).to.be.eql(infos[0].id.id)
-        expect(provs[1].id).to.be.eql(infos[1].id.id)
-      })
-
-      setTimeout(() => {
-        providers.getProviders(cid, (err, provs) => {
-          expect(err).to.not.exist()
-          expect(provs).to.have.length(0)
-          providers.stop()
-          done()
-        })
-        // TODO: this is a timeout based check, make cleanup monitorable
-      }, 400)
-    })
+    await Promise.all([
+      providers.addProvider(cid, infos[0].id),
+      providers.addProvider(cid, infos[1].id)
+    ])
+
+    const provs = await providers.getProviders(cid)
+
+    
expect(provs).to.have.length(2) + expect(provs[0].id).to.be.eql(infos[0].id.id) + expect(provs[1].id).to.be.eql(infos[1].id.id) + + await new Promise(resolve => setTimeout(resolve, 400)) + + const provsAfter = await providers.getProviders(cid) + expect(provsAfter).to.have.length(0) + providers.stop() }) // slooow so only run when you need to - it.skip('many', (done) => { + it.skip('many', async function () { const p = path.join( os.tmpdir(), (Math.random() * 100).toString() ) @@ -130,39 +92,33 @@ describe('Providers', () => { const providers = new Providers(store, infos[2].id, 10) console.log('starting') - waterfall([ - (cb) => parallel([ - (cb) => createValues(100, cb), - (cb) => createPeerInfo(600, cb) - ], cb), - (res, cb) => { - console.log('got values and peers') - const values = res[0] - const peers = res[1] - let total = Date.now() - eachSeries(values, (v, cb) => { - eachSeries(peers, (p, cb) => { - providers.addProvider(v.cid, p.id, cb) - }, cb) - }, (err) => { - console.log('addProvider %s peers %s cids in %sms', peers.length, values.length, Date.now() - total) - expect(err).to.not.exist() - console.log('starting profile with %s peers and %s cids', peers.length, values.length) - timesSeries(3, (i, cb) => { - const start = Date.now() - each(values, (v, cb) => { - providers.getProviders(v.cid, cb) - }, (err) => { - expect(err).to.not.exist() - console.log('query %sms', (Date.now() - start)) - cb() - }) - }, cb) - }) + const res = await Promise.all([ + createValues(100), + createPeerInfo(600) + ]) + + console.log('got values and peers') + const values = res[0] + const peers = res[1] + let total = Date.now() + + for (const v of values) { + for (const p of peers) { + await providers.addProvider(v.cid, p.id) } - ], (err) => { - expect(err).to.not.exist() - store.close(done) - }) + } + + console.log('addProvider %s peers %s cids in %sms', peers.length, values.length, Date.now() - total) + console.log('starting profile with %s peers and %s cids', peers.length, 
values.length) + + for (let i = 0; i < 3; i++) { + const start = Date.now() + for (const v of values) { + await providers.getProviders(v.cid) + console.log('query %sms', (Date.now() - start)) + } + } + + await store.close() }) }) diff --git a/test/query.spec.js b/test/query.spec.js index 778ee684..4e1c52d0 100644 --- a/test/query.spec.js +++ b/test/query.spec.js @@ -19,90 +19,105 @@ describe('Query', () => { let peerInfos let dht - before(function (done) { + before(async function () { this.timeout(5 * 1000) - createPeerInfo(10, (err, result) => { - if (err) { - return done(err) - } - - peerInfos = result - const sw = new Switch(peerInfos[0], new PeerBook()) - sw.transport.add('tcp', new TCP()) - sw.connection.addStreamMuxer(Mplex) - sw.connection.reuse() - dht = new DHT(sw) + peerInfos = await createPeerInfo(10) - done() - }) + const sw = new Switch(peerInfos[0], new PeerBook()) + sw.transport.add('tcp', new TCP()) + sw.connection.addStreamMuxer(Mplex) + sw.connection.reuse() + dht = new DHT(sw) }) - it('simple run', (done) => { + it('simple run', async () => { const peer = peerInfos[0] // mock this so we can dial non existing peers dht.switch.dial = (peer, callback) => callback() let i = 0 - const query = (p, cb) => { + const query = (p) => { if (i++ === 1) { expect(p.id).to.eql(peerInfos[2].id.id) - return cb(null, { + return { value: Buffer.from('cool'), success: true - }) + } } expect(p.id).to.eql(peerInfos[1].id.id) - cb(null, { + return { closerPeers: [peerInfos[2]] - }) + } } const q = new Query(dht, peer.id.id, () => query) - q.run([peerInfos[1].id], (err, res) => { - expect(err).to.not.exist() - expect(res.paths[0].value).to.eql(Buffer.from('cool')) - expect(res.paths[0].success).to.eql(true) - expect(res.finalSet.size).to.eql(2) - done() - }) + const res = await q.run([peerInfos[1].id]) + + expect(res.paths[0].value).to.eql(Buffer.from('cool')) + expect(res.paths[0].success).to.eql(true) + expect(res.finalSet.size).to.eql(2) }) - it('returns an error if 
all queries error', (done) => { + it('does not throw an error if only some queries error', async () => { const peer = peerInfos[0] // mock this so we can dial non existing peers dht.switch.dial = (peer, callback) => callback() - const query = (p, cb) => cb(new Error('fail')) + let i = 0 + const query = (p) => { + if (i++ === 1) { + throw new Error('fail') + } + return { + closerPeers: [peerInfos[2]] + } + } const q = new Query(dht, peer.id.id, () => query) - q.run([peerInfos[1].id], (err, res) => { - expect(err).to.exist() + + await q.run([peerInfos[1].id]) + }) + + it('throws an error if all queries error', async () => { + const peer = peerInfos[0] + + // mock this so we can dial non existing peers + dht.switch.dial = (peer, callback) => callback() + + const query = (p) => { + throw new Error('fail') + } + + const q = new Query(dht, peer.id.id, () => query) + + try { + await q.run([peerInfos[1].id]) + } catch (err) { expect(err.message).to.eql('fail') - done() - }) + return + } + expect.fail('No error thrown') }) - it('only closerPeers', (done) => { + it('only closerPeers', async () => { const peer = peerInfos[0] // mock this so we can dial non existing peers dht.switch.dial = (peer, callback) => callback() - const query = (p, cb) => { - cb(null, { + const query = (p) => { + return { closerPeers: [peerInfos[2]] - }) + } } const q = new Query(dht, peer.id.id, () => query) - q.run([peerInfos[1].id], (err, res) => { - expect(err).to.not.exist() - expect(res.finalSet.size).to.eql(2) - done() - }) + const res = await q.run([peerInfos[1].id]) + + expect(res.finalSet.size).to.eql(2) }) /* @@ -123,38 +138,36 @@ describe('Query', () => { * ... 
* */ - it('uses disjoint paths', (done) => { + it('uses disjoint paths', async () => { const goodLength = 3 - createDisjointTracks(peerInfos, goodLength, (err, targetId, starts, getResponse) => { - expect(err).to.not.exist() - // mock this so we can dial non existing peers - dht.switch.dial = (peer, callback) => callback() - let badEndVisited = false - - const q = new Query(dht, targetId, (trackNum) => { - return (p, cb) => { - const response = getResponse(p, trackNum) - expect(response).to.exist() // or we aren't on the right track - if (response.end && !response.success) { - badEndVisited = true - } - if (response.success) { - expect(badEndVisited).to.eql(false) - } - cb(null, response) + const { targetId, tracks, getResponse } = await createDisjointTracks(peerInfos, goodLength) + + // mock this so we can dial non existing peers + dht.switch.dial = (peer, callback) => callback() + let badEndVisited = false + + const q = new Query(dht, targetId, (trackNum) => { + return (p) => { + const response = getResponse(p, trackNum) + expect(response).to.exist() // or we aren't on the right track + if (response.end && !response.success) { + badEndVisited = true } - }) - q.concurrency = 1 - // due to round-robin allocation of peers from starts, first - // path is good, second bad - q.run(starts, (err, res) => { - expect(err).to.not.exist() - // we should visit all nodes (except the target) - expect(res.finalSet.size).to.eql(peerInfos.length - 1) - // there should be one successful path - expect(res.paths.length).to.eql(1) - done() - }) + if (response.success) { + expect(badEndVisited).to.eql(false) + } + return response + } }) + q.concurrency = 1 + + // due to round-robin allocation of peers from tracks, first + // path is good, second bad + const res = await q.run(tracks) + + // we should visit all nodes (except the target) + expect(res.finalSet.size).to.eql(peerInfos.length - 1) + // there should be one successful path + expect(res.paths.length).to.eql(1) }) }) diff --git 
a/test/routing.spec.js b/test/routing.spec.js index 1da7f622..e68d815b 100644 --- a/test/routing.spec.js +++ b/test/routing.spec.js @@ -5,119 +5,87 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect const PeerId = require('peer-id') -const map = require('async/map') -const each = require('async/each') -const series = require('async/series') -const range = require('lodash.range') const random = require('lodash.random') const RoutingTable = require('../src/routing') const kadUtils = require('../src/utils') -function createPeerId (n, callback) { - map(range(n), (i, cb) => PeerId.create({ bits: 512 }, cb), callback) +function createPeerId (n) { + return Promise.all([...new Array(n)].map(() => PeerId.create({ bits: 512 }))) } describe('Routing Table', () => { let table - beforeEach(function (done) { + beforeEach(async function () { this.timeout(20 * 1000) - PeerId.create({ bits: 512 }, (err, id) => { - expect(err).to.not.exist() - table = new RoutingTable(id, 20) - done() - }) + const id = await PeerId.create({ bits: 512 }) + table = new RoutingTable(id, 20) }) - it('add', function (done) { + it('add', async function () { this.timeout(20 * 1000) - createPeerId(20, (err, ids) => { - expect(err).to.not.exist() - - series([ - (cb) => each(range(1000), (n, cb) => { - table.add(ids[random(ids.length - 1)], cb) - }, cb), - (cb) => each(range(20), (n, cb) => { - const id = ids[random(ids.length - 1)] - - kadUtils.convertPeerId(id, (err, key) => { - expect(err).to.not.exist() - expect(table.closestPeers(key, 5).length) - .to.be.above(0) - cb() - }) - }, cb) - ], done) - }) + const ids = await createPeerId(20) + + await Promise.all([...new Array(1000)].map(() => { + return table.add(ids[random(ids.length - 1)]) + })) + + await Promise.all([...new Array(20)].map(async () => { + const id = ids[random(ids.length - 1)] + + const key = await kadUtils.convertPeerId(id) + + expect(table.closestPeers(key, 5).length) + .to.be.above(0) + })) }) - 
it('remove', function (done) { + it('remove', async function () { this.timeout(20 * 1000) - createPeerId(10, (err, peers) => { - expect(err).to.not.exist() - - let k - series([ - (cb) => each(peers, (peer, cbEach) => table.add(peer, cbEach), cb), - (cb) => { - const id = peers[2] - kadUtils.convertPeerId(id, (err, key) => { - expect(err).to.not.exist() - k = key - expect(table.closestPeers(key, 10)).to.have.length(10) - cb() - }) - }, - (cb) => table.remove(peers[5], cb), - (cb) => { - expect(table.closestPeers(k, 10)).to.have.length(9) - expect(table.size).to.be.eql(9) - cb() - } - ], done) - }) + const peers = await createPeerId(10) + + let k + await Promise.all(peers.map((peer) => table.add(peer))) + + const id = peers[2] + const key = await kadUtils.convertPeerId(id) + + k = key + expect(table.closestPeers(key, 10)).to.have.length(10) + + await table.remove(peers[5]) + + expect(table.closestPeers(k, 10)).to.have.length(9) + expect(table.size).to.be.eql(9) }) - it('closestPeer', function (done) { + it('closestPeer', async function () { this.timeout(10 * 1000) - createPeerId(4, (err, peers) => { - expect(err).to.not.exist() - series([ - (cb) => each(peers, (peer, cb) => table.add(peer, cb), cb), - (cb) => { - const id = peers[2] - kadUtils.convertPeerId(id, (err, key) => { - expect(err).to.not.exist() - expect(table.closestPeer(key)).to.eql(id) - cb() - }) - } - ], done) - }) + const peers = await createPeerId(4) + + await Promise.all(peers.map((peer) => table.add(peer))) + + const id = peers[2] + const key = await kadUtils.convertPeerId(id) + + expect(table.closestPeer(key)).to.eql(id) }) - it('closestPeers', function (done) { + it('closestPeers', async function () { this.timeout(20 * 1000) - createPeerId(18, (err, peers) => { - expect(err).to.not.exist() - series([ - (cb) => each(peers, (peer, cb) => table.add(peer, cb), cb), - (cb) => { - const id = peers[2] - kadUtils.convertPeerId(id, (err, key) => { - expect(err).to.not.exist() - 
expect(table.closestPeers(key, 15)).to.have.length(15) - cb() - }) - } - ], done) - }) + const peers = await createPeerId(19) + + await Promise.all(peers.map((peer) => table.add(peer))) + + const id = peers[2] + const key = await kadUtils.convertPeerId(id) + + expect(table.closestPeers(key, 15)).to.have.length(15) }) }) diff --git a/test/rpc/handlers/add-provider.spec.js b/test/rpc/handlers/add-provider.spec.js index 42942ac7..095b629c 100644 --- a/test/rpc/handlers/add-provider.spec.js +++ b/test/rpc/handlers/add-provider.spec.js @@ -5,8 +5,6 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const parallel = require('async/parallel') -const waterfall = require('async/waterfall') const _ = require('lodash') const Message = require('../../../src/message') @@ -22,31 +20,22 @@ describe('rpc - handlers - AddProvider', () => { let tdht let dht - before((done) => { - parallel([ - (cb) => createPeerInfo(3, cb), - (cb) => createValues(2, cb) - ], (err, res) => { - expect(err).to.not.exist() - peers = res[0] - values = res[1] - done() - }) + before(async () => { + const res = await Promise.all([ + createPeerInfo(3), + createValues(2) + ]) + peers = res[0] + values = res[1] }) - beforeEach((done) => { + beforeEach(async () => { tdht = new TestDHT() - - tdht.spawn(1, (err, dhts) => { - expect(err).to.not.exist() - dht = dhts[0] - done() - }) + const dhts = await tdht.spawn(1) + dht = dhts[0] }) - afterEach((done) => { - tdht.teardown(done) - }) + afterEach(() => tdht.teardown()) describe('invalid messages', () => { const tests = [{ @@ -60,16 +49,18 @@ describe('rpc - handlers - AddProvider', () => { error: 'ERR_INVALID_CID' }] - tests.forEach((t) => it(t.error.toString(), (done) => { - handler(dht)(peers[0], t.message, (err) => { - expect(err).to.exist() + tests.forEach((t) => it(t.error.toString(), async () => { + try { + await handler(dht)(peers[0], t.message) + } catch (err) { expect(err.code).to.eql(t.error) - done() - }) + return 
+ } + expect.fail('expected error') })) }) - it('ignore providers not from the originator', (done) => { + it('ignore providers not from the originator', async () => { const cid = values[0].cid const msg = new Message(Message.TYPES.ADD_PROVIDER, cid.buffer, 0) @@ -82,37 +73,25 @@ describe('rpc - handlers - AddProvider', () => { other ] - waterfall([ - (cb) => handler(dht)(sender, msg, cb), - (cb) => dht.providers.getProviders(cid, cb), - (provs, cb) => { - expect(provs).to.have.length(1) - expect(provs[0].id).to.eql(sender.id.id) - const bookEntry = dht.peerBook.get(sender.id) - expect(bookEntry.multiaddrs.toArray()).to.eql( - sender.multiaddrs.toArray() - ) - cb() - } - ], done) + await handler(dht)(sender, msg) + const provs = await dht.providers.getProviders(cid) + expect(provs).to.have.length(1) + expect(provs[0].id).to.eql(sender.id.id) + const bookEntry = dht.peerBook.get(sender.id) + expect(bookEntry.multiaddrs.toArray()).to.eql(sender.multiaddrs.toArray()) }) - it('ignore providers with no multiaddrs', (done) => { + it('ignore providers with no multiaddrs', async () => { const cid = values[0].cid const msg = new Message(Message.TYPES.ADD_PROVIDER, cid.buffer, 0) const sender = _.cloneDeep(peers[0]) sender.multiaddrs.clear() msg.providerPeers = [sender] - waterfall([ - (cb) => handler(dht)(sender, msg, cb), - (cb) => dht.providers.getProviders(cid, cb), - (provs, cb) => { - expect(provs).to.have.length(1) - expect(provs[0].id).to.eql(sender.id.id) - expect(dht.peerBook.has(sender.id)).to.equal(false) - cb() - } - ], done) + await handler(dht)(sender, msg) + const provs = await dht.providers.getProviders(cid) + expect(provs).to.have.length(1) + expect(provs[0].id).to.eql(sender.id.id) + expect(dht.peerBook.has(sender.id)).to.equal(false) }) }) diff --git a/test/rpc/handlers/find-node.spec.js b/test/rpc/handlers/find-node.spec.js index 985a794b..4c62f6ad 100644 --- a/test/rpc/handlers/find-node.spec.js +++ b/test/rpc/handlers/find-node.spec.js @@ -4,7 +4,6 @@ 
const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const waterfall = require('async/waterfall') const Message = require('../../../src/message') const handler = require('../../../src/rpc/handlers/find-node') @@ -12,7 +11,6 @@ const handler = require('../../../src/rpc/handlers/find-node') const T = Message.TYPES.FIND_NODE const createPeerInfo = require('../../utils/create-peer-info') -// const createValues = require('../../utils/create-values') const TestDHT = require('../../utils/test-dht') describe('rpc - handlers - FindNode', () => { @@ -20,71 +18,50 @@ describe('rpc - handlers - FindNode', () => { let tdht let dht - before((done) => { - createPeerInfo(3, (err, res) => { - expect(err).to.not.exist() - peers = res - done() - }) + before(async () => { + peers = await createPeerInfo(3) }) - beforeEach((done) => { + beforeEach(async () => { tdht = new TestDHT() - - tdht.spawn(1, (err, dhts) => { - expect(err).to.not.exist() - dht = dhts[0] - done() - }) + const dhts = await tdht.spawn(1) + dht = dhts[0] }) - afterEach((done) => { - tdht.teardown(done) - }) + afterEach(() => tdht.teardown()) - it('returns self, if asked for self', (done) => { + it('returns self, if asked for self', async () => { const msg = new Message(T, dht.peerInfo.id.id, 0) - handler(dht)(peers[1], msg, (err, response) => { - expect(err).to.not.exist() - expect(response.closerPeers).to.have.length(1) - const peer = response.closerPeers[0] + const response = await handler(dht)(peers[1], msg) + expect(response.closerPeers).to.have.length(1) + const peer = response.closerPeers[0] - expect(peer.id.id).to.be.eql(dht.peerInfo.id.id) - done() - }) + expect(peer.id.id).to.be.eql(dht.peerInfo.id.id) }) - it('returns closer peers', (done) => { + it('returns closer peers', async () => { const msg = new Message(T, Buffer.from('hello'), 0) const other = peers[1] - waterfall([ - (cb) => dht._add(other, cb), - (cb) => handler(dht)(peers[2], msg, cb) - ], (err, response) => { 
- expect(err).to.not.exist() - expect(response.closerPeers).to.have.length(1) - const peer = response.closerPeers[0] - - expect(peer.id.id).to.be.eql(peers[1].id.id) - expect( - peer.multiaddrs.toArray() - ).to.be.eql( - peers[1].multiaddrs.toArray() - ) - - done() - }) + await dht._add(other) + const response = await handler(dht)(peers[2], msg) + + expect(response.closerPeers).to.have.length(1) + const peer = response.closerPeers[0] + + expect(peer.id.id).to.be.eql(peers[1].id.id) + expect( + peer.multiaddrs.toArray() + ).to.be.eql( + peers[1].multiaddrs.toArray() + ) }) - it('handles no peers found', (done) => { + it('handles no peers found', async () => { const msg = new Message(T, Buffer.from('hello'), 0) - handler(dht)(peers[2], msg, (err, response) => { - expect(err).to.not.exist() - expect(response.closerPeers).to.have.length(0) - done() - }) + const response = await handler(dht)(peers[2], msg) + expect(response.closerPeers).to.have.length(0) }) }) diff --git a/test/rpc/handlers/get-providers.spec.js b/test/rpc/handlers/get-providers.spec.js index f4db2cda..b48de695 100644 --- a/test/rpc/handlers/get-providers.spec.js +++ b/test/rpc/handlers/get-providers.spec.js @@ -4,8 +4,6 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const parallel = require('async/parallel') -const waterfall = require('async/waterfall') const Message = require('../../../src/message') const utils = require('../../../src/utils') @@ -23,86 +21,68 @@ describe('rpc - handlers - GetProviders', () => { let tdht let dht - before((done) => { - parallel([ - (cb) => createPeerInfo(3, cb), - (cb) => createValues(2, cb) - ], (err, res) => { - expect(err).to.not.exist() - peers = res[0] - values = res[1] - done() - }) + before(async () => { + const res = await Promise.all([ + createPeerInfo(3), + createValues(2) + ]) + peers = res[0] + values = res[1] }) - beforeEach((done) => { + beforeEach(async () => { tdht = new TestDHT() - - tdht.spawn(1, (err, dhts) 
=> { - expect(err).to.not.exist() - dht = dhts[0] - done() - }) + const dhts = await tdht.spawn(1) + dht = dhts[0] }) - afterEach((done) => { - tdht.teardown(done) - }) + afterEach(() => tdht.teardown()) - it('errors with an invalid key ', (done) => { + it('errors with an invalid key ', async () => { const msg = new Message(T, Buffer.from('hello'), 0) - handler(dht)(peers[0], msg, (err, response) => { + try { + await handler(dht)(peers[0], msg) + } catch (err) { expect(err.code).to.eql('ERR_INVALID_CID') - expect(response).to.not.exist() - done() - }) + return + } + expect.fail('did not throw') }) - it('responds with self if the value is in the datastore', (done) => { + it('responds with self if the value is in the datastore', async () => { const v = values[0] const msg = new Message(T, v.cid.buffer, 0) const dsKey = utils.bufferToKey(v.cid.buffer) - waterfall([ - (cb) => dht.datastore.put(dsKey, v.value, cb), - (cb) => handler(dht)(peers[0], msg, cb) - ], (err, response) => { - expect(err).to.not.exist() - - expect(response.key).to.be.eql(v.cid.buffer) - expect(response.providerPeers).to.have.length(1) - expect(response.providerPeers[0].id.toB58String()) - .to.eql(dht.peerInfo.id.toB58String()) + await dht.datastore.put(dsKey, v.value) + const response = await handler(dht)(peers[0], msg) - done() - }) + expect(response.key).to.be.eql(v.cid.buffer) + expect(response.providerPeers).to.have.length(1) + expect(response.providerPeers[0].id.toB58String()) + .to.eql(dht.peerInfo.id.toB58String()) }) - it('responds with listed providers and closer peers', (done) => { + it('responds with listed providers and closer peers', async () => { const v = values[0] const msg = new Message(T, v.cid.buffer, 0) const prov = peers[1].id const closer = peers[2] - waterfall([ - (cb) => dht._add(closer, cb), - (cb) => dht.providers.addProvider(v.cid, prov, cb), - (cb) => handler(dht)(peers[0], msg, cb) - ], (err, response) => { - expect(err).to.not.exist() - - 
expect(response.key).to.be.eql(v.cid.buffer) - expect(response.providerPeers).to.have.length(1) - expect(response.providerPeers[0].id.toB58String()) - .to.eql(prov.toB58String()) - - expect(response.closerPeers).to.have.length(1) - expect(response.closerPeers[0].id.toB58String()) - .to.eql(closer.id.toB58String()) - done() - }) + await dht._add(closer) + await dht.providers.addProvider(v.cid, prov) + const response = await handler(dht)(peers[0], msg) + + expect(response.key).to.be.eql(v.cid.buffer) + expect(response.providerPeers).to.have.length(1) + expect(response.providerPeers[0].id.toB58String()) + .to.eql(prov.toB58String()) + + expect(response.closerPeers).to.have.length(1) + expect(response.closerPeers[0].id.toB58String()) + .to.eql(closer.id.toB58String()) }) }) diff --git a/test/rpc/handlers/get-value.spec.js b/test/rpc/handlers/get-value.spec.js index 660cfcdf..c931cab6 100644 --- a/test/rpc/handlers/get-value.spec.js +++ b/test/rpc/handlers/get-value.spec.js @@ -4,7 +4,7 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect -const waterfall = require('async/waterfall') + const Message = require('../../../src/message') const handler = require('../../../src/rpc/handlers/get-value') const utils = require('../../../src/utils') @@ -20,123 +20,95 @@ describe('rpc - handlers - GetValue', () => { let tdht let dht - before((done) => { - createPeerInfo(2, (err, res) => { - expect(err).to.not.exist() - peers = res - done() - }) + before(async () => { + peers = await createPeerInfo(2) }) - beforeEach((done) => { + beforeEach(async () => { tdht = new TestDHT() - - tdht.spawn(1, (err, dhts) => { - expect(err).to.not.exist() - dht = dhts[0] - done() - }) + const dhts = await tdht.spawn(1) + dht = dhts[0] }) - afterEach((done) => { - tdht.teardown(done) - }) + afterEach(() => tdht.teardown()) - it('errors when missing key', (done) => { + it('errors when missing key', async () => { const msg = new Message(T, Buffer.alloc(0), 0) - 
handler(dht)(peers[0], msg, (err, response) => { + try { + await handler(dht)(peers[0], msg) + } catch (err) { expect(err.code).to.eql('ERR_INVALID_KEY') - expect(response).to.not.exist() - done() - }) + return + } + expect.fail('did not throw') }) - it('responds with a local value', (done) => { + it('responds with a local value', async () => { const key = Buffer.from('hello') const value = Buffer.from('world') const msg = new Message(T, key, 0) - waterfall([ - (cb) => dht.put(key, value, cb), - (cb) => handler(dht)(peers[0], msg, cb) - ], (err, response) => { - expect(err).to.not.exist() - expect(response.record).to.exist() - expect(response.record.key).to.eql(key) - expect(response.record.value).to.eql(value) - done() - }) + + await dht.put(key, value) + const response = await handler(dht)(peers[0], msg) + + expect(response.record).to.exist() + expect(response.record.key).to.eql(key) + expect(response.record.value).to.eql(value) }) - it('responds with closerPeers returned from the dht', (done) => { + it('responds with closerPeers returned from the dht', async () => { const key = Buffer.from('hello') const msg = new Message(T, key, 0) const other = peers[1] - waterfall([ - (cb) => dht._add(other, cb), - (cb) => handler(dht)(peers[0], msg, cb) - ], (err, response) => { - expect(err).to.not.exist() - expect(response.closerPeers).to.have.length(1) - expect( - response.closerPeers[0].id.toB58String() - ).to.be.eql(other.id.toB58String()) - done() - }) + await dht._add(other) + const response = await handler(dht)(peers[0], msg) + + expect(response.closerPeers).to.have.length(1) + expect( + response.closerPeers[0].id.toB58String() + ).to.be.eql(other.id.toB58String()) }) describe('public key', () => { - it('self', (done) => { + it('self', async () => { const key = utils.keyForPublicKey(dht.peerInfo.id) const msg = new Message(T, key, 0) - waterfall([ - (cb) => handler(dht)(peers[0], msg, cb) - ], (err, response) => { - expect(err).to.not.exist() - 
expect(response.record).to.exist() - expect(response.record.value).to.eql( - dht.peerInfo.id.pubKey.bytes - ) - done() - }) + const response = await handler(dht)(peers[0], msg) + + expect(response.record).to.exist() + expect(response.record.value).to.eql( + dht.peerInfo.id.pubKey.bytes + ) }) - it('other in peerstore', (done) => { + it('other in peerstore', async () => { const other = peers[1] const key = utils.keyForPublicKey(other.id) const msg = new Message(T, key, 0) - waterfall([ - (cb) => dht._add(other, cb), - (cb) => handler(dht)(peers[0], msg, cb) - ], (err, response) => { - expect(err).to.not.exist() - expect(response.record).to.exist() - expect(response.record.value).to.eql( - other.id.pubKey.bytes - ) - done() - }) + await dht._add(other) + const response = await handler(dht)(peers[0], msg) + + expect(response.record).to.exist() + expect(response.record.value).to.eql( + other.id.pubKey.bytes + ) }) - it('other unkown', (done) => { + it('other unkown', async () => { const other = peers[1] const key = utils.keyForPublicKey(other.id) const msg = new Message(T, key, 0) - waterfall([ - (cb) => handler(dht)(peers[0], msg, cb) - ], (err, response) => { - expect(err).to.not.exist() - expect(response.record).to.not.exist() + const response = await handler(dht)(peers[0], msg) - done() - }) + expect(response.record).to.not.exist() }) }) }) diff --git a/test/rpc/handlers/ping.spec.js b/test/rpc/handlers/ping.spec.js index f8c7d45f..d36c1aad 100644 --- a/test/rpc/handlers/ping.spec.js +++ b/test/rpc/handlers/ping.spec.js @@ -17,35 +17,23 @@ describe('rpc - handlers - Ping', () => { let tdht let dht - before((done) => { - createPeerInfo(2, (err, res) => { - expect(err).to.not.exist() - peers = res - done() - }) + before(async () => { + peers = await createPeerInfo(2) }) - beforeEach((done) => { + beforeEach(async () => { tdht = new TestDHT() - - tdht.spawn(1, (err, dhts) => { - expect(err).to.not.exist() - dht = dhts[0] - done() - }) + const dhts = await 
tdht.spawn(1) + dht = dhts[0] }) - afterEach((done) => { - tdht.teardown(done) - }) + afterEach(() => tdht.teardown()) - it('replies with the same message', (done) => { + it('replies with the same message', async () => { const msg = new Message(T, Buffer.from('hello'), 5) - handler(dht)(peers[0], msg, (err, response) => { - expect(err).to.not.exist() - expect(response).to.be.eql(msg) - done() - }) + const response = await handler(dht)(peers[0], msg) + + expect(response).to.be.eql(msg) }) }) diff --git a/test/rpc/handlers/put-value.spec.js b/test/rpc/handlers/put-value.spec.js index 3a2de5c4..7be2c5aa 100644 --- a/test/rpc/handlers/put-value.spec.js +++ b/test/rpc/handlers/put-value.spec.js @@ -5,6 +5,7 @@ const chai = require('chai') chai.use(require('dirty-chai')) const expect = chai.expect + const Record = require('libp2p-record').Record const Message = require('../../../src/message') @@ -22,37 +23,30 @@ describe('rpc - handlers - PutValue', () => { let tdht let dht - before((done) => { - createPeerInfo(2, (err, res) => { - expect(err).to.not.exist() - peers = res - done() - }) + before(async () => { + peers = await createPeerInfo(2) }) - beforeEach((done) => { + beforeEach(async () => { tdht = new TestDHT() - - tdht.spawn(1, (err, dhts) => { - expect(err).to.not.exist() - dht = dhts[0] - done() - }) + const dhts = await tdht.spawn(1) + dht = dhts[0] }) - afterEach((done) => { - tdht.teardown(done) - }) + afterEach(() => tdht.teardown()) - it('errors on missing record', (done) => { + it('errors on missing record', async () => { const msg = new Message(T, Buffer.from('hello'), 5) - handler(dht)(peers[0], msg, (err) => { + try { + await handler(dht)(peers[0], msg) + } catch (err) { expect(err.code).to.eql('ERR_EMPTY_RECORD') - done() - }) + return + } + expect.fail('did not throw') }) - it('stores the record in the datastore', (done) => { + it('stores the record in the datastore', async () => { const msg = new Message(T, Buffer.from('hello'), 5) const record = new 
Record( Buffer.from('hello'), @@ -60,23 +54,20 @@ describe('rpc - handlers - PutValue', () => { ) msg.record = record - handler(dht)(peers[1], msg, (err, response) => { - expect(err).to.not.exist() - expect(response).to.be.eql(msg) + const response = await handler(dht)(peers[1], msg) + + expect(response).to.be.eql(msg) + + const key = utils.bufferToKey(Buffer.from('hello')) + const res = await dht.datastore.get(key) + + const rec = Record.deserialize(res) - const key = utils.bufferToKey(Buffer.from('hello')) - dht.datastore.get(key, (err, res) => { - expect(err).to.not.exist() - const rec = Record.deserialize(res) + expect(rec).to.have.property('key').eql(Buffer.from('hello')) - expect(rec).to.have.property('key').eql(Buffer.from('hello')) + // make sure some time has passed + await new Promise(resolve => setTimeout(resolve, 10)) - // make sure some time has passed - setTimeout(() => { - expect(rec.timeReceived < new Date()).to.be.eql(true) - done() - }, 10) - }) - }) + expect(rec.timeReceived < new Date()).to.be.eql(true) }) }) diff --git a/test/rpc/index.spec.js b/test/rpc/index.spec.js index 16627267..35bd25ff 100644 --- a/test/rpc/index.spec.js +++ b/test/rpc/index.spec.js @@ -21,40 +21,35 @@ const createPeerInfo = require('../utils/create-peer-info') describe('rpc', () => { let peerInfos - before((done) => { - createPeerInfo(2, (err, peers) => { - if (err) { - return done(err) - } - - peerInfos = peers - done() - }) + before(async () => { + peerInfos = await createPeerInfo(2) }) describe('protocolHandler', () => { - it('calls back with the response', (done) => { - const sw = new Switch(peerInfos[0], new PeerBook()) - sw.transport.add('tcp', new TCP()) - sw.connection.addStreamMuxer(Mplex) - sw.connection.reuse() - const dht = new KadDHT(sw, { kBucketSize: 5 }) + it('returns the response', () => { + return new Promise((resolve) => { + const sw = new Switch(peerInfos[0], new PeerBook()) + sw.transport.add('tcp', new TCP()) + sw.connection.addStreamMuxer(Mplex) + 
sw.connection.reuse() + const dht = new KadDHT(sw, { kBucketSize: 5 }) - dht.peerBook.put(peerInfos[1]) + dht.peerBook.put(peerInfos[1]) - const msg = new Message(Message.TYPES.GET_VALUE, Buffer.from('hello'), 5) + const msg = new Message(Message.TYPES.GET_VALUE, Buffer.from('hello'), 5) - const conn = makeConnection(msg, peerInfos[1], (err, res) => { - expect(err).to.not.exist() - expect(res).to.have.length(1) - const msg = Message.deserialize(res[0]) - expect(msg).to.have.property('key').eql(Buffer.from('hello')) - expect(msg).to.have.property('closerPeers').eql([]) + const conn = makeConnection(msg, peerInfos[1], (err, res) => { + expect(err).to.not.exist() + expect(res).to.have.length(1) + const msg = Message.deserialize(res[0]) + expect(msg).to.have.property('key').eql(Buffer.from('hello')) + expect(msg).to.have.property('closerPeers').eql([]) - done() - }) + resolve() + }) - rpc(dht)('protocol', conn) + rpc(dht)('protocol', conn) + }) }) }) }) diff --git a/test/utils/create-disjoint-tracks.js b/test/utils/create-disjoint-tracks.js index 6dbdb7ef..96cfd7a4 100644 --- a/test/utils/create-disjoint-tracks.js +++ b/test/utils/create-disjoint-tracks.js @@ -2,32 +2,20 @@ const multihashing = require('multihashing-async') const distance = require('xor-distance') -const waterfall = require('async/waterfall') -const map = require('async/map') -function convertPeerId (peer, callback) { - multihashing.digest(peer.id, 'sha2-256', callback) +function convertPeerId (peer) { + return multihashing.digest(peer.id, 'sha2-256') } -function sortClosestPeers (peers, target, callback) { - map(peers, (peer, cb) => { - convertPeerId(peer, (err, id) => { - if (err) { - return cb(err) - } - - cb(null, { - peer: peer, - distance: distance(id, target) - }) - }) - }, (err, distances) => { - if (err) { - return callback(err) +async function sortClosestPeers (peers, target) { + const distances = await Promise.all(peers.map(async (peer) => { + const id = await convertPeerId(peer) + return { 
+ peer: peer, + distance: distance(id, target) } - - callback(null, distances.sort(xorCompare).map((d) => d.peer)) - }) + })) + return distances.sort(xorCompare).map((d) => d.peer) } function xorCompare (a, b) { @@ -39,59 +27,57 @@ function xorCompare (a, b) { * "next", a successor function for the query to use. See comment * where this is called for details. */ -function createDisjointTracks (peerInfos, goodLength, callback) { +async function createDisjointTracks (peerInfos, goodLength) { const ids = peerInfos.map((info) => info.id) const us = ids[0] let target - waterfall([ - (cb) => convertPeerId(us, cb), - (ourId, cb) => { - sortClosestPeers(ids, ourId, cb) - }, - (sorted, cb) => { - target = sorted[sorted.length - 1] - sorted = sorted.slice(1) // remove our id - const goodTrack = sorted.slice(0, goodLength) - goodTrack.push(target) // push on target - const badTrack = sorted.slice(goodLength, -1) - if (badTrack.length <= goodTrack.length) { - return cb(new Error(`insufficient number of peers; good length: ${goodTrack.length}, bad length: ${badTrack.length}`)) - } - const tracks = [goodTrack, badTrack] // array of arrays of nodes + const ourId = await convertPeerId(us) + let sorted = await sortClosestPeers(ids, ourId) + target = sorted[sorted.length - 1] + sorted = sorted.slice(1) // remove our id + const goodTrack = sorted.slice(0, goodLength) + goodTrack.push(target) // push on target + const badTrack = sorted.slice(goodLength, -1) + if (badTrack.length <= goodTrack.length) { + throw new Error(`insufficient number of peers; good length: ${goodTrack.length}, bad length: ${badTrack.length}`) + } + const tracks = [goodTrack, badTrack] // array of arrays of nodes - const next = (peer, trackNum) => { - const track = tracks[trackNum] - const pos = track.indexOf(peer) - if (pos < 0) { - return null // peer not on expected track - } + const getResponse = (peer, trackNum) => { + const track = tracks[trackNum] + const pos = track.indexOf(peer) + if (pos < 0) { + return 
null // peer not on expected track + } - const nextPos = pos + 1 - // if we're at the end of the track - if (nextPos === track.length) { - if (trackNum === 0) { // good track; success - return { - end: true, - success: true - } - } else { // bad track; dead end - return { - end: true, - closerPeers: [] - } - } - } else { - const infoIdx = ids.indexOf(track[nextPos]) - return { - closerPeers: [peerInfos[infoIdx]] - } + const nextPos = pos + 1 + // if we're at the end of the track + if (nextPos === track.length) { + if (trackNum === 0) { // good track; success + return { + end: true, + success: true + } + } else { // bad track; dead end + return { + end: true, + closerPeers: [] } } - - cb(null, target.id, [goodTrack[0], badTrack[0]], next) + } else { + const infoIdx = ids.indexOf(track[nextPos]) + return { + closerPeers: [peerInfos[infoIdx]] + } } - ], callback) + } + + return { + targetId: target.id, + tracks: [goodTrack[0], badTrack[0]], + getResponse + } } module.exports = createDisjointTracks diff --git a/test/utils/create-peer-info.js b/test/utils/create-peer-info.js index 40973deb..8d1a6fcd 100644 --- a/test/utils/create-peer-info.js +++ b/test/utils/create-peer-info.js @@ -1,15 +1,14 @@ 'use strict' -const times = require('async/times') const PeerId = require('peer-id') const PeerInfo = require('peer-info') // Creates multiple PeerInfos -function createPeerInfo (n, callback) { - times(n, (i, cb) => PeerId.create({ bits: 512 }, cb), (err, ids) => { - if (err) { return callback(err) } - callback(null, ids.map((i) => new PeerInfo(i))) - }) +function createPeerInfo (n) { + return Promise.all([...new Array(n)].map(async () => { + const id = await PeerId.create({ bits: 512 }) + return new PeerInfo(id) + })) } module.exports = createPeerInfo diff --git a/test/utils/create-values.js b/test/utils/create-values.js index 2073615e..b6f59e86 100644 --- a/test/utils/create-values.js +++ b/test/utils/create-values.js @@ -1,20 +1,15 @@ 'use strict' -const times = 
require('async/times') const multihashing = require('multihashing-async') -const waterfall = require('async/waterfall') const CID = require('cids') const crypto = require('libp2p-crypto') function createValues (n, callback) { - times(n, (i, cb) => { + return Promise.all([...new Array(n)].map(async () => { const bytes = crypto.randomBytes(32) - - waterfall([ - (cb) => multihashing(bytes, 'sha2-256', cb), - (h, cb) => cb(null, { cid: new CID(h), value: bytes }) - ], cb) - }, callback) + const h = await multihashing(bytes, 'sha2-256') + return { cid: new CID(h), value: bytes } + })) } module.exports = createValues diff --git a/test/utils/test-dht.js b/test/utils/test-dht.js index a9498c27..df939fb0 100644 --- a/test/utils/test-dht.js +++ b/test/utils/test-dht.js @@ -1,13 +1,9 @@ 'use strict' -const each = require('async/each') -const series = require('async/series') -const setImmediate = require('async/setImmediate') const PeerBook = require('peer-book') const Switch = require('libp2p-switch') const TCP = require('libp2p-tcp') const Mplex = require('libp2p-mplex') -const times = require('async/times') const createPeerInfo = require('./create-peer-info') @@ -18,69 +14,45 @@ class TestDHT { this.nodes = [] } - spawn (n, options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } - - times(n, (i, cb) => this._spawnOne(options, cb), (err, dhts) => { - if (err) { return callback(err) } - callback(null, dhts) - }) + spawn (n, options) { + return Promise.all([...new Array(n)].map(() => this._spawnOne(options))) } - _spawnOne (options, callback) { - if (typeof options === 'function') { - callback = options - options = {} - } + async _spawnOne (options) { + const peers = await createPeerInfo(1) - createPeerInfo(1, (err, peers) => { - if (err) { return callback(err) } + const p = peers[0] + p.multiaddrs.add('/ip4/127.0.0.1/tcp/0') - const p = peers[0] - p.multiaddrs.add('/ip4/127.0.0.1/tcp/0') + const sw = new Switch(p, new PeerBook()) + 
sw.transport.add('tcp', new TCP()) + sw.connection.addStreamMuxer(Mplex) + sw.connection.reuse() - const sw = new Switch(p, new PeerBook()) - sw.transport.add('tcp', new TCP()) - sw.connection.addStreamMuxer(Mplex) - sw.connection.reuse() + const dht = new KadDHT(sw, options) - const dht = new KadDHT(sw, options) + dht.validators.v = { + func (key, publicKey) {}, + sign: false + } - dht.validators.v = { - func (key, publicKey, callback) { - setImmediate(callback) - }, - sign: false - } + dht.validators.v2 = dht.validators.v // added to simulate just validators available - dht.validators.v2 = dht.validators.v // added to simulate just validators available + dht.selectors.v = (k, records) => 0 - dht.selectors.v = (k, records) => 0 + await new Promise((resolve, reject) => sw.start((err) => err ? reject(err) : resolve())) + await dht.start() - series([ - (cb) => sw.start(cb), - (cb) => dht.start(cb) - ], (err) => { - if (err) { return callback(err) } - this.nodes.push(dht) - callback(null, dht) - }) - }) + this.nodes.push(dht) + return dht } - teardown (callback) { - each(this.nodes, (n, cb) => { - series([ - (cb) => n.stop(cb), - (cb) => n.switch.stop(cb) - ], cb) - }, (err) => { - this.nodes = [] - callback(err) - }) + async teardown () { + await Promise.all(this.nodes.map(async (n) => { + await n.stop() + return new Promise((resolve, reject) => n.switch.stop((err) => err ? 
reject(err) : resolve())) + })) + this.nodes = [] } } diff --git a/test/worker-queue.spec.js b/test/worker-queue.spec.js new file mode 100644 index 00000000..67147b95 --- /dev/null +++ b/test/worker-queue.spec.js @@ -0,0 +1,127 @@ +/* eslint-env mocha */ +'use strict' + +const chai = require('chai') +chai.use(require('dirty-chai')) +const expect = chai.expect + +const WorkerQueue = require('../src/worker-queue') + +describe('WorkerQueue', () => { + it('basics', async () => { + const items = [1, 2, 3, 4] + const taskQueue = { + dequeue () { + return items.shift() + }, + get length () { + return items.length + } + } + + const processTask = (q, task) => null + const wq = new WorkerQueue(taskQueue, processTask) + + await wq.onComplete() + + expect(items.length).to.be.eql(0) + }) + + it('can stop before completion', async () => { + const items = [1, 2, 3, 4] + const taskQueue = { + dequeue () { + return new Promise((resolve) => { + const item = items.shift() + setTimeout(() => resolve(item), 100) + }) + }, + get length () { + return items.length + } + } + + const processTask = async (q, task) => { + await task + } + const wq = new WorkerQueue(taskQueue, processTask) + + // Tasks take 100ms to process, so if we stop after 200ms + // there should be at least one left in the queue + setTimeout(() => wq.stop(), 200) + + await wq.onComplete() + expect(items.length).to.be.gte(1) + }) + + it('can run multiple tasks concurrently', async () => { + const items = [1, 2, 3, 4, 5, 6, 7, 8, 9] + const taskQueue = { + dequeue () { + return new Promise((resolve) => { + const item = items.shift() + setTimeout(() => resolve(item), 0) + }) + }, + get length () { + return items.length + } + } + + const processTask = async (q, task) => { + await task + } + const wq = new WorkerQueue(taskQueue, processTask, { + concurrency: 4 + }) + + await wq.onComplete() + expect(items.length).to.be.eql(0) + }) + + it('can return true to stop before completion', async () => { + const items = [1, 2, 3, 4] + 
const taskQueue = { + dequeue () { + return items.shift() + }, + get length () { + return items.length + } + } + + const processTask = async (q, task) => { + return (await task) === 2 + } + const wq = new WorkerQueue(taskQueue, processTask) + + await wq.onComplete() + expect(items.length).to.be.eql(2) + }) + + it('error thrown from task is thrown from onComplete()', async () => { + const items = [1, 2, 3, 4] + const taskQueue = { + dequeue () { + return items.shift() + }, + get length () { + return items.length + } + } + + const processTask = async (q, task) => { + if ((await task) === 2) { + throw new Error('Bad task') + } + } + const wq = new WorkerQueue(taskQueue, processTask) + + try { + await wq.onComplete() + } catch (err) { + expect(err.message).to.be.eql('Bad task') + } + expect(items.length).to.be.eql(2) + }) +})