diff --git a/.eslintrc b/.eslintrc index 5b6e8f7532..bc8d334e5e 100644 --- a/.eslintrc +++ b/.eslintrc @@ -111,6 +111,7 @@ "@typescript-eslint/no-misused-promises": ["error", { "checksVoidReturn": false }], + "@typescript-eslint/await-thenable": ["error"], "@typescript-eslint/naming-convention": [ "error", { diff --git a/package-lock.json b/package-lock.json index ed112e333e..d0a5a87658 100644 --- a/package-lock.json +++ b/package-lock.json @@ -55,7 +55,7 @@ "@types/jest": "^27.0.2", "@types/nexpect": "^0.4.31", "@types/node": "^16.11.7", - "@types/node-forge": "^0.9.7", + "@types/node-forge": "^0.10.4", "@types/pako": "^1.0.2", "@types/prompts": "^2.0.13", "@types/readable-stream": "^2.3.11", @@ -2647,9 +2647,10 @@ "license": "MIT" }, "node_modules/@types/node-forge": { - "version": "0.9.10", + "version": "0.10.10", + "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-0.10.10.tgz", + "integrity": "sha512-iixn5bedlE9fm/5mN7fPpXraXlxCVrnNWHZekys8c5fknridLVWGnNRqlaWpenwaijIuB3bNI0lEOm+JD6hZUA==", "dev": true, - "license": "MIT", "dependencies": { "@types/node": "*" } @@ -3374,6 +3375,7 @@ }, "node_modules/balanced-match": { "version": "1.0.2", + "dev": true, "license": "MIT" }, "node_modules/base64-js": { @@ -3454,6 +3456,7 @@ }, "node_modules/brace-expansion": { "version": "1.1.11", + "dev": true, "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", @@ -3774,6 +3777,7 @@ }, "node_modules/concat-map": { "version": "0.0.1", + "dev": true, "license": "MIT" }, "node_modules/console-control-strings": { @@ -5174,6 +5178,7 @@ }, "node_modules/fs.realpath": { "version": "1.0.0", + "dev": true, "license": "ISC" }, "node_modules/function-bind": { @@ -5309,6 +5314,7 @@ }, "node_modules/glob": { "version": "7.2.0", + "dev": true, "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", @@ -5645,6 +5651,7 @@ }, "node_modules/inflight": { "version": "1.0.6", + "dev": true, "license": "ISC", "dependencies": { "once": "^1.3.0", @@ -8103,6 +8110,7 @@ }, "node_modules/minimatch": { "version": "3.1.2", + "dev": true, "license": "ISC", "dependencies": { "brace-expansion": "^1.1.7" @@ -8628,6 +8636,7 @@ }, "node_modules/path-is-absolute": { "version": "1.0.1", + "dev": true, "license": "MIT", "engines": { "node": ">=0.10.0" @@ -9372,6 +9381,7 @@ }, "node_modules/rimraf": { "version": "3.0.2", + "dev": true, "license": "ISC", "dependencies": { "glob": "^7.1.3" @@ -12455,7 +12465,9 @@ "version": "16.11.35" }, "@types/node-forge": { - "version": "0.9.10", + "version": "0.10.10", + "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-0.10.10.tgz", + "integrity": "sha512-iixn5bedlE9fm/5mN7fPpXraXlxCVrnNWHZekys8c5fknridLVWGnNRqlaWpenwaijIuB3bNI0lEOm+JD6hZUA==", "dev": true, "requires": { "@types/node": "*" @@ -12918,7 +12930,8 @@ } }, "balanced-match": { - "version": "1.0.2" + "version": "1.0.2", + "dev": true }, "base64-js": { "version": "1.5.1" @@ -12964,6 +12977,7 @@ }, "brace-expansion": { "version": "1.1.11", + "dev": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -13156,7 +13170,8 @@ "version": "8.3.0" }, "concat-map": { - "version": "0.0.1" + "version": "0.0.1", + "dev": true }, "console-control-strings": { "version": "1.1.0", @@ -14086,7 +14101,8 @@ } }, "fs.realpath": { - "version": "1.0.0" + "version": "1.0.0", + "dev": true }, "function-bind": { "version": "1.1.1" @@ -14170,6 +14186,7 @@ }, "glob": { "version": "7.2.0", + "dev": true, "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -14363,6 +14380,7 @@ }, 
"inflight": { "version": "1.0.6", + "dev": true, "requires": { "once": "^1.3.0", "wrappy": "1" @@ -15912,6 +15930,7 @@ }, "minimatch": { "version": "3.1.2", + "dev": true, "requires": { "brace-expansion": "^1.1.7" } @@ -16235,7 +16254,8 @@ "dev": true }, "path-is-absolute": { - "version": "1.0.1" + "version": "1.0.1", + "dev": true }, "path-key": { "version": "3.1.1" @@ -16694,6 +16714,7 @@ }, "rimraf": { "version": "3.0.2", + "dev": true, "requires": { "glob": "^7.1.3" } diff --git a/package.json b/package.json index 292dff2e19..83a5806a29 100644 --- a/package.json +++ b/package.json @@ -116,7 +116,7 @@ "@types/jest": "^27.0.2", "@types/nexpect": "^0.4.31", "@types/node": "^16.11.7", - "@types/node-forge": "^0.9.7", + "@types/node-forge": "^0.10.4", "@types/pako": "^1.0.2", "@types/prompts": "^2.0.13", "@types/readable-stream": "^2.3.11", diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index 68e3b55718..e3f033c710 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -1,6 +1,6 @@ import type { FileSystem } from './types'; import type { PolykeyWorkerManagerInterface } from './workers/types'; -import type { Host, Port } from './network/types'; +import type { ConnectionData, Host, Port } from './network/types'; import type { SeedNodes } from './nodes/types'; import type { KeyManagerChangeData } from './keys/types'; import path from 'path'; @@ -8,6 +8,8 @@ import process from 'process'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { CreateDestroyStartStop } from '@matrixai/async-init/dist/CreateDestroyStartStop'; +import Queue from './nodes/Queue'; +import * as networkUtils from './network/utils'; import KeyManager from './keys/KeyManager'; import Status from './status/Status'; import Schema from './schema/Schema'; @@ -59,8 +61,10 @@ class PolykeyAgent { */ public static readonly eventSymbols = { [KeyManager.name]: Symbol(KeyManager.name), + [Proxy.name]: Symbol(Proxy.name), } as { readonly KeyManager: unique symbol; + readonly Proxy: unique symbol; }; public static async createPolykeyAgent({ @@ -84,6 +88,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, + queue, nodeConnectionManager, nodeManager, discovery, @@ -129,6 +134,7 @@ class PolykeyAgent { gestaltGraph?: GestaltGraph; proxy?: Proxy; nodeGraph?: NodeGraph; + queue?: Queue; nodeConnectionManager?: NodeConnectionManager; nodeManager?: NodeManager; discovery?: Discovery; @@ -266,6 +272,8 @@ class PolykeyAgent { proxy ?? new Proxy({ ...proxyConfig_, + connectionEstablishedCallback: (data) => + events.emitAsync(PolykeyAgent.eventSymbols.Proxy, data), logger: logger.getChild(Proxy.name), }); nodeGraph = @@ -276,12 +284,18 @@ class PolykeyAgent { keyManager, logger: logger.getChild(NodeGraph.name), })); + queue = + queue ?? + new Queue({ + logger: logger.getChild(Queue.name), + }); nodeConnectionManager = nodeConnectionManager ?? new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, seedNodes, ...nodeConnectionManagerConfig_, logger: logger.getChild(NodeConnectionManager.name), @@ -294,8 +308,10 @@ class PolykeyAgent { keyManager, nodeGraph, nodeConnectionManager, + queue, logger: logger.getChild(NodeManager.name), }); + await nodeManager.start(); discovery = discovery ?? 
(await Discovery.createDiscovery({ @@ -379,6 +395,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, + queue, nodeConnectionManager, nodeManager, discovery, @@ -411,6 +428,7 @@ class PolykeyAgent { public readonly gestaltGraph: GestaltGraph; public readonly proxy: Proxy; public readonly nodeGraph: NodeGraph; + public readonly queue: Queue; public readonly nodeConnectionManager: NodeConnectionManager; public readonly nodeManager: NodeManager; public readonly discovery: Discovery; @@ -435,6 +453,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, + queue, nodeConnectionManager, nodeManager, discovery, @@ -458,6 +477,7 @@ class PolykeyAgent { gestaltGraph: GestaltGraph; proxy: Proxy; nodeGraph: NodeGraph; + queue: Queue; nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; discovery: Discovery; @@ -483,6 +503,7 @@ class PolykeyAgent { this.proxy = proxy; this.discovery = discovery; this.nodeGraph = nodeGraph; + this.queue = queue; this.nodeConnectionManager = nodeConnectionManager; this.nodeManager = nodeManager; this.vaultManager = vaultManager; @@ -530,7 +551,7 @@ class PolykeyAgent { await this.status.updateStatusLive({ nodeId: data.nodeId, }); - await this.nodeManager.refreshBuckets(); + await this.nodeManager.resetBuckets(); const tlsConfig = { keyPrivatePem: keysUtils.privateKeyToPem( data.rootKeyPair.privateKey, @@ -542,6 +563,31 @@ class PolykeyAgent { this.logger.info(`${KeyManager.name} change propagated`); }, ); + this.events.on( + PolykeyAgent.eventSymbols.Proxy, + async (data: ConnectionData) => { + if (data.type === 'reverse') { + const address = networkUtils.buildAddress( + data.remoteHost, + data.remotePort, + ); + const nodeIdEncoded = nodesUtils.encodeNodeId(data.remoteNodeId); + this.logger.info( + `Reverse connection adding ${nodeIdEncoded}:${address} to ${NodeGraph.name}`, + ); + // Reverse connection was established and authenticated, + // add it to the node graph + await this.nodeManager.setNode( + data.remoteNodeId, + { + host: data.remoteHost, + port: data.remotePort, + }, + false, + ); + } + }, + ); const networkConfig_ = { ...config.defaults.networkConfig, ...utils.filterEmptyObject(networkConfig), @@ -620,9 +666,11 @@ class PolykeyAgent { proxyPort: networkConfig_.proxyPort, tlsConfig, }); - await this.nodeConnectionManager.start(); + await this.queue.start(); + await this.nodeManager.start(); + await this.nodeConnectionManager.start({ nodeManager: this.nodeManager }); await this.nodeGraph.start({ fresh }); - await this.nodeConnectionManager.syncNodeGraph(); + await this.nodeConnectionManager.syncNodeGraph(false); await this.discovery.start({ fresh }); await this.vaultManager.start({ fresh }); await this.notificationsManager.start({ fresh }); @@ -642,6 +690,7 @@ class PolykeyAgent { this.logger.info(`Started ${this.constructor.name}`); } catch (e) { this.logger.warn(`Failed Starting ${this.constructor.name}`); + this.events.removeAllListeners(); await this.status?.beginStop({ pid: process.pid }); await this.sessionManager?.stop(); await this.notificationsManager?.stop(); @@ -658,7 +707,6 @@ class PolykeyAgent { await this.keyManager?.stop(); await this.schema?.stop(); await this.status?.stop({}); - this.events.removeAllListeners(); throw e; } } @@ -668,6 +716,7 @@ class PolykeyAgent { */ public async stop() { this.logger.info(`Stopping ${this.constructor.name}`); + this.events.removeAllListeners(); await this.status.beginStop({ pid: process.pid }); await this.sessionManager.stop(); await this.notificationsManager.stop(); @@ -675,6 
+724,8 @@ class PolykeyAgent { await this.discovery.stop(); await this.nodeConnectionManager.stop(); await this.nodeGraph.stop(); + await this.nodeManager.stop(); + await this.queue.stop(); await this.proxy.stop(); await this.grpcServerAgent.stop(); await this.grpcServerClient.stop(); @@ -686,7 +737,6 @@ await this.keyManager.stop(); await this.schema.stop(); await this.status.stop({}); - this.events.removeAllListeners(); this.logger.info(`Stopped ${this.constructor.name}`); } diff --git a/src/PolykeyClient.ts b/src/PolykeyClient.ts index b124feefae..bea2b830b6 100644 --- a/src/PolykeyClient.ts +++ b/src/PolykeyClient.ts @@ -1,4 +1,4 @@ -import type { FileSystem } from './types'; +import type { FileSystem, Timer } from './types'; import type { NodeId } from './nodes/types'; import type { Host, Port } from './network/types'; @@ -29,7 +29,7 @@ class PolykeyClient { nodePath = config.defaults.nodePath, session, grpcClient, - timeout, + timer, fs = require('fs'), logger = new Logger(this.name), fresh = false, }: { @@ -38,7 +38,7 @@ host: Host; port: Port; nodePath?: string; - timeout?: number; + timer?: Timer; session?: Session; grpcClient?: GRPCClientClient; fs?: FileSystem; @@ -66,7 +66,7 @@ port, tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, session, - timeout, + timer, logger: logger.getChild(GRPCClientClient.name), })); const pkClient = new PolykeyClient({ diff --git a/src/agent/GRPCClientAgent.ts b/src/agent/GRPCClientAgent.ts index bb45937855..db94979db7 100644 --- a/src/agent/GRPCClientAgent.ts +++ b/src/agent/GRPCClientAgent.ts @@ -10,6 +10,7 @@ import type * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import type * as vaultsPB from '../proto/js/polykey/v1/vaults/vaults_pb'; import type * as nodesPB from '../proto/js/polykey/v1/nodes/nodes_pb'; import type * as notificationsPB from '../proto/js/polykey/v1/notifications/notifications_pb'; +import type { Timer } from '../types'; import Logger from '@matrixai/logger'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import * as agentErrors from './errors'; @@ -32,7 +33,7 @@ class GRPCClientAgent extends GRPCClient { port, tlsConfig, proxyConfig, - timeout = Infinity, + timer, destroyCallback = async () => {}, logger = new Logger(this.name), }: { @@ -41,7 +42,7 @@ port: Port; tlsConfig?: Partial<TLSConfig>; proxyConfig?: ProxyConfig; - timeout?: number; + timer?: Timer; destroyCallback?: () => Promise<void>; logger?: Logger; }): Promise<GRPCClientAgent> { @@ -53,7 +54,7 @@ port, tlsConfig, proxyConfig, - timeout, + timer, logger, }); const grpcClientAgent = new GRPCClientAgent({ diff --git a/src/agent/service/nodesClosestLocalNodesGet.ts b/src/agent/service/nodesClosestLocalNodesGet.ts index 5c2c0e2049..36a172b125 100644 --- a/src/agent/service/nodesClosestLocalNodesGet.ts +++ b/src/agent/service/nodesClosestLocalNodesGet.ts @@ -1,6 +1,6 @@ import type * as grpc from '@grpc/grpc-js'; +import type { NodeGraph } from '../../nodes'; import type { DB } from '@matrixai/db'; -import type NodeConnectionManager from '../../nodes/NodeConnectionManager'; import type { NodeId } from '../../nodes/types'; import type Logger from '@matrixai/logger'; import * as grpcUtils from '../../grpc/utils'; @@ -16,11 +16,11 @@ import * as agentUtils from '../utils'; * to some provided node ID.
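+ * This is the receiving side of Kademlia's FIND_NODE RPC; with this change it
+ * reads directly from the NodeGraph (via getClosestNodes) instead of going
+ * through the NodeConnectionManager.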
*/ function nodesClosestLocalNodesGet({ - nodeConnectionManager, + nodeGraph, db, logger, }: { - nodeConnectionManager: NodeConnectionManager; + nodeGraph: NodeGraph; db: DB; logger: Logger; }) { @@ -48,20 +48,16 @@ // Get all local nodes that are closest to the target node from the request const closestNodes = await db.withTransactionF( async (tran) => - await nodeConnectionManager.getClosestLocalNodes( - nodeId, - undefined, - tran, - ), + await nodeGraph.getClosestNodes(nodeId, undefined, tran), ); - for (const node of closestNodes) { + for (const [nodeId, nodeData] of closestNodes) { const addressMessage = new nodesPB.Address(); - addressMessage.setHost(node.address.host); - addressMessage.setPort(node.address.port); + addressMessage.setHost(nodeData.address.host); + addressMessage.setPort(nodeData.address.port); // Add the node to the response's map (mapping of node ID -> node address) response .getNodeTableMap() - .set(nodesUtils.encodeNodeId(node.id), addressMessage); + .set(nodesUtils.encodeNodeId(nodeId), addressMessage); } callback(null, response); return; diff --git a/src/bin/nodes/CommandAdd.ts b/src/bin/nodes/CommandAdd.ts index fdf49f48e8..49ea3105a3 100644 --- a/src/bin/nodes/CommandAdd.ts +++ b/src/bin/nodes/CommandAdd.ts @@ -18,6 +18,8 @@ class CommandAdd extends CommandPolykey { this.addOption(binOptions.nodeId); this.addOption(binOptions.clientHost); this.addOption(binOptions.clientPort); + this.addOption(binOptions.forceNodeAdd); + this.addOption(binOptions.noPing); this.action(async (nodeId: NodeId, host: Host, port: Port, options) => { const { default: PolykeyClient } = await import('../../PolykeyClient'); const nodesUtils = await import('../../nodes/utils'); @@ -46,13 +48,15 @@ port: clientOptions.clientPort, logger: this.logger.getChild(PolykeyClient.name), }); - const nodeAddressMessage = new nodesPB.NodeAddress(); - nodeAddressMessage.setNodeId(nodesUtils.encodeNodeId(nodeId)); - nodeAddressMessage.setAddress( + const nodeAddMessage = new nodesPB.NodeAdd(); + nodeAddMessage.setNodeId(nodesUtils.encodeNodeId(nodeId)); + nodeAddMessage.setAddress( new nodesPB.Address().setHost(host).setPort(port), ); + nodeAddMessage.setForce(options.force); + nodeAddMessage.setPing(options.ping); await binUtils.retryAuthentication( - (auth) => pkClient.grpcClient.nodesAdd(nodeAddressMessage, auth), + (auth) => pkClient.grpcClient.nodesAdd(nodeAddMessage, auth), meta, ); } finally { diff --git a/src/bin/nodes/CommandGetAll.ts b/src/bin/nodes/CommandGetAll.ts new file mode 100644 index 0000000000..243991fc98 --- /dev/null +++ b/src/bin/nodes/CommandGetAll.ts @@ -0,0 +1,77 @@ +import type PolykeyClient from '../../PolykeyClient'; +import type nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; +import CommandPolykey from '../CommandPolykey'; +import * as binUtils from '../utils'; +import * as binOptions from '../utils/options'; +import * as binProcessors from '../utils/processors'; + +class CommandGetAll extends CommandPolykey { + constructor(...args: ConstructorParameters<typeof CommandPolykey>) { + super(...args); + this.name('getall'); + this.description('Get all Nodes from Node Graph'); + this.addOption(binOptions.nodeId); + this.addOption(binOptions.clientHost); + this.addOption(binOptions.clientPort); + this.action(async (options) => { + const { default: PolykeyClient } = await import('../../PolykeyClient'); + const utilsPB = await import('../../proto/js/polykey/v1/utils/utils_pb'); + + const clientOptions = await
binProcessors.processClientOptions( + options.nodePath, + options.nodeId, + options.clientHost, + options.clientPort, + this.fs, + this.logger.getChild(binProcessors.processClientOptions.name), + ); + const meta = await binProcessors.processAuthentication( + options.passwordFile, + this.fs, + ); + let pkClient: PolykeyClient; + this.exitHandlers.handlers.push(async () => { + if (pkClient != null) await pkClient.stop(); + }); + let result: nodesPB.NodeBuckets; + try { + pkClient = await PolykeyClient.createPolykeyClient({ + nodePath: options.nodePath, + nodeId: clientOptions.nodeId, + host: clientOptions.clientHost, + port: clientOptions.clientPort, + logger: this.logger.getChild(PolykeyClient.name), + }); + const emptyMessage = new utilsPB.EmptyMessage(); + result = await binUtils.retryAuthentication( + (auth) => pkClient.grpcClient.nodesGetAll(emptyMessage, auth), + meta, + ); + let output: any = {}; + for (const [bucketIndex, bucket] of result.getBucketsMap().entries()) { + output[bucketIndex] = {}; + for (const [encodedId, address] of bucket + .getNodeTableMap() + .entries()) { + output[bucketIndex][encodedId] = {}; + output[bucketIndex][encodedId].host = address.getHost(); + output[bucketIndex][encodedId].port = address.getPort(); + } + } + if (options.format === 'human') { + output = [result.getBucketsMap().getEntryList()]; + } + process.stdout.write( + binUtils.outputFormatter({ + type: options.format === 'json' ? 'json' : 'list', + data: output, + }), + ); + } finally { + if (pkClient! != null) await pkClient.stop(); + } + }); + } +} + +export default CommandGetAll; diff --git a/src/bin/nodes/CommandNodes.ts b/src/bin/nodes/CommandNodes.ts index 6827d01f32..0866a088fa 100644 --- a/src/bin/nodes/CommandNodes.ts +++ b/src/bin/nodes/CommandNodes.ts @@ -2,6 +2,7 @@ import CommandAdd from './CommandAdd'; import CommandClaim from './CommandClaim'; import CommandFind from './CommandFind'; import CommandPing from './CommandPing'; +import CommandGetAll from './CommandGetAll'; import CommandPolykey from '../CommandPolykey'; class CommandNodes extends CommandPolykey { @@ -13,6 +14,7 @@ class CommandNodes extends CommandPolykey { this.addCommand(new CommandClaim(...args)); this.addCommand(new CommandFind(...args)); this.addCommand(new CommandPing(...args)); + this.addCommand(new CommandGetAll(...args)); } } diff --git a/src/bin/utils/options.ts b/src/bin/utils/options.ts index bed18d65af..f2da17b8ca 100644 --- a/src/bin/utils/options.ts +++ b/src/bin/utils/options.ts @@ -154,6 +154,15 @@ const pullVault = new commander.Option( 'Name or Id of the vault to pull from', ); +const forceNodeAdd = new commander.Option( + '--force', + 'Force adding node to nodeGraph', +).default(false); + +const noPing = new commander.Option('--no-ping', 'Skip ping step').default( + true, +); + export { nodePath, format, @@ -176,4 +185,6 @@ export { network, workers, pullVault, + forceNodeAdd, + noPing, }; diff --git a/src/bootstrap/utils.ts b/src/bootstrap/utils.ts index 422709b01e..60844fc197 100644 --- a/src/bootstrap/utils.ts +++ b/src/bootstrap/utils.ts @@ -4,6 +4,7 @@ import path from 'path'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; import * as bootstrapErrors from './errors'; +import Queue from '../nodes/Queue'; import { IdentitiesManager } from '../identities'; import { SessionManager } from '../sessions'; import { Status } from '../status'; @@ -141,10 +142,12 @@ async function bootstrapState({ keyManager, logger: logger.getChild(NodeGraph.name), }); + const queue = new Queue({ 
logger }); const nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, logger: logger.getChild(NodeConnectionManager.name), }); const nodeManager = new NodeManager({ db, keyManager, nodeGraph, nodeConnectionManager, sigchain, + queue, logger: logger.getChild(NodeManager.name), }); const notificationsManager = diff --git a/src/claims/utils.ts b/src/claims/utils.ts index faee8ea4b9..ea5ecf15d0 100644 --- a/src/claims/utils.ts +++ b/src/claims/utils.ts @@ -62,7 +62,7 @@ async function createClaim({ const byteEncoder = new TextEncoder(); const claim = new GeneralSign(byteEncoder.encode(canonicalizedPayload)); claim - .addSignature(await createPrivateKey(privateKey)) + .addSignature(createPrivateKey(privateKey)) .setProtectedHeader({ alg: alg, kid: kid }); const signedClaim = await claim.sign(); return signedClaim as ClaimEncoded; @@ -83,14 +83,14 @@ kid: NodeIdEncoded; alg?: string; }): Promise<ClaimEncoded> { - const decodedClaim = await decodeClaim(claim); + const decodedClaim = decodeClaim(claim); // Reconstruct the claim with our own signature // Make the payload contents deterministic const canonicalizedPayload = canonicalize(decodedClaim.payload); const byteEncoder = new TextEncoder(); const newClaim = new GeneralSign(byteEncoder.encode(canonicalizedPayload)); newClaim - .addSignature(await createPrivateKey(privateKey)) + .addSignature(createPrivateKey(privateKey)) .setProtectedHeader({ alg: alg, kid: kid }); const signedClaim = await newClaim.sign(); // Add our signature to the existing claim diff --git a/src/client/GRPCClientClient.ts b/src/client/GRPCClientClient.ts index c69d58d89e..8b98165363 100644 --- a/src/client/GRPCClientClient.ts +++ b/src/client/GRPCClientClient.ts @@ -3,7 +3,7 @@ import type { ClientReadableStream } from '@grpc/grpc-js/build/src/call'; import type { AsyncGeneratorReadableStreamClient } from '../grpc/types'; import type { Session } from '../sessions'; import type { NodeId } from '../nodes/types'; -import type { Host, Port, TLSConfig, ProxyConfig } from '../network/types'; +import type { Host, Port, ProxyConfig, TLSConfig } from '../network/types'; import type * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import type * as agentPB from '../proto/js/polykey/v1/agent/agent_pb'; import type * as vaultsPB from '../proto/js/polykey/v1/vaults/vaults_pb'; @@ -14,6 +14,7 @@ import type * as identitiesPB from '../proto/js/polykey/v1/identities/identities_pb'; import type * as keysPB from '../proto/js/polykey/v1/keys/keys_pb'; import type * as permissionsPB from '../proto/js/polykey/v1/permissions/permissions_pb'; import type * as secretsPB from '../proto/js/polykey/v1/secrets/secrets_pb'; +import type { Timer } from '../types'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import Logger from '@matrixai/logger'; import * as clientErrors from './errors'; @@ -38,7 +39,7 @@ class GRPCClientClient extends GRPCClient { tlsConfig, proxyConfig, session, - timeout = Infinity, + timer, destroyCallback = async () => {}, logger = new Logger(this.name), }: { @@ -48,7 +49,7 @@ tlsConfig?: Partial<TLSConfig>; proxyConfig?: ProxyConfig; session?: Session; - timeout?: number; + timer?: Timer; destroyCallback?: () => Promise<void>; logger?: Logger; }): Promise<GRPCClientClient> { @@ -64,11 +65,11 @@ port, tlsConfig, proxyConfig, - timeout, + timer, interceptors, logger, }); - const grpcClientClient =
new GRPCClientClient({ + return new GRPCClientClient({ client, nodeId, host, @@ -80,7 +81,6 @@ destroyCallback, logger, }); - return grpcClientClient; } public async destroy() { @@ -901,6 +901,20 @@ )(...args); } + @ready(new clientErrors.ErrorClientClientDestroyed()) + public nodesGetAll(...args) { + return grpcUtils.promisifyUnaryCall( + this.client, + { + nodeId: this.nodeId, + host: this.host, + port: this.port, + command: this.nodesGetAll.name, + }, + this.client.nodesGetAll, + )(...args); + } + @ready(new clientErrors.ErrorClientClientDestroyed()) public identitiesAuthenticate(...args) { return grpcUtils.promisifyReadableStreamCall( diff --git a/src/client/service/index.ts b/src/client/service/index.ts index 1e74eb9d84..d6b1dff6f8 100644 --- a/src/client/service/index.ts +++ b/src/client/service/index.ts @@ -59,6 +59,7 @@ import nodesAdd from './nodesAdd'; import nodesClaim from './nodesClaim'; import nodesFind from './nodesFind'; import nodesPing from './nodesPing'; +import nodesGetAll from './nodesGetAll'; import notificationsClear from './notificationsClear'; import notificationsRead from './notificationsRead'; import notificationsSend from './notificationsSend'; @@ -165,6 +166,7 @@ function createService({ nodesClaim: nodesClaim(container), nodesFind: nodesFind(container), nodesPing: nodesPing(container), + nodesGetAll: nodesGetAll(container), notificationsClear: notificationsClear(container), notificationsRead: notificationsRead(container), notificationsSend: notificationsSend(container), diff --git a/src/client/service/nodesAdd.ts b/src/client/service/nodesAdd.ts index 079d2eee2d..3b6043219c 100644 --- a/src/client/service/nodesAdd.ts +++ b/src/client/service/nodesAdd.ts @@ -6,6 +6,7 @@ import type { NodeId, NodeAddress } from '../../nodes/types'; import type { Host, Hostname, Port } from '../../network/types'; import type * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; import type Logger from '@matrixai/logger'; +import * as nodeErrors from '../../nodes/errors'; import * as grpcUtils from '../../grpc/utils'; import { validateSync } from '../../validation'; import * as validationUtils from '../../validation/utils'; @@ -14,7 +15,7 @@ import * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; import * as clientUtils from '../utils'; /** - * Adds a node ID -> node address mapping into the buckets database. + * Adds a node ID -> node address mapping into the buckets' database. * This is an unrestricted add: no validity checks are made for the correctness * of the passed ID or host/port.
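+ * An optional ping (request.getPing() below) authenticates the target node
+ * before it is added, and request.getForce() can bypass the usual bucket
+ * update behaviour when setting the node.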
*/ @@ -30,12 +31,13 @@ logger: Logger; }) { return async ( - call: grpc.ServerUnaryCall<nodesPB.NodeAddress, utilsPB.EmptyMessage>, + call: grpc.ServerUnaryCall<nodesPB.NodeAdd, utilsPB.EmptyMessage>, callback: grpc.sendUnaryData<utilsPB.EmptyMessage>, ): Promise<void> => { try { const response = new utilsPB.EmptyMessage(); const metadata = await authenticate(call.metadata); + const request = call.request; call.sendMetadata(metadata); const { nodeId, @@ -55,11 +57,21 @@ ); }, { - nodeId: call.request.getNodeId(), - host: call.request.getAddress()?.getHost(), - port: call.request.getAddress()?.getPort(), + nodeId: request.getNodeId(), + host: request.getAddress()?.getHost(), + port: request.getAddress()?.getPort(), }, ); + // Pinging to authenticate the node + if ( + request.getPing() && + !(await nodeManager.pingNode(nodeId, { host, port })) + ) { + throw new nodeErrors.ErrorNodePingFailed( + 'Failed to authenticate target node', + ); + } + await db.withTransactionF(async (tran) => nodeManager.setNode( nodeId, { host, port, } as NodeAddress, + true, + request.getForce(), + undefined, tran, ), ); diff --git a/src/client/service/nodesFind.ts b/src/client/service/nodesFind.ts index 6c2061719c..324e1c0e9f 100644 --- a/src/client/service/nodesFind.ts +++ b/src/client/service/nodesFind.ts @@ -50,6 +50,7 @@ }, ); const address = await nodeConnectionManager.findNode(nodeId); + if (address == null) throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); response .setNodeId(nodesUtils.encodeNodeId(nodeId)) .setAddress( diff --git a/src/client/service/nodesGetAll.ts b/src/client/service/nodesGetAll.ts new file mode 100644 index 0000000000..8c021a2482 --- /dev/null +++ b/src/client/service/nodesGetAll.ts @@ -0,0 +1,70 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { Authenticate } from '../types'; +import type KeyManager from '../../keys/KeyManager'; +import type { NodeId } from '../../nodes/types'; +import type * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; +import type NodeGraph from '../../nodes/NodeGraph'; +import { IdInternal } from '@matrixai/id'; +import { utils as nodesUtils } from '../../nodes'; +import { utils as grpcUtils } from '../../grpc'; +import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; + +/** + * Retrieves all nodes from all buckets in the NodeGraph.
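+ * The response maps each bucket index to a table of NodeId -> address
+ * entries, mirroring the on-disk bucket structure of the NodeGraph.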
+ */ +function nodesGetAll({ + nodeGraph, + keyManager, + authenticate, +}: { + nodeGraph: NodeGraph; + keyManager: KeyManager; + authenticate: Authenticate; +}) { + return async ( + call: grpc.ServerUnaryCall<utilsPB.EmptyMessage, nodesPB.NodeBuckets>, + callback: grpc.sendUnaryData<nodesPB.NodeBuckets>, + ): Promise<void> => { + try { + const response = new nodesPB.NodeBuckets(); + const metadata = await authenticate(call.metadata); + call.sendMetadata(metadata); + const buckets = nodeGraph.getBuckets(); + for await (const b of buckets) { + let index; + for (const id of Object.keys(b)) { + const encodedId = nodesUtils.encodeNodeId( + IdInternal.fromString(id), + ); + const address = new nodesPB.Address() + .setHost(b[id].address.host) + .setPort(b[id].address.port); + // For every node in every bucket, add it to our message + if (!index) { + index = nodesUtils.bucketIndex( + keyManager.getNodeId(), + IdInternal.fromString(id), + ); + } + // Need to either add node to an existing bucket, or create a new + // bucket (if it doesn't exist) + const bucket = response.getBucketsMap().get(index); + if (bucket) { + bucket.getNodeTableMap().set(encodedId, address); + } else { + const newBucket = new nodesPB.NodeTable(); + newBucket.getNodeTableMap().set(encodedId, address); + response.getBucketsMap().set(index, newBucket); + } + } + } + callback(null, response); + return; + } catch (e) { + callback(grpcUtils.fromError(e)); + return; + } + }; +} + +export default nodesGetAll; diff --git a/src/grpc/GRPCClient.ts b/src/grpc/GRPCClient.ts index 4e88291a10..0434a17532 100644 --- a/src/grpc/GRPCClient.ts +++ b/src/grpc/GRPCClient.ts @@ -9,6 +9,7 @@ import type { import type { NodeId } from '../nodes/types'; import type { Certificate } from '../keys/types'; import type { Host, Port, TLSConfig, ProxyConfig } from '../network/types'; +import type { Timer } from '../types'; import http2 from 'http2'; import Logger from '@matrixai/logger'; import * as grpc from '@grpc/grpc-js'; @@ -44,7 +45,7 @@ abstract class GRPCClient { port, tlsConfig, proxyConfig, - timeout = Infinity, + timer, interceptors = [], logger = new Logger(this.name), }: { @@ -58,7 +59,7 @@ port: Port; tlsConfig?: Partial<TLSConfig>; proxyConfig?: ProxyConfig; - timeout?: number; + timer?: Timer; interceptors?: Array<Interceptor>; logger?: Logger; }): Promise<{ @@ -123,9 +124,17 @@ } const waitForReady = promisify(client.waitForReady).bind(client); // Add the current unix time because grpc expects the milliseconds since unix epoch - timeout += Date.now(); try { - await waitForReady(timeout); + if (timer != null) { + await Promise.race([timer.timerP, waitForReady(Infinity)]); + // If the timer resolves first we throw a timeout error + if (timer?.timedOut === true) { + throw new grpcErrors.ErrorGRPCClientTimeout(); + } + } else { + // No timer given so we wait forever + await waitForReady(Infinity); + } } catch (e) { // If we fail here then we leak the client object...
client.close(); diff --git a/src/identities/providers/github/GitHubProvider.ts b/src/identities/providers/github/GitHubProvider.ts index 8fd0a79fe8..bfbce77661 100644 --- a/src/identities/providers/github/GitHubProvider.ts +++ b/src/identities/providers/github/GitHubProvider.ts @@ -514,7 +514,7 @@ class GitHubProvider extends Provider { ); } const data = await response.text(); - const claimIds = await this.extractClaimIds(data); + const claimIds = this.extractClaimIds(data); for (const claimId of claimIds) { const claim = await this.getClaim(authIdentityId, claimId); if (claim != null) { diff --git a/src/keys/utils.ts b/src/keys/utils.ts index e36849f479..14b82a92dd 100644 --- a/src/keys/utils.ts +++ b/src/keys/utils.ts @@ -508,7 +508,16 @@ function publicKeyBitSize(publicKey: PublicKey): number { } async function getRandomBytes(size: number): Promise<Buffer> { - return Buffer.from(await random.getBytes(size), 'binary'); + const p = new Promise<string>((resolve, reject) => { + random.getBytes(size, (e, bytes) => { + if (e != null) { + reject(e); + } else { + resolve(bytes); + } + }); + }); + return Buffer.from(await p, 'binary'); } function getRandomBytesSync(size: number): Buffer { diff --git a/src/network/Proxy.ts b/src/network/Proxy.ts index 1a6ff46f16..973c7f525a 100644 --- a/src/network/Proxy.ts +++ b/src/network/Proxy.ts @@ -1,5 +1,12 @@ import type { AddressInfo, Socket } from 'net'; -import type { Host, Port, Address, ConnectionInfo, TLSConfig } from './types'; +import type { + Host, + Port, + Address, + ConnectionInfo, + TLSConfig, + ConnectionEstablishedCallback, +} from './types'; import type { ConnectionsForward } from './ConnectionForward'; import type { NodeId } from '../nodes/types'; import type { Timer } from '../types'; @@ -48,6 +55,7 @@ class Proxy { proxy: new Map(), reverse: new Map(), }; + protected connectionEstablishedCallback: ConnectionEstablishedCallback; constructor({ authToken, @@ -56,6 +64,7 @@ connEndTime = 1000, connPunchIntervalTime = 1000, connKeepAliveIntervalTime = 1000, + connectionEstablishedCallback = () => {}, logger, }: { authToken: string; @@ -64,6 +73,7 @@ connEndTime?: number; connPunchIntervalTime?: number; connKeepAliveIntervalTime?: number; + connectionEstablishedCallback?: ConnectionEstablishedCallback; logger?: Logger; }) { this.logger = logger ??
new Logger(Proxy.name); @@ -77,6 +87,7 @@ this.server = http.createServer(); this.server.on('request', this.handleRequest); this.server.on('connect', this.handleConnectForward); + this.connectionEstablishedCallback = connectionEstablishedCallback; this.logger.info(`Created ${Proxy.name}`); } @@ -521,6 +532,14 @@ timer, ); conn.compose(clientSocket); + // With the connection composed without error we can assume that the + // connection was established and verified + await this.connectionEstablishedCallback({ + remoteNodeId: conn.getServerNodeIds()[0], + remoteHost: conn.host, + remotePort: conn.port, + type: 'forward', + }); } protected async establishConnectionForward( @@ -687,6 +706,14 @@ timer, ); await conn.compose(utpConn, timer); + // With the connection composed without error we can assume that the + // connection was established and verified + await this.connectionEstablishedCallback({ + remoteNodeId: conn.getClientNodeIds()[0], + remoteHost: conn.host, + remotePort: conn.port, + type: 'reverse', + }); } protected async establishConnectionReverse( diff --git a/src/network/types.ts b/src/network/types.ts index 40d672a85a..a5a62b4c20 100644 --- a/src/network/types.ts +++ b/src/network/types.ts @@ -55,6 +55,15 @@ type ConnectionInfo = { remotePort: Port; }; +type ConnectionData = { + remoteNodeId: NodeId; + remoteHost: Host; + remotePort: Port; + type: 'forward' | 'reverse'; +}; + +type ConnectionEstablishedCallback = (data: ConnectionData) => any; + type PingMessage = { type: 'ping'; }; @@ -73,6 +82,8 @@ export type { TLSConfig, ProxyConfig, ConnectionInfo, + ConnectionData, + ConnectionEstablishedCallback, PingMessage, PongMessage, NetworkMessage, diff --git a/src/network/utils.ts b/src/network/utils.ts index 1df7faa7fe..c5786a754f 100644 --- a/src/network/utils.ts +++ b/src/network/utils.ts @@ -45,10 +45,12 @@ function isHostname(hostname: any): hostname is Hostname { /** * Ports must be numbers between 0 and 65535 inclusive + * If connect is true, then port must be a number between 1 and 65535 inclusive */ -function isPort(port: any): port is Port { +function isPort(port: any, connect: boolean = false): port is Port { if (typeof port !== 'number') return false; if (port < 0 || port > 65535) return false; + if (connect && port === 0) return false; return true; } diff --git a/src/nodes/NodeConnection.ts b/src/nodes/NodeConnection.ts index f792724137..f4f4fbfb3a 100644 --- a/src/nodes/NodeConnection.ts +++ b/src/nodes/NodeConnection.ts @@ -5,6 +5,7 @@ import type { Certificate, PublicKey, PublicKeyPem } from '../keys/types'; import type Proxy from '../network/Proxy'; import type GRPCClient from '../grpc/GRPCClient'; import type NodeConnectionManager from './NodeConnectionManager'; +import type { Timer } from '../types'; import Logger from '@matrixai/logger'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import * as asyncInit from '@matrixai/async-init'; @@ -38,7 +39,7 @@ class NodeConnection<T extends GRPCClient> { targetHost, targetPort, targetHostname, - connConnectTime = 20000, + timer, proxy, keyManager, clientFactory, @@ -50,7 +51,7 @@ targetHost: Host; targetPort: Port; targetHostname?: Hostname; - connConnectTime?: number; + timer?: Timer; proxy: Proxy; keyManager: KeyManager; clientFactory: (...args) => Promise<T>; @@ -125,7 +126,7 @@ await nodeConnection.destroy(); } }, - timeout: connConnectTime, + timer: timer, }), holePunchPromises, ]); diff --git
a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index e51ccc8038..f39f333d85 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -4,6 +4,7 @@ import type Proxy from '../network/Proxy'; import type { Host, Hostname, Port } from '../network/types'; import type { Timer } from '../types'; import type NodeGraph from './NodeGraph'; +import type Queue from './Queue'; import type { NodeAddress, NodeData, @@ -11,7 +12,7 @@ import type { NodeIdString, SeedNodes, } from './types'; -import type { DBTransaction } from '@matrixai/db'; +import type NodeManager from './NodeManager'; import { withF } from '@matrixai/resources'; import Logger from '@matrixai/logger'; import { ready, StartStop } from '@matrixai/async-init/dist/StartStop'; @@ -27,6 +28,7 @@ import * as networkUtils from '../network/utils'; import * as agentErrors from '../agent/errors'; import * as grpcErrors from '../grpc/errors'; import * as nodesPB from '../proto/js/polykey/v1/nodes/nodes_pb'; +import { timerStart } from '../utils'; type ConnectionAndTimer = { connection: NodeConnection<GRPCClientAgent>; timer: NodeJS.Timer; }; @@ -55,6 +57,9 @@ class NodeConnectionManager { protected nodeGraph: NodeGraph; protected keyManager: KeyManager; protected proxy: Proxy; + protected queue: Queue; + // NodeManager has to be passed in during start to allow co-dependency + protected nodeManager: NodeManager | undefined; protected seedNodes: SeedNodes; /** * Data structure to store all NodeConnections. If a connection to a node n does @@ -73,6 +78,7 @@ keyManager, nodeGraph, proxy, + queue, seedNodes = {}, initialClosestNodes = 3, connConnectTime = 20000, @@ -82,6 +88,7 @@ nodeGraph: NodeGraph; keyManager: KeyManager; proxy: Proxy; + queue: Queue; seedNodes?: SeedNodes; initialClosestNodes?: number; connConnectTime?: number; @@ -92,23 +99,31 @@ this.keyManager = keyManager; this.nodeGraph = nodeGraph; this.proxy = proxy; + this.queue = queue; this.seedNodes = seedNodes; this.initialClosestNodes = initialClosestNodes; this.connConnectTime = connConnectTime; this.connTimeoutTime = connTimeoutTime; } - public async start() { + public async start({ nodeManager }: { nodeManager: NodeManager }) { this.logger.info(`Starting ${this.constructor.name}`); + this.nodeManager = nodeManager; for (const nodeIdEncoded in this.seedNodes) { const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded)!; - await this.nodeGraph.setNode(nodeId, this.seedNodes[nodeIdEncoded]); + await this.nodeManager.setNode( + nodeId, + this.seedNodes[nodeIdEncoded], + true, + true, + ); } this.logger.info(`Started ${this.constructor.name}`); } public async stop() { this.logger.info(`Stopping ${this.constructor.name}`); + this.nodeManager = undefined; for (const [nodeId, connAndLock] of this.connections) { if (connAndLock == null) continue; if (connAndLock.connection == null) continue; @@ -124,14 +139,19 @@ * itself is such that we can pass targetNodeId as a parameter (as opposed to * an acquire function with no parameters).
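+ * @example
+ * // Illustrative sketch only, using withF from @matrixai/resources in the
+ * // same way as withConnF below:
+ * // await withF([await nodeConnectionManager.acquireConnection(targetNodeId)],
+ * //   async ([conn]) => conn.getClient());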
* @param targetNodeId Id of target node to communicate with + * @param timer Connection timeout timer * @returns ResourceAcquire Resource API for use in with contexts */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async acquireConnection( targetNodeId: NodeId, + timer?: Timer, ): Promise<ResourceAcquire<NodeConnection<GRPCClientAgent>>> { return async () => { - const { connection, timer } = await this.getConnection(targetNodeId); + const { connection, timer: timeToLiveTimer } = await this.getConnection( + targetNodeId, + timer, + ); // Acquire the read lock and the release function const [release] = await this.connectionLocks.lock([ targetNodeId.toString(), @@ -139,7 +159,7 @@ 'write', ])(); // Resetting TTL timer - timer?.refresh(); + timeToLiveTimer?.refresh(); // Return tuple of [ResourceRelease, Resource] return [ async (e) => { @@ -165,14 +185,16 @@ * for use with normal arrow function * @param targetNodeId Id of target node to communicate with * @param f Function to handle communication + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async withConnF<T>( targetNodeId: NodeId, f: (conn: NodeConnection<GRPCClientAgent>) => Promise<T>, + timer?: Timer, ): Promise<T> { return await withF( - [await this.acquireConnection(targetNodeId)], + [await this.acquireConnection(targetNodeId, timer)], async ([conn]) => { this.logger.info( `withConnF calling function with connection to ${nodesUtils.encodeNodeId( @@ -191,6 +213,7 @@ * for use with a generator function * @param targetNodeId Id of target node to communicate with * @param g Generator function to handle communication + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async *withConnG<T, TReturn, TNext>( targetNodeId: NodeId, g: ( conn: NodeConnection<GRPCClientAgent>, ) => AsyncGenerator<T, TReturn, TNext>, + timer?: Timer, ): AsyncGenerator<T, TReturn, TNext> { - const acquire = await this.acquireConnection(targetNodeId); + const acquire = await this.acquireConnection(targetNodeId, timer); const [release, conn] = await acquire(); let caughtError; try { - return yield* await g(conn!); + return yield* g(conn!); } catch (e) { caughtError = e; throw e; @@ -217,10 +241,12 @@ * Create a connection to another node (without performing any function). * This is a NOOP if a connection already exists. * @param targetNodeId Id of node we are creating connection to - * @returns ConnectionAndLock that was created or exists in the connection map. + * @param timer Connection timeout timer + * @returns ConnectionAndTimer that was created or exists in the connection map */ protected async getConnection( targetNodeId: NodeId, + timer?: Timer, ): Promise<ConnectionAndTimer> { this.logger.info( `Getting connection to ${nodesUtils.encodeNodeId(targetNodeId)}`, @@ -243,6 +269,9 @@ ); // Creating the connection and set in map const targetAddress = await this.findNode(targetNodeId); + if (targetAddress == null) { + throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); + } // If the stored host is not a valid host (IP address), // then we assume it to be a hostname const targetHostname = !networkUtils.isHost(targetAddress.host) ? (targetAddress.host as string as Hostname) : undefined; @@ -273,21 +302,24 @@ keyManager: this.keyManager, nodeConnectionManager: this, destroyCallback, - connConnectTime: this.connConnectTime, + timer: timer ??
timerStart(this.connConnectTime), logger: this.logger.getChild( `${NodeConnection.name} ${targetHost}:${targetAddress.port}`, ), clientFactory: async (args) => GRPCClientAgent.createGRPCClientAgent(args), }); + // We can assume connection was established and destination was valid, + // we can add the target to the nodeGraph + await this.nodeManager?.setNode(targetNodeId, targetAddress, false); // Creating TTL timeout - const timer = setTimeout(async () => { + const timeToLiveTimer = setTimeout(async () => { await this.destroyConnection(targetNodeId); }, this.connTimeoutTime); const newConnAndTimer: ConnectionAndTimer = { connection: newConnection, - timer: timer, + timer: timeToLiveTimer, }; this.connections.set(targetNodeIdString, newConnAndTimer); return newConnAndTimer; @@ -362,69 +394,26 @@ * Retrieves the node address. If an entry doesn't exist in the db, then * proceeds to locate it using Kademlia. * @param targetNodeId Id of the node we are trying to find + * @param options */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async findNode(targetNodeId: NodeId): Promise<NodeAddress> { + public async findNode( + targetNodeId: NodeId, + options: { signal?: AbortSignal } = {}, + ): Promise<NodeAddress | undefined> { + const { signal } = { ...options }; // First check if we already have an existing ID -> address record - - let address = await this.nodeGraph.getNode(targetNodeId); + let address = (await this.nodeGraph.getNode(targetNodeId))?.address; // Otherwise, attempt to locate it by contacting network - if (address == null) { - address = await this.getClosestGlobalNodes(targetNodeId); - // TODO: This currently just does one iteration - // If not found in this single iteration, we throw an exception - if (address == null) { - throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); - } - } - // We ensure that we always return a NodeAddress (either by lookup, or - // network search) - if we can't locate it from either, we throw an exception + address = + address ?? + (await this.getClosestGlobalNodes(targetNodeId, undefined, { + signal, + })); + // TODO: This currently just does one iteration return address; } - /** - * Finds the set of nodes (of size k) known by the current node (i.e. in its - * bucket's database) that have the smallest distance to the target node (i.e. - * are closest to the target node). - * i.e. FIND_NODE RPC from Kademlia spec - * - * Used by the RPC service.
* - * @param targetNodeId the node ID to find other nodes closest to it - * @param numClosest the number of the closest nodes to return (by default, returns - * according to the maximum number of nodes per bucket) - * @param tran - * @returns a mapping containing exactly k nodeIds -> nodeAddresses (unless the - * current node has less than k nodes in all of its buckets, in which case it - * returns all nodes it has knowledge of) - */ - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async getClosestLocalNodes( - targetNodeId: NodeId, - numClosest: number = this.nodeGraph.maxNodesPerBucket, - tran?: DBTransaction, - ): Promise<Array<NodeData>> { - // Retrieve all nodes from buckets in database - const buckets = await this.nodeGraph.getAllBuckets(tran); - // Iterate over all the nodes in each bucket - const distanceToNodes: Array<NodeData> = []; - buckets.forEach(function (bucket) { - for (const nodeIdString of Object.keys(bucket)) { - // Compute the distance from the node, and add it to the array - const nodeId = IdInternal.fromString(nodeIdString); - distanceToNodes.push({ - id: nodeId, - address: bucket[nodeId].address, - distance: nodesUtils.calculateDistance(nodeId, targetNodeId), - }); - } - }); - // Sort the array (based on the distance at index 1) - distanceToNodes.sort(nodesUtils.sortByDistance); - // Return the closest k nodes (i.e. the first k), or all nodes if < k in array - return distanceToNodes.slice(0, numClosest); - } - /** * Attempts to locate a target node in the network (using Kademlia). * Adds all discovered, active nodes to the current node's database (up to k * Nodes), or adds the specific node (if found). * (Note that this obviously simplifies the protocol, as the protocol expects * a node to be "found" if we have its IP address. We would need to ensure * validity of the node, by establishing a connection to the node - i.e. the * port). * @param targetNodeId ID of the node attempting to be found (i.e. attempting * to find its IP address and port) + * @param timer Connection timeout timer + * @param options * @returns whether the target node was located in the process */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async getClosestGlobalNodes( targetNodeId: NodeId, + timer?: Timer, + options: { signal?: AbortSignal } = {}, ): Promise<NodeAddress | undefined> { + const localNodeId = this.keyManager.getNodeId(); + const { signal } = { ...options }; // Let foundTarget: boolean = false; let foundAddress: NodeAddress | undefined = undefined; // Get the closest alpha nodes to the target node (set as shortlist) - const shortlist: Array<NodeData> = await this.getClosestLocalNodes( + // FIXME? this is an array. Shouldn't it be a set? + // It's possible for this to grow faster than we can consume it, + // doubly so if we allow duplicates + const shortlist = await this.nodeGraph.getClosestNodes( targetNodeId, this.initialClosestNodes, ); // If we have no nodes at all in our database (even after synchronising), // we should return nothing. We aren't going to find any others if (shortlist.length === 0) { // Need to keep track of the nodes that have been contacted // Not sufficient to simply check if there's already a pre-existing connection // in nodeConnections - what if there's been more than 1 invocation of // getClosestGlobalNodes()?
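+ // Iteratively consume the shortlist: ping each candidate, add responsive
+ // nodes to the node graph, and ask them for nodes even closer to the
+ // target (the iterative lookup step of Kademlia's FIND_NODE).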
const contacted: { [nodeId: string]: boolean } = {}; // Iterate until we've found and contacted k nodes - while (Object.keys(contacted).length <= this.nodeGraph.maxNodesPerBucket) { + while (Object.keys(contacted).length <= this.nodeGraph.nodeBucketLimit) { + if (signal?.aborted) throw new nodesErrors.ErrorNodeAborted(); // While (!foundTarget) { // Remove the node from the front of the array const nextNode = shortlist.shift(); @@ -469,50 +468,66 @@ class NodeConnectionManager { if (nextNode == null) { break; } + const [nextNodeId, nextNodeAddress] = nextNode; // Skip if the node has already been contacted - if (contacted[nextNode.id]) { + if (contacted[nextNodeId]) { continue; } // Connect to the node (check if pre-existing connection exists, otherwise // create a new one) - try { - // Add the node to the database so that we can find its address in - // call to getConnectionToNode - await this.nodeGraph.setNode(nextNode.id, nextNode.address); - await this.getConnection(nextNode.id); - } catch (e) { - // If we can't connect to the node, then skip it + if ( + await this.pingNode( + nextNodeId, + nextNodeAddress.address.host, + nextNodeAddress.address.port, + ) + ) { + await this.nodeManager!.setNode(nextNodeId, nextNodeAddress.address); + } else { continue; } - contacted[nextNode.id] = true; + contacted[nextNodeId] = true; // Ask the node to get their own closest nodes to the target const foundClosest = await this.getRemoteNodeClosestNodes( - nextNode.id, + nextNodeId, targetNodeId, + timer, ); // Check to see if any of these are the target node. At the same time, add // them to the shortlist - for (const nodeData of foundClosest) { - // Ignore any nodes that have been contacted - if (contacted[nodeData.id]) { + for (const [nodeId, nodeData] of foundClosest) { + if (signal?.aborted) throw new nodesErrors.ErrorNodeAborted(); + // Ignore any nodes that have been contacted or our own node + if (contacted[nodeId] || localNodeId.equals(nodeId)) { continue; } - if (nodeData.id.equals(targetNodeId)) { - await this.nodeGraph.setNode(nodeData.id, nodeData.address); + if ( + nodeId.equals(targetNodeId) && + (await this.pingNode( + nodeId, + nodeData.address.host, + nodeData.address.port, + )) + ) { + await this.nodeManager!.setNode(nodeId, nodeData.address); foundAddress = nodeData.address; // We have found the target node, so we can stop trying to look for it // in the shortlist break; } - shortlist.push(nodeData); + shortlist.push([nodeId, nodeData]); } // To make the number of jumps relatively short, should connect to the nodes // closest to the target first, and ask if they know of any closer nodes // than we can simply unshift the first (closest) element from the shortlist - shortlist.sort(function (a: NodeData, b: NodeData) { - if (a.distance > b.distance) { + const distance = (nodeId: NodeId) => + nodesUtils.nodeDistance(targetNodeId, nodeId); + shortlist.sort(function ([nodeIdA], [nodeIdB]) { + const distanceA = distance(nodeIdA); + const distanceB = distance(nodeIdB); + if (distanceA > distanceB) { return 1; - } else if (a.distance < b.distance) { + } else if (distanceA < distanceB) { return -1; } else { return 0; @@ -527,69 +542,106 @@ class NodeConnectionManager { * target node ID. 
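+ * This is the sending side of the FIND_NODE RPC; the receiving side is the
+ * agent service handler nodesClosestLocalNodesGet.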
* @param nodeId the node ID to search on * @param targetNodeId the node ID to find other nodes closest to it + * @param timer Connection timeout timer * @returns list of nodes and their IP/port that are closest to the target */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async getRemoteNodeClosestNodes( nodeId: NodeId, targetNodeId: NodeId, - ): Promise<Array<NodeData>> { + timer?: Timer, + ): Promise<Array<[NodeId, NodeData]>> { // Construct the message const nodeIdMessage = new nodesPB.Node(); nodeIdMessage.setNodeId(nodesUtils.encodeNodeId(targetNodeId)); // Send through client - return this.withConnF(nodeId, async (connection) => { - const client = await connection.getClient(); - const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); - const nodes: Array<NodeData> = []; - // Loop over each map element (from the returned response) and populate nodes - response.getNodeTableMap().forEach((address, nodeIdString: string) => { - const nodeId = nodesUtils.decodeNodeId(nodeIdString); - // If the nodeId is not valid we don't add it to the list of nodes - if (nodeId != null) { - nodes.push({ - id: nodeId, - address: { - host: address.getHost() as Host | Hostname, - port: address.getPort() as Port, - }, - distance: nodesUtils.calculateDistance(targetNodeId, nodeId), - }); - } - }); - return nodes; - }); + return this.withConnF( + nodeId, + async (connection) => { + const client = connection.getClient(); + const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); + const nodes: Array<[NodeId, NodeData]> = []; + // Loop over each map element (from the returned response) and populate nodes + response.getNodeTableMap().forEach((address, nodeIdString: string) => { + const nodeId = nodesUtils.decodeNodeId(nodeIdString); + // If the nodeId is not valid we don't add it to the list of nodes + if (nodeId != null) { + nodes.push([ + nodeId, + { + address: { + host: address.getHost() as Host | Hostname, + port: address.getPort() as Port, + }, + // Not really needed + // But if it's needed then we need to add the information to the proto definition + lastUpdated: 0, + }, + ]); + } + }); + return nodes; + }, + timer, + ); } /** * Perform an initial database synchronisation: get k of the closest nodes * from each seed node and add them to this database - * For now, we also attempt to establish a connection to each of them. - * If these nodes are offline, this will impose a performance penalty, - * so we should investigate performing this in the background if possible. - * Alternatively, we can also just add the nodes to our database without - * establishing connection. - * This has been removed from start() as there's a chicken-egg scenario - * where we require the NodeGraph instance to be created in order to get - * connections.
+ * Establish a proxy connection to each node before adding it + * By default this operation is blocking; set `block` to false to make it + * non-blocking */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async syncNodeGraph() { + public async syncNodeGraph(block: boolean = true, timer?: Timer) { + this.logger.info('Syncing nodeGraph'); for (const seedNodeId of this.getSeedNodes()) { // Check if the connection is viable try { - await this.getConnection(seedNodeId); + await this.getConnection(seedNodeId, timer); } catch (e) { if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) continue; throw e; } - const nodes = await this.getRemoteNodeClosestNodes( seedNodeId, this.keyManager.getNodeId(), + timer, ); - for (const n of nodes) { - await this.nodeGraph.setNode(n.id, n.address); + for (const [nodeId, nodeData] of nodes) { + const pingAndAddNode = async () => { + const port = nodeData.address.port; + const host = await networkUtils.resolveHost(nodeData.address.host); + if (await this.pingNode(nodeId, host, port)) { + await this.nodeManager!.setNode(nodeId, nodeData.address, true); + } + }; + + if (!block) { + this.queue.push(pingAndAddNode); + } else { + try { + await pingAndAddNode(); + } catch (e) { + if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; + } + } + } + // Refreshing every bucket above the closest node + const refreshBuckets = async () => { + const [closestNode] = ( + await this.nodeGraph.getClosestNodes(this.keyManager.getNodeId(), 1) + ).pop()!; + const [bucketIndex] = this.nodeGraph.bucketIndex(closestNode); + for (let i = bucketIndex; i < this.nodeGraph.nodeIdBits; i++) { + this.nodeManager?.refreshBucketQueueAdd(i); + } + }; + if (!block) { + this.queue.push(refreshBuckets); + } else { + await refreshBuckets(); } } } @@ -603,6 +655,7 @@ * @param targetNodeId node ID of the target node to hole punch * @param proxyAddress string of address in the form `proxyHost:proxyPort` * @param signature signature to verify source node is sender (signature based * on proxyAddress as message) + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) @@ -612,16 +665,21 @@ targetNodeId: NodeId, proxyAddress: string, signature: Buffer, + timer?: Timer, ): Promise<void> { const relayMsg = new nodesPB.Relay(); relayMsg.setSrcId(nodesUtils.encodeNodeId(sourceNodeId)); relayMsg.setTargetId(nodesUtils.encodeNodeId(targetNodeId)); relayMsg.setProxyAddress(proxyAddress); relayMsg.setSignature(signature.toString()); - await this.withConnF(relayNodeId, async (connection) => { - const client = connection.getClient(); - await client.nodesHolePunchMessageSend(relayMsg); - }); + await this.withConnF( + relayNodeId, + async (connection) => { + const client = connection.getClient(); + await client.nodesHolePunchMessageSend(relayMsg); + }, + timer, + ); } /** * node).
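+ * The message is simply re-sent towards the target node ID embedded in the
+ * relay message, over an existing or newly created connection.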
* @param message the original relay message (assumed to be created in * nodeConnection.start()) + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async relayHolePunchMessage(message: nodesPB.Relay): Promise<void> { + public async relayHolePunchMessage( + message: nodesPB.Relay, + timer?: Timer, + ): Promise<void> { await this.sendHolePunchMessage( validationUtils.parseNodeId(message.getTargetId()), validationUtils.parseNodeId(message.getSrcId()), validationUtils.parseNodeId(message.getTargetId()), message.getProxyAddress(), Buffer.from(message.getSignature()), + timer, ); } @@ -652,6 +715,56 @@ class NodeConnectionManager { (nodeIdEncoded) => nodesUtils.decodeNodeId(nodeIdEncoded)!, ); } + + /** + * Checks if a connection can be made to the target. Returns true if the + * connection can be authenticated, its certificate matches the nodeId and + * the addresses match if provided. Otherwise returns false. + * @param nodeId - NodeId of the target + * @param host - Host of the target node + * @param port - Port of the target node + * @param timer Connection timeout timer + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async pingNode( + nodeId: NodeId, + host: Host | Hostname, + port: Port, + timer?: Timer, + ): Promise<boolean> { + host = await networkUtils.resolveHost(host); + // If we can create a connection then we have punched through the NAT, + // authenticated and confirmed the nodeId matches + const proxyAddress = networkUtils.buildAddress( + this.proxy.getProxyHost(), + this.proxy.getProxyPort(), + ); + const signature = await this.keyManager.signWithRootKeyPair( + Buffer.from(proxyAddress), + ); + const holePunchPromises = Array.from(this.getSeedNodes(), (seedNodeId) => { + return this.sendHolePunchMessage( + seedNodeId, + this.keyManager.getNodeId(), + nodeId, + proxyAddress, + signature, + ); + }); + const forwardPunchPromise = this.holePunchForward( + nodeId, + host, + port, + timer, + ); + + try { + await Promise.all([forwardPunchPromise, ...holePunchPromises]); + } catch (e) { + return false; + } + return true; + } } export default NodeConnectionManager; diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index 4d623dbceb..6bd6b2f2d9 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -1,21 +1,27 @@ import type { DB, DBTransaction, KeyPath, LevelPath } from '@matrixai/db'; -import type { NodeAddress, NodeBucket, NodeId } from './types'; +import type { + NodeId, + NodeAddress, + NodeBucket, + NodeData, + NodeBucketMeta, + NodeBucketIndex, + NodeGraphSpace, +} from './types'; import type KeyManager from '../keys/KeyManager'; -import type { Host, Hostname, Port } from '../network/types'; -import lexi from 'lexicographic-integer'; import Logger from '@matrixai/logger'; import { CreateDestroyStartStop, ready, } from '@matrixai/async-init/dist/CreateDestroyStartStop'; import { IdInternal } from '@matrixai/id'; -import { withF } from '@matrixai/resources'; import * as nodesUtils from './utils'; import * as nodesErrors from './errors'; +import { getUnixtime, never } from '../utils'; /** * NodeGraph is an implementation of Kademlia for maintaining peer to peer information - * We maintain a map of buckets.
Where each bucket has k number of node infos + * It is a database of fixed-size buckets, where each bucket contains NodeId -> NodeData */ interface NodeGraph extends CreateDestroyStartStop {} @CreateDestroyStartStop( @@ -26,11 +32,13 @@ class NodeGraph { public static async createNodeGraph({ db, keyManager, + nodeIdBits = 256, logger = new Logger(this.name), fresh = false, }: { db: DB; keyManager: KeyManager; + nodeIdBits?: number; logger?: Logger; fresh?: boolean; }): Promise { @@ -38,6 +46,7 @@ class NodeGraph { const nodeGraph = new NodeGraph({ db, keyManager, + nodeIdBits, logger, }); await nodeGraph.start({ fresh }); @@ -46,339 +55,768 @@ class NodeGraph { } /** - * Max number of nodes in each k-bucket (a.k.a. k) + * Bit size of the NodeIds + * This equals the number of buckets */ - public readonly maxNodesPerBucket: number = 20; + public readonly nodeIdBits: number; + /** + * Max number of nodes in each k-bucket + */ + public readonly nodeBucketLimit: number = 20; protected logger: Logger; protected db: DB; protected keyManager: KeyManager; + protected space: NodeGraphSpace; protected nodeGraphDbPath: LevelPath = [this.constructor.name]; - /** - * Buckets stores NodeBucketIndex -> NodeBucket - */ - protected nodeGraphBucketsDbPath: LevelPath = [ - this.constructor.name, - 'buckets', - ]; + protected nodeGraphMetaDbPath: LevelPath; + protected nodeGraphBucketsDbPath: LevelPath; + protected nodeGraphLastUpdatedDbPath: LevelPath; constructor({ db, keyManager, + nodeIdBits, logger, }: { db: DB; keyManager: KeyManager; + nodeIdBits: number; logger: Logger; }) { this.logger = logger; this.db = db; this.keyManager = keyManager; + this.nodeIdBits = nodeIdBits; } public async start({ fresh = false, - }: { - fresh?: boolean; - } = {}) { + }: { fresh?: boolean } = {}): Promise { this.logger.info(`Starting ${this.constructor.name}`); - if (fresh) { - await this.db.clear(this.nodeGraphDbPath); - } + const space = await this.db.withTransactionF(async (tran) => { + if (fresh) { + await tran.clear(this.nodeGraphDbPath); + } + // Space key is used to create a swappable sublevel + // when remapping the buckets during `this.refreshBuckets` + return await this.setupSpace(tran); + }); + // Bucket metadata sublevel: `!meta!! -> value` + this.nodeGraphMetaDbPath = [...this.nodeGraphDbPath, 'meta' + space]; + // Bucket sublevel: `!buckets!! 
-> NodeData` + // The BucketIndex can range from 0 to NodeId bit-size minus 1 + // So 256 bits means 256 buckets of 0 to 255 + this.nodeGraphBucketsDbPath = [...this.nodeGraphDbPath, 'buckets' + space]; + // Last updated sublevel: `!lastUpdated!!- -> NodeId` + // This is used as a sorted index of the NodeId by `lastUpdated` timestamp + // The `NodeId` must be appended in the key in order to disambiguate `NodeId` with same `lastUpdated` timestamp + this.nodeGraphLastUpdatedDbPath = [ + ...this.nodeGraphDbPath, + 'lastUpdated' + space, + ]; + this.space = space; this.logger.info(`Started ${this.constructor.name}`); } - public async stop() { + public async stop(): Promise { this.logger.info(`Stopping ${this.constructor.name}`); this.logger.info(`Stopped ${this.constructor.name}`); } - public async destroy() { + public async destroy(): Promise { this.logger.info(`Destroying ${this.constructor.name}`); + // If the DB was stopped, the existing sublevel `this.nodeGraphDb` will not be valid + // Therefore we recreate the sublevel here await this.db.clear(this.nodeGraphDbPath); this.logger.info(`Destroyed ${this.constructor.name}`); } - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async withTransactionF( - f: (tran: DBTransaction) => Promise, - ): Promise { - return withF([this.db.transaction()], ([tran]) => f(tran)); - } - /** - * Retrieves the node Address - * @param nodeId node ID of the target node - * @param tran - * @returns Node Address of the target node + * Sets up the space key + * The space string is suffixed to the `buckets` and `meta` sublevels + * This is used to allow swapping of sublevels when remapping buckets + * during `this.refreshBuckets` */ + protected async setupSpace(tran: DBTransaction): Promise { + let space = await tran.get([ + ...this.nodeGraphDbPath, + 'space', + ]); + if (space != null) { + return space; + } + space = '0'; + await tran.put([...this.nodeGraphDbPath, 'space'], space); + return space; + } + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getNode( nodeId: NodeId, tran?: DBTransaction, - ): Promise { + ): Promise { if (tran == null) { - return this.withTransactionF(async (tran) => this.getNode(nodeId, tran)); - } - const bucketIndex = this.getBucketIndex(nodeId); - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - bucketIndex, - ] as unknown as KeyPath; - const bucket = await tran.get(bucketPath); - if (bucket != null && nodeId in bucket) { - return bucket[nodeId].address; + return this.db.withTransactionF(async (tran) => + this.getNode(nodeId, tran), + ); } - return; - } - /** - * Determines whether a node ID -> node address mapping exists in this node's - * node table. - * @param targetNodeId the node ID of the node to find - * @param tran - * @returns true if the node exists in the table, false otherwise - */ - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async knowsNode( - targetNodeId: NodeId, - tran?: DBTransaction, - ): Promise { - return !!(await this.getNode(targetNodeId, tran)); + const [bucketIndex] = this.bucketIndex(nodeId); + const bucketDomain = [ + ...this.nodeGraphBucketsDbPath, + nodesUtils.bucketKey(bucketIndex), + nodesUtils.bucketDbKey(nodeId), + ]; + return await tran.get(bucketDomain); } /** - * Returns the specified bucket if it exists - * @param bucketIndex - * @param tran + * Get all nodes. 
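For orientation, the space-suffixed sublevels set up in start() can be pictured as follows; this key layout is an illustrative reconstruction (placeholder names are not from the diff):

// !NodeGraph!space                                -> '0' | '1'
// !NodeGraph!meta<space>!<bucketKey>!count        -> number
// !NodeGraph!buckets<space>!<bucketKey>!<NodeId>  -> NodeData
// !NodeGraph!lastUpdated<space>!<bucketKey>!<lexi(lastUpdated)>-<NodeId> -> NodeId
// Flipping <space> between '0' and '1' lets resetBuckets rebuild into a fresh
// set of sublevels and then switch over in one step.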
+ * Nodes are always sorted by `NodeBucketIndex` first + * Then secondly by the node IDs + * The `order` parameter applies to both, for example possible sorts: + * NodeBucketIndex asc, NodeID asc + * NodeBucketIndex desc, NodeId desc */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getBucket( - bucketIndex: number, + public async *getNodes( + order: 'asc' | 'desc' = 'asc', tran?: DBTransaction, - ): Promise { + ): AsyncGenerator<[NodeId, NodeData]> { if (tran == null) { - return this.withTransactionF(async (tran) => - this.getBucket(bucketIndex, tran), - ); + const getNodes = (tran) => this.getNodes(order, tran); + return yield* this.db.withTransactionG(async function* (tran) { + return yield* getNodes(tran); + }); } - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - lexi.pack(bucketIndex, 'hex'), - ] as unknown as KeyPath; - const bucket = await tran.get(bucketPath); - // Cast the non-primitive types correctly (ensures type safety when using them) - for (const nodeId in bucket) { - bucket[nodeId].address.host = bucket[nodeId].address.host as - | Host - | Hostname; - bucket[nodeId].address.port = bucket[nodeId].address.port as Port; - bucket[nodeId].lastUpdated = new Date(bucket[nodeId].lastUpdated); + + for await (const [keyPath, nodeData] of tran.iterator( + { + reverse: order !== 'asc', + valueAsBuffer: false, + }, + this.nodeGraphBucketsDbPath, + )) { + const { nodeId } = nodesUtils.parseBucketsDbKey(keyPath); + yield [nodeId, nodeData]; } - return bucket; } /** - * Sets a node to the bucket database - * This may delete an existing node if the bucket is filled up + * Will add a node to the node graph and increment the bucket count. + * If the node already existed it will be updated. + * @param nodeId NodeId to add to the NodeGraph + * @param nodeAddress Address information to add + * @param tran */ + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, tran?: DBTransaction, ): Promise { if (tran == null) { - return this.withTransactionF(async (tran) => + return this.db.withTransactionF(async (tran) => this.setNode(nodeId, nodeAddress, tran), ); } - const bucketIndex = this.getBucketIndex(nodeId); - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - bucketIndex, - ] as unknown as KeyPath; - let bucket = await tran.get(bucketPath); - if (bucket == null) { - bucket = {}; + + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); + const lastUpdatedPath = [...this.nodeGraphLastUpdatedDbPath, bucketKey]; + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + const bucketPath = [...this.nodeGraphBucketsDbPath, bucketKey, nodeIdKey]; + const nodeData = await tran.get(bucketPath); + if (nodeData != null) { + this.logger.debug( + `Updating node ${nodesUtils.encodeNodeId( + nodeId, + )} in bucket ${bucketIndex}`, + ); + // If the node already exists we want to remove the old `lastUpdated` + const lastUpdatedKey = nodesUtils.lastUpdatedKey(nodeData.lastUpdated); + await tran.del([...lastUpdatedPath, lastUpdatedKey, nodeIdKey]); + } else { + this.logger.debug( + `Adding node ${nodesUtils.encodeNodeId( + nodeId, + )} to bucket ${bucketIndex}`, + ); + // It didn't exist, so we want to increment the bucket count + const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); + await this.setBucketMetaProp(bucketIndex, 'count', count + 1, tran); } - bucket[nodeId] = { + const lastUpdated = getUnixtime(); + await tran.put(bucketPath, { address: nodeAddress, - lastUpdated: new Date(), - }; - // 
Perform the check on size after we add/update the node. If it's an update, - // then we don't need to perform the deletion - let bucketEntries = Object.entries(bucket); - if (bucketEntries.length > this.maxNodesPerBucket) { - const leastActive = bucketEntries.reduce((prev, curr) => { - return new Date(prev[1].lastUpdated) < new Date(curr[1].lastUpdated) - ? prev - : curr; - }); - delete bucket[leastActive[0]]; - bucketEntries = Object.entries(bucket); - // For safety, make sure that the bucket is actually at maxNodesPerBucket - if (bucketEntries.length !== this.maxNodesPerBucket) { - throw new nodesErrors.ErrorNodeGraphOversizedBucket(); - } - } - await tran.put(bucketPath, bucket); + lastUpdated, + }); + const newLastUpdatedKey = nodesUtils.lastUpdatedKey(lastUpdated); + await tran.put( + [...lastUpdatedPath, newLastUpdatedKey, nodeIdKey], + nodeIdKey, + true, + ); } - /** - * Updates an existing node - * It will update the lastUpdated time - * Optionally it can replace the NodeAddress - */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async updateNode( - nodeId: NodeId, - nodeAddress?: NodeAddress, + public async getOldestNode( + bucketIndex: number, + limit: number = 1, tran?: DBTransaction, - ): Promise<void> { + ): Promise<Array<NodeId>> { if (tran == null) { - return this.withTransactionF(async (tran) => - this.updateNode(nodeId, nodeAddress, tran), + return this.db.withTransactionF(async (tran) => + this.getOldestNode(bucketIndex, limit, tran), ); } - const bucketIndex = this.getBucketIndex(nodeId); - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - bucketIndex, - ] as unknown as KeyPath; - const bucket = await tran.get(bucketPath); - if (bucket != null && nodeId in bucket) { - bucket[nodeId].lastUpdated = new Date(); - if (nodeAddress != null) { - bucket[nodeId].address = nodeAddress; - } - await tran.put(bucketPath, bucket); - } else { - throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); + const bucketKey = nodesUtils.bucketKey(bucketIndex); + // Remove the oldest entry in the bucket + const oldestNodeIds: Array<NodeId> = []; + for await (const [keyPath] of tran.iterator({ limit }, [ + ...this.nodeGraphLastUpdatedDbPath, + bucketKey, + ])) { + const { nodeId } = nodesUtils.parseLastUpdatedBucketDbKey(keyPath); + oldestNodeIds.push(nodeId); } + return oldestNodeIds; } - /** - * Removes a node from the bucket database - * @param nodeId - * @param tran - */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async unsetNode(nodeId: NodeId, tran?: DBTransaction): Promise<void> { if (tran == null) { - return this.withTransactionF(async (tran) => + return this.db.withTransactionF(async (tran) => this.unsetNode(nodeId, tran), ); } - const bucketIndex = this.getBucketIndex(nodeId); - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - bucketIndex, - ] as unknown as KeyPath; - const bucket = await tran.get(bucketPath); - if (bucket == null) { - return; - } - delete bucket[nodeId]; - if (Object.keys(bucket).length === 0) { - await tran.del(bucketPath); - } else { - await tran.put(bucketPath, bucket); + + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); + const bucketPath = [...this.nodeGraphBucketsDbPath, bucketKey]; + const lastUpdatedPath = [...this.nodeGraphLastUpdatedDbPath, bucketKey]; + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + const nodeData = await tran.get([...bucketPath, nodeIdKey]); + if (nodeData != null) { + this.logger.debug( + `Removing node ${nodesUtils.encodeNodeId( + nodeId, + )} from bucket ${bucketIndex}`, + ); + const count = await
this.getBucketMetaProp(bucketIndex, 'count', tran); + await this.setBucketMetaProp(bucketIndex, 'count', count - 1, tran); + await tran.del([...bucketPath, nodeIdKey]); + const lastUpdatedKey = nodesUtils.lastUpdatedKey(nodeData.lastUpdated); + await tran.del([...lastUpdatedPath, lastUpdatedKey, nodeIdKey]); } } /** - * Find the correct index of the k-bucket to add a new node to (for this node's - * bucket database). Packs it as a lexicographic integer, such that the order - * of buckets in leveldb is numerical order. + * Gets a bucket + * The bucket's node IDs are sorted lexicographically by default + * Alternatively you can acquire them sorted by lastUpdated timestamp + * or by distance to our own NodeId */ - protected getBucketIndex(nodeId: NodeId): string { - const index = nodesUtils.calculateBucketIndex( - this.keyManager.getNodeId(), - nodeId, - ); - return lexi.pack(index, 'hex') as string; + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getBucket( + bucketIndex: NodeBucketIndex, + sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', + order: 'asc' | 'desc' = 'asc', + tran?: DBTransaction, + ): Promise<NodeBucket> { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.getBucket(bucketIndex, sort, order, tran), + ); + } + + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex( + `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, + ); + } + const bucketKey = nodesUtils.bucketKey(bucketIndex); + const bucket: NodeBucket = []; + if (sort === 'nodeId' || sort === 'distance') { + for await (const [key, nodeData] of tran.iterator( + { + reverse: order !== 'asc', + valueAsBuffer: false, + }, + [...this.nodeGraphBucketsDbPath, bucketKey], + )) { + const nodeId = nodesUtils.parseBucketDbKey(key[0] as Buffer); + bucket.push([nodeId, nodeData]); + } + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order, + ); + } + } else if (sort === 'lastUpdated') { + const bucketDbIterator = tran.iterator( + { valueAsBuffer: false }, + [...this.nodeGraphBucketsDbPath, bucketKey], + ); + try { + for await (const [, nodeIdBuffer] of tran.iterator( + { + reverse: order !== 'asc', + }, + [...this.nodeGraphLastUpdatedDbPath, bucketKey], + )) { + const nodeId = IdInternal.fromBuffer<NodeId>(nodeIdBuffer); + bucketDbIterator.seek(nodeIdBuffer); + // @ts-ignore + // eslint-disable-next-line + const iteratorResult = await bucketDbIterator.next(); + if (iteratorResult == null) never(); + const [, nodeData] = iteratorResult; + bucket.push([nodeId, nodeData]); + } + } finally { + // @ts-ignore + await bucketDbIterator.end(); + } + } + return bucket; } /** - * Returns all of the buckets in an array + * Gets all buckets.
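A short usage sketch of the three sort modes of getBucket above (a started NodeGraph instance is assumed; variable names are illustrative):

const byId = await nodeGraph.getBucket(0); // lexicographic by NodeId (default)
const byDistance = await nodeGraph.getBucket(0, 'distance'); // XOR distance to our own NodeId
const oldestFirst = await nodeGraph.getBucket(0, 'lastUpdated', 'asc'); // via the lastUpdated index sublevel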
+ * Buckets are always sorted by `NodeBucketIndex` first + * Then secondly by the `sort` parameter + * The `order` parameter applies to both, for example possible sorts: + * NodeBucketIndex asc, NodeID asc + * NodeBucketIndex desc, NodeId desc + * NodeBucketIndex asc, distance asc + * NodeBucketIndex desc, distance desc + * NodeBucketIndex asc, lastUpdated asc + * NodeBucketIndex desc, lastUpdated desc */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getAllBuckets(tran?: DBTransaction): Promise> { + public async *getBuckets( + sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', + order: 'asc' | 'desc' = 'asc', + tran?: DBTransaction, + ): AsyncGenerator<[NodeBucketIndex, NodeBucket]> { + if (tran == null) { + const getBuckets = (tran) => this.getBuckets(sort, order, tran); + return yield* this.db.withTransactionG(async function* (tran) { + return yield* getBuckets(tran); + }); + } + + let bucketIndex: NodeBucketIndex | undefined = undefined; + let bucket: NodeBucket = []; + if (sort === 'nodeId' || sort === 'distance') { + for await (const [key, nodeData] of tran.iterator( + { + reverse: order !== 'asc', + valueAsBuffer: false, + }, + this.nodeGraphBucketsDbPath, + )) { + const { bucketIndex: bucketIndex_, nodeId } = + nodesUtils.parseBucketsDbKey(key); + if (bucketIndex == null) { + // First entry of the first bucket + bucketIndex = bucketIndex_; + bucket.push([nodeId, nodeData]); + } else if (bucketIndex === bucketIndex_) { + // Subsequent entries of the same bucket + bucket.push([nodeId, nodeData]); + } else if (bucketIndex !== bucketIndex_) { + // New bucket + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order, + ); + } + yield [bucketIndex, bucket]; + bucketIndex = bucketIndex_; + bucket = [[nodeId, nodeData]]; + } + } + // Yield the last bucket if it exists + if (bucketIndex != null) { + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order, + ); + } + yield [bucketIndex, bucket]; + } + } else if (sort === 'lastUpdated') { + const bucketsDbIterator = tran.iterator( + { valueAsBuffer: false }, + this.nodeGraphBucketsDbPath, + ); + try { + for await (const [key] of tran.iterator( + { + reverse: order !== 'asc', + }, + this.nodeGraphLastUpdatedDbPath, + )) { + const { bucketIndex: bucketIndex_, nodeId } = + nodesUtils.parseLastUpdatedBucketsDbKey(key); + bucketsDbIterator.seek([key[0], key[2]]); + // @ts-ignore + // eslint-disable-next-line + const iteratorResult = await bucketsDbIterator.next(); + if (iteratorResult == null) never(); + const [, nodeData] = iteratorResult; + if (bucketIndex == null) { + // First entry of the first bucket + bucketIndex = bucketIndex_; + bucket.push([nodeId, nodeData]); + } else if (bucketIndex === bucketIndex_) { + // Subsequent entries of the same bucket + bucket.push([nodeId, nodeData]); + } else if (bucketIndex !== bucketIndex_) { + // New bucket + yield [bucketIndex, bucket]; + bucketIndex = bucketIndex_; + bucket = [[nodeId, nodeData]]; + } + } + // Yield the last bucket if it exists + if (bucketIndex != null) { + yield [bucketIndex, bucket]; + } + } finally { + // @ts-ignore + await bucketsDbIterator.end(); + } + } + } + + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async resetBuckets( + nodeIdOwn: NodeId, + tran?: DBTransaction, + ): Promise { if (tran == null) { - return this.withTransactionF(async (tran) => this.getAllBuckets(tran)); + return this.db.withTransactionF(async (tran) => + 
this.resetBuckets(nodeIdOwn, tran), + ); } - const buckets: Array = []; - for await (const [, bucket] of tran.iterator( - { keys: false, valueAsBuffer: false }, - [...this.nodeGraphBucketsDbPath], + + const logger = this.logger.getChild('resetBuckets'); + // Setup new space + const spaceNew = this.space === '0' ? '1' : '0'; + logger.debug('new space: ' + spaceNew); + const nodeGraphMetaDbPathNew = [...this.nodeGraphDbPath, 'meta' + spaceNew]; + const nodeGraphBucketsDbPathNew = [ + ...this.nodeGraphDbPath, + 'buckets' + spaceNew, + ]; + const nodeGraphLastUpdatedDbPathNew = [ + ...this.nodeGraphDbPath, + 'index' + spaceNew, + ]; + // Clear the new space (in case it wasn't cleaned properly last time) + await tran.clear(nodeGraphMetaDbPathNew); + await tran.clear(nodeGraphBucketsDbPathNew); + await tran.clear(nodeGraphLastUpdatedDbPathNew); + // Iterating over all entries across all buckets + + for await (const [key, nodeData] of tran.iterator( + { valueAsBuffer: false }, + this.nodeGraphBucketsDbPath, )) { - buckets.push(bucket); + // The key is a combined bucket key and node ID + const { bucketIndex: bucketIndexOld, nodeId } = + nodesUtils.parseBucketsDbKey(key); + const nodeIdEncoded = nodesUtils.encodeNodeId(nodeId); + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + // If the new own node ID is one of the existing node IDs, it is just dropped + // We only map to the new bucket if it isn't one of the existing node IDs + if (nodeId.equals(nodeIdOwn)) { + logger.debug( + `nodeId ${nodeIdEncoded} from bucket ${bucketIndexOld} was identical to new NodeId and was dropped.`, + ); + continue; + } + const bucketIndexNew = nodesUtils.bucketIndex(nodeIdOwn, nodeId); + const bucketKeyNew = nodesUtils.bucketKey(bucketIndexNew); + const metaPathNew = [...nodeGraphMetaDbPathNew, bucketKeyNew]; + const bucketPathNew = [...nodeGraphBucketsDbPathNew, bucketKeyNew]; + const indexPathNew = [...nodeGraphLastUpdatedDbPathNew, bucketKeyNew]; + const countNew = (await tran.get([...metaPathNew, 'count'])) ?? 
0; + if (countNew < this.nodeBucketLimit) { + await tran.put([...metaPathNew, 'count'], countNew + 1); + } else { + let oldestIndexKey: KeyPath | undefined = undefined; + let oldestNodeId: NodeId | undefined = undefined; + for await (const [key] of tran.iterator( + { + limit: 1, + }, + indexPathNew, + )) { + oldestIndexKey = key; + ({ nodeId: oldestNodeId } = + nodesUtils.parseLastUpdatedBucketDbKey(key)); + } + await tran.del([ + ...bucketPathNew, + nodesUtils.bucketDbKey(oldestNodeId!), + ]); + await tran.del([...indexPathNew, ...oldestIndexKey!]); + } + if (bucketIndexOld !== bucketIndexNew) { + logger.debug( + `nodeId ${nodeIdEncoded} moved ${bucketIndexOld}=>${bucketIndexNew}`, + ); + } else { + logger.debug(`nodeId ${nodeIdEncoded} unchanged ${bucketIndexOld}`); + } + await tran.put([...bucketPathNew, nodeIdKey], nodeData); + const lastUpdatedKey = nodesUtils.lastUpdatedKey(nodeData.lastUpdated); + await tran.put( + [...indexPathNew, lastUpdatedKey, nodeIdKey], + nodeIdKey, + true, + ); + } + // Swap to the new space + await tran.put([...this.nodeGraphDbPath, 'space'], spaceNew); + // Clear old space + await tran.clear(this.nodeGraphMetaDbPath); + await tran.clear(this.nodeGraphBucketsDbPath); + await tran.clear(this.nodeGraphLastUpdatedDbPath); + // Swap the spaces + this.space = spaceNew; + this.nodeGraphMetaDbPath = nodeGraphMetaDbPathNew; + this.nodeGraphBucketsDbPath = nodeGraphBucketsDbPathNew; + this.nodeGraphLastUpdatedDbPath = nodeGraphLastUpdatedDbPathNew; + } + + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getBucketMeta( + bucketIndex: NodeBucketIndex, + tran?: DBTransaction, + ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.getBucketMeta(bucketIndex, tran), + ); + } + + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex( + `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, + ); + } + const metaDomain = [ + ...this.nodeGraphMetaDbPath, + nodesUtils.bucketKey(bucketIndex), + ]; + const props = await Promise.all([ + tran.get([...metaDomain, 'count']), + ]); + const [count] = props; + // Bucket meta properties have defaults + return { + count: count ?? 0, + }; + } + + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getBucketMetaProp( + bucketIndex: NodeBucketIndex, + key: Key, + tran?: DBTransaction, + ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.getBucketMetaProp(bucketIndex, key, tran), + ); + } + + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex( + `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, + ); + } + const metaDomain = [ + ...this.nodeGraphMetaDbPath, + nodesUtils.bucketKey(bucketIndex), + ]; + // Bucket meta properties have defaults + let value; + switch (key) { + case 'count': + value = (await tran.get([...metaDomain, key])) ?? 0; + break; } - return buckets; + return value; } /** - * To be called on key renewal. Re-orders all nodes in all buckets with respect - * to the new node ID. - * NOTE: original nodes may be lost in this process. If they're redistributed - * to a newly full bucket, the least active nodes in the newly full bucket - * will be removed. + * Finds the set of nodes (of size k) known by the current node (i.e. in its + * buckets' database) that have the smallest distance to the target node (i.e. + * are closest to the target node). + * i.e. 
FIND_NODE RPC from Kademlia spec + * + * Used by the RPC service. + * + * @param nodeId the node ID to find other nodes closest to it + * @param limit the number of the closest nodes to return (by default, returns + * according to the maximum number of nodes per bucket) + * @param tran + * @returns a mapping containing exactly k nodeIds -> nodeAddresses (unless the + * current node has fewer than k nodes in all of its buckets, in which case it + * returns all nodes it has knowledge of) */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async refreshBuckets(tran?: DBTransaction): Promise<void> { + public async getClosestNodes( + nodeId: NodeId, + limit: number = this.nodeBucketLimit, + tran?: DBTransaction, + ): Promise<NodeBucket> { if (tran == null) { - return this.withTransactionF(async (tran) => this.refreshBuckets(tran)); + return this.db.withTransactionF(async (tran) => + this.getClosestNodes(nodeId, limit, tran), + ); } - // Get a local copy of all the buckets - const buckets = await this.getAllBuckets(tran); - // Wrap as a batch operation. We want to rollback if we encounter any - // errors (such that we don't clear the DB without re-adding the nodes) - // 1. Delete every bucket - for await (const [keyPath] of tran.iterator({ values: false }, [ - ...this.nodeGraphBucketsDbPath, - ])) { - const key = keyPath[0].toString(); - const hexBucketPath = [...this.nodeGraphBucketsDbPath, key]; - await tran.del(hexBucketPath); + + // Buckets map to the target node in the following way: + // 1. 0, 1, ..., T-1 -> T + // 2. T -> 0, 1, ..., T-1 + // 3. T+1, T+2, ..., 255 are unchanged + // We need to obtain nodes in the following bucket order + // 1. T + // 2. iterate over 0 ---> T-1 + // 3. iterate over T+1 ---> K + // Need to work out the relevant bucket to start from + const localNodeId = this.keyManager.getNodeId(); + const startingBucket = localNodeId.equals(nodeId) + ? 0 + : nodesUtils.bucketIndex(this.keyManager.getNodeId(), nodeId); + // Getting the whole target's bucket first + const nodeIds: NodeBucket = await this.getBucket( + startingBucket, + undefined, + undefined, + tran, + ); + // We need to iterate over the key stream + // When streaming we want all nodes in the starting bucket + // The keys take the form `!(lexpack bucketId)!(nodeId)` + // We can just use `!(lexpack bucketId)` to start from + // Less than `!(bucketId 101)!` gets us buckets 100 and lower + // greater than `!(bucketId 99)!` gets us buckets 100 and greater + if (nodeIds.length < limit) { + // Just before target bucket + const bucketIdKey = Buffer.from(nodesUtils.bucketKey(startingBucket)); + const remainingLimit = limit - nodeIds.length; + // Iterate over lower buckets + for await (const [key, nodeData] of tran.iterator( + { + lt: [bucketIdKey, ''], + limit: remainingLimit, + valueAsBuffer: false, + }, + this.nodeGraphBucketsDbPath, + )) { + const info = nodesUtils.parseBucketsDbKey(key); + nodeIds.push([info.nodeId, nodeData]); + } } - const tempBuckets: Record<string, NodeBucket> = {}; - // 2.
Re-add all the nodes from all buckets - for (const b of buckets) { - for (const n of Object.keys(b)) { - const nodeId = IdInternal.fromString(n); - const newIndex = this.getBucketIndex(nodeId); - let expectedBucket = tempBuckets[newIndex]; - // The following is more or less copied from setNodeOps - if (expectedBucket == null) { - expectedBucket = {}; - } - const bucketEntries = Object.entries(expectedBucket); - // Add the old node - expectedBucket[nodeId] = { - address: b[nodeId].address, - lastUpdated: b[nodeId].lastUpdated, - }; - // If, with the old node added, we exceed the limit - if (bucketEntries.length > this.maxNodesPerBucket) { - // Then, with the old node added, find the least active and remove - const leastActive = bucketEntries.reduce((prev, curr) => { - return prev[1].lastUpdated < curr[1].lastUpdated ? prev : curr; - }); - delete expectedBucket[leastActive[0]]; - } - // Add this reconstructed bucket (with old node) into the temp storage - tempBuckets[newIndex] = expectedBucket; + if (nodeIds.length < limit) { + // Just after target bucket + const bucketId = Buffer.from(nodesUtils.bucketKey(startingBucket + 1)); + const remainingLimit = limit - nodeIds.length; + // Iterate over ids further away + for await (const [key, nodeData] of tran.iterator( + { + gt: [bucketId, ''], + limit: remainingLimit, + valueAsBuffer: false, + }, + this.nodeGraphBucketsDbPath, + )) { + const info = nodesUtils.parseBucketsDbKey(key); + nodeIds.push([info.nodeId, nodeData]); } } - // Now that we've reconstructed all the buckets, perform batch operations - // on a bucket level (i.e. per bucket, instead of per node) - for (const bucketIndex in tempBuckets) { - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - bucketIndex, - ] as unknown as KeyPath; - await tran.put(bucketPath, tempBuckets[bucketIndex]); + // If no nodes were found, return nothing + if (nodeIds.length === 0) return []; + // Need to get the whole of the last bucket + const lastBucketIndex = nodesUtils.bucketIndex( + this.keyManager.getNodeId(), + nodeIds[nodeIds.length - 1][0], + ); + const lastBucket = await this.getBucket( + lastBucketIndex, + undefined, + undefined, + tran, + ); + // Pop off elements of the same bucket to avoid duplicates + let element = nodeIds.pop(); + while ( + element != null && + nodesUtils.bucketIndex(this.keyManager.getNodeId(), element[0]) === + lastBucketIndex + ) { + element = nodeIds.pop(); + } + if (element != null) nodeIds.push(element); + // Adding last bucket to the list + nodeIds.push(...lastBucket); + + nodesUtils.bucketSortByDistance(nodeIds, nodeId, 'asc'); + return nodeIds.slice(0, limit); + } + + /** + * Sets a bucket meta property + * This is protected because users cannot directly manipulate bucket meta + */ + protected async setBucketMetaProp<Key extends keyof NodeBucketMeta>( + bucketIndex: NodeBucketIndex, + key: Key, + value: NodeBucketMeta[Key], + tran: DBTransaction, + ): Promise<void> { const metaKey = [...this.nodeGraphMetaDbPath, nodesUtils.bucketKey(bucketIndex), key, ]; await tran.put(metaKey, value); + return; + } + + /** + * Derive the bucket index of the k-buckets from the new `NodeId` + * The bucket key is the string encoded version of bucket index + * that preserves lexicographic order + */ + public bucketIndex(nodeId: NodeId): [NodeBucketIndex, string] { + const nodeIdOwn = this.keyManager.getNodeId(); + if (nodeId.equals(nodeIdOwn)) { + throw new nodesErrors.ErrorNodeGraphSameNodeId(); } + 
const bucketIndex = nodesUtils.bucketIndex(nodeIdOwn, nodeId); + const bucketKey = nodesUtils.bucketKey(bucketIndex); + return [bucketIndex, bucketKey]; } } diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 6adf778679..bb264de4ff 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -1,23 +1,34 @@ import type { DB, DBTransaction } from '@matrixai/db'; import type NodeConnectionManager from './NodeConnectionManager'; import type NodeGraph from './NodeGraph'; +import type Queue from './Queue'; import type KeyManager from '../keys/KeyManager'; import type { PublicKeyPem } from '../keys/types'; import type Sigchain from '../sigchain/Sigchain'; import type { ChainData, ChainDataEncoded } from '../sigchain/types'; -import type { NodeId, NodeAddress, NodeBucket } from '../nodes/types'; +import type { + NodeId, + NodeAddress, + NodeBucket, + NodeBucketIndex, +} from '../nodes/types'; import type { ClaimEncoded } from '../claims/types'; +import type { Timer } from '../types'; +import type { PromiseDeconstructed } from '../utils/utils'; import Logger from '@matrixai/logger'; +import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; +import * as networkUtils from '../network/utils'; import * as validationUtils from '../validation/utils'; import * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import * as claimsErrors from '../claims/errors'; -import * as networkErrors from '../network/errors'; -import * as networkUtils from '../network/utils'; import * as sigchainUtils from '../sigchain/utils'; import * as claimsUtils from '../claims/utils'; +import { promise, timerStart } from '../utils/utils'; +interface NodeManager extends StartStop {} +@StartStop() class NodeManager { protected db: DB; protected logger: Logger; @@ -25,6 +36,18 @@ class NodeManager { protected keyManager: KeyManager; protected nodeConnectionManager: NodeConnectionManager; protected nodeGraph: NodeGraph; + protected queue: Queue; + // Refresh bucket timer + protected refreshBucketDeadlineMap: Map = new Map(); + protected refreshBucketTimer: NodeJS.Timer; + protected refreshBucketNext: NodeBucketIndex; + public readonly refreshBucketTimerDefault; + protected refreshBucketQueue: Set = new Set(); + protected refreshBucketQueueRunning: boolean = false; + protected refreshBucketQueueRunner: Promise; + protected refreshBucketQueuePlug_: PromiseDeconstructed = promise(); + protected refreshBucketQueueDrained_: PromiseDeconstructed = promise(); + protected refreshBucketQueueAbortController: AbortController; constructor({ db, @@ -32,6 +55,8 @@ class NodeManager { sigchain, nodeConnectionManager, nodeGraph, + queue, + refreshBucketTimerDefault = 3600000, // 1 hour in milliseconds logger, }: { db: DB; @@ -39,6 +64,8 @@ class NodeManager { sigchain: Sigchain; nodeConnectionManager: NodeConnectionManager; nodeGraph: NodeGraph; + queue: Queue; + refreshBucketTimerDefault?: number; logger?: Logger; }) { this.logger = logger ?? 
new Logger(this.constructor.name); @@ -47,32 +74,50 @@ class NodeManager { this.sigchain = sigchain; this.nodeConnectionManager = nodeConnectionManager; this.nodeGraph = nodeGraph; + this.queue = queue; + this.refreshBucketTimerDefault = refreshBucketTimerDefault; + } + + public async start() { + this.logger.info(`Starting ${this.constructor.name}`); + this.startRefreshBucketTimers(); + this.refreshBucketQueueRunner = this.startRefreshBucketQueue(); + this.logger.info(`Started ${this.constructor.name}`); + } + + public async stop() { + this.logger.info(`Stopping ${this.constructor.name}`); + await this.stopRefreshBucketTimers(); + await this.stopRefreshBucketQueue(); + this.logger.info(`Stopped ${this.constructor.name}`); } /** * Determines whether a node in the Polykey network is online. * @return true if online, false if offline + * @param nodeId - NodeId of the node we're pinging + * @param address - Optional Host and Port we want to ping + * @param timer Connection timeout timer */ - public async pingNode(targetNodeId: NodeId): Promise { - const targetAddress: NodeAddress = - await this.nodeConnectionManager.findNode(targetNodeId); - try { - // Attempt to open a connection via the forward proxy - // i.e. no NodeConnection object created (no need for GRPCClient) - await this.nodeConnectionManager.holePunchForward( - targetNodeId, - await networkUtils.resolveHost(targetAddress.host), - targetAddress.port, - ); - } catch (e) { - // If the connection request times out, then return false - if (e instanceof networkErrors.ErrorConnectionStart) { - return false; - } - // Throw any other error back up the callstack - throw e; + public async pingNode( + nodeId: NodeId, + address?: NodeAddress, + timer?: Timer, + ): Promise { + // We need to attempt a connection using the proxies + // For now we will just do a forward connect + relay message + const targetAddress = + address ?? (await this.nodeConnectionManager.findNode(nodeId)); + if (targetAddress == null) { + throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); } - return true; + const targetHost = await networkUtils.resolveHost(targetAddress.host); + return await this.nodeConnectionManager.pingNode( + nodeId, + targetHost, + targetAddress.port, + timer, + ); } /** @@ -311,9 +356,9 @@ class NodeManager { */ public async getNodeAddress( nodeId: NodeId, - tran?: DBTransaction, + tran: DBTransaction, ): Promise { - return await this.nodeGraph.getNode(nodeId, tran); + return (await this.nodeGraph.getNode(nodeId, tran))?.address; } /** @@ -324,9 +369,9 @@ class NodeManager { */ public async knowsNode( targetNodeId: NodeId, - tran?: DBTransaction, + tran: DBTransaction, ): Promise { - return await this.nodeGraph.knowsNode(targetNodeId, tran); + return (await this.nodeGraph.getNode(targetNodeId, tran)) != null; } /** @@ -336,51 +381,344 @@ class NodeManager { bucketIndex: number, tran?: DBTransaction, ): Promise { - return await this.nodeGraph.getBucket(bucketIndex, tran); + return await this.nodeGraph.getBucket( + bucketIndex, + undefined, + undefined, + tran, + ); } /** - * Sets a node in the NodeGraph + * Adds a node to the node graph. 
This assumes that you have already authenticated the node + * Updates the node if the node already exists + * This operation is blocking by default - set `block` to false to make it non-blocking + * @param nodeId - Id of the node we wish to add + * @param nodeAddress - Expected address of the node we want to add + * @param block - Flag for if the operation should block or utilize the async queue + * @param force - Flag for if we want to add the node without authenticating or if the bucket is full. + * This will drop the oldest node in favor of the new. + * @param timeout Connection timeout + * @param tran */ + @ready(new nodesErrors.ErrorNodeManagerNotRunning()) public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, + block: boolean = true, + force: boolean = false, + timeout?: number, tran?: DBTransaction, ): Promise<void> { - return await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + // We don't want to add our own node + if (nodeId.equals(this.keyManager.getNodeId())) { + this.logger.debug('Is own NodeId, skipping'); + return; + } + + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.setNode(nodeId, nodeAddress, block, force, timeout, tran), + ); + } + + // When adding a node we need to handle 3 cases + // 1. The node already exists. We need to update its last updated field + // 2. The node doesn't exist and bucket has room. + // We need to add the node to the bucket + // 3. The node doesn't exist and the bucket is full. + // We need to ping the oldest node. If the ping succeeds we need to update + // the lastUpdated of the oldest node and drop the new one. If the ping + // fails we delete the old node and add in the new one. + const nodeData = await this.nodeGraph.getNode(nodeId, tran); + // If this is a new entry, check the bucket limit + const [bucketIndex] = this.nodeGraph.bucketIndex(nodeId); + const count = await this.nodeGraph.getBucketMetaProp( + bucketIndex, + 'count', + tran, + ); + if (nodeData != null || count < this.nodeGraph.nodeBucketLimit) { + // Either already exists or has room in the bucket + // We want to add or update the node + await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + // Updating the refreshBucket timer + this.refreshBucketUpdateDeadline(bucketIndex); + } else { + // We want to add a node but the bucket is full + // We need to ping the oldest node + if (force) { + // We just add the new node anyway without checking the old one + const oldNodeId = ( + await this.nodeGraph.getOldestNode(bucketIndex, 1, tran) + ).pop()!; + this.logger.debug( + `Force was set, removing ${nodesUtils.encodeNodeId( + oldNodeId, + )} and adding ${nodesUtils.encodeNodeId(nodeId)}`, + ); + await this.nodeGraph.unsetNode(oldNodeId, tran); + await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + // Updating the refreshBucket timer + this.refreshBucketUpdateDeadline(bucketIndex); + return; + } else if (block) { + this.logger.debug( + `Bucket was full and blocking was true, garbage collecting old nodes to add ${nodesUtils.encodeNodeId( + nodeId, + )}`, + ); + await this.garbageCollectOldNode( + bucketIndex, + nodeId, + nodeAddress, + timeout, + ); + } else { + this.logger.debug( + `Bucket was full and blocking was false, adding ${nodesUtils.encodeNodeId( + nodeId, + )} to queue`, + ); + // Re-attempt this later asynchronously by adding to the queue + this.queue.push(() => + this.setNode(nodeId, nodeAddress, true, false, timeout), + ); + } + } } - /** - * Updates the node in the NodeGraph - */ - public async updateNode( + private
async garbageCollectOldNode( + bucketIndex: number, nodeId: NodeId, - nodeAddress?: NodeAddress, - tran?: DBTransaction, - ): Promise<void> { - return await this.nodeGraph.updateNode(nodeId, nodeAddress, tran); + nodeAddress: NodeAddress, + timeout?: number, + ) { + const oldestNodeIds = await this.nodeGraph.getOldestNode(bucketIndex, 3); + // We want to concurrently ping the nodes + const pingPromises = oldestNodeIds.map((nodeId) => { + const doPing = async (): Promise<{ + nodeId: NodeId; + success: boolean; + }> => { + // This needs to return nodeId and ping result + const data = await this.nodeGraph.getNode(nodeId); + if (data == null) return { nodeId, success: false }; + const timer = timeout != null ? timerStart(timeout) : undefined; + const result = await this.pingNode(nodeId, nodeAddress, timer); + return { nodeId, success: result }; + }; + return doPing(); + }); + const pingResults = await Promise.all(pingPromises); + for (const { nodeId, success } of pingResults) { + if (success) { + // Ping succeeded, update the node + this.logger.debug( + `Ping succeeded for ${nodesUtils.encodeNodeId(nodeId)}`, + ); + const node = (await this.nodeGraph.getNode(nodeId))!; + await this.nodeGraph.setNode(nodeId, node.address); + // Updating the refreshBucket timer + this.refreshBucketUpdateDeadline(bucketIndex); + } else { + this.logger.debug(`Ping failed for ${nodesUtils.encodeNodeId(nodeId)}`); + // Otherwise we remove the node + await this.nodeGraph.unsetNode(nodeId); + } + } + // Check if we now have room and add the new node + const count = await this.nodeGraph.getBucketMetaProp(bucketIndex, 'count'); + if (count < this.nodeGraph.nodeBucketLimit) { + this.logger.debug(`Bucket ${bucketIndex} now has room, adding new node`); + await this.nodeGraph.setNode(nodeId, nodeAddress); + // Updating the refreshBucket timer + this.refreshBucketUpdateDeadline(bucketIndex); + } } /** * Removes a node from the NodeGraph */ - public async unsetNode(nodeId: NodeId, tran?: DBTransaction): Promise<void> { + public async unsetNode(nodeId: NodeId, tran: DBTransaction): Promise<void> { return await this.nodeGraph.unsetNode(nodeId, tran); } /** - * Gets all buckets from the NodeGraph + * To be called on key renewal. Re-orders all nodes in all buckets with respect + * to the new node ID. */ - public async getAllBuckets(tran?: DBTransaction): Promise<Array<NodeBucket>> { - return await this.nodeGraph.getAllBuckets(tran); + public async resetBuckets(): Promise<void> { + return await this.nodeGraph.resetBuckets(this.keyManager.getNodeId()); } /** - * To be called on key renewal. Re-orders all nodes in all buckets with respect - * to the new node ID. + * Kademlia refresh bucket operation. + * It picks a random node within a bucket and does a search for that node. + * Connections during the search will share node information with other + * nodes.
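As a usage sketch of the refresh machinery that follows (a started NodeManager instance is assumed; names are illustrative):

// Schedule bucket 200 for a background refresh ahead of its deadline
nodeManager.refreshBucketQueueAdd(200);
// Await quiescence, e.g. in tests, before asserting on the NodeGraph
await nodeManager.refreshBucketQueueDrained();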
+ * @param bucketIndex + * @param options */ - public async refreshBuckets(tran?: DBTransaction): Promise { - return await this.nodeGraph.refreshBuckets(tran); + public async refreshBucket( + bucketIndex: NodeBucketIndex, + options: { signal?: AbortSignal } = {}, + ) { + const { signal } = { ...options }; + // We need to generate a random nodeId for this bucket + const nodeId = this.keyManager.getNodeId(); + const bucketRandomNodeId = nodesUtils.generateRandomNodeIdForBucket( + nodeId, + bucketIndex, + ); + // We then need to start a findNode procedure + await this.nodeConnectionManager.findNode(bucketRandomNodeId, { signal }); + } + + // Refresh bucket activity timer methods + + private startRefreshBucketTimers() { + // Setting initial bucket to refresh + this.refreshBucketNext = 0; + // Setting initial deadline + this.refreshBucketTimerReset(this.refreshBucketTimerDefault); + + for ( + let bucketIndex = 0; + bucketIndex < this.nodeGraph.nodeIdBits; + bucketIndex++ + ) { + const deadline = Date.now() + this.refreshBucketTimerDefault; + this.refreshBucketDeadlineMap.set(bucketIndex, deadline); + } + } + + private async stopRefreshBucketTimers() { + clearTimeout(this.refreshBucketTimer); + } + + private refreshBucketTimerReset(timeout: number) { + clearTimeout(this.refreshBucketTimer); + this.refreshBucketTimer = setTimeout(() => { + this.refreshBucketRefreshTimer(); + }, timeout); + } + + public refreshBucketUpdateDeadline(bucketIndex: NodeBucketIndex) { + // Update the map deadline + this.refreshBucketDeadlineMap.set( + bucketIndex, + Date.now() + this.refreshBucketTimerDefault, + ); + // If the bucket was pending a refresh we remove it + this.refreshBucketQueueRemove(bucketIndex); + if (bucketIndex === this.refreshBucketNext) { + // Bucket is same as next bucket, this affects the timer + this.refreshBucketRefreshTimer(); + } + } + + private refreshBucketRefreshTimer() { + // Getting new closest deadline + let closestBucket = this.refreshBucketNext; + let closestDeadline = Date.now() + this.refreshBucketTimerDefault; + const now = Date.now(); + for (const [bucketIndex, deadline] of this.refreshBucketDeadlineMap) { + // Skip any queued buckets marked by 0 deadline + if (deadline === 0) continue; + if (deadline <= now) { + // Deadline for this has already passed, we add it to the queue + this.refreshBucketQueueAdd(bucketIndex); + continue; + } + if (deadline < closestDeadline) { + closestBucket = bucketIndex; + closestDeadline = deadline; + } + } + // Working out time left + const timeout = closestDeadline - Date.now(); + this.logger.debug( + `Refreshing refreshBucket timer with new timeout ${timeout}`, + ); + // Updating timer and next + this.refreshBucketNext = closestBucket; + this.refreshBucketTimerReset(timeout); + } + + // Refresh bucket async queue methods + + public refreshBucketQueueAdd(bucketIndex: NodeBucketIndex) { + this.logger.debug(`Adding bucket ${bucketIndex} to queue`); + this.refreshBucketDeadlineMap.set(bucketIndex, 0); + this.refreshBucketQueue.add(bucketIndex); + this.refreshBucketQueueUnplug(); + } + + public refreshBucketQueueRemove(bucketIndex: NodeBucketIndex) { + this.logger.debug(`Removing bucket ${bucketIndex} from queue`); + this.refreshBucketQueue.delete(bucketIndex); + } + + public async refreshBucketQueueDrained() { + await this.refreshBucketQueueDrained_.p; + } + + private async startRefreshBucketQueue(): Promise { + this.refreshBucketQueueRunning = true; + this.refreshBucketQueuePlug(); + let iterator: IterableIterator | undefined; + 
this.refreshBucketQueueAbortController = new AbortController(); + const pace = async () => { + // Wait for plug + await this.refreshBucketQueuePlug_.p; + if (iterator == null) { + iterator = this.refreshBucketQueue[Symbol.iterator](); + } + return this.refreshBucketQueueRunning; + }; + while (await pace()) { + const bucketIndex: NodeBucketIndex = iterator?.next().value; + if (bucketIndex == null) { + // Iterator is empty, plug and continue + iterator = undefined; + this.refreshBucketQueuePlug(); + continue; + } + // Do the job + this.logger.debug( + `processing refreshBucket for bucket ${bucketIndex}, ${this.refreshBucketQueue.size} left in queue`, + ); + try { + await this.refreshBucket(bucketIndex, { + signal: this.refreshBucketQueueAbortController.signal, + }); + } catch (e) { + if (e instanceof nodesErrors.ErrorNodeAborted) break; + throw e; + } + // Remove from queue and update bucket deadline + this.refreshBucketQueue.delete(bucketIndex); + this.refreshBucketUpdateDeadline(bucketIndex); + } + this.logger.debug('startRefreshBucketQueue has ended'); + } + + private async stopRefreshBucketQueue(): Promise<void> { + // Flag end and await queue finish + this.refreshBucketQueueAbortController.abort(); + this.refreshBucketQueueRunning = false; + this.refreshBucketQueueUnplug(); + } + + private refreshBucketQueuePlug() { + this.refreshBucketQueuePlug_ = promise(); + this.refreshBucketQueueDrained_?.resolveP(); + } + + private refreshBucketQueueUnplug() { + this.refreshBucketQueueDrained_ = promise(); + this.refreshBucketQueuePlug_?.resolveP(); + } } diff --git a/src/nodes/Queue.ts b/src/nodes/Queue.ts new file mode 100644 index 0000000000..602efd5ae7 --- /dev/null +++ b/src/nodes/Queue.ts @@ -0,0 +1,91 @@ +import type { PromiseDeconstructed } from '../utils'; +import Logger from '@matrixai/logger'; +import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; +import * as nodesErrors from './errors'; +import { promise } from '../utils'; + +interface Queue extends StartStop {} +@StartStop() +class Queue { + protected logger: Logger; + protected end: boolean = false; + protected queue: Array<() => Promise<void>> = []; + protected runner: Promise<void>; + protected plug_: PromiseDeconstructed<void> = promise(); + protected drained_: PromiseDeconstructed<void> = promise(); + + constructor({ logger }: { logger?: Logger }) { + this.logger = logger ??
new Logger(this.constructor.name); + } + + public async start() { + this.logger.info(`Starting ${this.constructor.name}`); + const start = async () => { + this.logger.debug('Starting queue'); + this.plug(); + const pace = async () => { + await this.plug_.p; + return !this.end; + }; + // While queue hasn't ended + while (await pace()) { + const job = this.queue.shift(); + if (job == null) { + // If the queue is empty then we pause the queue + this.plug(); + continue; + } + try { + await job(); + } catch (e) { + if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; + } + } + this.logger.debug('queue has ended'); + }; + this.runner = start(); + this.logger.info(`Started ${this.constructor.name}`); + } + + public async stop() { + this.logger.info(`Stopping ${this.constructor.name}`); + this.logger.debug('Stopping queue'); + // Tell the queue runner to end + this.end = true; + this.unplug(); + // Wait for runner to finish its current job + await this.runner; + this.logger.info(`Stopped ${this.constructor.name}`); + } + + /** + * This adds an async operation to the queue + */ + public push(f: () => Promise<void>): void { + this.queue.push(f); + this.unplug(); + } + + @ready(new nodesErrors.ErrorQueueNotRunning()) + public async drained(): Promise<void> { + await this.drained_.p; + } + + private plug(): void { + this.logger.debug('Plugging queue'); + // Pausing queue + this.plug_ = promise(); + // Signaling queue is empty + this.drained_.resolveP(); + } + + private unplug(): void { + this.logger.debug('Unplugging queue'); + // Starting queue + this.plug_.resolveP(); + // Signaling queue is running + this.drained_ = promise(); + } +} + +export default Queue; diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index 1c491bde49..bc0185025d 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -2,6 +2,21 @@ import { ErrorPolykey, sysexits } from '../errors'; class ErrorNodes extends ErrorPolykey {} +class ErrorNodeAborted extends ErrorNodes { + static description = 'Operation was aborted'; + exitCode = sysexits.USAGE; +} + +class ErrorNodeManagerNotRunning extends ErrorNodes { + static description = 'NodeManager is not running'; + exitCode = sysexits.USAGE; +} + +class ErrorQueueNotRunning extends ErrorNodes { + static description = 'Queue is not running'; + exitCode = sysexits.USAGE; +} + class ErrorNodeGraphRunning extends ErrorNodes { static description = 'NodeGraph is running'; exitCode = sysexits.USAGE; } @@ -37,6 +52,11 @@ class ErrorNodeGraphSameNodeId extends ErrorNodes { exitCode = sysexits.USAGE; } +class ErrorNodeGraphBucketIndex extends ErrorNodes { + static description = 'Bucket index is out of range'; + exitCode = sysexits.USAGE; +} + class ErrorNodeConnectionDestroyed extends ErrorNodes { static description = 'NodeConnection is destroyed'; exitCode = sysexits.USAGE; } @@ -66,9 +86,17 @@ class ErrorNodeConnectionHostWildcard extends ErrorNodes { static description = 'An IP wildcard was provided for the target host'; exitCode = sysexits.USAGE; } +class ErrorNodePingFailed extends ErrorNodes { + static description = + 'Failed to ping the node when attempting to authenticate'; + exitCode = sysexits.NOHOST; +} export { ErrorNodes, + ErrorNodeAborted, + ErrorNodeManagerNotRunning, + ErrorQueueNotRunning, ErrorNodeGraphRunning, ErrorNodeGraphNotRunning, ErrorNodeGraphDestroyed, @@ -76,10 +104,12 @@ export { ErrorNodeGraphEmptyDatabase, ErrorNodeGraphOversizedBucket, ErrorNodeGraphSameNodeId, + ErrorNodeGraphBucketIndex, ErrorNodeConnectionDestroyed, ErrorNodeConnectionTimeout,
ErrorNodeConnectionInfoNotExist, ErrorNodeConnectionPublicKeyNotFound, ErrorNodeConnectionManagerNotRunning, ErrorNodeConnectionHostWildcard, + ErrorNodePingFailed, }; diff --git a/src/nodes/types.ts b/src/nodes/types.ts index ffb9168511..37775be9d7 100644 --- a/src/nodes/types.ts +++ b/src/nodes/types.ts @@ -4,6 +4,10 @@ import type { Host, Hostname, Port } from '../network/types'; import type { Claim, ClaimId } from '../claims/types'; import type { ChainData } from '../sigchain/types'; +// This should be a string +// actually cause it is a domain +type NodeGraphSpace = '0' | '1'; + type NodeId = Opaque<'NodeId', Id>; type NodeIdString = Opaque<'NodeIdString', string>; type NodeIdEncoded = Opaque<'NodeIdEncoded', string>; @@ -13,14 +17,34 @@ type NodeAddress = { port: Port; }; -type SeedNodes = Record; +type NodeBucketIndex = number; +// Type NodeBucket = Record; + +// TODO: +// No longer need to use NodeIdString +// It's an array, if you want to lookup +// It's ordered by the last updated date +// On the other hand, does this matter +// Not really? +// USE THIS TYPE INSTEAD +type NodeBucket = Array<[NodeId, NodeData]>; + +type NodeBucketMeta = { + count: number; +}; + +// Just make the bucket entries also +// bucketIndex anot as a key +// but as the domain +// !!NodeGraph!!meta!!ff!!count type NodeData = { - id: NodeId; address: NodeAddress; - distance: BigInt; + lastUpdated: number; }; +type SeedNodes = Record; + /** * A claim made on a node. That is, can be either: * - a claim from a node -> node @@ -41,16 +65,6 @@ type NodeInfo = { chain: ChainData; }; -type NodeBucketIndex = number; - -// The data type to be stored in each leveldb entry for the node table -type NodeBucket = { - [key: string]: { - address: NodeAddress; - lastUpdated: Date; - }; -}; - // Only 1 domain, so don't need a 'domain' value (like /gestalts/types.ts) type NodeGraphOp_ = { // Bucket index @@ -72,10 +86,12 @@ export type { NodeIdEncoded, NodeAddress, SeedNodes, - NodeData, NodeClaim, NodeInfo, NodeBucketIndex, + NodeBucketMeta, NodeBucket, + NodeData, NodeGraphOp, + NodeGraphSpace, }; diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index 696e31d43b..1fe3c799d9 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -1,29 +1,75 @@ -import type { NodeData, NodeId, NodeIdEncoded } from './types'; +import type { + NodeBucket, + NodeBucketIndex, + NodeId, + NodeIdEncoded, +} from './types'; +import type { KeyPath } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; +import lexi from 'lexicographic-integer'; +import { utils as dbUtils } from '@matrixai/db'; import { bytes2BigInt } from '../utils'; +import * as keysUtils from '../keys/utils'; + +const sepBuffer = dbUtils.sep; /** - * Compute the distance between two nodes. - * distance = nodeId1 ^ nodeId2 - * where ^ = bitwise XOR operator + * Encodes the NodeId as a `base32hex` string */ -function calculateDistance(nodeId1: NodeId, nodeId2: NodeId): bigint { - const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); - return bytes2BigInt(distance); +function encodeNodeId(nodeId: NodeId): NodeIdEncoded { + return nodeId.toMultibase('base32hex') as NodeIdEncoded; } /** - * Find the correct index of the k-bucket to add a new node to. 
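The move from a keyed record to `Array<[NodeId, NodeData]>` for `NodeBucket` (src/nodes/types.ts above) keeps bucket entries ordered and cheap to sort; a hedged sketch of what changes for consumers:

// Before: lookup by encoded string key
// const entry = bucket[nodeIdString];
// After: entries are ordered tuples, so find by NodeId equality instead
const entry = bucket.find(([id]) => id.equals(nodeId));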
diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts
index 696e31d43b..1fe3c799d9 100644
--- a/src/nodes/utils.ts
+++ b/src/nodes/utils.ts
@@ -1,29 +1,75 @@
-import type { NodeData, NodeId, NodeIdEncoded } from './types';
+import type {
+  NodeBucket,
+  NodeBucketIndex,
+  NodeId,
+  NodeIdEncoded,
+} from './types';
+import type { KeyPath } from '@matrixai/db';
 import { IdInternal } from '@matrixai/id';
+import lexi from 'lexicographic-integer';
+import { utils as dbUtils } from '@matrixai/db';
 import { bytes2BigInt } from '../utils';
+import * as keysUtils from '../keys/utils';
+
+const sepBuffer = dbUtils.sep;
 
 /**
- * Compute the distance between two nodes.
- * distance = nodeId1 ^ nodeId2
- * where ^ = bitwise XOR operator
- */
-function calculateDistance(nodeId1: NodeId, nodeId2: NodeId): bigint {
-  const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]);
-  return bytes2BigInt(distance);
-}
+/**
+ * Encodes the NodeId as a `base32hex` string
+ */
+function encodeNodeId(nodeId: NodeId): NodeIdEncoded {
+  return nodeId.toMultibase('base32hex') as NodeIdEncoded;
+}
+
+/**
+ * Decodes an encoded NodeId string into a NodeId
+ */
+function decodeNodeId(nodeIdEncoded: any): NodeId | undefined {
+  if (typeof nodeIdEncoded !== 'string') {
+    return;
+  }
+  const nodeId = IdInternal.fromMultibase(nodeIdEncoded);
+  if (nodeId == null) {
+    return;
+  }
+  // All NodeIds are 32 bytes long
+  // The NodeGraph requires a fixed size for Node Ids
+  if (nodeId.length !== 32) {
+    return;
+  }
+  return nodeId;
+}
 
 /**
+ * Calculate the bucket index that the target node should be located in.
  * A node's k-buckets are organised such that for the ith k-bucket where
  * 0 <= i < nodeIdBits, the contacts in this ith bucket are known to adhere to
  * the following inequality:
  *   2^i <= distance (from current node) < 2^(i+1)
+ * This means lower buckets will have fewer nodes than the upper buckets.
+ * The highest bucket will contain half of all possible nodes.
+ * The lowest bucket will only contain 1 node.
  *
  * NOTE: because XOR is a commutative operation (i.e. a XOR b = b XOR a), the
  * order of the passed parameters is actually irrelevant. These variables are
  * purely named for communicating function purpose.
+ *
+ * NOTE: Kademlia literature generally talks about buckets with 1-based
+ * indexing, ordered from largest to smallest: the 1st bucket is the far &
+ * large bucket, and the last, 256th bucket is the close bucket. This is
+ * reversed in our `NodeBucketIndex` encoding so that lexicographic sort
+ * orders our buckets from the closest bucket to the farthest bucket.
+ *
+ * To convert from `NodeBucketIndex` to the nth bucket in Kademlia literature:
+ *
+ * | NodeBucketIndex | Nth-Bucket |
+ * | --------------- | ---------- |
+ * | 255             | 1          | farthest & largest
+ * | 254             | 2          |
+ * | ...             | ...        |
+ * | 1               | 255        |
+ * | 0               | 256        | closest & smallest
  */
-function calculateBucketIndex(sourceNode: NodeId, targetNode: NodeId): number {
+function bucketIndex(sourceNode: NodeId, targetNode: NodeId): NodeBucketIndex {
   const distance = sourceNode.map((byte, i) => byte ^ targetNode[i]);
   const MSByteIndex = distance.findIndex((byte) => byte !== 0);
   if (MSByteIndex === -1) {
@@ -37,48 +83,251 @@ function calculateBucketIndex(sourceNode: NodeId, targetNode: NodeId): number {
 }
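A quick worked example of the indexing above (illustrative, not part of the diff; the elided function body presumably throws ErrorNodeGraphSameNodeId for identical IDs, judging by the Queue's catch clause):

  // IDs that differ only in the lowest bit have XOR distance 1, so the
  // most significant set bit is bit 0 and they land in bucket 0
  const a = IdInternal.fromBuffer(Buffer.alloc(32, 0)) as NodeId;
  const last = Buffer.alloc(32, 0);
  last[31] = 1;
  const b = IdInternal.fromBuffer(last) as NodeId;
  bucketIndex(a, b); // 0 — Kademlia's 256th (closest) bucket
  // Flipping the top bit instead gives distance 2^255, i.e. bucket 255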
 
 /**
- * A sorting compareFn to sort an array of NodeData by increasing distance.
+ * Encodes a bucket index into a bucket sublevel key
  */
-function sortByDistance(a: NodeData, b: NodeData) {
-  if (a.distance > b.distance) {
-    return 1;
-  } else if (a.distance < b.distance) {
-    return -1;
-  } else {
-    return 0;
-  }
-}
+function bucketKey(bucketIndex: NodeBucketIndex): string {
+  return lexi.pack(bucketIndex, 'hex');
+}
+
+/**
+ * Creates a key for the buckets sublevel
+ */
+function bucketsDbKey(bucketIndex: NodeBucketIndex, nodeId: NodeId): Buffer {
+  return Buffer.concat([
+    sepBuffer,
+    Buffer.from(bucketKey(bucketIndex)),
+    sepBuffer,
+    bucketDbKey(nodeId),
+  ]);
+}
+
+/**
+ * Creates a key for a single bucket sublevel
+ */
+function bucketDbKey(nodeId: NodeId): Buffer {
+  return nodeId.toBuffer();
+}
+
+/**
+ * Creates a key for the buckets-indexed-by-lastUpdated sublevel
+ */
+function lastUpdatedBucketsDbKey(
+  bucketIndex: NodeBucketIndex,
+  lastUpdated: number,
+  nodeId: NodeId,
+): Buffer {
+  return Buffer.concat([
+    sepBuffer,
+    Buffer.from(bucketKey(bucketIndex)),
+    sepBuffer,
+    lastUpdatedBucketDbKey(lastUpdated, nodeId),
+  ]);
+}
+
+/**
+ * Creates a key for a single bucket indexed by lastUpdated
+ */
+function lastUpdatedBucketDbKey(lastUpdated: number, nodeId: NodeId): Buffer {
+  return Buffer.concat([
+    Buffer.from(lexi.pack(lastUpdated, 'hex')),
+    Buffer.from('-'),
+    nodeId.toBuffer(),
+  ]);
+}
+
+function lastUpdatedKey(lastUpdated: number): Buffer {
+  return Buffer.from(lexi.pack(lastUpdated, 'hex'));
+}
+
+/**
+ * Parse the NodeGraph buckets sublevel key
+ * The keys look like `!<bucketKey>!<NodeId>`
+ * It is assumed that the `!` is the sublevel prefix.
+ */
+function parseBucketsDbKey(keyPath: KeyPath): {
+  bucketIndex: NodeBucketIndex;
+  bucketKey: string;
+  nodeId: NodeId;
+} {
+  const [bucketKeyPath, nodeIdKey] = keyPath;
+  if (bucketKeyPath == null || nodeIdKey == null) {
+    throw new TypeError('Buffer is not a NodeGraph buckets key');
+  }
+  const bucketKey = bucketKeyPath.toString();
+  const bucketIndex = lexi.unpack(bucketKey);
+  const nodeId = IdInternal.fromBuffer(Buffer.from(nodeIdKey));
+  return {
+    bucketIndex,
+    bucketKey,
+    nodeId,
+  };
+}
 
 /**
- * Encodes the NodeId as a `base32hex` string
- */
-function encodeNodeId(nodeId: NodeId): NodeIdEncoded {
-  return nodeId.toMultibase('base32hex') as NodeIdEncoded;
-}
+/**
+ * Parse the NodeGraph bucket key
+ * The keys look like `<NodeId>`
+ */
+function parseBucketDbKey(keyBuffer: Buffer): NodeId {
+  return IdInternal.fromBuffer(keyBuffer);
+}
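Illustrative round-trip through the key helpers above (not part of the diff; it assumes `!` is the sublevel separator that `dbUtils.sep` encodes, and that the KeyPath is built the same way the DB would split the key):

  const key = bucketsDbKey(255, nodeId);
  // key bytes: !<lexi.pack(255, 'hex')>!<nodeId bytes>
  const parsed = parseBucketsDbKey([Buffer.from(bucketKey(255)), nodeId.toBuffer()]);
  // parsed.bucketIndex === 255; parsed.nodeId holds the same bytes as nodeId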
 
 /**
- * Decodes an encoded NodeId string into a NodeId
- */
-function decodeNodeId(nodeIdEncoded: any): NodeId | undefined {
-  if (typeof nodeIdEncoded !== 'string') {
-    return;
-  }
-  const nodeId = IdInternal.fromMultibase(nodeIdEncoded);
-  if (nodeId == null) {
-    return;
-  }
-  // All NodeIds are 32 bytes long
-  // The NodeGraph requires a fixed size for Node Ids
-  if (nodeId.length !== 32) {
-    return;
-  }
-  return nodeId;
-}
+/**
+ * Parse the NodeGraph index sublevel key
+ * The keys look like `!<bucketKey>!<lastUpdated>-<NodeId>`
+ * It is assumed that the `!` is the sublevel prefix.
+ */
+function parseLastUpdatedBucketsDbKey(keyPath: KeyPath): {
+  bucketIndex: NodeBucketIndex;
+  bucketKey: string;
+  lastUpdated: number;
+  nodeId: NodeId;
+} {
+  const [bucketLevel, ...lastUpdatedKeyPath] = keyPath;
+  if (bucketLevel == null || lastUpdatedKeyPath == null) {
+    throw new TypeError('Buffer is not a NodeGraph index key');
+  }
+  const bucketKey = bucketLevel.toString();
+  const bucketIndex = lexi.unpack(bucketKey);
+  if (bucketIndex == null) {
+    throw new TypeError('Buffer is not a NodeGraph index key');
+  }
+  const { lastUpdated, nodeId } =
+    parseLastUpdatedBucketDbKey(lastUpdatedKeyPath);
+  return {
+    bucketIndex,
+    bucketKey,
+    lastUpdated,
+    nodeId,
+  };
+}
+
+/**
+ * Parse the NodeGraph index bucket sublevel key
+ * The keys look like `<lastUpdated>-<NodeId>`
+ */
+function parseLastUpdatedBucketDbKey(keyPath: KeyPath): {
+  lastUpdated: number;
+  nodeId: NodeId;
+} {
+  const [lastUpdatedLevel, nodeIdKey] = keyPath;
+  if (lastUpdatedLevel == null || nodeIdKey == null) {
+    throw new TypeError('Buffer is not a NodeGraph index bucket key');
+  }
+  const lastUpdated = lexi.unpack(lastUpdatedLevel.toString());
+  if (lastUpdated == null) {
+    throw new TypeError('Buffer is not a NodeGraph index bucket key');
+  }
+  const nodeId = IdInternal.fromBuffer(Buffer.from(nodeIdKey));
+  return {
+    lastUpdated,
+    nodeId,
+  };
+}
+
+/**
+ * Compute the distance between two nodes.
+ *   distance = nodeId1 ^ nodeId2
+ * where ^ = bitwise XOR operator
+ */
+function nodeDistance(nodeId1: NodeId, nodeId2: NodeId): bigint {
+  const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]);
+  return bytes2BigInt(distance);
+}
+
+function bucketSortByDistance(
+  bucket: NodeBucket,
+  nodeId: NodeId,
+  order: 'asc' | 'desc' = 'asc',
+): void {
+  const distances = {};
+  if (order === 'asc') {
+    bucket.sort(([nodeId1], [nodeId2]) => {
+      const d1 = (distances[nodeId1] =
+        distances[nodeId1] ?? nodeDistance(nodeId, nodeId1));
+      const d2 = (distances[nodeId2] =
+        distances[nodeId2] ?? nodeDistance(nodeId, nodeId2));
+      if (d1 < d2) {
+        return -1;
+      } else if (d1 > d2) {
+        return 1;
+      } else {
+        return 0;
+      }
+    });
+  } else {
+    bucket.sort(([nodeId1], [nodeId2]) => {
+      const d1 = (distances[nodeId1] =
+        distances[nodeId1] ?? nodeDistance(nodeId, nodeId1));
+      const d2 = (distances[nodeId2] =
+        distances[nodeId2] ?? nodeDistance(nodeId, nodeId2));
+      if (d1 > d2) {
+        return -1;
+      } else if (d1 < d2) {
+        return 1;
+      } else {
+        return 0;
+      }
+    });
+  }
+}
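Illustrative usage of the sort above (not part of the diff; `bucket` and `ownNodeId` are assumed to exist):

  // Order a bucket closest-first relative to our own node ID
  bucketSortByDistance(bucket, ownNodeId, 'asc');
  // bucket[0] now holds the entry whose NodeId has the smallest XOR
  // distance to ownNodeId; 'desc' reverses this for farthest-first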
+
+function generateRandomDistanceForBucket(bucketIndex: NodeBucketIndex): NodeId {
+  const buffer = keysUtils.getRandomBytesSync(32);
+  // Calculate the most significant byte for the bucket
+  const base = bucketIndex / 8;
+  const mSigByte = Math.floor(base);
+  const mSigBit = (base - mSigByte) * 8 + 1;
+  const mSigByteIndex = buffer.length - mSigByte - 1;
+  // Creating masks
+  // e.g. for mSigBit = 5, the AND mask is 0b00011111 and the OR mask is 0b00010000
+  const shift = 8 - mSigBit;
+  const andMask = 0b11111111 >>> shift;
+  const orMask = 0b10000000 >>> shift;
+  let byte = buffer[mSigByteIndex];
+  byte = byte & andMask; // Forces 0 for the bits above the bucket bit
+  byte = byte | orMask; // Forces 1 in the desired bucket bit
+  buffer[mSigByteIndex] = byte;
+  // Zero out the bytes above mSigByte
+  for (let byteIndex = 0; byteIndex < mSigByteIndex; byteIndex++) {
+    buffer[byteIndex] = 0;
+  }
+  return IdInternal.fromBuffer(buffer);
+}
+
+function xOrNodeId(node1: NodeId, node2: NodeId): NodeId {
+  const xOrNodeArray = node1.map((byte, i) => byte ^ node2[i]);
+  const xOrNodeBuffer = Buffer.from(xOrNodeArray);
+  return IdInternal.fromBuffer(xOrNodeBuffer);
+}
+
+function generateRandomNodeIdForBucket(
+  nodeId: NodeId,
+  bucket: NodeBucketIndex,
+): NodeId {
+  const randomDistanceForBucket = generateRandomDistanceForBucket(bucket);
+  return xOrNodeId(nodeId, randomDistanceForBucket);
+}
 
 export {
-  calculateDistance,
-  calculateBucketIndex,
-  sortByDistance,
+  sepBuffer,
   encodeNodeId,
   decodeNodeId,
+  bucketIndex,
+  bucketKey,
+  bucketsDbKey,
+  bucketDbKey,
+  lastUpdatedBucketsDbKey,
+  lastUpdatedBucketDbKey,
+  lastUpdatedKey,
+  parseBucketsDbKey,
+  parseBucketDbKey,
+  parseLastUpdatedBucketsDbKey,
+  parseLastUpdatedBucketDbKey,
+  nodeDistance,
+  bucketSortByDistance,
+  generateRandomDistanceForBucket,
+  xOrNodeId,
+  generateRandomNodeIdForBucket,
 };
diff --git a/src/proto/js/google/protobuf/any_pb.js b/src/proto/js/google/protobuf/any_pb.js
index 2154f20785..cec1761c80 100644
--- a/src/proto/js/google/protobuf/any_pb.js
+++ b/src/proto/js/google/protobuf/any_pb.js
@@ -2,6 +2,7 @@
 /**
  * @fileoverview
  * @enhanceable
+ * @suppress {missingRequire} reports error on implicit type usages.
  * @suppress {messageConventions} JS Compiler reports an error if a variable or
  *     field starts with 'MSG_' and isn't a translatable message.
  * @public
diff --git a/src/proto/js/google/protobuf/descriptor_pb.js b/src/proto/js/google/protobuf/descriptor_pb.js
index 64e84878bd..9c345b93dd 100644
--- a/src/proto/js/google/protobuf/descriptor_pb.js
+++ b/src/proto/js/google/protobuf/descriptor_pb.js
@@ -2,6 +2,7 @@
 /**
  * @fileoverview
  * @enhanceable
+ * @suppress {missingRequire} reports error on implicit type usages.
  * @suppress {messageConventions} JS Compiler reports an error if a variable or
  *     field starts with 'MSG_' and isn't a translatable message.
  * @public
diff --git a/src/proto/js/google/protobuf/duration_pb.js b/src/proto/js/google/protobuf/duration_pb.js
index 74166f0fd8..1b5f0fd84a 100644
--- a/src/proto/js/google/protobuf/duration_pb.js
+++ b/src/proto/js/google/protobuf/duration_pb.js
@@ -2,6 +2,7 @@
 /**
  * @fileoverview
  * @enhanceable
+ * @suppress {missingRequire} reports error on implicit type usages.
  * @suppress {messageConventions} JS Compiler reports an error if a variable or
  *     field starts with 'MSG_' and isn't a translatable message.
* @public diff --git a/src/proto/js/google/protobuf/empty_pb.js b/src/proto/js/google/protobuf/empty_pb.js index d85fa310a9..bd5d8a4e18 100644 --- a/src/proto/js/google/protobuf/empty_pb.js +++ b/src/proto/js/google/protobuf/empty_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/google/protobuf/field_mask_pb.js b/src/proto/js/google/protobuf/field_mask_pb.js index 67860a3a29..34e581b041 100644 --- a/src/proto/js/google/protobuf/field_mask_pb.js +++ b/src/proto/js/google/protobuf/field_mask_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/google/protobuf/struct_pb.js b/src/proto/js/google/protobuf/struct_pb.js index bff1ed4123..b16b8b2fab 100644 --- a/src/proto/js/google/protobuf/struct_pb.js +++ b/src/proto/js/google/protobuf/struct_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/google/protobuf/timestamp_pb.js b/src/proto/js/google/protobuf/timestamp_pb.js index 6881a1d93b..a270c1c47c 100644 --- a/src/proto/js/google/protobuf/timestamp_pb.js +++ b/src/proto/js/google/protobuf/timestamp_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/google/protobuf/wrappers_pb.js b/src/proto/js/google/protobuf/wrappers_pb.js index 9c89af542f..458e1b436b 100644 --- a/src/proto/js/google/protobuf/wrappers_pb.js +++ b/src/proto/js/google/protobuf/wrappers_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/agent/agent_pb.js b/src/proto/js/polykey/v1/agent/agent_pb.js index 13a458c48f..29361addf7 100644 --- a/src/proto/js/polykey/v1/agent/agent_pb.js +++ b/src/proto/js/polykey/v1/agent/agent_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/agent_service_pb.js b/src/proto/js/polykey/v1/agent_service_pb.js index ade0b70fac..9fa48c7388 100644 --- a/src/proto/js/polykey/v1/agent_service_pb.js +++ b/src/proto/js/polykey/v1/agent_service_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. 
* @public diff --git a/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts b/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts index 023631a45b..b230f8df4d 100644 --- a/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts +++ b/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts @@ -27,6 +27,7 @@ interface IClientServiceService extends grpc.ServiceDefinition; responseDeserialize: grpc.deserialize; } -interface IClientServiceService_INodesAdd extends grpc.MethodDefinition { +interface IClientServiceService_INodesAdd extends grpc.MethodDefinition { path: "/polykey.v1.ClientService/NodesAdd"; requestStream: false; responseStream: false; - requestSerialize: grpc.serialize; - requestDeserialize: grpc.deserialize; + requestSerialize: grpc.serialize; + requestDeserialize: grpc.deserialize; responseSerialize: grpc.serialize; responseDeserialize: grpc.deserialize; } @@ -157,6 +158,15 @@ interface IClientServiceService_INodesFind extends grpc.MethodDefinition; responseDeserialize: grpc.deserialize; } +interface IClientServiceService_INodesGetAll extends grpc.MethodDefinition { + path: "/polykey.v1.ClientService/NodesGetAll"; + requestStream: false; + responseStream: false; + requestSerialize: grpc.serialize; + requestDeserialize: grpc.deserialize; + responseSerialize: grpc.serialize; + responseDeserialize: grpc.deserialize; +} interface IClientServiceService_IKeysKeyPairRoot extends grpc.MethodDefinition { path: "/polykey.v1.ClientService/KeysKeyPairRoot"; requestStream: false; @@ -669,10 +679,11 @@ export interface IClientServiceServer extends grpc.UntypedServiceImplementation agentStatus: grpc.handleUnaryCall; agentStop: grpc.handleUnaryCall; agentUnlock: grpc.handleUnaryCall; - nodesAdd: grpc.handleUnaryCall; + nodesAdd: grpc.handleUnaryCall; nodesPing: grpc.handleUnaryCall; nodesClaim: grpc.handleUnaryCall; nodesFind: grpc.handleUnaryCall; + nodesGetAll: grpc.handleUnaryCall; keysKeyPairRoot: grpc.handleUnaryCall; keysKeyPairReset: grpc.handleUnaryCall; keysKeyPairRenew: grpc.handleUnaryCall; @@ -744,9 +755,9 @@ export interface IClientServiceClient { agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + nodesAdd(request: 
polykey_v1_nodes_nodes_pb.NodeAdd, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; nodesPing(request: polykey_v1_nodes_nodes_pb.Node, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; nodesPing(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; nodesPing(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; @@ -756,6 +767,9 @@ export interface IClientServiceClient { nodesFind(request: polykey_v1_nodes_nodes_pb.Node, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; + nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; @@ -929,9 +943,9 @@ export class ClientServiceClient extends grpc.Client implements IClientServiceCl public agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; public agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; public agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: 
(error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; public nodesPing(request: polykey_v1_nodes_nodes_pb.Node, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; public nodesPing(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; public nodesPing(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; @@ -941,6 +955,9 @@ export class ClientServiceClient extends grpc.Client implements IClientServiceCl public nodesFind(request: polykey_v1_nodes_nodes_pb.Node, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; public nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; public nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; + public nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + public nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + public nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; public keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): 
grpc.ClientUnaryCall; public keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; public keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; diff --git a/src/proto/js/polykey/v1/client_service_grpc_pb.js b/src/proto/js/polykey/v1/client_service_grpc_pb.js index ede2e94709..e08b6512cd 100644 --- a/src/proto/js/polykey/v1/client_service_grpc_pb.js +++ b/src/proto/js/polykey/v1/client_service_grpc_pb.js @@ -201,6 +201,17 @@ function deserialize_polykey_v1_nodes_Node(buffer_arg) { return polykey_v1_nodes_nodes_pb.Node.deserializeBinary(new Uint8Array(buffer_arg)); } +function serialize_polykey_v1_nodes_NodeAdd(arg) { + if (!(arg instanceof polykey_v1_nodes_nodes_pb.NodeAdd)) { + throw new Error('Expected argument of type polykey.v1.nodes.NodeAdd'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_polykey_v1_nodes_NodeAdd(buffer_arg) { + return polykey_v1_nodes_nodes_pb.NodeAdd.deserializeBinary(new Uint8Array(buffer_arg)); +} + function serialize_polykey_v1_nodes_NodeAddress(arg) { if (!(arg instanceof polykey_v1_nodes_nodes_pb.NodeAddress)) { throw new Error('Expected argument of type polykey.v1.nodes.NodeAddress'); @@ -212,6 +223,17 @@ function deserialize_polykey_v1_nodes_NodeAddress(buffer_arg) { return polykey_v1_nodes_nodes_pb.NodeAddress.deserializeBinary(new Uint8Array(buffer_arg)); } +function serialize_polykey_v1_nodes_NodeBuckets(arg) { + if (!(arg instanceof polykey_v1_nodes_nodes_pb.NodeBuckets)) { + throw new Error('Expected argument of type polykey.v1.nodes.NodeBuckets'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_polykey_v1_nodes_NodeBuckets(buffer_arg) { + return polykey_v1_nodes_nodes_pb.NodeBuckets.deserializeBinary(new Uint8Array(buffer_arg)); +} + function serialize_polykey_v1_notifications_List(arg) { if (!(arg instanceof polykey_v1_notifications_notifications_pb.List)) { throw new Error('Expected argument of type polykey.v1.notifications.List'); @@ -517,10 +539,10 @@ nodesAdd: { path: '/polykey.v1.ClientService/NodesAdd', requestStream: false, responseStream: false, - requestType: polykey_v1_nodes_nodes_pb.NodeAddress, + requestType: polykey_v1_nodes_nodes_pb.NodeAdd, responseType: polykey_v1_utils_utils_pb.EmptyMessage, - requestSerialize: serialize_polykey_v1_nodes_NodeAddress, - requestDeserialize: deserialize_polykey_v1_nodes_NodeAddress, + requestSerialize: serialize_polykey_v1_nodes_NodeAdd, + requestDeserialize: deserialize_polykey_v1_nodes_NodeAdd, responseSerialize: serialize_polykey_v1_utils_EmptyMessage, responseDeserialize: deserialize_polykey_v1_utils_EmptyMessage, }, @@ -557,6 +579,17 @@ nodesAdd: { responseSerialize: serialize_polykey_v1_nodes_NodeAddress, responseDeserialize: deserialize_polykey_v1_nodes_NodeAddress, }, + nodesGetAll: { + path: '/polykey.v1.ClientService/NodesGetAll', + requestStream: false, + responseStream: false, + requestType: polykey_v1_utils_utils_pb.EmptyMessage, + responseType: polykey_v1_nodes_nodes_pb.NodeBuckets, + requestSerialize: serialize_polykey_v1_utils_EmptyMessage, + requestDeserialize: deserialize_polykey_v1_utils_EmptyMessage, + responseSerialize: serialize_polykey_v1_nodes_NodeBuckets, + responseDeserialize: 
deserialize_polykey_v1_nodes_NodeBuckets, + }, // Keys keysKeyPairRoot: { path: '/polykey.v1.ClientService/KeysKeyPairRoot', diff --git a/src/proto/js/polykey/v1/client_service_pb.js b/src/proto/js/polykey/v1/client_service_pb.js index 4adc8bd6d0..68a9ebcb8f 100644 --- a/src/proto/js/polykey/v1/client_service_pb.js +++ b/src/proto/js/polykey/v1/client_service_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/gestalts/gestalts_pb.js b/src/proto/js/polykey/v1/gestalts/gestalts_pb.js index 90435b3bec..36b225293b 100644 --- a/src/proto/js/polykey/v1/gestalts/gestalts_pb.js +++ b/src/proto/js/polykey/v1/gestalts/gestalts_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/identities/identities_pb.js b/src/proto/js/polykey/v1/identities/identities_pb.js index cbfb21ed9a..a52a535f48 100644 --- a/src/proto/js/polykey/v1/identities/identities_pb.js +++ b/src/proto/js/polykey/v1/identities/identities_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/keys/keys_pb.js b/src/proto/js/polykey/v1/keys/keys_pb.js index dfbc1df0bc..323ef8f160 100644 --- a/src/proto/js/polykey/v1/keys/keys_pb.js +++ b/src/proto/js/polykey/v1/keys/keys_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. 
* @public diff --git a/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts b/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts index 0da62ce435..09fb028ebe 100644 --- a/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts +++ b/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts @@ -98,6 +98,60 @@ export namespace Claim { } } +export class NodeAdd extends jspb.Message { + getNodeId(): string; + setNodeId(value: string): NodeAdd; + + hasAddress(): boolean; + clearAddress(): void; + getAddress(): Address | undefined; + setAddress(value?: Address): NodeAdd; + getForce(): boolean; + setForce(value: boolean): NodeAdd; + getPing(): boolean; + setPing(value: boolean): NodeAdd; + + serializeBinary(): Uint8Array; + toObject(includeInstance?: boolean): NodeAdd.AsObject; + static toObject(includeInstance: boolean, msg: NodeAdd): NodeAdd.AsObject; + static extensions: {[key: number]: jspb.ExtensionFieldInfo}; + static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; + static serializeBinaryToWriter(message: NodeAdd, writer: jspb.BinaryWriter): void; + static deserializeBinary(bytes: Uint8Array): NodeAdd; + static deserializeBinaryFromReader(message: NodeAdd, reader: jspb.BinaryReader): NodeAdd; +} + +export namespace NodeAdd { + export type AsObject = { + nodeId: string, + address?: Address.AsObject, + force: boolean, + ping: boolean, + } +} + +export class NodeBuckets extends jspb.Message { + + getBucketsMap(): jspb.Map; + clearBucketsMap(): void; + + serializeBinary(): Uint8Array; + toObject(includeInstance?: boolean): NodeBuckets.AsObject; + static toObject(includeInstance: boolean, msg: NodeBuckets): NodeBuckets.AsObject; + static extensions: {[key: number]: jspb.ExtensionFieldInfo}; + static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; + static serializeBinaryToWriter(message: NodeBuckets, writer: jspb.BinaryWriter): void; + static deserializeBinary(bytes: Uint8Array): NodeBuckets; + static deserializeBinaryFromReader(message: NodeBuckets, reader: jspb.BinaryReader): NodeBuckets; +} + +export namespace NodeBuckets { + export type AsObject = { + + bucketsMap: Array<[number, NodeTable.AsObject]>, + } +} + export class Connection extends jspb.Message { getAId(): string; setAId(value: string): Connection; diff --git a/src/proto/js/polykey/v1/nodes/nodes_pb.js b/src/proto/js/polykey/v1/nodes/nodes_pb.js index 01d29ce4f0..6dd70cdc30 100644 --- a/src/proto/js/polykey/v1/nodes/nodes_pb.js +++ b/src/proto/js/polykey/v1/nodes/nodes_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. 
* @public @@ -24,7 +25,9 @@ goog.exportSymbol('proto.polykey.v1.nodes.Claims', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Connection', null, global); goog.exportSymbol('proto.polykey.v1.nodes.CrossSign', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Node', null, global); +goog.exportSymbol('proto.polykey.v1.nodes.NodeAdd', null, global); goog.exportSymbol('proto.polykey.v1.nodes.NodeAddress', null, global); +goog.exportSymbol('proto.polykey.v1.nodes.NodeBuckets', null, global); goog.exportSymbol('proto.polykey.v1.nodes.NodeTable', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Relay', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Signature', null, global); @@ -112,6 +115,48 @@ if (goog.DEBUG && !COMPILED) { */ proto.polykey.v1.nodes.Claim.displayName = 'proto.polykey.v1.nodes.Claim'; } +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.polykey.v1.nodes.NodeAdd = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.polykey.v1.nodes.NodeAdd, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.polykey.v1.nodes.NodeAdd.displayName = 'proto.polykey.v1.nodes.NodeAdd'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.polykey.v1.nodes.NodeBuckets = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.polykey.v1.nodes.NodeBuckets, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.polykey.v1.nodes.NodeBuckets.displayName = 'proto.polykey.v1.nodes.NodeBuckets'; +} /** * Generated by JsPbCodeGenerator. * @param {Array=} opt_data Optional initial data array, typically from a @@ -956,6 +1001,380 @@ proto.polykey.v1.nodes.Claim.prototype.setForceInvite = function(value) { +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.toObject = function(opt_includeInstance) { + return proto.polykey.v1.nodes.NodeAdd.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. 
Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.polykey.v1.nodes.NodeAdd} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.polykey.v1.nodes.NodeAdd.toObject = function(includeInstance, msg) { + var f, obj = { + nodeId: jspb.Message.getFieldWithDefault(msg, 1, ""), + address: (f = msg.getAddress()) && proto.polykey.v1.nodes.Address.toObject(includeInstance, f), + force: jspb.Message.getBooleanFieldWithDefault(msg, 3, false), + ping: jspb.Message.getBooleanFieldWithDefault(msg, 4, false) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.polykey.v1.nodes.NodeAdd} + */ +proto.polykey.v1.nodes.NodeAdd.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.polykey.v1.nodes.NodeAdd; + return proto.polykey.v1.nodes.NodeAdd.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.polykey.v1.nodes.NodeAdd} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.polykey.v1.nodes.NodeAdd} + */ +proto.polykey.v1.nodes.NodeAdd.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setNodeId(value); + break; + case 2: + var value = new proto.polykey.v1.nodes.Address; + reader.readMessage(value,proto.polykey.v1.nodes.Address.deserializeBinaryFromReader); + msg.setAddress(value); + break; + case 3: + var value = /** @type {boolean} */ (reader.readBool()); + msg.setForce(value); + break; + case 4: + var value = /** @type {boolean} */ (reader.readBool()); + msg.setPing(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.polykey.v1.nodes.NodeAdd.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.polykey.v1.nodes.NodeAdd} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.polykey.v1.nodes.NodeAdd.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getNodeId(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } + f = message.getAddress(); + if (f != null) { + writer.writeMessage( + 2, + f, + proto.polykey.v1.nodes.Address.serializeBinaryToWriter + ); + } + f = message.getForce(); + if (f) { + writer.writeBool( + 3, + f + ); + } + f = message.getPing(); + if (f) { + writer.writeBool( + 4, + f + ); + } +}; + + +/** + * optional string node_id = 1; + * @return {string} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.getNodeId = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.polykey.v1.nodes.NodeAdd} returns this + */ +proto.polykey.v1.nodes.NodeAdd.prototype.setNodeId = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + +/** + * optional Address address = 2; + * @return {?proto.polykey.v1.nodes.Address} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.getAddress = function() { + return /** @type{?proto.polykey.v1.nodes.Address} */ ( + jspb.Message.getWrapperField(this, proto.polykey.v1.nodes.Address, 2)); +}; + + +/** + * @param {?proto.polykey.v1.nodes.Address|undefined} value + * @return {!proto.polykey.v1.nodes.NodeAdd} returns this +*/ +proto.polykey.v1.nodes.NodeAdd.prototype.setAddress = function(value) { + return jspb.Message.setWrapperField(this, 2, value); +}; + + +/** + * Clears the message field making it undefined. + * @return {!proto.polykey.v1.nodes.NodeAdd} returns this + */ +proto.polykey.v1.nodes.NodeAdd.prototype.clearAddress = function() { + return this.setAddress(undefined); +}; + + +/** + * Returns whether this field is set. + * @return {boolean} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.hasAddress = function() { + return jspb.Message.getField(this, 2) != null; +}; + + +/** + * optional bool force = 3; + * @return {boolean} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.getForce = function() { + return /** @type {boolean} */ (jspb.Message.getBooleanFieldWithDefault(this, 3, false)); +}; + + +/** + * @param {boolean} value + * @return {!proto.polykey.v1.nodes.NodeAdd} returns this + */ +proto.polykey.v1.nodes.NodeAdd.prototype.setForce = function(value) { + return jspb.Message.setProto3BooleanField(this, 3, value); +}; + + +/** + * optional bool ping = 4; + * @return {boolean} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.getPing = function() { + return /** @type {boolean} */ (jspb.Message.getBooleanFieldWithDefault(this, 4, false)); +}; + + +/** + * @param {boolean} value + * @return {!proto.polykey.v1.nodes.NodeAdd} returns this + */ +proto.polykey.v1.nodes.NodeAdd.prototype.setPing = function(value) { + return jspb.Message.setProto3BooleanField(this, 4, value); +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.toObject = function(opt_includeInstance) { + return proto.polykey.v1.nodes.NodeBuckets.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.polykey.v1.nodes.NodeBuckets} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.polykey.v1.nodes.NodeBuckets.toObject = function(includeInstance, msg) { + var f, obj = { + bucketsMap: (f = msg.getBucketsMap()) ? f.toObject(includeInstance, proto.polykey.v1.nodes.NodeTable.toObject) : [] + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.polykey.v1.nodes.NodeBuckets} + */ +proto.polykey.v1.nodes.NodeBuckets.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.polykey.v1.nodes.NodeBuckets; + return proto.polykey.v1.nodes.NodeBuckets.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.polykey.v1.nodes.NodeBuckets} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.polykey.v1.nodes.NodeBuckets} + */ +proto.polykey.v1.nodes.NodeBuckets.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = msg.getBucketsMap(); + reader.readMessage(value, function(message, reader) { + jspb.Map.deserializeBinary(message, reader, jspb.BinaryReader.prototype.readInt32, jspb.BinaryReader.prototype.readMessage, proto.polykey.v1.nodes.NodeTable.deserializeBinaryFromReader, 0, new proto.polykey.v1.nodes.NodeTable()); + }); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.polykey.v1.nodes.NodeBuckets.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.polykey.v1.nodes.NodeBuckets} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.polykey.v1.nodes.NodeBuckets.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getBucketsMap(true); + if (f && f.getLength() > 0) { + f.serializeBinary(1, writer, jspb.BinaryWriter.prototype.writeInt32, jspb.BinaryWriter.prototype.writeMessage, proto.polykey.v1.nodes.NodeTable.serializeBinaryToWriter); + } +}; + + +/** + * map buckets = 1; + * @param {boolean=} opt_noLazyCreate Do not create the map if + * empty, instead returning `undefined` + * @return {!jspb.Map} + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.getBucketsMap = function(opt_noLazyCreate) { + return /** @type {!jspb.Map} */ ( + jspb.Message.getMapField(this, 1, opt_noLazyCreate, + proto.polykey.v1.nodes.NodeTable)); +}; + + +/** + * Clears values from the map. The map will be non-null. + * @return {!proto.polykey.v1.nodes.NodeBuckets} returns this + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.clearBucketsMap = function() { + this.getBucketsMap().clear(); + return this;}; + + + + + if (jspb.Message.GENERATE_TO_OBJECT) { /** * Creates an object representation of this proto. diff --git a/src/proto/js/polykey/v1/notifications/notifications_pb.js b/src/proto/js/polykey/v1/notifications/notifications_pb.js index f50f614f5f..80794ae7f4 100644 --- a/src/proto/js/polykey/v1/notifications/notifications_pb.js +++ b/src/proto/js/polykey/v1/notifications/notifications_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/permissions/permissions_pb.js b/src/proto/js/polykey/v1/permissions/permissions_pb.js index 53e1299855..1b55e4f47f 100644 --- a/src/proto/js/polykey/v1/permissions/permissions_pb.js +++ b/src/proto/js/polykey/v1/permissions/permissions_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/secrets/secrets_pb.js b/src/proto/js/polykey/v1/secrets/secrets_pb.js index 5008028d8f..28d2e02ae9 100644 --- a/src/proto/js/polykey/v1/secrets/secrets_pb.js +++ b/src/proto/js/polykey/v1/secrets/secrets_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/sessions/sessions_pb.js b/src/proto/js/polykey/v1/sessions/sessions_pb.js index c2d81541f0..212d584bc4 100644 --- a/src/proto/js/polykey/v1/sessions/sessions_pb.js +++ b/src/proto/js/polykey/v1/sessions/sessions_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. 
 * @public
diff --git a/src/proto/js/polykey/v1/test_service_pb.js b/src/proto/js/polykey/v1/test_service_pb.js
index f5ab8f2dea..56dd0245c1 100644
--- a/src/proto/js/polykey/v1/test_service_pb.js
+++ b/src/proto/js/polykey/v1/test_service_pb.js
@@ -2,6 +2,7 @@
 /**
  * @fileoverview
  * @enhanceable
+ * @suppress {missingRequire} reports error on implicit type usages.
  * @suppress {messageConventions} JS Compiler reports an error if a variable or
  *     field starts with 'MSG_' and isn't a translatable message.
  * @public
diff --git a/src/proto/js/polykey/v1/utils/utils_pb.js b/src/proto/js/polykey/v1/utils/utils_pb.js
index 852c0903d6..39b5c869e3 100644
--- a/src/proto/js/polykey/v1/utils/utils_pb.js
+++ b/src/proto/js/polykey/v1/utils/utils_pb.js
@@ -2,6 +2,7 @@
 /**
  * @fileoverview
  * @enhanceable
+ * @suppress {missingRequire} reports error on implicit type usages.
  * @suppress {messageConventions} JS Compiler reports an error if a variable or
  *     field starts with 'MSG_' and isn't a translatable message.
  * @public
diff --git a/src/proto/js/polykey/v1/vaults/vaults_pb.js b/src/proto/js/polykey/v1/vaults/vaults_pb.js
index 6b793dc63c..153565a462 100644
--- a/src/proto/js/polykey/v1/vaults/vaults_pb.js
+++ b/src/proto/js/polykey/v1/vaults/vaults_pb.js
@@ -2,6 +2,7 @@
 /**
  * @fileoverview
  * @enhanceable
+ * @suppress {missingRequire} reports error on implicit type usages.
  * @suppress {messageConventions} JS Compiler reports an error if a variable or
  *     field starts with 'MSG_' and isn't a translatable message.
  * @public
diff --git a/src/proto/schemas/polykey/v1/client_service.proto b/src/proto/schemas/polykey/v1/client_service.proto
index 57788c678d..9c90e02865 100644
--- a/src/proto/schemas/polykey/v1/client_service.proto
+++ b/src/proto/schemas/polykey/v1/client_service.proto
@@ -22,10 +22,11 @@ service ClientService {
   rpc AgentUnlock (polykey.v1.utils.EmptyMessage) returns (polykey.v1.utils.EmptyMessage);
 
   // Nodes
-  rpc NodesAdd(polykey.v1.nodes.NodeAddress) returns (polykey.v1.utils.EmptyMessage);
+  rpc NodesAdd(polykey.v1.nodes.NodeAdd) returns (polykey.v1.utils.EmptyMessage);
   rpc NodesPing(polykey.v1.nodes.Node) returns (polykey.v1.utils.StatusMessage);
   rpc NodesClaim(polykey.v1.nodes.Claim) returns (polykey.v1.utils.StatusMessage);
   rpc NodesFind(polykey.v1.nodes.Node) returns (polykey.v1.nodes.NodeAddress);
+  rpc NodesGetAll(polykey.v1.utils.EmptyMessage) returns (polykey.v1.nodes.NodeBuckets);
 
   // Keys
   rpc KeysKeyPairRoot (polykey.v1.utils.EmptyMessage) returns (polykey.v1.keys.KeyPair);
diff --git a/src/proto/schemas/polykey/v1/nodes/nodes.proto b/src/proto/schemas/polykey/v1/nodes/nodes.proto
index 4c5d64a51e..cd8b23785f 100644
--- a/src/proto/schemas/polykey/v1/nodes/nodes.proto
+++ b/src/proto/schemas/polykey/v1/nodes/nodes.proto
@@ -25,6 +25,18 @@ message Claim {
   bool force_invite = 2;
 }
 
+message NodeAdd {
+  string node_id = 1;
+  Address address = 2;
+  bool force = 3;
+  bool ping = 4;
+}
+
+// Bucket index -> a node bucket (from NodeGraph)
+message NodeBuckets {
+  map<int32, NodeTable> buckets = 1;
+}
+
 // Agent specific.
 message Connection {
diff --git a/src/sigchain/utils.ts b/src/sigchain/utils.ts
index 7f40dd6a3a..fe8cc83f83 100644
--- a/src/sigchain/utils.ts
+++ b/src/sigchain/utils.ts
@@ -19,7 +19,7 @@ async function verifyChainData(
       continue;
     }
     // If verified, add the claim to the decoded chain
-    decodedChain[claimId] = await claimsUtils.decodeClaim(encodedClaim);
+    decodedChain[claimId] = claimsUtils.decodeClaim(encodedClaim);
   }
   return decodedChain;
 }
diff --git a/src/types.ts b/src/types.ts
index 161181b8d2..fae58ae018 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -86,6 +86,24 @@ interface FileSystem {
 
 type FileHandle = fs.promises.FileHandle;
 
+type FunctionPropertyNames<T> = {
+  [K in keyof T]: T[K] extends (...args: any[]) => any ? K : never;
+}[keyof T];
+
+/**
+ * Functional properties of an object
+ */
+type FunctionProperties<T> = Pick<T, FunctionPropertyNames<T>>;
+
+type NonFunctionPropertyNames<T> = {
+  [K in keyof T]: T[K] extends (...args: any[]) => any ? never : K;
+}[keyof T];
+
+/**
+ * Non-functional properties of an object
+ */
+type NonFunctionProperties<T> = Pick<T, NonFunctionPropertyNames<T>>;
+
 export type {
   POJO,
   Opaque,
@@ -99,4 +117,6 @@ export type {
   Timer,
   FileSystem,
   FileHandle,
+  FunctionProperties,
+  NonFunctionProperties,
 };
diff --git a/src/utils/index.ts b/src/utils/index.ts
index f50908acab..2ee8414ff7 100644
--- a/src/utils/index.ts
+++ b/src/utils/index.ts
@@ -2,4 +2,5 @@ export { default as sysexits } from './sysexits';
 export * from './utils';
 export * from './matchers';
 export * from './binary';
+export * from './random';
 export * as errors from './errors';
diff --git a/src/utils/random.ts b/src/utils/random.ts
new file mode 100644
index 0000000000..fa0c3ecda4
--- /dev/null
+++ b/src/utils/random.ts
@@ -0,0 +1,11 @@
+/**
+ * Gets a random integer between min (inclusive) and max (exclusive)
+ * This is not cryptographically secure
+ */
+function getRandomInt(min: number, max: number) {
+  min = Math.ceil(min);
+  max = Math.floor(max);
+  return Math.floor(Math.random() * (max - min)) + min;
+}
+
+export { getRandomInt };
diff --git a/src/utils/utils.ts b/src/utils/utils.ts
index 4d837ecbfe..0a1519d193 100644
--- a/src/utils/utils.ts
+++ b/src/utils/utils.ts
@@ -170,14 +170,16 @@ function promisify<
   };
 }
 
-/**
- * Deconstructed promise
- */
-function promise<T>(): {
+type PromiseDeconstructed<T> = {
   p: Promise<T>;
   resolveP: (value: T | PromiseLike<T>) => void;
   rejectP: (reason?: any) => void;
-} {
+};
+
+/**
+ * Deconstructed promise
+ */
+function promise<T>(): PromiseDeconstructed<T> {
   let resolveP, rejectP;
   const p = new Promise<T>((resolve, reject) => {
     resolveP = resolve;
@@ -236,6 +238,67 @@ function arrayZipWithPadding(
   ]);
 }
 
+async function asyncIterableArray<T>(
+  iterable: AsyncIterable<T>,
+): Promise<Array<T>> {
+  const arr: Array<T> = [];
+  for await (const item of iterable) {
+    arr.push(item);
+  }
+  return arr;
+}
+
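Illustrative usage of the promise/iterable helpers above (not part of the diff):

  const { p, resolveP } = promise<number>();
  setTimeout(() => resolveP(42), 10);
  await p; // 42
  const items = await asyncIterableArray(
    (async function* () {
      yield 1;
      yield 2;
    })(),
  ); // [1, 2]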
+function bufferSplit(
+  input: Buffer,
+  delimiter?: Buffer,
+  limit?: number,
+  remaining: boolean = false,
+): Array<Buffer> {
+  const output: Array<Buffer> = [];
+  let delimiterOffset = 0;
+  let delimiterIndex = 0;
+  let i = 0;
+  if (delimiter != null) {
+    while (true) {
+      if (i === limit) break;
+      delimiterIndex = input.indexOf(delimiter, delimiterOffset);
+      if (delimiterIndex > -1) {
+        output.push(input.subarray(delimiterOffset, delimiterIndex));
+        delimiterOffset = delimiterIndex + delimiter.byteLength;
+      } else {
+        const chunk = input.subarray(delimiterOffset);
+        output.push(chunk);
+        delimiterOffset += chunk.byteLength;
+        break;
+      }
+      i++;
+    }
+  } else {
+    for (; delimiterIndex < input.byteLength; ) {
+      if (i === limit) break;
+      delimiterIndex++;
+      const chunk = input.subarray(delimiterOffset, delimiterIndex);
+      output.push(chunk);
+      delimiterOffset += chunk.byteLength;
+      i++;
+    }
+  }
+  // If remaining, the last chunk is extended with the rest of the input,
+  // delimiters included
+  if (
+    remaining &&
+    limit != null &&
+    output.length > 0 &&
+    delimiterIndex > -1 &&
+    delimiterIndex <= input.byteLength
+  ) {
+    const inputRemaining = input.subarray(
+      delimiterIndex - output[output.length - 1].byteLength,
+    );
+    output[output.length - 1] = inputRemaining;
+  }
+  return output;
+}
+
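Illustrative behaviour of bufferSplit above (not part of the diff):

  bufferSplit(Buffer.from('a!b!c'), Buffer.from('!'));          // [a, b, c]
  bufferSplit(Buffer.from('a!b!c'), Buffer.from('!'), 2);       // [a, b]
  bufferSplit(Buffer.from('a!b!c'), Buffer.from('!'), 2, true); // [a, b!c]
  // With `remaining`, the rest of the input (including delimiters) is
  // folded into the last chunk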

( f: (...params: P) => any, timeout: number = 0, @@ -247,6 +310,7 @@ function debounce

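How the two new helpers in src/utils/utils.ts behave, worked out from the logic above (a standalone sketch, not part of the patch; the relative import path is assumed):

import { asyncIterableArray, bufferSplit } from './utils';

async function demo(): Promise<Array<number>> {
  const input = Buffer.from('a:b:c');
  const sep = Buffer.from(':');
  // Split on a delimiter, like String.split for Buffers
  bufferSplit(input, sep); // => ['a', 'b', 'c'] as Buffers
  // A limit stops splitting after that many pieces
  bufferSplit(input, sep, 2); // => ['a', 'b']
  // remaining = true folds the unconsumed rest, delimiter included,
  // into the last piece
  bufferSplit(input, sep, 2, true); // => ['a', 'b:c']
  // With no delimiter the input is split into single bytes up to the limit
  bufferSplit(input, undefined, 2); // => ['a', ':']
  // asyncIterableArray collects any AsyncIterable into an array
  async function* gen() {
    yield 1;
    yield 2;
  }
  return await asyncIterableArray(gen()); // => [1, 2]
}

Note also that promise<T>() now has a named return type, PromiseDeconstructed<T>, exposing resolveP and rejectP alongside the promise itself; the new PolykeyAgent tests below use it to await a single event emission.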
diff --git a/src/validation/utils.ts b/src/validation/utils.ts index 3ce13f258a..020c1f51a1 100644 --- a/src/validation/utils.ts +++ b/src/validation/utils.ts @@ -165,7 +165,7 @@ function parseHostOrHostname(data: any): Host | Hostname { * Parses number into a Port * Data can be a string-number */ -function parsePort(data: any): Port { +function parsePort(data: any, connect: boolean = false): Port { if (typeof data === 'string') { try { data = parseInteger(data); @@ -176,10 +176,16 @@ function parsePort(data: any): Port { throw e; } } - if (!networkUtils.isPort(data)) { - throw new validationErrors.ErrorParse( - 'Port must be a number between 0 and 65535 inclusive', - ); + if (!networkUtils.isPort(data, connect)) { + if (!connect) { + throw new validationErrors.ErrorParse( + 'Port must be a number between 0 and 65535 inclusive', + ); + } else { + throw new validationErrors.ErrorParse( + 'Port must be a number between 1 and 65535 inclusive', + ); + } } return data; }
diff --git a/src/vaults/VaultInternal.ts b/src/vaults/VaultInternal.ts index ae2adf6cfb..b5e32da067 100644 --- a/src/vaults/VaultInternal.ts +++ b/src/vaults/VaultInternal.ts @@ -35,7 +35,7 @@ import * as validationUtils from '../validation/utils'; import * as vaultsPB from '../proto/js/polykey/v1/vaults/vaults_pb'; import { never } from '../utils/utils'; -export type RemoteInfo = { +type RemoteInfo = { remoteNode: NodeIdEncoded; remoteVault: VaultIdEncoded; }; @@ -1098,3 +1098,4 @@ class VaultInternal { } export default VaultInternal; +export type { RemoteInfo };
diff --git a/tests/PolykeyAgent.test.ts b/tests/PolykeyAgent.test.ts index 9423050abf..7cb1f2fc75 100644 --- a/tests/PolykeyAgent.test.ts +++ b/tests/PolykeyAgent.test.ts @@ -1,4 +1,5 @@ import type { StateVersion } from '@/schema/types'; +import type { KeyManagerChangeData } from '@/keys/types'; import os from 'os'; import path from 'path'; import fs from 'fs'; @@ -9,6 +10,7 @@ import { Status } from '@/status'; import { Schema } from '@/schema'; import * as errors from '@/errors'; import config from '@/config'; +import { promise } from '@/utils/index'; import * as testUtils from './utils'; describe('PolykeyAgent', () => { @@ -175,4 +177,76 @@ describe('PolykeyAgent', () => { }), ).rejects.toThrow(errors.ErrorSchemaVersionTooOld); }); + test('renewRootKeyPair change event propagates', async () => { + const nodePath = `${dataDir}/polykey`; + let pkAgent: PolykeyAgent | undefined; + try { + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + logger, + }); + const prom = promise(); + pkAgent.events.on( + PolykeyAgent.eventSymbols.KeyManager, + async (data: KeyManagerChangeData) => { + prom.resolveP(data); + }, + ); + await pkAgent.keyManager.renewRootKeyPair(password); + + await expect(prom.p).resolves.toBeDefined(); + } finally { + await pkAgent?.stop(); + await pkAgent?.destroy(); + } + }); + test('resetRootKeyPair change event propagates', async () => { + const nodePath = `${dataDir}/polykey`; + let pkAgent: PolykeyAgent | undefined; + try { + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + logger, + }); + const prom = promise(); + pkAgent.events.on( + PolykeyAgent.eventSymbols.KeyManager, + async (data: KeyManagerChangeData) => { + prom.resolveP(data); + }, + ); + await 
pkAgent.keyManager.resetRootKeyPair(password); + + await expect(prom.p).resolves.toBeDefined(); + } finally { + await pkAgent?.stop(); + await pkAgent?.destroy(); + } + }); + test('resetRootCert change event propagates', async () => { + const nodePath = `${dataDir}/polykey`; + let pkAgent: PolykeyAgent | undefined; + try { + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + logger, + }); + const prom = promise(); + pkAgent.events.on( + PolykeyAgent.eventSymbols.KeyManager, + async (data: KeyManagerChangeData) => { + prom.resolveP(data); + }, + ); + await pkAgent.keyManager.resetRootCert(); + + await expect(prom.p).resolves.toBeDefined(); + } finally { + await pkAgent?.stop(); + await pkAgent?.destroy(); + } + }); }); diff --git a/tests/acl/ACL.test.ts b/tests/acl/ACL.test.ts index a671caf102..ec4020a1bf 100644 --- a/tests/acl/ACL.test.ts +++ b/tests/acl/ACL.test.ts @@ -11,7 +11,7 @@ import ACL from '@/acl/ACL'; import * as aclErrors from '@/acl/errors'; import * as keysUtils from '@/keys/utils'; import * as vaultsUtils from '@/vaults/utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe(ACL.name, () => { const logger = new Logger(`${ACL.name} test`, LogLevel.WARN, [ @@ -19,14 +19,14 @@ describe(ACL.name, () => { ]); // Node Ids - const nodeIdX = testUtils.generateRandomNodeId(); - const nodeIdY = testUtils.generateRandomNodeId(); - const nodeIdG1First = testUtils.generateRandomNodeId(); - const nodeIdG1Second = testUtils.generateRandomNodeId(); - const nodeIdG1Third = testUtils.generateRandomNodeId(); - const nodeIdG1Fourth = testUtils.generateRandomNodeId(); - const nodeIdG2First = testUtils.generateRandomNodeId(); - const nodeIdG2Second = testUtils.generateRandomNodeId(); + const nodeIdX = testNodesUtils.generateRandomNodeId(); + const nodeIdY = testNodesUtils.generateRandomNodeId(); + const nodeIdG1First = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Second = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Third = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Fourth = testNodesUtils.generateRandomNodeId(); + const nodeIdG2First = testNodesUtils.generateRandomNodeId(); + const nodeIdG2Second = testNodesUtils.generateRandomNodeId(); let dataDir: string; let db: DB; diff --git a/tests/agent/GRPCClientAgent.test.ts b/tests/agent/GRPCClientAgent.test.ts index 60a84410cb..134273e305 100644 --- a/tests/agent/GRPCClientAgent.test.ts +++ b/tests/agent/GRPCClientAgent.test.ts @@ -6,6 +6,7 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -21,6 +22,7 @@ import NotificationsManager from '@/notifications/NotificationsManager'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as agentErrors from '@/agent/errors'; import * as keysUtils from '@/keys/utils'; +import { timerStart } from '@/utils'; import * as testAgentUtils from './utils'; describe(GRPCClientAgent.name, () => { @@ -48,6 +50,7 @@ describe(GRPCClientAgent.name, () => { let keyManager: KeyManager; let vaultManager: VaultManager; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -109,21 +112,26 @@ describe(GRPCClientAgent.name, () => { keyManager, 
logger, }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, logger, }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db: db, sigchain: sigchain, keyManager: keyManager, nodeGraph: nodeGraph, nodeConnectionManager: nodeConnectionManager, + queue, logger: logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl: acl, @@ -175,6 +183,8 @@ await notificationsManager.stop(); await sigchain.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await nodeGraph.stop(); await gestaltGraph.stop(); await acl.stop(); @@ -257,7 +267,7 @@ port: clientProxy1.getForwardPort(), authToken: clientProxy1.authToken, }, - timeout: 5000, + timer: timerStart(5000), logger, }); @@ -291,7 +301,7 @@ port: clientProxy2.getForwardPort(), authToken: clientProxy2.authToken, }, - timeout: 5000, + timer: timerStart(5000), }); }); afterEach(async () => {
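The same lifecycle dance recurs in nearly every test file this PR touches: the new Queue starts first, then NodeManager, and NodeConnectionManager.start() now receives the NodeManager as a parameter; teardown runs in reverse. Distilled from the diffs (a sketch of the ordering only, constructor arguments abbreviated):

// Bring-up: queue -> nodeManager -> nodeConnectionManager
const queue = new Queue({ logger });
const nodeConnectionManager = new NodeConnectionManager({
  keyManager, nodeGraph, proxy, queue, logger,
});
const nodeManager = new NodeManager({
  db, keyManager, nodeGraph, nodeConnectionManager, sigchain, queue, logger,
});
await queue.start();
await nodeManager.start();
await nodeConnectionManager.start({ nodeManager });

// Teardown mirrors bring-up in reverse
await nodeConnectionManager.stop();
await nodeManager.stop();
await queue.stop();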
diff --git a/tests/agent/service/nodesClosestLocalNode.test.ts b/tests/agent/service/nodesClosestLocalNode.test.ts new file mode 100644 index 0000000000..306d9cd063 --- /dev/null +++ b/tests/agent/service/nodesClosestLocalNode.test.ts @@ -0,0 +1,110 @@ +import type { Host, Port } from '@/network/types'; +import type { NodeIdEncoded } from '@/nodes/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import PolykeyAgent from '@/PolykeyAgent'; +import GRPCServer from '@/grpc/GRPCServer'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import { AgentServiceService } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; +import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; +import * as keysUtils from '@/keys/utils'; +import * as nodesUtils from '@/nodes/utils'; +import nodesClosestLocalNodesGet from '@/agent/service/nodesClosestLocalNodesGet'; +import * as testNodesUtils from '../../nodes/utils'; +import * as testUtils from '../../utils'; + +describe('nodesClosestLocalNode', () => { + const logger = new Logger('nodesClosestLocalNode test', LogLevel.WARN, [ + new StreamHandler(), + ]); + const password = 'helloworld'; + let dataDir: string; + let nodePath: string; + let grpcServer: GRPCServer; + let grpcClient: GRPCClientAgent; + let pkAgent: PolykeyAgent; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValueOnce(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValueOnce(globalKeyPair); + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + nodePath = path.join(dataDir, 'keynode'); + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + keysConfig: { + rootKeyPairBits: 2048, + }, + seedNodes: {}, // Explicitly no seed nodes on startup + networkConfig: { + proxyHost: '127.0.0.1' as Host, + }, + logger, + }); + // Setting up a remote keynode + const agentService = { + nodesClosestLocalNodesGet: nodesClosestLocalNodesGet({ + nodeGraph: pkAgent.nodeGraph, + db: pkAgent.db, + logger, + }), + }; + grpcServer = new GRPCServer({ logger }); + await grpcServer.start({ + services: [[AgentServiceService, agentService]], + host: '127.0.0.1' as Host, + port: 0 as Port, + }); + grpcClient = await GRPCClientAgent.createGRPCClientAgent({ + nodeId: pkAgent.keyManager.getNodeId(), + host: '127.0.0.1' as Host, + port: grpcServer.getPort(), + logger, + }); + }, global.defaultTimeout); + afterAll(async () => { + await grpcClient.destroy(); + await grpcServer.stop(); + await pkAgent.stop(); + await pkAgent.destroy(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + test('should get closest local nodes', async () => { + // Adding 10 nodes + const nodes: Array<NodeIdEncoded> = []; + for (let i = 0; i < 10; i++) { + const nodeId = testNodesUtils.generateRandomNodeId(); + await pkAgent.nodeGraph.setNode(nodeId, { + host: 'localhost' as Host, + port: 55555 as Port, + }); + nodes.push(nodesUtils.encodeNodeId(nodeId)); + } + const nodeIdEncoded = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); + const nodeMessage = new nodesPB.Node(); + nodeMessage.setNodeId(nodeIdEncoded); + const result = await grpcClient.nodesClosestLocalNodesGet(nodeMessage); + const resultNodes: Array<NodeIdEncoded> = []; + for (const [resultNode] of result.toObject().nodeTableMap) { + resultNodes.push(resultNode as NodeIdEncoded); + } + expect(nodes.sort()).toEqual(resultNodes.sort()); + }); +});
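The new service tests on either side of this point share one harness: construct only the handler under test, register it on a bare GRPCServer, and drive it through a GRPCClientAgent, with a full PolykeyAgent supplying the domain objects. The skeleton, paraphrased from the tests (someHandler and its injected dependencies stand in for the specific handler being exercised):

const agentService = {
  someHandler: someHandler({
    // Inject only the domain objects this handler needs, e.g.
    // pkAgent.sigchain, pkAgent.nodeGraph, pkAgent.db
  }),
};
const grpcServer = new GRPCServer({ logger });
await grpcServer.start({
  services: [[AgentServiceService, agentService]],
  host: '127.0.0.1' as Host,
  port: 0 as Port, // let the OS pick a free port
});
const grpcClient = await GRPCClientAgent.createGRPCClientAgent({
  nodeId: pkAgent.keyManager.getNodeId(),
  host: '127.0.0.1' as Host,
  port: grpcServer.getPort(),
  logger,
});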
diff --git a/tests/agent/service/nodesChainDataGet.test.ts b/tests/agent/service/nodesChainDataGet.test.ts new file mode 100644 index 0000000000..4e080443a5 --- /dev/null +++ b/tests/agent/service/nodesChainDataGet.test.ts @@ -0,0 +1,120 @@ +import type { Host, Port } from '@/network/types'; +import type { ClaimData } from '@/claims/types'; +import type { IdentityId, ProviderId } from '@/identities/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import PolykeyAgent from '@/PolykeyAgent'; +import GRPCServer from '@/grpc/GRPCServer'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import { AgentServiceService } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; +import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import * as keysUtils from '@/keys/utils'; +import * as nodesUtils from '@/nodes/utils'; +import nodesChainDataGet from '@/agent/service/nodesChainDataGet'; +import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; + +describe('nodesChainDataGet', () => { + const logger = new Logger('nodesChainDataGet test', LogLevel.WARN, [ + new StreamHandler(), + ]); + const password = 'helloworld'; + let dataDir: string; + let nodePath: string; + let grpcServer: GRPCServer; + let grpcClient: GRPCClientAgent; + let pkAgent: PolykeyAgent; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValueOnce(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValueOnce(globalKeyPair); + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + nodePath = path.join(dataDir, 'keynode'); + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + keysConfig: { + rootKeyPairBits: 2048, + }, + seedNodes: {}, // Explicitly no seed nodes on startup + networkConfig: { + proxyHost: '127.0.0.1' as Host, + }, + logger, + }); + const agentService = { + nodesChainDataGet: nodesChainDataGet({ + sigchain: pkAgent.sigchain, + db: pkAgent.db, + logger, + }), + }; + grpcServer = new GRPCServer({ logger }); + await grpcServer.start({ + services: [[AgentServiceService, agentService]], + host: '127.0.0.1' as Host, + port: 0 as Port, + }); + grpcClient = await GRPCClientAgent.createGRPCClientAgent({ + nodeId: pkAgent.keyManager.getNodeId(), + host: '127.0.0.1' as Host, + port: grpcServer.getPort(), + logger, + }); + }, global.defaultTimeout); + afterAll(async () => { + await grpcClient.destroy(); + await grpcServer.stop(); + await pkAgent.stop(); + await pkAgent.destroy(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + test('should get the chain data', async () => { + const srcNodeIdEncoded = nodesUtils.encodeNodeId( + pkAgent.keyManager.getNodeId(), + ); + // Add 10 claims + for (let i = 1; i <= 5; i++) { + const node2 = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); + const nodeLink: ClaimData = { + type: 'node', + node1: srcNodeIdEncoded, + node2: node2, + }; + await pkAgent.sigchain.addClaim(nodeLink); + } + for (let i = 6; i <= 10; i++) { + const identityLink: ClaimData = { + type: 'identity', + node: srcNodeIdEncoded, + provider: ('ProviderId' + i.toString()) as ProviderId, + identity: ('IdentityId' + i.toString()) as IdentityId, + }; + await pkAgent.sigchain.addClaim(identityLink); + } + + const response = await grpcClient.nodesChainDataGet( + new utilsPB.EmptyMessage(), + ); + const chainIds: Array<string> = []; + for (const [id] of response.toObject().chainDataMap) chainIds.push(id); + expect(chainIds).toHaveLength(10); + }); +});
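The chain-data test above builds both ClaimData variants. For orientation, a consumer can narrow on the type discriminant; an illustrative sketch only, using just the fields that appear in the test:

function describeClaim(claim: ClaimData): string {
  switch (claim.type) {
    case 'node':
      // A cryptographic link between two node IDs
      return `node link: ${claim.node1} <-> ${claim.node2}`;
    case 'identity':
      // A link between a node and an external identity
      return `identity link: ${claim.node} -> ${claim.provider}:${claim.identity}`;
  }
}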
diff --git a/tests/agent/service/nodesHolePunchMessage.test.ts b/tests/agent/service/nodesHolePunchMessage.test.ts new file mode 100644 index 0000000000..70615948c3 --- /dev/null +++ b/tests/agent/service/nodesHolePunchMessage.test.ts @@ -0,0 +1,105 @@ +import type { Host, Port } from '@/network/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import PolykeyAgent from '@/PolykeyAgent'; +import GRPCServer from '@/grpc/GRPCServer'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import { AgentServiceService } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; +import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; +import * as keysUtils from '@/keys/utils'; +import * as nodesUtils from '@/nodes/utils'; +import nodesHolePunchMessageSend from '@/agent/service/nodesHolePunchMessageSend'; +import * as networkUtils from '@/network/utils'; +import * as testUtils from '../../utils'; + +describe('nodesHolePunchMessage', () => { + const logger = new Logger('nodesHolePunchMessage test', LogLevel.WARN, [ + new StreamHandler(), + ]); + const password = 'helloworld'; + let dataDir: string; + let nodePath: string; + let grpcServer: GRPCServer; + let grpcClient: GRPCClientAgent; + let pkAgent: PolykeyAgent; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValueOnce(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValueOnce(globalKeyPair); + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + nodePath = path.join(dataDir, 'keynode'); + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + keysConfig: { + rootKeyPairBits: 2048, + }, + seedNodes: {}, // Explicitly no seed nodes on startup + networkConfig: { + proxyHost: '127.0.0.1' as Host, + }, + logger, + }); + const agentService = { + nodesHolePunchMessageSend: nodesHolePunchMessageSend({ + keyManager: pkAgent.keyManager, + nodeConnectionManager: pkAgent.nodeConnectionManager, + nodeManager: pkAgent.nodeManager, + db: pkAgent.db, + logger, + }), + }; + grpcServer = new GRPCServer({ logger }); + await grpcServer.start({ + services: [[AgentServiceService, agentService]], + host: '127.0.0.1' as Host, + port: 0 as Port, + }); + grpcClient = await GRPCClientAgent.createGRPCClientAgent({ + nodeId: pkAgent.keyManager.getNodeId(), + host: '127.0.0.1' as Host, + port: grpcServer.getPort(), + logger, + }); + }, global.defaultTimeout); + afterAll(async () => { + await grpcClient.destroy(); + await grpcServer.stop(); + await pkAgent.stop(); + await pkAgent.destroy(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + test('should send a hole punch relay message', async () => { + const nodeId = nodesUtils.encodeNodeId(pkAgent.keyManager.getNodeId()); + const proxyAddress = networkUtils.buildAddress( + pkAgent.proxy.getProxyHost(), + pkAgent.proxy.getProxyPort(), + ); + const signature = await pkAgent.keyManager.signWithRootKeyPair( + Buffer.from(proxyAddress), + ); + const relayMessage = new nodesPB.Relay(); + relayMessage + .setTargetId(nodeId) + .setSrcId(nodeId) + .setSignature(signature.toString()) + .setProxyAddress(proxyAddress); + await grpcClient.nodesHolePunchMessageSend(relayMessage); + // TODO: check if the ping was sent + }); +});
diff --git a/tests/agent/service/notificationsSend.test.ts b/tests/agent/service/notificationsSend.test.ts index c0b79e91cd..6d08b842ab 100644 --- a/tests/agent/service/notificationsSend.test.ts +++ b/tests/agent/service/notificationsSend.test.ts @@ -8,6 +8,7 @@ import { createPrivateKey, createPublicKey } from 'crypto'; import { exportJWK, SignJWT } from 'jose'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -40,6 +41,7 @@ describe('notificationsSend', () => { let senderKeyManager: KeyManager; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -109,23 +111,30 @@ keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await 
nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeGraph, nodeConnectionManager, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -160,6 +169,8 @@ describe('notificationsSend', () => { await grpcServer.stop(); await notificationsManager.stop(); await nodeConnectionManager.stop(); + await queue.stop(); + await nodeManager.stop(); await sigchain.stop(); await sigchain.stop(); await proxy.stop(); diff --git a/tests/agent/utils.ts b/tests/agent/utils.ts index f2b896024f..afa61c0c08 100644 --- a/tests/agent/utils.ts +++ b/tests/agent/utils.ts @@ -1,5 +1,4 @@ import type { Host, Port, ProxyConfig } from '@/network/types'; - import type { IAgentServiceServer } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; import type { KeyManager } from '@/keys'; import type { VaultManager } from '@/vaults'; @@ -20,7 +19,8 @@ import { createAgentService, GRPCClientAgent, } from '@/agent'; -import * as testUtils from '../utils'; +import { timerStart } from '@/utils'; +import * as testNodesUtils from '../nodes/utils'; async function openTestAgentServer({ keyManager, @@ -89,13 +89,13 @@ async function openTestAgentClient( new StreamHandler(), ]); return await GRPCClientAgent.createGRPCClientAgent({ - nodeId: nodeId ?? testUtils.generateRandomNodeId(), + nodeId: nodeId ?? testNodesUtils.generateRandomNodeId(), host: '127.0.0.1' as Host, port: port as Port, logger: logger, destroyCallback: async () => {}, proxyConfig, - timeout: 30000, + timer: timerStart(30000), }); } diff --git a/tests/bin/nodes/add.test.ts b/tests/bin/nodes/add.test.ts index 062cf6cdf7..b3bd7cc677 100644 --- a/tests/bin/nodes/add.test.ts +++ b/tests/bin/nodes/add.test.ts @@ -9,22 +9,25 @@ import { sysexits } from '@/utils'; import PolykeyAgent from '@/PolykeyAgent'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; +import NodeManager from '@/nodes/NodeManager'; import * as testBinUtils from '../utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('add', () => { const logger = new Logger('add test', LogLevel.WARN, [new StreamHandler()]); const password = 'helloworld'; - const validNodeId = testUtils.generateRandomNodeId(); + const validNodeId = testNodesUtils.generateRandomNodeId(); const invalidNodeId = IdInternal.fromString('INVALIDID'); const validHost = '0.0.0.0'; const invalidHost = 'INVALIDHOST'; - const port = '55555'; + const port = 55555; let dataDir: string; let nodePath: string; let pkAgent: PolykeyAgent; let mockedGenerateKeyPair: jest.SpyInstance; let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + let mockedPingNode: jest.SpyInstance; beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); mockedGenerateKeyPair = jest @@ -37,6 +40,7 @@ describe('add', () => { path.join(os.tmpdir(), 'polykey-test-'), ); nodePath = path.join(dataDir, 'polykey'); + mockedPingNode = jest.spyOn(NodeManager.prototype, 'pingNode'); // Cannot use the shared global agent since we can't 'un-add' a node pkAgent = await PolykeyAgent.createPolykeyAgent({ password, @@ -59,10 +63,22 @@ describe('add', () => { }); mockedGenerateKeyPair.mockRestore(); mockedGenerateDeterministicKeyPair.mockRestore(); + mockedPingNode.mockRestore(); + }); + beforeEach(async () => { + await pkAgent.nodeGraph.stop(); + await 
pkAgent.nodeGraph.start({ fresh: true }); + mockedPingNode.mockImplementation(() => true); }); test('adds a node', async () => { const { exitCode } = await testBinUtils.pkStdio( - ['nodes', 'add', nodesUtils.encodeNodeId(validNodeId), validHost, port], + [ + 'nodes', + 'add', + nodesUtils.encodeNodeId(validNodeId), + validHost, + `${port}`, + ], { PK_NODE_PATH: nodePath, PK_PASSWORD: password, @@ -80,11 +96,17 @@ describe('add', () => { dataDir, ); expect(stdout).toContain(validHost); - expect(stdout).toContain(port); + expect(stdout).toContain(`${port}`); }); test('fails to add a node (invalid node ID)', async () => { const { exitCode } = await testBinUtils.pkStdio( - ['nodes', 'add', nodesUtils.encodeNodeId(invalidNodeId), validHost, port], + [ + 'nodes', + 'add', + nodesUtils.encodeNodeId(invalidNodeId), + validHost, + `${port}`, + ], { PK_NODE_PATH: nodePath, PK_PASSWORD: password, @@ -95,7 +117,13 @@ describe('add', () => { }); test('fails to add a node (invalid IP address)', async () => { const { exitCode } = await testBinUtils.pkStdio( - ['nodes', 'add', nodesUtils.encodeNodeId(validNodeId), invalidHost, port], + [ + 'nodes', + 'add', + nodesUtils.encodeNodeId(validNodeId), + invalidHost, + `${port}`, + ], { PK_NODE_PATH: nodePath, PK_PASSWORD: password, @@ -104,4 +132,65 @@ describe('add', () => { ); expect(exitCode).toBe(sysexits.USAGE); }); + test('adds a node with --force flag', async () => { + const { exitCode } = await testBinUtils.pkStdio( + [ + 'nodes', + 'add', + '--force', + nodesUtils.encodeNodeId(validNodeId), + validHost, + `${port}`, + ], + { + PK_NODE_PATH: nodePath, + PK_PASSWORD: password, + }, + dataDir, + ); + expect(exitCode).toBe(0); + // Checking if node was added. + const node = await pkAgent.nodeGraph.getNode(validNodeId); + expect(node?.address).toEqual({ host: validHost, port: port }); + }); + test('fails to add node when ping fails', async () => { + mockedPingNode.mockImplementation(() => false); + const { exitCode } = await testBinUtils.pkStdio( + [ + 'nodes', + 'add', + nodesUtils.encodeNodeId(validNodeId), + validHost, + `${port}`, + ], + { + PK_NODE_PATH: nodePath, + PK_PASSWORD: password, + }, + dataDir, + ); + expect(exitCode).toBe(sysexits.NOHOST); + }); + test('adds a node with --no-ping flag', async () => { + mockedPingNode.mockImplementation(() => false); + const { exitCode } = await testBinUtils.pkStdio( + [ + 'nodes', + 'add', + '--no-ping', + nodesUtils.encodeNodeId(validNodeId), + validHost, + `${port}`, + ], + { + PK_NODE_PATH: nodePath, + PK_PASSWORD: password, + }, + dataDir, + ); + expect(exitCode).toBe(0); + // Checking if node was added. 
+ const node = await pkAgent.nodeGraph.getNode(validNodeId); + expect(node?.address).toEqual({ host: validHost, port: port }); + }); }); diff --git a/tests/bin/nodes/find.test.ts b/tests/bin/nodes/find.test.ts index 56bffd2638..b60804c641 100644 --- a/tests/bin/nodes/find.test.ts +++ b/tests/bin/nodes/find.test.ts @@ -158,31 +158,37 @@ describe('find', () => { port: remoteOfflinePort, }); }); - test('fails to find an unknown node', async () => { - const unknownNodeId = nodesUtils.decodeNodeId( - 'vrcacp9vsb4ht25hds6s4lpp2abfaso0mptcfnh499n35vfcn2gkg', - ); - const { exitCode, stdout } = await testBinUtils.pkStdio( - [ - 'nodes', - 'find', - nodesUtils.encodeNodeId(unknownNodeId!), - '--format', - 'json', - ], - { - PK_NODE_PATH: nodePath, - PK_PASSWORD: password, - }, - dataDir, - ); - expect(exitCode).toBe(sysexits.GENERAL); - expect(JSON.parse(stdout)).toEqual({ - success: false, - message: `Failed to find node ${nodesUtils.encodeNodeId(unknownNodeId!)}`, - id: nodesUtils.encodeNodeId(unknownNodeId!), - host: '', - port: 0, - }); - }); + test( + 'fails to find an unknown node', + async () => { + const unknownNodeId = nodesUtils.decodeNodeId( + 'vrcacp9vsb4ht25hds6s4lpp2abfaso0mptcfnh499n35vfcn2gkg', + ); + const { exitCode, stdout } = await testBinUtils.pkStdio( + [ + 'nodes', + 'find', + nodesUtils.encodeNodeId(unknownNodeId!), + '--format', + 'json', + ], + { + PK_NODE_PATH: nodePath, + PK_PASSWORD: password, + }, + dataDir, + ); + expect(exitCode).toBe(sysexits.GENERAL); + expect(JSON.parse(stdout)).toEqual({ + success: false, + message: `Failed to find node ${nodesUtils.encodeNodeId( + unknownNodeId!, + )}`, + id: nodesUtils.encodeNodeId(unknownNodeId!), + host: '', + port: 0, + }); + }, + global.failedConnectionTimeout, + ); }); diff --git a/tests/bin/vaults/vaults.test.ts b/tests/bin/vaults/vaults.test.ts index 52b5f4e4c2..949f208eeb 100644 --- a/tests/bin/vaults/vaults.test.ts +++ b/tests/bin/vaults/vaults.test.ts @@ -11,7 +11,7 @@ import * as vaultsUtils from '@/vaults/utils'; import sysexits from '@/utils/sysexits'; import NotificationsManager from '@/notifications/NotificationsManager'; import * as testBinUtils from '../utils'; -import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; jest.mock('@/keys/utils', () => ({ ...jest.requireActual('@/keys/utils'), @@ -378,7 +378,7 @@ describe('CLI vaults', () => { mockedSendNotification.mockImplementation(async (_) => {}); const vaultId = await polykeyAgent.vaultManager.createVault(vaultName); const vaultIdEncoded = vaultsUtils.encodeVaultId(vaultId); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), @@ -418,7 +418,7 @@ describe('CLI vaults', () => { ); const vaultIdEncoded1 = vaultsUtils.encodeVaultId(vaultId1); const vaultIdEncoded2 = vaultsUtils.encodeVaultId(vaultId2); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), @@ -489,7 +489,7 @@ describe('CLI vaults', () => { ); const vaultIdEncoded1 = vaultsUtils.encodeVaultId(vaultId1); const vaultIdEncoded2 = vaultsUtils.encodeVaultId(vaultId2); - const targetNodeId = testUtils.generateRandomNodeId(); + const 
targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), diff --git a/tests/claims/utils.test.ts b/tests/claims/utils.test.ts index f7c6e6410c..e574036830 100644 --- a/tests/claims/utils.test.ts +++ b/tests/claims/utils.test.ts @@ -11,12 +11,13 @@ import * as claimsErrors from '@/claims/errors'; import { utils as keysUtils } from '@/keys'; import { utils as nodesUtils } from '@/nodes'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('claims/utils', () => { // Node Ids - const nodeId1 = testUtils.generateRandomNodeId(); + const nodeId1 = testNodesUtils.generateRandomNodeId(); const nodeId1Encoded = nodesUtils.encodeNodeId(nodeId1); - const nodeId2 = testUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); const nodeId2Encoded = nodesUtils.encodeNodeId(nodeId2); let publicKey: PublicKeyPem; @@ -327,9 +328,7 @@ describe('claims/utils', () => { // Create some dummy public key, and check that this does not verify const dummyKeyPair = await keysUtils.generateKeyPair(2048); - const dummyPublicKey = await keysUtils.publicKeyToPem( - dummyKeyPair.publicKey, - ); + const dummyPublicKey = keysUtils.publicKeyToPem(dummyKeyPair.publicKey); expect(await claimsUtils.verifyClaimSignature(claim, dummyPublicKey)).toBe( false, ); diff --git a/tests/client/GRPCClientClient.test.ts b/tests/client/GRPCClientClient.test.ts index bb083f8167..b90406a803 100644 --- a/tests/client/GRPCClientClient.test.ts +++ b/tests/client/GRPCClientClient.test.ts @@ -11,6 +11,7 @@ import Session from '@/sessions/Session'; import * as keysUtils from '@/keys/utils'; import * as clientErrors from '@/client/errors'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import { timerStart } from '@/utils'; import * as testClientUtils from './utils'; import * as testUtils from '../utils'; @@ -76,7 +77,7 @@ describe(GRPCClientClient.name, () => { port: port as Port, tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, logger: logger, - timeout: 10000, + timer: timerStart(10000), session: session, }); await client.destroy(); diff --git a/tests/client/service/agentLockAll.test.ts b/tests/client/service/agentLockAll.test.ts index a024cc05cc..fe56a0d7d6 100644 --- a/tests/client/service/agentLockAll.test.ts +++ b/tests/client/service/agentLockAll.test.ts @@ -14,6 +14,7 @@ import { ClientServiceService } from '@/proto/js/polykey/v1/client_service_grpc_ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as keysUtils from '@/keys/utils'; import * as clientUtils from '@/client/utils/utils'; +import { timerStart } from '@/utils/index'; import * as testUtils from '../../utils'; describe('agentLockall', () => { @@ -89,7 +90,7 @@ describe('agentLockall', () => { nodeId: keyManager.getNodeId(), host: '127.0.0.1' as Host, port: grpcServer.getPort(), - timeout: 5000, + timer: timerStart(5000), logger, }); }); diff --git a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts index 2c314711b0..f9789cb608 100644 --- a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts +++ b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; 
+import Queue from '@/nodes/Queue'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -59,6 +60,7 @@ describe('gestaltsDiscoveryByIdentity', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -125,23 +127,30 @@ describe('gestaltsDiscoveryByIdentity', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeConnectionManager, nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); discovery = await Discovery.createDiscovery({ db, keyManager, @@ -177,6 +186,8 @@ describe('gestaltsDiscoveryByIdentity', () => { await discovery.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); diff --git a/tests/client/service/gestaltsDiscoveryByNode.test.ts b/tests/client/service/gestaltsDiscoveryByNode.test.ts index 7071428e61..3c0f00b10a 100644 --- a/tests/client/service/gestaltsDiscoveryByNode.test.ts +++ b/tests/client/service/gestaltsDiscoveryByNode.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -26,6 +27,7 @@ import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('gestaltsDiscoveryByNode', () => { const logger = new Logger('gestaltsDiscoveryByNode test', LogLevel.WARN, [ @@ -35,7 +37,7 @@ describe('gestaltsDiscoveryByNode', () => { const authenticate = async (metaClient, metaServer = new Metadata()) => metaServer; const node: NodeInfo = { - id: nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()), + id: nodesUtils.encodeNodeId(testNodesUtils.generateRandomNodeId()), chain: {}, }; let mockedGenerateKeyPair: jest.SpyInstance; @@ -59,6 +61,7 @@ describe('gestaltsDiscoveryByNode', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -125,23 +128,30 @@ describe('gestaltsDiscoveryByNode', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeConnectionManager, 
nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); discovery = await Discovery.createDiscovery({ db, keyManager, @@ -177,6 +187,8 @@ describe('gestaltsDiscoveryByNode', () => { await discovery.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); diff --git a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts index ec38cc41de..01a162e316 100644 --- a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts @@ -9,6 +9,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import Discovery from '@/discovery/Discovery'; @@ -115,6 +116,7 @@ describe('gestaltsGestaltTrustByIdentity', () => { let discovery: Discovery; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; + let queue: Queue; let nodeManager: NodeManager; let nodeConnectionManager: NodeConnectionManager; let nodeGraph: NodeGraph; @@ -191,23 +193,30 @@ describe('gestaltsGestaltTrustByIdentity', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, - logger: logger.getChild('nodeManager'), + nodeGraph, + sigchain, + queue, + logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { host: node.proxy.getProxyHost(), port: node.proxy.getProxyPort(), @@ -248,6 +257,8 @@ describe('gestaltsGestaltTrustByIdentity', () => { await grpcServer.stop(); await discovery.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/client/service/gestaltsGestaltTrustByNode.test.ts b/tests/client/service/gestaltsGestaltTrustByNode.test.ts index 1c1ad87b01..df84503a73 100644 --- a/tests/client/service/gestaltsGestaltTrustByNode.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByNode.test.ts @@ -10,6 +10,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import Discovery from '@/discovery/Discovery'; @@ -114,6 +115,7 @@ describe('gestaltsGestaltTrustByNode', () => { let discovery: Discovery; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; + let queue: Queue; let nodeManager: NodeManager; let nodeConnectionManager: NodeConnectionManager; let nodeGraph: NodeGraph; @@ -190,23 +192,30 @@ describe('gestaltsGestaltTrustByNode', () => { 
keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, - logger: logger.getChild('nodeManager'), + nodeGraph, + sigchain, + queue, + logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { host: node.proxy.getProxyHost(), port: node.proxy.getProxyPort(), @@ -247,6 +256,8 @@ describe('gestaltsGestaltTrustByNode', () => { await grpcServer.stop(); await discovery.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/client/service/identitiesClaim.test.ts b/tests/client/service/identitiesClaim.test.ts index 39535394ab..3a17b79a8b 100644 --- a/tests/client/service/identitiesClaim.test.ts +++ b/tests/client/service/identitiesClaim.test.ts @@ -2,12 +2,14 @@ import type { ClaimLinkIdentity } from '@/claims/types'; import type { NodeIdEncoded } from '@/nodes/types'; import type { IdentityId, ProviderId } from '@/identities/types'; import type { Host, Port } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import IdentitiesManager from '@/identities/IdentitiesManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -55,6 +57,7 @@ describe('identitiesClaim', () => { let mockedGenerateKeyPair: jest.SpyInstance; let mockedGenerateDeterministicKeyPair: jest.SpyInstance; let mockedAddClaim: jest.SpyInstance; + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const claim = await claimsUtils.createClaim({ @@ -84,6 +87,7 @@ describe('identitiesClaim', () => { let testProvider: TestProvider; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let proxy: Proxy; @@ -135,14 +139,19 @@ describe('identitiesClaim', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ connConnectTime: 2000, proxy, keyManager, nodeGraph, - logger: logger.getChild('nodeConnectionManager'), + queue, + logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); + await queue.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); const clientService = { identitiesClaim: identitiesClaim({ authenticate, @@ -170,6 +179,7 @@ describe('identitiesClaim', () => { await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); + await queue.stop(); await nodeGraph.stop(); await sigchain.stop(); await proxy.stop(); diff --git 
a/tests/client/service/keysKeyPairRenew.test.ts b/tests/client/service/keysKeyPairRenew.test.ts index 8a792254bd..47445ead05 100644 --- a/tests/client/service/keysKeyPairRenew.test.ts +++ b/tests/client/service/keysKeyPairRenew.test.ts @@ -7,7 +7,6 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; -import NodeGraph from '@/nodes/NodeGraph'; import PolykeyAgent from '@/PolykeyAgent'; import GRPCServer from '@/grpc/GRPCServer'; import GRPCClientClient from '@/client/GRPCClientClient'; @@ -17,6 +16,7 @@ import * as keysPB from '@/proto/js/polykey/v1/keys/keys_pb'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; +import { NodeManager } from '@/nodes'; import * as testUtils from '../../utils'; describe('keysKeyPairRenew', () => { @@ -32,7 +32,7 @@ describe('keysKeyPairRenew', () => { beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const newKeyPair = await keysUtils.generateKeyPair(1024); - mockedRefreshBuckets = jest.spyOn(NodeGraph.prototype, 'refreshBuckets'); + mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'resetBuckets'); mockedGenerateKeyPair = jest .spyOn(keysUtils, 'generateKeyPair') .mockResolvedValueOnce(globalKeyPair) diff --git a/tests/client/service/keysKeyPairReset.test.ts b/tests/client/service/keysKeyPairReset.test.ts index 8c41064b16..55af8f35c7 100644 --- a/tests/client/service/keysKeyPairReset.test.ts +++ b/tests/client/service/keysKeyPairReset.test.ts @@ -7,7 +7,6 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; -import NodeGraph from '@/nodes/NodeGraph'; import PolykeyAgent from '@/PolykeyAgent'; import GRPCServer from '@/grpc/GRPCServer'; import GRPCClientClient from '@/client/GRPCClientClient'; @@ -17,6 +16,7 @@ import * as keysPB from '@/proto/js/polykey/v1/keys/keys_pb'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; +import { NodeManager } from '@/nodes'; import * as testUtils from '../../utils'; describe('keysKeyPairReset', () => { @@ -32,7 +32,7 @@ describe('keysKeyPairReset', () => { beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const newKeyPair = await keysUtils.generateKeyPair(1024); - mockedRefreshBuckets = jest.spyOn(NodeGraph.prototype, 'refreshBuckets'); + mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'resetBuckets'); mockedGenerateKeyPair = jest .spyOn(keysUtils, 'generateKeyPair') .mockResolvedValueOnce(globalKeyPair) diff --git a/tests/client/service/nodesAdd.test.ts b/tests/client/service/nodesAdd.test.ts index 86923b961f..f00e62566f 100644 --- a/tests/client/service/nodesAdd.test.ts +++ b/tests/client/service/nodesAdd.test.ts @@ -5,6 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -50,6 +51,7 @@ describe('nodesAdd', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: 
Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -96,23 +98,30 @@ keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeConnectionManager, nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); const clientService = { nodesAdd: nodesAdd({ authenticate, @@ -139,6 +148,8 @@ await grpcServer.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await db.stop(); @@ -152,13 +163,15 @@ const addressMessage = new nodesPB.Address(); addressMessage.setHost('127.0.0.1'); addressMessage.setPort(11111); - const request = new nodesPB.NodeAddress(); + const request = new nodesPB.NodeAdd(); + request.setPing(false); + request.setForce(false); request.setNodeId('vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0'); request.setAddress(addressMessage); const response = await grpcClient.nodesAdd( request, clientUtils.encodeAuthFromPassword(password), ); expect(response).toBeInstanceOf(utilsPB.EmptyMessage); const result = await nodeGraph.getNode( nodesUtils.decodeNodeId( 'vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0', )!, ); expect(result).toBeDefined(); - expect(result!.host).toBe('127.0.0.1'); - expect(result!.port).toBe(11111); + expect(result!.address).toEqual({ host: '127.0.0.1', port: 11111 }); }); test('cannot add invalid node', async () => { // Invalid host const addressMessage = new nodesPB.Address(); addressMessage.setHost(''); addressMessage.setPort(11111); - const request = new nodesPB.NodeAddress(); + const request = new nodesPB.NodeAdd(); + request.setPing(false); + request.setForce(false); request.setNodeId('vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0'); request.setAddress(addressMessage); await expectRemoteError( diff --git a/tests/client/service/nodesClaim.test.ts b/tests/client/service/nodesClaim.test.ts index bc04e2ae6e..95eaf8b6e4 100644 --- a/tests/client/service/nodesClaim.test.ts +++ b/tests/client/service/nodesClaim.test.ts @@ -7,6 +7,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NotificationsManager from '@/notifications/NotificationsManager'; import ACL from '@/acl/ACL'; @@ -75,6 +76,7 @@ const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -126,23 +128,30 @@ keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: 
logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, + nodeGraph, + sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -178,6 +187,8 @@ describe('nodesClaim', () => { await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await nodeGraph.stop(); await notificationsManager.stop(); await sigchain.stop(); diff --git a/tests/client/service/nodesFind.test.ts b/tests/client/service/nodesFind.test.ts index b01e157a13..4ff59d9f1e 100644 --- a/tests/client/service/nodesFind.test.ts +++ b/tests/client/service/nodesFind.test.ts @@ -1,10 +1,12 @@ import type { Host, Port } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -55,6 +57,7 @@ describe('nodesFind', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let proxy: Proxy; @@ -100,15 +103,20 @@ describe('nodesFind', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); + await queue.start(); + await nodeConnectionManager.start({ nodeManager: {} as NodeManager }); const clientService = { nodesFind: nodesFind({ authenticate, @@ -135,6 +143,7 @@ describe('nodesFind', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await queue.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); diff --git a/tests/client/service/nodesPing.test.ts b/tests/client/service/nodesPing.test.ts index d4954bb4ae..14f9cbceee 100644 --- a/tests/client/service/nodesPing.test.ts +++ b/tests/client/service/nodesPing.test.ts @@ -5,6 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -55,6 +56,7 @@ describe('nodesPing', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -101,23 +103,29 @@ describe('nodesPing', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, 
connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeConnectionManager, nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeConnectionManager.start({ nodeManager }); const clientService = { nodesPing: nodesPing({ authenticate, @@ -144,6 +152,7 @@ describe('nodesPing', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await queue.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); diff --git a/tests/client/service/notificationsClear.test.ts b/tests/client/service/notificationsClear.test.ts index d8572c5844..4a9002f21d 100644 --- a/tests/client/service/notificationsClear.test.ts +++ b/tests/client/service/notificationsClear.test.ts @@ -5,6 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -53,6 +54,7 @@ describe('notificationsClear', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -105,23 +107,30 @@ describe('notificationsClear', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeConnectionManager, nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -158,6 +167,8 @@ describe('notificationsClear', () => { await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await acl.stop(); diff --git a/tests/client/service/notificationsRead.test.ts b/tests/client/service/notificationsRead.test.ts index 73690a54d0..b5a3de17af 100644 --- a/tests/client/service/notificationsRead.test.ts +++ b/tests/client/service/notificationsRead.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -24,12 +25,13 @@ import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as clientUtils from '@/client/utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('notificationsRead', () => { const logger = new Logger('notificationsRead test', LogLevel.WARN, [ new StreamHandler(), ]); - const nodeIdSender = 
diff --git a/tests/client/service/notificationsRead.test.ts b/tests/client/service/notificationsRead.test.ts
index 73690a54d0..b5a3de17af 100644
--- a/tests/client/service/notificationsRead.test.ts
+++ b/tests/client/service/notificationsRead.test.ts
@@ -6,6 +6,7 @@ import os from 'os';
 import Logger, { LogLevel, StreamHandler } from '@matrixai/logger';
 import { Metadata } from '@grpc/grpc-js';
 import { DB } from '@matrixai/db';
+import Queue from '@/nodes/Queue';
 import KeyManager from '@/keys/KeyManager';
 import GRPCServer from '@/grpc/GRPCServer';
 import NodeConnectionManager from '@/nodes/NodeConnectionManager';
@@ -24,12 +25,13 @@ import * as keysUtils from '@/keys/utils';
 import * as nodesUtils from '@/nodes/utils';
 import * as clientUtils from '@/client/utils';
 import * as testUtils from '../../utils';
+import * as testNodesUtils from '../../nodes/utils';

 describe('notificationsRead', () => {
   const logger = new Logger('notificationsRead test', LogLevel.WARN, [
     new StreamHandler(),
   ]);
-  const nodeIdSender = testUtils.generateRandomNodeId();
+  const nodeIdSender = testNodesUtils.generateRandomNodeId();
   const nodeIdSenderEncoded = nodesUtils.encodeNodeId(nodeIdSender);
   const password = 'helloworld';
   const authenticate = async (metaClient, metaServer = new Metadata()) =>
@@ -127,6 +129,7 @@ describe('notificationsRead', () => {
   const authToken = 'abc123';
   let dataDir: string;
   let nodeGraph: NodeGraph;
+  let queue: Queue;
   let nodeConnectionManager: NodeConnectionManager;
   let nodeManager: NodeManager;
   let notificationsManager: NotificationsManager;
@@ -179,23 +182,30 @@ describe('notificationsRead', () => {
       keyManager,
       logger: logger.getChild('NodeGraph'),
     });
+    queue = new Queue({
+      logger: logger.getChild('queue'),
+    });
     nodeConnectionManager = new NodeConnectionManager({
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       connConnectTime: 2000,
       connTimeoutTime: 2000,
       logger: logger.getChild('NodeConnectionManager'),
     });
-    await nodeConnectionManager.start();
     nodeManager = new NodeManager({
       db,
       keyManager,
-      nodeGraph,
       nodeConnectionManager,
+      nodeGraph,
       sigchain,
+      queue,
       logger,
     });
+    await queue.start();
+    await nodeManager.start();
+    await nodeConnectionManager.start({ nodeManager });
     notificationsManager =
       await NotificationsManager.createNotificationsManager({
         acl,
@@ -233,6 +243,8 @@ describe('notificationsRead', () => {
     await sigchain.stop();
     await nodeGraph.stop();
     await nodeConnectionManager.stop();
+    await nodeManager.stop();
+    await queue.stop();
     await proxy.stop();
     await acl.stop();
     await db.stop();
diff --git a/tests/client/service/notificationsSend.test.ts b/tests/client/service/notificationsSend.test.ts
index 7709f7b47f..35a6a15bbe 100644
--- a/tests/client/service/notificationsSend.test.ts
+++ b/tests/client/service/notificationsSend.test.ts
@@ -6,6 +6,7 @@ import os from 'os';
 import Logger, { LogLevel, StreamHandler } from '@matrixai/logger';
 import { Metadata } from '@grpc/grpc-js';
 import { DB } from '@matrixai/db';
+import Queue from '@/nodes/Queue';
 import KeyManager from '@/keys/KeyManager';
 import GRPCServer from '@/grpc/GRPCServer';
 import NodeConnectionManager from '@/nodes/NodeConnectionManager';
@@ -63,6 +64,7 @@ describe('notificationsSend', () => {
   const authToken = 'abc123';
   let dataDir: string;
   let nodeGraph: NodeGraph;
+  let queue: Queue;
   let nodeConnectionManager: NodeConnectionManager;
   let nodeManager: NodeManager;
   let notificationsManager: NotificationsManager;
@@ -114,23 +116,30 @@ describe('notificationsSend', () => {
       keyManager,
       logger: logger.getChild('NodeGraph'),
     });
+    queue = new Queue({
+      logger: logger.getChild('queue'),
+    });
     nodeConnectionManager = new NodeConnectionManager({
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       connConnectTime: 2000,
       connTimeoutTime: 2000,
       logger: logger.getChild('NodeConnectionManager'),
     });
-    await nodeConnectionManager.start();
     nodeManager = new NodeManager({
       db,
       keyManager,
-      nodeGraph,
       nodeConnectionManager,
+      nodeGraph,
       sigchain,
+      queue,
       logger,
     });
+    await queue.start();
+    await nodeManager.start();
+    await nodeConnectionManager.start({ nodeManager });
     notificationsManager =
       await NotificationsManager.createNotificationsManager({
         acl,
@@ -166,6 +175,8 @@ describe('notificationsSend', () => {
     await notificationsManager.stop();
     await nodeGraph.stop();
     await nodeConnectionManager.stop();
+    await nodeManager.stop();
+    await queue.stop();
     await sigchain.stop();
     await proxy.stop();
     await acl.stop();
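From here on, generateRandomNodeId is imported from tests/nodes/utils (testNodesUtils) instead of the general tests/utils. The helper's implementation is not part of this diff; a plausible sketch, assuming a 32-byte NodeId built on @matrixai/id:

```ts
// Hypothetical sketch only; the real helper lives in tests/nodes/utils.ts.
import { randomBytes } from 'crypto';
import { IdInternal } from '@matrixai/id';
import type { NodeId } from '@/nodes/types';

function generateRandomNodeId(): NodeId {
  // NodeIds are assumed here to be 32 random bytes.
  return IdInternal.fromBuffer<NodeId>(randomBytes(32));
}
```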
diff --git a/tests/client/utils.ts b/tests/client/utils.ts
index 0362573280..9af325dba4 100644
--- a/tests/client/utils.ts
+++ b/tests/client/utils.ts
@@ -11,7 +11,7 @@ import {
 } from '@/proto/js/polykey/v1/client_service_grpc_pb';
 import createClientService from '@/client/service';
 import PolykeyClient from '@/PolykeyClient';
-import { promisify } from '@/utils';
+import { promisify, timerStart } from '@/utils';
 import * as grpcUtils from '@/grpc/utils';

 async function openTestClientServer({
@@ -82,7 +82,7 @@ async function openTestClientClient(
     port: port,
     fs,
     logger,
-    timeout: 30000,
+    timer: timerStart(30000),
   });

   return pkc;
diff --git a/tests/discovery/Discovery.test.ts b/tests/discovery/Discovery.test.ts
index c11c8d0003..a267cc7d8c 100644
--- a/tests/discovery/Discovery.test.ts
+++ b/tests/discovery/Discovery.test.ts
@@ -6,6 +6,7 @@ import path from 'path';
 import os from 'os';
 import Logger, { LogLevel, StreamHandler } from '@matrixai/logger';
 import { DB } from '@matrixai/db';
+import Queue from '@/nodes/Queue';
 import PolykeyAgent from '@/PolykeyAgent';
 import Discovery from '@/discovery/Discovery';
 import GestaltGraph from '@/gestalts/GestaltGraph';
@@ -47,6 +48,7 @@ describe('Discovery', () => {
   let gestaltGraph: GestaltGraph;
   let identitiesManager: IdentitiesManager;
   let nodeGraph: NodeGraph;
+  let queue: Queue;
   let nodeConnectionManager: NodeConnectionManager;
   let nodeManager: NodeManager;
   let db: DB;
@@ -130,23 +132,30 @@ describe('Discovery', () => {
       keyManager,
       logger: logger.getChild('NodeGraph'),
     });
+    queue = new Queue({
+      logger: logger.getChild('queue'),
+    });
     nodeConnectionManager = new NodeConnectionManager({
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       connConnectTime: 2000,
       connTimeoutTime: 2000,
       logger: logger.getChild('NodeConnectionManager'),
     });
-    await nodeConnectionManager.start();
     nodeManager = new NodeManager({
       db,
       keyManager,
-      sigchain,
-      nodeGraph,
       nodeConnectionManager,
-      logger: logger.getChild('nodeManager'),
+      nodeGraph,
+      sigchain,
+      queue,
+      logger,
     });
+    await queue.start();
+    await nodeManager.start();
+    await nodeConnectionManager.start({ nodeManager });
     // Set up other gestalt
     nodeA = await PolykeyAgent.createPolykeyAgent({
       password: password,
@@ -202,6 +211,8 @@ describe('Discovery', () => {
     await nodeA.stop();
     await nodeB.stop();
     await nodeConnectionManager.stop();
+    await nodeManager.stop();
+    await queue.stop();
     await nodeGraph.stop();
     await proxy.stop();
     await sigchain.stop();
@@ -237,7 +248,7 @@ describe('Discovery', () => {
       discovery.queueDiscoveryByIdentity('' as ProviderId, '' as IdentityId),
     ).rejects.toThrow(discoveryErrors.ErrorDiscoveryNotRunning);
     await expect(
-      discovery.queueDiscoveryByNode(testUtils.generateRandomNodeId()),
+      discovery.queueDiscoveryByNode(testNodesUtils.generateRandomNodeId()),
     ).rejects.toThrow(discoveryErrors.ErrorDiscoveryNotRunning);
   });
   test('discovery by node', async () => {
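The tests/client/utils.ts hunk above is the first instance of a pattern repeated below: numeric `timeout`/`connConnectTime` options are replaced by a Timer object created with timerStart. A usage sketch (timerStop is imported alongside timerStart elsewhere in this diff; that it cancels the pending timeout is an assumption):

```ts
import { timerStart, timerStop } from '@/utils';

const timer = timerStart(30000);
try {
  // Pass `timer` where the old code passed `timeout: 30000`.
} finally {
  timerStop(timer); // assumed to clear the underlying setTimeout
}
```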
diff --git a/tests/gestalts/GestaltGraph.test.ts b/tests/gestalts/GestaltGraph.test.ts
index 4b69761ce9..e24a08e00a 100644
--- a/tests/gestalts/GestaltGraph.test.ts
+++ b/tests/gestalts/GestaltGraph.test.ts
@@ -20,19 +20,19 @@ import * as gestaltsErrors from '@/gestalts/errors';
 import * as gestaltsUtils from '@/gestalts/utils';
 import * as keysUtils from '@/keys/utils';
 import * as nodesUtils from '@/nodes/utils';
-import * as testUtils from '../utils';
+import * as testNodesUtils from '../nodes/utils';

 describe('GestaltGraph', () => {
   const logger = new Logger('GestaltGraph Test', LogLevel.WARN, [
     new StreamHandler(),
   ]);
-  const nodeIdABC = testUtils.generateRandomNodeId();
+  const nodeIdABC = testNodesUtils.generateRandomNodeId();
   const nodeIdABCEncoded = nodesUtils.encodeNodeId(nodeIdABC);
-  const nodeIdDEE = testUtils.generateRandomNodeId();
+  const nodeIdDEE = testNodesUtils.generateRandomNodeId();
   const nodeIdDEEEncoded = nodesUtils.encodeNodeId(nodeIdDEE);
-  const nodeIdDEF = testUtils.generateRandomNodeId();
+  const nodeIdDEF = testNodesUtils.generateRandomNodeId();
   const nodeIdDEFEncoded = nodesUtils.encodeNodeId(nodeIdDEF);
-  const nodeIdZZZ = testUtils.generateRandomNodeId();
+  const nodeIdZZZ = testNodesUtils.generateRandomNodeId();
   const nodeIdZZZEncoded = nodesUtils.encodeNodeId(nodeIdZZZ);

   let dataDir: string;
@@ -1248,8 +1248,8 @@ describe('GestaltGraph', () => {
     // its just that node 1 is eliminated
     nodePerms = await acl.getNodePerms();
     expect(Object.keys(nodePerms)).toHaveLength(1);
-    expect(nodePerms[0]).not.toHaveProperty(nodeIdABC.toString());
-    expect(nodePerms[0]).toHaveProperty(nodeIdDEE.toString());
+    expect(nodePerms[0][nodeIdABC.toString()]).toBeUndefined();
+    expect(nodePerms[0][nodeIdDEE.toString()]).toBeDefined();
     await gestaltGraph.unsetNode(nodeIdDEE);
     nodePerms = await acl.getNodePerms();
     expect(Object.keys(nodePerms)).toHaveLength(0);
diff --git a/tests/grpc/GRPCClient.test.ts b/tests/grpc/GRPCClient.test.ts
index 31028187e8..bf252bc6da 100644
--- a/tests/grpc/GRPCClient.test.ts
+++ b/tests/grpc/GRPCClient.test.ts
@@ -16,8 +16,9 @@ import * as keysUtils from '@/keys/utils';
 import * as grpcErrors from '@/grpc/errors';
 import * as clientUtils from '@/client/utils';
 import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb';
+import { timerStart } from '@/utils';
 import * as utils from './utils';
-import * as testUtils from '../utils';
+import * as testNodesUtils from '../nodes/utils';
 import { expectRemoteError } from '../utils';

 describe('GRPCClient', () => {
@@ -62,7 +63,7 @@ describe('GRPCClient', () => {
       },
     });
     const keyManager = {
-      getNodeId: () => testUtils.generateRandomNodeId(),
+      getNodeId: () => testNodesUtils.generateRandomNodeId(),
     } as KeyManager; // Cheeky mocking.
     sessionManager = await SessionManager.createSessionManager({
       db,
@@ -110,7 +111,7 @@ describe('GRPCClient', () => {
         keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey),
         certChainPem: keysUtils.certToPem(clientCert),
       },
-      timeout: 1000,
+      timer: timerStart(1000),
       logger,
     });
     await client.destroy();
@@ -124,7 +125,7 @@ describe('GRPCClient', () => {
         keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey),
         certChainPem: keysUtils.certToPem(clientCert),
       },
-      timeout: 1000,
+      timer: timerStart(1000),
       logger,
     });
     const m = new utilsPB.EchoMessage();
@@ -157,7 +158,7 @@ describe('GRPCClient', () => {
         certChainPem: keysUtils.certToPem(clientCert),
       },
       session,
-      timeout: 1000,
+      timer: timerStart(1000),
       logger,
     });
     let pCall: PromiseUnaryCall<utilsPB.EchoMessage>;
@@ -193,7 +194,7 @@ describe('GRPCClient', () => {
         keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey),
         certChainPem: keysUtils.certToPem(clientCert),
       },
-      timeout: 1000,
+      timer: timerStart(1000),
       logger,
     });
     const challenge = 'f9s8d7f4';
@@ -236,7 +237,7 @@ describe('GRPCClient', () => {
         certChainPem: keysUtils.certToPem(clientCert),
       },
       session,
-      timeout: 1000,
+      timer: timerStart(1000),
       logger,
     });
     const challenge = 'f9s8d7f4';
@@ -261,7 +262,7 @@ describe('GRPCClient', () => {
         keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey),
         certChainPem: keysUtils.certToPem(clientCert),
       },
-      timeout: 1000,
+      timer: timerStart(1000),
       logger,
     });
     const [stream, response] = client.clientStream();
@@ -299,7 +300,7 @@ describe('GRPCClient', () => {
         certChainPem: keysUtils.certToPem(clientCert),
       },
       session,
-      timeout: 1000,
+      timer: timerStart(1000),
       logger,
     });
     const [stream] = client.clientStream();
@@ -322,7 +323,7 @@ describe('GRPCClient', () => {
         keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey),
         certChainPem: keysUtils.certToPem(clientCert),
       },
-      timeout: 1000,
+      timer: timerStart(1000),
       logger,
     });
     const stream = client.duplexStream();
@@ -357,7 +358,7 @@ describe('GRPCClient', () => {
         certChainPem: keysUtils.certToPem(clientCert),
       },
       session,
-      timeout: 1000,
+      timer: timerStart(1000),
       logger,
     });
     const stream = client.duplexStream();
diff --git a/tests/grpc/utils/GRPCClientTest.ts b/tests/grpc/utils/GRPCClientTest.ts
index e3c5f9489b..3b2af291df 100644
--- a/tests/grpc/utils/GRPCClientTest.ts
+++ b/tests/grpc/utils/GRPCClientTest.ts
@@ -5,6 +5,7 @@ import type { Host, Port, TLSConfig, ProxyConfig } from '@/network/types';
 import type * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb';
 import type { ClientReadableStream } from '@grpc/grpc-js/build/src/call';
 import type { AsyncGeneratorReadableStreamClient } from '@/grpc/types';
+import type { Timer } from '@/types';
 import Logger from '@matrixai/logger';
 import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy';
 import GRPCClient from '@/grpc/GRPCClient';
@@ -22,7 +23,7 @@ class GRPCClientTest extends GRPCClient {
     tlsConfig,
     proxyConfig,
     session,
-    timeout = Infinity,
+    timer,
     destroyCallback,
     logger = new Logger(this.name),
   }: {
@@ -32,7 +33,7 @@ class GRPCClientTest extends GRPCClient {
     tlsConfig?: TLSConfig;
     proxyConfig?: ProxyConfig;
     session?: Session;
-    timeout?: number;
+    timer?: Timer;
     destroyCallback?: () => Promise<void>;
     logger?: Logger;
   }): Promise<GRPCClientTest> {
@@ -48,7 +49,7 @@ class GRPCClientTest extends GRPCClient {
       port,
       tlsConfig,
       proxyConfig,
-      timeout,
+      timer,
       interceptors,
       logger,
     });
diff --git a/tests/identities/IdentitiesManager.test.ts b/tests/identities/IdentitiesManager.test.ts
index b7ca969b0c..23000440b9 100644
--- a/tests/identities/IdentitiesManager.test.ts
+++ b/tests/identities/IdentitiesManager.test.ts
@@ -17,7 +17,7 @@ import * as identitiesErrors from '@/identities/errors';
 import * as keysUtils from '@/keys/utils';
 import * as nodesUtils from '@/nodes/utils';
 import TestProvider from './TestProvider';
-import * as testUtils from '../utils';
+import * as testNodesUtils from '../nodes/utils';

 describe('IdentitiesManager', () => {
   const logger = new Logger('IdentitiesManager Test', LogLevel.WARN, [
@@ -219,7 +219,7 @@ describe('IdentitiesManager', () => {
     expect(identityDatas).toHaveLength(1);
     expect(identityDatas).not.toContainEqual(identityData);
     // Now publish a claim
-    const nodeIdSome = testUtils.generateRandomNodeId();
+    const nodeIdSome = testNodesUtils.generateRandomNodeId();
     const nodeIdSomeEncoded = nodesUtils.encodeNodeId(nodeIdSome);
     const signatures: Record = {};
     signatures[nodeIdSome] = {
diff --git a/tests/keys/KeyManager.test.ts b/tests/keys/KeyManager.test.ts
index 260346bc6f..c1aaa345e4 100644
--- a/tests/keys/KeyManager.test.ts
+++ b/tests/keys/KeyManager.test.ts
@@ -88,9 +88,9 @@ describe('KeyManager', () => {
     expect(keysPathContents).toContain('root_certs');
     expect(keysPathContents).toContain('db.key');
     expect(keyManager.dbKey.toString()).toBeTruthy();
-    const rootKeyPairPem = await keyManager.getRootKeyPairPem();
+    const rootKeyPairPem = keyManager.getRootKeyPairPem();
    expect(rootKeyPairPem).not.toBeUndefined();
-    const rootCertPem = await keyManager.getRootCertPem();
+    const rootCertPem = keyManager.getRootCertPem();
     expect(rootCertPem).not.toBeUndefined();
     const rootCertPems = await keyManager.getRootCertChainPems();
     expect(rootCertPems.length).toBe(1);
diff --git a/tests/network/Proxy.test.ts b/tests/network/Proxy.test.ts
index f199f7a0b1..5bab753c4a 100644
--- a/tests/network/Proxy.test.ts
+++ b/tests/network/Proxy.test.ts
@@ -1,6 +1,6 @@
 import type { AddressInfo, Socket } from 'net';
 import type { KeyPairPem } from '@/keys/types';
-import type { Host, Port } from '@/network/types';
+import type { ConnectionData, Host, Port } from '@/network/types';
 import net from 'net';
 import http from 'http';
 import tls from 'tls';
@@ -13,6 +13,7 @@ import * as keysUtils from '@/keys/utils';
 import * as nodesUtils from '@/nodes/utils';
 import { poll, promise, promisify, timerStart, timerStop } from '@/utils';
 import * as testUtils from '../utils';
+import * as testNodesUtils from '../nodes/utils';

 /**
  * Mock HTTP Connect Request
@@ -110,11 +111,11 @@ describe(Proxy.name, () => {
   const logger = new Logger(`${Proxy.name} test`, LogLevel.WARN, [
     new StreamHandler(),
   ]);
-  const nodeIdABC = testUtils.generateRandomNodeId();
+  const nodeIdABC = testNodesUtils.generateRandomNodeId();
   const nodeIdABCEncoded = nodesUtils.encodeNodeId(nodeIdABC);
-  const nodeIdSome = testUtils.generateRandomNodeId();
+  const nodeIdSome = testNodesUtils.generateRandomNodeId();
   const nodeIdSomeEncoded = nodesUtils.encodeNodeId(nodeIdSome);
-  const nodeIdRandom = testUtils.generateRandomNodeId();
+  const nodeIdRandom = testNodesUtils.generateRandomNodeId();
   const authToken = 'abc123';
   let keyPairPem: KeyPairPem;
   let certPem: string;
@@ -2972,4 +2973,114 @@ describe(Proxy.name, () => {
     utpSocket.unref();
     await serverClose();
   });
+  test('connectionEstablishedCallback is called when a ReverseConnection is established', async () => {
+    const clientKeyPair = await keysUtils.generateKeyPair(1024);
+    const clientKeyPairPem = keysUtils.keyPairToPem(clientKeyPair);
+    const clientCert = keysUtils.generateCertificate(
+      clientKeyPair.publicKey,
+      clientKeyPair.privateKey,
+      clientKeyPair.privateKey,
+      86400,
+    );
+    const clientCertPem = keysUtils.certToPem(clientCert);
+    const {
+      serverListen,
+      serverClose,
+      serverConnP,
+      serverConnEndP,
+      serverConnClosedP,
+      serverHost,
+      serverPort,
+    } = tcpServer();
+    await serverListen(0, localHost);
+    const clientNodeId = keysUtils.certNodeId(clientCert)!;
+    let callbackData: ConnectionData | undefined;
+    const proxy = new Proxy({
+      logger: logger,
+      authToken: '',
+      connectionEstablishedCallback: (data) => {
+        callbackData = data;
+      },
+    });
+    await proxy.start({
+      serverHost: serverHost(),
+      serverPort: serverPort(),
+      proxyHost: localHost,
+      tlsConfig: {
+        keyPrivatePem: keyPairPem.privateKey,
+        certChainPem: certPem,
+      },
+    });
+    const proxyHost = proxy.getProxyHost();
+    const proxyPort = proxy.getProxyPort();
+    const { p: clientReadyP, resolveP: resolveClientReadyP } = promise();
+    const { p: clientSecureConnectP, resolveP: resolveClientSecureConnectP } =
+      promise();
+    const { p: clientCloseP, resolveP: resolveClientCloseP } = promise();
+    const utpSocket = UTP({ allowHalfOpen: true });
+    const utpSocketBind = promisify(utpSocket.bind).bind(utpSocket);
+    const handleMessage = async (data: Buffer) => {
+      const msg = networkUtils.unserializeNetworkMessage(data);
+      if (msg.type === 'ping') {
+        resolveClientReadyP();
+        await send(networkUtils.pongBuffer);
+      }
+    };
+    utpSocket.on('message', handleMessage);
+    const send = async (data: Buffer) => {
+      const utpSocketSend = promisify(utpSocket.send).bind(utpSocket);
+      await utpSocketSend(data, 0, data.byteLength, proxyPort, proxyHost);
+    };
+    await utpSocketBind(0, localHost);
+    const utpSocketPort = utpSocket.address().port;
+    await proxy.openConnectionReverse(localHost, utpSocketPort as Port);
+    const utpConn = utpSocket.connect(proxyPort, proxyHost);
+    const tlsSocket = tls.connect(
+      {
+        key: Buffer.from(clientKeyPairPem.privateKey, 'ascii'),
+        cert: Buffer.from(clientCertPem, 'ascii'),
+        socket: utpConn,
+        rejectUnauthorized: false,
+      },
+      () => {
+        resolveClientSecureConnectP();
+      },
+    );
+    let tlsSocketEnded = false;
+    tlsSocket.on('end', () => {
+      tlsSocketEnded = true;
+      if (utpConn.destroyed) {
+        tlsSocket.destroy();
+      } else {
+        tlsSocket.end();
+        tlsSocket.destroy();
+      }
+    });
+    tlsSocket.on('close', () => {
+      resolveClientCloseP();
+    });
+    await send(networkUtils.pingBuffer);
+    expect(proxy.getConnectionReverseCount()).toBe(1);
+    await clientReadyP;
+    await clientSecureConnectP;
+    await serverConnP;
+    await proxy.closeConnectionReverse(localHost, utpSocketPort as Port);
+    expect(proxy.getConnectionReverseCount()).toBe(0);
+    await clientCloseP;
+    await serverConnEndP;
+    await serverConnClosedP;
+    expect(tlsSocketEnded).toBe(true);
+    utpSocket.off('message', handleMessage);
+    utpSocket.close();
+    utpSocket.unref();
+    await proxy.stop();
+    await serverClose();
+
+    // Checking callback data
+    expect(callbackData?.remoteNodeId.equals(clientNodeId)).toBe(true);
+    expect(callbackData?.remoteHost).toEqual(localHost);
+    expect(callbackData?.remotePort).toEqual(utpSocketPort);
+    expect(callbackData?.type).toEqual('reverse');
+  });
 });
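The new Proxy test above drives a full reverse connection just to observe one thing: the connectionEstablishedCallback and the ConnectionData it receives. Registration in isolation looks like this (the 'forward' value is an assumption, as the counterpart of the 'reverse' asserted above):

```ts
const proxy = new Proxy({
  authToken: 'abc123',
  connectionEstablishedCallback: (data: ConnectionData) => {
    // data carries remoteNodeId, remoteHost, remotePort, and type
    // ('reverse' here; presumably 'forward' for forward connections).
    logger.info(`${data.type} connection from ${data.remoteHost}`);
  },
  logger,
});
```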
diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts
index ed80ab06a5..beeb841edc 100644
--- a/tests/nodes/NodeConnection.test.ts
+++ b/tests/nodes/NodeConnection.test.ts
@@ -33,6 +33,9 @@ import * as GRPCErrors from '@/grpc/errors';
 import * as nodesUtils from '@/nodes/utils';
 import * as agentErrors from '@/agent/errors';
 import * as grpcUtils from '@/grpc/utils';
+import { timerStart } from '@/utils';
+import Queue from '@/nodes/Queue';
+import * as testNodesUtils from './utils';
 import * as testUtils from '../utils';
 import * as grpcTestUtils from '../grpc/utils';
 import * as agentTestUtils from '../agent/utils';
@@ -60,7 +63,7 @@ const mockedGenerateDeterministicKeyPair = jest.spyOn(
   'generateDeterministicKeyPair',
 );

-describe('${NodeConnection.name} test', () => {
+describe(`${NodeConnection.name} test`, () => {
   const logger = new Logger(`${NodeConnection.name} test`, LogLevel.WARN, [
     new StreamHandler(),
   ]);
@@ -72,7 +75,7 @@ describe('${NodeConnection.name} test', () => {
   const password = 'password';

   const node: NodeInfo = {
-    id: nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()),
+    id: nodesUtils.encodeNodeId(testNodesUtils.generateRandomNodeId()),
     chain: {},
   };
@@ -82,6 +85,7 @@ describe('${NodeConnection.name} test', () => {
   let serverKeyManager: KeyManager;
   let serverVaultManager: VaultManager;
   let serverNodeGraph: NodeGraph;
+  let serverQueue: Queue;
   let serverNodeConnectionManager: NodeConnectionManager;
   let serverNodeManager: NodeManager;
   let serverSigchain: Sigchain;
@@ -170,6 +174,13 @@ describe('${NodeConnection.name} test', () => {
     };
   }

+  const newTlsConfig = async (keyManager: KeyManager): Promise<TLSConfig> => {
+    return {
+      keyPrivatePem: keyManager.getRootKeyPairPem().privateKey,
+      certChainPem: await keyManager.getRootCertChainPem(),
+    };
+  };
+
   beforeEach(async () => {
     // Server setup
     serverDataDir = await fs.promises.mkdtemp(
@@ -229,22 +240,26 @@ describe('${NodeConnection.name} test', () => {
       logger,
     });

+    serverQueue = new Queue({ logger });
     serverNodeConnectionManager = new NodeConnectionManager({
       keyManager: serverKeyManager,
       nodeGraph: serverNodeGraph,
       proxy: serverProxy,
+      queue: serverQueue,
       logger,
     });
-    await serverNodeConnectionManager.start();
-
     serverNodeManager = new NodeManager({
       db: serverDb,
       sigchain: serverSigchain,
       keyManager: serverKeyManager,
       nodeGraph: serverNodeGraph,
       nodeConnectionManager: serverNodeConnectionManager,
+      queue: serverQueue,
       logger: logger,
     });
+    await serverQueue.start();
+    await serverNodeManager.start();
+    await serverNodeConnectionManager.start({ nodeManager: serverNodeManager });
     serverVaultManager = await VaultManager.createVaultManager({
       keyManager: serverKeyManager,
       vaultsPath: serverVaultsPath,
@@ -353,6 +368,8 @@ describe('${NodeConnection.name} test', () => {
     await serverNodeGraph.stop();
     await serverNodeGraph.destroy();
     await serverNodeConnectionManager.stop();
+    await serverNodeManager.stop();
+    await serverQueue.stop();
     await serverNotificationsManager.stop();
     await serverNotificationsManager.destroy();
     await agentTestUtils.closeTestAgentServer(agentServer);
@@ -482,7 +499,7 @@ describe('${NodeConnection.name} test', () => {
     // Have a nodeConnection try to connect to it
     const killSelf = jest.fn();
     nodeConnection = await NodeConnection.createNodeConnection({
-      connConnectTime: 500,
+      timer: timerStart(500),
       proxy: clientProxy,
       keyManager: clientKeyManager,
       logger: logger,
@@ -517,7 +534,7 @@ describe('${NodeConnection.name} test', () => {
       targetNodeId: targetNodeId,
       targetHost: '128.0.0.1' as Host,
       targetPort: 12345 as Port,
-      connConnectTime: 300,
+      timer: timerStart(300),
       proxy: clientProxy,
       keyManager: clientKeyManager,
       nodeConnectionManager: dummyNodeConnectionManager,
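The newTlsConfig helper introduced above feeds the key-rotation tests that follow: each one rotates or resets keys on one side, pushes the refreshed TLS material into that side's proxy, and expects gRPC calls to keep working. The recurring core of those tests:

```ts
// Pattern exercised by the rotation tests below.
await clientKeyManager.renewRootKeyPair(password);
clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager));
// The already-established connection should survive the new certificate:
await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
```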
@@ -592,7 +609,7 @@ describe('${NodeConnection.name} test', () => {
     // Have a nodeConnection try to connect to it
     const killSelf = jest.fn();
     const nodeConnectionP = NodeConnection.createNodeConnection({
-      connConnectTime: 500,
+      timer: timerStart(500),
       proxy: clientProxy,
       keyManager: clientKeyManager,
       logger: logger,
@@ -635,7 +652,7 @@ describe('${NodeConnection.name} test', () => {
     // Have a nodeConnection try to connect to it
     const killSelf = jest.fn();
     const nodeConnectionP = NodeConnection.createNodeConnection({
-      connConnectTime: 500,
+      timer: timerStart(500),
       proxy: clientProxy,
       keyManager: clientKeyManager,
       logger: logger,
@@ -673,7 +690,7 @@ describe('${NodeConnection.name} test', () => {
     // Have a nodeConnection try to connect to it
     const killSelf = jest.fn();
     nodeConnection = await NodeConnection.createNodeConnection({
-      connConnectTime: 500,
+      timer: timerStart(500),
       proxy: clientProxy,
       keyManager: clientKeyManager,
       logger: logger,
@@ -735,7 +752,7 @@ describe('${NodeConnection.name} test', () => {
     const killSelfCheck = jest.fn();
     const killSelfP = promise();
     nodeConnection = await NodeConnection.createNodeConnection({
-      connConnectTime: 2000,
+      timer: timerStart(2000),
       proxy: clientProxy,
       keyManager: clientKeyManager,
       logger: logger,
@@ -805,7 +822,7 @@ describe('${NodeConnection.name} test', () => {
     const killSelfCheck = jest.fn();
     const killSelfP = promise();
     nodeConnection = await NodeConnection.createNodeConnection({
-      connConnectTime: 2000,
+      timer: timerStart(2000),
       proxy: clientProxy,
       keyManager: clientKeyManager,
       logger: logger,
@@ -842,4 +859,358 @@ describe('${NodeConnection.name} test', () => {
     },
     global.defaultTimeout * 2,
   );
+
+  test('existing connection handles a resetRootKeyPair on sending side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+
+      // Simulate key change
+      await clientKeyManager.resetRootKeyPair(password);
+      clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager));
+
+      // Try again
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('existing connection handles a renewRootKeyPair on sending side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+
+      // Simulate key change
+      await clientKeyManager.renewRootKeyPair(password);
+      clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager));
+
+      // Try again
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('existing connection handles a resetRootCert on sending side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+
+      // Simulate key change
+      await clientKeyManager.resetRootCert();
+      clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager));
+
+      // Try again
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('existing connection handles a resetRootKeyPair on receiving side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+
+      // Simulate key change
+      await serverKeyManager.resetRootKeyPair(password);
+      serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager));
+
+      // Try again
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('existing connection handles a renewRootKeyPair on receiving side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+
+      // Simulate key change
+      await serverKeyManager.renewRootKeyPair(password);
+      serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager));
+
+      // Try again
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('existing connection handles a resetRootCert on receiving side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+
+      // Simulate key change
+      await serverKeyManager.resetRootCert();
+      serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager));
+
+      // Try again
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('new connection handles a resetRootKeyPair on sending side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      // Simulate key change
+      await clientKeyManager.resetRootKeyPair(password);
+      clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager));
+
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('new connection handles a renewRootKeyPair on sending side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      // Simulate key change
+      await clientKeyManager.renewRootKeyPair(password);
+      clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager));
+
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('new connection handles a resetRootCert on sending side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      // Simulate key change
+      await clientKeyManager.resetRootCert();
+      clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager));
+
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('new connection handles a resetRootKeyPair on receiving side', async () => {
+    // Simulate key change
+    await serverKeyManager.resetRootKeyPair(password);
+    serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager));
+
+    const connProm = NodeConnection.createNodeConnection({
+      targetNodeId: targetNodeId,
+      targetHost: localHost,
+      targetPort: targetPort,
+      proxy: clientProxy,
+      keyManager: clientKeyManager,
+      nodeConnectionManager: dummyNodeConnectionManager,
+      destroyCallback,
+      logger: logger,
+      clientFactory: async (args) =>
+        GRPCClientAgent.createGRPCClientAgent(args),
+      timer: timerStart(2000),
+    });
+
+    await expect(connProm).rejects.toThrow(
+      nodesErrors.ErrorNodeConnectionTimeout,
+    );
+
+    // Connect with the new NodeId
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: serverKeyManager.getNodeId(),
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+      });
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('new connection handles a renewRootKeyPair on receiving side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      // Simulate key change
+      await serverKeyManager.renewRootKeyPair(password);
+      serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager));
+
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
+  test('new connection handles a resetRootCert on receiving side', async () => {
+    let conn: NodeConnection<GRPCClientAgent> | undefined;
+    try {
+      // Simulate key change
+      await serverKeyManager.resetRootCert();
+      serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager));
+
+      conn = await NodeConnection.createNodeConnection({
+        targetNodeId: targetNodeId,
+        targetHost: localHost,
+        targetPort: targetPort,
+        proxy: clientProxy,
+        keyManager: clientKeyManager,
+        nodeConnectionManager: dummyNodeConnectionManager,
+        destroyCallback,
+        logger: logger,
+        clientFactory: async (args) =>
+          GRPCClientAgent.createGRPCClientAgent(args),
+        timer: timerStart(2000),
+      });
+
+      const client = conn.getClient();
+      await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));
+    } finally {
+      await conn?.destroy();
+    }
+  });
 });
diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts
index a6c3638cbc..17035b4dd3 100644
--- a/tests/nodes/NodeConnectionManager.general.test.ts
+++ b/tests/nodes/NodeConnectionManager.general.test.ts
@@ -1,11 +1,13 @@
-import type { NodeAddress, NodeData, NodeId, SeedNodes } from '@/nodes/types';
+import type { NodeAddress, NodeBucket, NodeId, SeedNodes } from '@/nodes/types';
 import type { Host, Port } from '@/network/types';
+import type NodeManager from '@/nodes/NodeManager';
 import fs from 'fs';
 import path from 'path';
 import os from 'os';
 import { DB } from '@matrixai/db';
 import Logger, { LogLevel, StreamHandler } from '@matrixai/logger';
 import { IdInternal } from '@matrixai/id';
+import Queue from '@/nodes/Queue';
 import PolykeyAgent from '@/PolykeyAgent';
 import KeyManager from '@/keys/KeyManager';
 import NodeGraph from '@/nodes/NodeGraph';
@@ -14,13 +16,11 @@ import Proxy from '@/network/Proxy';
 import GRPCClientAgent from '@/agent/GRPCClientAgent';
 import * as nodesUtils from '@/nodes/utils';
-import * as nodesErrors from '@/nodes/errors';
 import * as keysUtils from '@/keys/utils';
 import * as grpcUtils from '@/grpc/utils';
 import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb';
 import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb';
-import * as nodesTestUtils from './utils';
-import * as testUtils from '../utils';
+import * as testNodesUtils from './utils';

 describe(`${NodeConnectionManager.name} general test`, () => {
   const logger = new Logger(
@@ -75,8 +75,8 @@ describe(`${NodeConnectionManager.name} general test`, () => {
   let keyManager: KeyManager;
   let db: DB;
   let proxy: Proxy;
-
   let nodeGraph: NodeGraph;
+  let queue: Queue;
   let remoteNode1: PolykeyAgent;
   let remoteNode2: PolykeyAgent;
@@ -126,6 +126,7 @@ describe(`${NodeConnectionManager.name} general test`, () => {
     keysUtils,
     'generateDeterministicKeyPair',
   );
+  const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager;

   beforeAll(async () => {
     mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => {
@@ -140,7 +141,10 @@ describe(`${NodeConnectionManager.name} general test`, () => {
       password,
       nodePath: path.join(dataDir2, 'remoteNode1'),
       networkConfig: {
-        proxyHost: '127.0.0.1' as Host,
+        proxyHost: localHost,
+        agentHost: localHost,
+        clientHost: localHost,
+        forwardHost: localHost,
       },
       logger: logger.getChild('remoteNode1'),
     });
@@ -149,7 +153,10 @@ describe(`${NodeConnectionManager.name} general test`, () => {
       password,
       nodePath: path.join(dataDir2, 'remoteNode2'),
       networkConfig: {
-        proxyHost: '127.0.0.1' as Host,
+        proxyHost: localHost,
+        agentHost: localHost,
+        clientHost: localHost,
+        forwardHost: localHost,
       },
       logger: logger.getChild('remoteNode2'),
     });
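Note that the remote test agents now bind every listener, not just the proxy, to the suite's localHost, which should keep the tests off external interfaces:

```ts
const agent = await PolykeyAgent.createPolykeyAgent({
  password,
  nodePath,
  networkConfig: {
    proxyHost: localHost,
    agentHost: localHost,
    clientHost: localHost,
    forwardHost: localHost,
  },
  logger,
});
```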
@@ -191,6 +198,10 @@ describe(`${NodeConnectionManager.name} general test`, () => {
       keyManager,
       logger: logger.getChild('NodeGraph'),
     });
+    queue = new Queue({
+      logger: logger.getChild('queue'),
+    });
+    await queue.start();
     const tlsConfig = {
       keyPrivatePem: keyManager.getRootKeyPairPem().privateKey,
       certChainPem: keysUtils.certToPem(keyManager.getRootCert()),
@@ -216,6 +227,7 @@ describe(`${NodeConnectionManager.name} general test`, () => {
   });

   afterEach(async () => {
+    await queue.stop();
     await nodeGraph.stop();
     await nodeGraph.destroy();
     await db.stop();
@@ -232,14 +244,15 @@ describe(`${NodeConnectionManager.name} general test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     try {
       // Case 1: node already exists in the local node graph (no contact required)
       const nodeId = nodeId1;
       const nodeAddress: NodeAddress = {
-        host: '127.0.0.1' as Host,
+        host: localHost,
         port: 11111 as Port,
       };
       await nodeGraph.setNode(nodeId, nodeAddress);
@@ -254,26 +267,35 @@ describe(`${NodeConnectionManager.name} general test`, () => {
   test(
     'finds node (contacts remote node)',
     async () => {
+      const mockedPingNode = jest.spyOn(
+        NodeConnectionManager.prototype,
+        'pingNode',
+      );
+      mockedPingNode.mockImplementation(async () => true);
       // NodeConnectionManager under test
       const nodeConnectionManager = new NodeConnectionManager({
         keyManager,
         nodeGraph,
         proxy,
+        queue,
         logger: nodeConnectionManagerLogger,
       });
-      await nodeConnectionManager.start();
+      await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
      try {
         // Case 2: node can be found on the remote node
         const nodeId = nodeId1;
         const nodeAddress: NodeAddress = {
-          host: '127.0.0.1' as Host,
+          host: localHost,
           port: 11111 as Port,
         };
         const server = await PolykeyAgent.createPolykeyAgent({
           nodePath: path.join(dataDir, 'node2'),
           password,
           networkConfig: {
-            proxyHost: '127.0.0.1' as Host,
+            proxyHost: localHost,
+            agentHost: localHost,
+            clientHost: localHost,
+            forwardHost: localHost,
           },
           logger: nodeConnectionManagerLogger,
         });
@@ -288,6 +310,7 @@ describe(`${NodeConnectionManager.name} general test`, () => {
         await server.stop();
       } finally {
         await nodeConnectionManager.stop();
+        mockedPingNode.mockRestore();
       }
     },
     global.polykeyStartupTimeout,
   );
@@ -300,9 +323,10 @@ describe(`${NodeConnectionManager.name} general test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     try {
       // Case 3: node exhausts all contacts and cannot find node
       const nodeId = nodeId1;
       const server = await PolykeyAgent.createPolykeyAgent({
         nodePath: path.join(dataDir, 'node3'),
         password,
         networkConfig: {
-          proxyHost: '127.0.0.1' as Host,
+          proxyHost: localHost,
+          agentHost: localHost,
+          clientHost: localHost,
+          forwardHost: localHost,
         },
         logger: nodeConnectionManagerLogger,
       });
@@ -326,9 +353,9 @@ describe(`${NodeConnectionManager.name} general test`, () => {
         port: 22222 as Port,
       } as NodeAddress);
       // Un-findable Node cannot be found
-      await expect(() =>
-        nodeConnectionManager.findNode(nodeId),
-      ).rejects.toThrowError(nodesErrors.ErrorNodeGraphNodeIdNotFound);
+      await expect(nodeConnectionManager.findNode(nodeId)).resolves.toEqual(
+        undefined,
+      );

       await server.stop();
     } finally {
@@ -337,129 +364,6 @@ describe(`${NodeConnectionManager.name} general test`, () => {
     },
     global.failedConnectionTimeout * 2,
   );
-  test('finds a single closest node', async () => {
-    // NodeConnectionManager under test
-    const nodeConnectionManager = new NodeConnectionManager({
-      keyManager,
-      nodeGraph,
-      proxy,
-      logger: nodeConnectionManagerLogger,
-    });
-    await nodeConnectionManager.start();
-    try {
-      // New node added
-      const newNode2Id = nodeId1;
-      const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress;
-      await nodeGraph.setNode(newNode2Id, newNode2Address);
-
-      // Find the closest nodes to some node, NODEID3
-      const closest = await nodeConnectionManager.getClosestLocalNodes(nodeId3);
-      expect(closest).toContainEqual({
-        id: newNode2Id,
-        distance: 121n,
-        address: { host: '227.1.1.1', port: 4567 },
-      });
-    } finally {
-      await nodeConnectionManager.stop();
-    }
-  });
-  test('finds 3 closest nodes', async () => {
-    const nodeConnectionManager = new NodeConnectionManager({
-      keyManager,
-      nodeGraph,
-      proxy,
-      logger: nodeConnectionManagerLogger,
-    });
-    await nodeConnectionManager.start();
-    try {
-      // Add 3 nodes
-      await nodeGraph.setNode(nodeId1, {
-        host: '2.2.2.2',
-        port: 2222,
-      } as NodeAddress);
-      await nodeGraph.setNode(nodeId2, {
-        host: '3.3.3.3',
-        port: 3333,
-      } as NodeAddress);
-      await nodeGraph.setNode(nodeId3, {
-        host: '4.4.4.4',
-        port: 4444,
-      } as NodeAddress);
-
-      // Find the closest nodes to some node, NODEID4
-      const closest = await nodeConnectionManager.getClosestLocalNodes(nodeId3);
-      expect(closest.length).toBe(5);
-      expect(closest).toContainEqual({
-        id: nodeId3,
-        distance: 0n,
-        address: { host: '4.4.4.4', port: 4444 },
-      });
-      expect(closest).toContainEqual({
-        id: nodeId2,
-        distance: 116n,
-        address: { host: '3.3.3.3', port: 3333 },
-      });
-      expect(closest).toContainEqual({
-        id: nodeId1,
-        distance: 121n,
-        address: { host: '2.2.2.2', port: 2222 },
-      });
-    } finally {
-      await nodeConnectionManager.stop();
-    }
-  });
-  test('finds the 20 closest nodes', async () => {
-    const nodeConnectionManager = new NodeConnectionManager({
-      keyManager,
-      nodeGraph,
-      proxy,
-      logger: nodeConnectionManagerLogger,
-    });
-    await nodeConnectionManager.start();
-    try {
-      // Generate the node ID to find the closest nodes to (in bucket 100)
-      const nodeId = keyManager.getNodeId();
-      const nodeIdToFind = nodesTestUtils.generateNodeIdForBucket(nodeId, 100);
-      // Now generate and add 20 nodes that will be close to this node ID
-      const addedClosestNodes: NodeData[] = [];
-      for (let i = 1; i < 101; i += 5) {
-        const closeNodeId = nodesTestUtils.generateNodeIdForBucket(
-          nodeIdToFind,
-          i,
-        );
-        const nodeAddress = {
-          host: (i + '.' + i + '.' + i + '.' + i) as Host,
-          port: i as Port,
-        };
-        await nodeGraph.setNode(closeNodeId, nodeAddress);
-        addedClosestNodes.push({
-          id: closeNodeId,
-          address: nodeAddress,
-          distance: nodesUtils.calculateDistance(nodeIdToFind, closeNodeId),
-        });
-      }
-      // Now create and add 10 more nodes that are far away from this node
-      for (let i = 1; i <= 10; i++) {
-        const farNodeId = nodeIdGenerator(i);
-        const nodeAddress = {
-          host: `${i}.${i}.${i}.${i}` as Host,
-          port: i as Port,
-        };
-        await nodeGraph.setNode(farNodeId, nodeAddress);
-      }
-
-      // Find the closest nodes to the original generated node ID
-      const closest = await nodeConnectionManager.getClosestLocalNodes(
-        nodeIdToFind,
-      );
-      // We should always only receive k nodes
-      expect(closest.length).toBe(nodeGraph.maxNodesPerBucket);
-      // Retrieved closest nodes should be exactly the same as the ones we added
-      expect(closest).toEqual(addedClosestNodes);
-    } finally {
-      await nodeConnectionManager.stop();
-    }
-  });
   test('receives 20 closest local nodes from connected target', async () => {
     let serverPKAgent: PolykeyAgent | undefined;
     let nodeConnectionManager: NodeConnectionManager | undefined;
@@ -469,17 +373,21 @@ describe(`${NodeConnectionManager.name} general test`, () => {
         logger: logger.getChild('serverPKAgent'),
         nodePath: path.join(dataDir, 'serverPKAgent'),
         networkConfig: {
-          proxyHost: '127.0.0.1' as Host,
+          proxyHost: localHost,
+          agentHost: localHost,
+          clientHost: localHost,
+          forwardHost: localHost,
         },
       });
       nodeConnectionManager = new NodeConnectionManager({
         keyManager,
         nodeGraph,
         proxy,
+        queue,
         logger: logger.getChild('NodeConnectionManager'),
       });
-      await nodeConnectionManager.start();
+      await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
       const targetNodeId = serverPKAgent.keyManager.getNodeId();
       await nodeGraph.setNode(targetNodeId, {
         host: serverPKAgent.proxy.getProxyHost(),
@@ -487,9 +395,9 @@ describe(`${NodeConnectionManager.name} general test`, () => {
       });

       // Now generate and add 20 nodes that will be close to this node ID
-      const addedClosestNodes: NodeData[] = [];
+      const addedClosestNodes: NodeBucket = [];
       for (let i = 1; i < 101; i += 5) {
-        const closeNodeId = nodesTestUtils.generateNodeIdForBucket(
+        const closeNodeId = testNodesUtils.generateNodeIdForBucket(
           targetNodeId,
           i,
         );
@@ -498,11 +406,13 @@ describe(`${NodeConnectionManager.name} general test`, () => {
           port: i as Port,
         };
         await serverPKAgent.nodeGraph.setNode(closeNodeId, nodeAddress);
-        addedClosestNodes.push({
-          id: closeNodeId,
-          address: nodeAddress,
-          distance: nodesUtils.calculateDistance(targetNodeId, closeNodeId),
-        });
+        addedClosestNodes.push([
+          closeNodeId,
+          {
+            address: nodeAddress,
+            lastUpdated: 0,
+          },
+        ]);
       }
       // Now create and add 10 more nodes that are far away from this node
       for (let i = 1; i <= 10; i++) {
@@ -521,7 +431,7 @@ describe(`${NodeConnectionManager.name} general test`, () => {
       );
       // Sort the received nodes on distance such that we can check its equality
       // with addedClosestNodes
-      closest.sort(nodesUtils.sortByDistance);
+      nodesUtils.bucketSortByDistance(closest, targetNodeId);
       expect(closest.length).toBe(20);
       expect(closest).toEqual(addedClosestNodes);
     } finally {
@@ -545,14 +455,15 @@ describe(`${NodeConnectionManager.name} general test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // To test this we need to...
     // 2. call relayHolePunchMessage
     // 3. check that the relevant call was made.
-    const sourceNodeId = testUtils.generateRandomNodeId();
-    const targetNodeId = testUtils.generateRandomNodeId();
+    const sourceNodeId = testNodesUtils.generateRandomNodeId();
+    const targetNodeId = testNodesUtils.generateRandomNodeId();
     await nodeConnectionManager.sendHolePunchMessage(
       remoteNodeId1,
       sourceNodeId,
@@ -582,13 +493,14 @@ describe(`${NodeConnectionManager.name} general test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // To test this we need to...
     // 2. call relayHolePunchMessage
     // 3. check that the relevant call was made.
-    const sourceNodeId = testUtils.generateRandomNodeId();
+    const sourceNodeId = testNodesUtils.generateRandomNodeId();
     const relayMessage = new nodesPB.Relay();
     relayMessage.setSrcId(nodesUtils.encodeNodeId(sourceNodeId));
     relayMessage.setTargetId(nodesUtils.encodeNodeId(remoteNodeId1));
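One behavioral note from the general tests above: findNode no longer rejects with ErrorNodeGraphNodeIdNotFound when a node cannot be located; it resolves to undefined, so callers branch on the result:

```ts
const address = await nodeConnectionManager.findNode(nodeId);
if (address == null) {
  // Not found: handle the miss instead of catching an error.
}
```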
diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts b/tests/nodes/NodeConnectionManager.lifecycle.test.ts
index 6117ddc415..a6f9d04e72 100644
--- a/tests/nodes/NodeConnectionManager.lifecycle.test.ts
+++ b/tests/nodes/NodeConnectionManager.lifecycle.test.ts
@@ -1,5 +1,6 @@
 import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types';
 import type { Host, Port } from '@/network/types';
+import type NodeManager from 'nodes/NodeManager';
 import fs from 'fs';
 import path from 'path';
 import os from 'os';
@@ -7,6 +8,7 @@ import { DB } from '@matrixai/db';
 import Logger, { LogLevel, StreamHandler } from '@matrixai/logger';
 import { withF } from '@matrixai/resources';
 import { IdInternal } from '@matrixai/id';
+import Queue from '@/nodes/Queue';
 import PolykeyAgent from '@/PolykeyAgent';
 import KeyManager from '@/keys/KeyManager';
 import NodeGraph from '@/nodes/NodeGraph';
@@ -17,6 +19,7 @@ import * as nodesUtils from '@/nodes/utils';
 import * as nodesErrors from '@/nodes/errors';
 import * as keysUtils from '@/keys/utils';
 import * as grpcUtils from '@/grpc/utils';
+import { timerStart } from '@/utils';

 describe(`${NodeConnectionManager.name} lifecycle test`, () => {
   const logger = new Logger(
@@ -74,6 +77,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
   let proxy: Proxy;

   let nodeGraph: NodeGraph;
+  let queue: Queue;
   let remoteNode1: PolykeyAgent;
   let remoteNode2: PolykeyAgent;
@@ -85,6 +89,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
     keysUtils,
     'generateDeterministicKeyPair',
   );
+  const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager;

   beforeAll(async () => {
     mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => {
@@ -99,7 +104,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       password,
       nodePath: path.join(dataDir2, 'remoteNode1'),
       networkConfig: {
-        proxyHost: '127.0.0.1' as Host,
+        proxyHost: serverHost,
       },
       logger: logger.getChild('remoteNode1'),
     });
@@ -109,7 +114,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       password,
       nodePath: path.join(dataDir2, 'remoteNode2'),
       networkConfig: {
-        proxyHost: '127.0.0.1' as Host,
+        proxyHost: serverHost,
       },
       logger: logger.getChild('remoteNode2'),
     });
@@ -151,6 +156,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       keyManager,
       logger: logger.getChild('NodeGraph'),
     });
+    queue = new Queue({
+      logger: logger.getChild('queue'),
+    });
+    await queue.start();
     const tlsConfig = {
       keyPrivatePem: keyManager.getRootKeyPairPem().privateKey,
       certChainPem: keysUtils.certToPem(keyManager.getRootCert()),
@@ -176,6 +185,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
   });

   afterEach(async () => {
+    await queue.stop();
     await nodeGraph.stop();
     await nodeGraph.destroy();
     await db.stop();
@@ -194,9 +204,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // @ts-ignore: kidnap connections
     const connections = nodeConnectionManager.connections;
     // @ts-ignore: kidnap connectionLocks
@@ -219,9 +230,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // @ts-ignore: kidnap connections
     const connections = nodeConnectionManager.connections;
     // @ts-ignore: kidnap connectionLocks
@@ -253,9 +265,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // @ts-ignore: kidnap connections
     const connections = nodeConnectionManager.connections;
     // @ts-ignore: kidnap connectionLocks
@@ -281,9 +294,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // @ts-ignore: kidnap connections
     const connections = nodeConnectionManager.connections;
@@ -299,7 +313,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
     };

     // Creating the generator
-    const gen = await nodeConnectionManager.withConnG(
+    const gen = nodeConnectionManager.withConnG(
       remoteNodeId1,
       async function* () {
         yield* testGenerator();
@@ -333,10 +347,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       connConnectTime: 500,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // Add the dummy node
     await nodeGraph.setNode(dummyNodeId, {
       host: '125.0.0.1' as Host,
@@ -374,9 +389,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // @ts-ignore accessing protected NodeConnectionMap
     const connections = nodeConnectionManager.connections;
     expect(connections.size).toBe(0);
@@ -400,9 +416,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // @ts-ignore accessing protected NodeConnectionMap
     const connections = nodeConnectionManager.connections;
     // @ts-ignore: kidnap connectionLocks
@@ -433,9 +450,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // @ts-ignore: kidnap connections
     const connections = nodeConnectionManager.connections;
     // @ts-ignore: kidnap connectionLocks
@@ -466,9 +484,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       keyManager,
       nodeGraph,
       proxy,
+      queue,
       logger: nodeConnectionManagerLogger,
     });
-    await nodeConnectionManager.start();
+    await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
     // Do testing
     // set up connections
     await nodeConnectionManager.withConnF(remoteNodeId1, nop);
@@ -498,4 +517,87 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => {
       await nodeConnectionManager?.stop();
     }
   });
+
+  // New ping tests
+  test('should ping node with address', async () => {
+    // NodeConnectionManager under test
+    let nodeConnectionManager: NodeConnectionManager | undefined;
+    try {
+      nodeConnectionManager = new NodeConnectionManager({
+        keyManager,
+        nodeGraph,
+        proxy,
+        queue,
+        logger: nodeConnectionManagerLogger,
+      });
+      await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
+      await nodeConnectionManager.pingNode(
+        remoteNodeId1,
+        remoteNode1.proxy.getProxyHost(),
+        remoteNode1.proxy.getProxyPort(),
+      );
+    } finally {
+      await nodeConnectionManager?.stop();
+    }
+  });
+  test('should fail to ping non existent node', async () => {
+    // NodeConnectionManager under test
+    let nodeConnectionManager: NodeConnectionManager | undefined;
+    try {
+      nodeConnectionManager = new NodeConnectionManager({
+        keyManager,
+        nodeGraph,
+        proxy,
+        queue,
+        logger: nodeConnectionManagerLogger,
+      });
+      await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
+
+      // Pinging node
+      expect(
+        await nodeConnectionManager.pingNode(
+          remoteNodeId1,
+          '127.1.2.3' as Host,
+          55555 as Port,
+          timerStart(1000),
+        ),
+      ).toEqual(false);
+    } finally {
+      await nodeConnectionManager?.stop();
+    }
+  });
+  test('should fail to ping node if NodeId does not match', async () => {
+    // NodeConnectionManager under test
+    let nodeConnectionManager: NodeConnectionManager | undefined;
+    try {
+      nodeConnectionManager = new NodeConnectionManager({
+        keyManager,
+        nodeGraph,
+        proxy,
+        queue,
+        logger: nodeConnectionManagerLogger,
+      });
+      await nodeConnectionManager.start({ nodeManager: dummyNodeManager });
+
+      expect(
+        await nodeConnectionManager.pingNode(
+          remoteNodeId1,
+          remoteNode2.proxy.getProxyHost(),
+          remoteNode2.proxy.getProxyPort(),
+          timerStart(1000),
+        ),
+      ).toEqual(false);
+
+      expect(
+        await nodeConnectionManager.pingNode(
+          remoteNodeId2,
+          remoteNode1.proxy.getProxyHost(),
+          remoteNode1.proxy.getProxyPort(),
+          timerStart(1000),
+        ),
+      ).toEqual(false);
+    } finally {
+      await nodeConnectionManager?.stop();
+    }
+  });
 });
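The lifecycle suite's new ping tests above pin down the pingNode signature: target NodeId, host, port, and an optional Timer acting as a deadline. In a test it reads:

```ts
const alive = await nodeConnectionManager.pingNode(
  remoteNodeId1,
  remoteNode1.proxy.getProxyHost(),
  remoteNode1.proxy.getProxyPort(),
  timerStart(1000), // optional; omit to use the default connection timeout
);
expect(alive).toEqual(true);
```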
from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; +import NodeManager from '@/nodes/NodeManager'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -15,6 +17,7 @@ import Proxy from '@/network/Proxy'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; +import Queue from '@/nodes/Queue'; describe(`${NodeConnectionManager.name} seed nodes test`, () => { const logger = new Logger( @@ -77,6 +80,10 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { + setNode: jest.fn(), + refreshBucketQueueAdd: jest.fn(), + } as unknown as NodeManager; beforeAll(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -116,6 +123,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { }); beforeEach(async () => { + // Clearing nodes from graphs + for await (const [nodeId] of remoteNode1.nodeGraph.getNodes()) { + await remoteNode1.nodeGraph.unsetNode(nodeId); + } + for await (const [nodeId] of remoteNode2.nodeGraph.getNodes()) { + await remoteNode2.nodeGraph.unsetNode(nodeId); + } dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -179,15 +193,29 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { // Seed nodes test('starting should add seed nodes to the node graph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; try { nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue: new Queue({ + logger: logger.getChild('queue'), + }), seedNodes: dummySeedNodes, logger: logger, }); - await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + queue: {} as Queue, + sigchain: {} as Sigchain, + }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); const seedNodes = nodeConnectionManager.getSeedNodes(); expect(seedNodes).toContainEqual(nodeId1); expect(seedNodes).toContainEqual(nodeId2); @@ -199,6 +227,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { } finally { // Clean up await nodeConnectionManager?.stop(); + await nodeManager?.stop(); } }); test('should get seed nodes', async () => { @@ -207,10 +236,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keyManager, nodeGraph, proxy, + queue: new Queue({ + logger: logger.getChild('queue'), + }), seedNodes: dummySeedNodes, logger: logger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); try { const seedNodes = nodeConnectionManager.getSeedNodes(); expect(seedNodes).toHaveLength(3); @@ -223,6 +255,18 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { }); test('should synchronise nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; + let queue: Queue | undefined; + const mockedRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + mockedRefreshBucket.mockImplementation(async () => {}); + const mockedPingNode = jest.spyOn( + NodeConnectionManager.prototype, + 'pingNode', + ); + 
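These seed-node tests stub at the prototype level: spying on NodeConnectionManager.prototype patches the method for every instance, including managers that PolykeyAgent constructs internally, and the mockRestore() calls in the finally blocks reinstate the real methods afterwards. A minimal self-contained sketch of the same jest pattern, using a hypothetical Pinger class rather than project code:

class Pinger {
  public async pingNode(): Promise<boolean> {
    throw new Error('would hit the network');
  }
}

test('prototype spy stubs every instance', async () => {
  // Patching the prototype affects instances created anywhere, not just here
  const mockedPingNode = jest.spyOn(Pinger.prototype, 'pingNode');
  mockedPingNode.mockImplementation(async () => true);
  try {
    // No network access happens; the stub answers for all instances
    await expect(new Pinger().pingNode()).resolves.toBe(true);
  } finally {
    // Reinstate the real method so later tests see unpatched behaviour
    mockedPingNode.mockRestore();
  }
});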
mockedPingNode.mockImplementation(async () => true); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -233,13 +277,26 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: remoteNode2.proxy.getProxyHost(), port: remoteNode2.proxy.getProxyPort(), }; + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, seedNodes, logger: logger, }); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + queue, + sigchain: {} as Sigchain, + }); + await queue.start(); + await nodeManager.start(); await remoteNode1.nodeGraph.setNode(nodeId1, { host: serverHost, port: serverPort, @@ -248,17 +305,97 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: serverHost, port: serverPort, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager }); await nodeConnectionManager.syncNodeGraph(); expect(await nodeGraph.getNode(nodeId1)).toBeDefined(); expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); expect(await nodeGraph.getNode(dummyNodeId)).toBeUndefined(); } finally { + mockedRefreshBucket.mockRestore(); + mockedPingNode.mockRestore(); + await nodeManager?.stop(); + await nodeConnectionManager?.stop(); + await queue?.stop(); + } + }); + test('should call refreshBucket when syncing nodeGraph', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; + let queue: Queue | undefined; + const mockedRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + mockedRefreshBucket.mockImplementation(async () => {}); + const mockedPingNode = jest.spyOn( + NodeConnectionManager.prototype, + 'pingNode', + ); + mockedPingNode.mockImplementation(async () => true); + try { + const seedNodes: SeedNodes = {}; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { + host: remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { + host: remoteNode2.proxy.getProxyHost(), + port: remoteNode2.proxy.getProxyPort(), + }; + queue = new Queue({ logger }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + queue, + seedNodes, + logger: logger, + }); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + sigchain: {} as Sigchain, + queue, + }); + await queue.start(); + await nodeManager.start(); + await remoteNode1.nodeGraph.setNode(nodeId1, { + host: serverHost, + port: serverPort, + }); + await remoteNode2.nodeGraph.setNode(nodeId2, { + host: serverHost, + port: serverPort, + }); + await nodeConnectionManager.start({ nodeManager }); + await nodeConnectionManager.syncNodeGraph(); + await nodeManager.refreshBucketQueueDrained(); + expect(mockedRefreshBucket).toHaveBeenCalled(); + } finally { + mockedRefreshBucket.mockRestore(); + mockedPingNode.mockRestore(); + await nodeManager?.stop(); await nodeConnectionManager?.stop(); + await queue?.stop(); } }); test('should handle an offline seed node when synchronising nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; + let queue: Queue | undefined; + const mockedRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + mockedRefreshBucket.mockImplementation(async () => {}); + const mockedPingNode = 
jest.spyOn( + NodeConnectionManager.prototype, + 'pingNode', + ); + mockedPingNode.mockImplementation(async () => true); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -282,22 +419,129 @@ host: serverHost, port: serverPort, }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, seedNodes, connConnectTime: 500, logger: logger, }); - await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + sigchain: {} as Sigchain, + queue, + }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // This should complete without error await nodeConnectionManager.syncNodeGraph(); // Information on remotes is found expect(await nodeGraph.getNode(nodeId1)).toBeDefined(); expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); } finally { + mockedRefreshBucket.mockRestore(); + mockedPingNode.mockRestore(); await nodeConnectionManager?.stop(); + await nodeManager?.stop(); + await queue?.stop(); } }); + test( + 'should expand the network when nodes enter', + async () => { + // Each entering node should add itself to the seed nodes. + // The new nodes should also be visible in the network. + let node1: PolykeyAgent | undefined; + let node2: PolykeyAgent | undefined; + const seedNodes: SeedNodes = {}; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { + host: remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { + host: remoteNode2.proxy.getProxyHost(), + port: remoteNode2.proxy.getProxyPort(), + }; + const mockedPingNode = jest.spyOn( + NodeConnectionManager.prototype, + 'pingNode', + ); + mockedPingNode.mockImplementation(async () => true); + try { + logger.setLevel(LogLevel.WARN); + node1 = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node1'), + password: 'password', + networkConfig: { + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, + }, + seedNodes, + logger, + }); + node2 = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node2'), + password: 'password', + networkConfig: { + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, + }, + seedNodes, + logger, + }); + + await node1.queue.drained(); + await node1.nodeManager.refreshBucketQueueDrained(); + await node2.queue.drained(); + await node2.nodeManager.refreshBucketQueueDrained(); + + const getAllNodes = async (node: PolykeyAgent) => { + const nodes: Array<NodeIdEncoded> = []; + for await (const [nodeId] of node.nodeGraph.getNodes()) { + nodes.push(nodesUtils.encodeNodeId(nodeId)); + } + return nodes; + }; + const rNode1Nodes = await getAllNodes(remoteNode1); + const rNode2Nodes = await getAllNodes(remoteNode2); + const node1Nodes = await getAllNodes(node1); + const node2Nodes = await getAllNodes(node2); + + const nodeIdR1 = nodesUtils.encodeNodeId(remoteNodeId1); + const nodeIdR2 = nodesUtils.encodeNodeId(remoteNodeId2); + const nodeId1 = nodesUtils.encodeNodeId(node1.keyManager.getNodeId()); + const nodeId2 = nodesUtils.encodeNodeId(node2.keyManager.getNodeId()); + expect(rNode1Nodes).toContain(nodeId1); + expect(rNode1Nodes).toContain(nodeId2); +
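The awaits on node1.queue.drained() and refreshBucketQueueDrained() earlier in this test are what make these containment assertions deterministic: entering nodes are added to the graphs by background tasks, so the test must wait for the queues to empty before inspecting state. A minimal sketch of a queue with drained() semantics, using a hypothetical TaskQueue and assuming only the behaviour these tests rely on, not the project's actual Queue implementation:

class TaskQueue {
  protected tail: Promise<void> = Promise.resolve();

  public push(task: () => Promise<void>): void {
    // Chain each task onto the tail so tasks run strictly in order;
    // errors are swallowed so one failed task does not wedge the queue
    this.tail = this.tail.then(task).catch(() => {});
  }

  public async drained(): Promise<void> {
    // Resolves once every task pushed so far has settled
    await this.tail;
  }
}

const queue = new TaskQueue();
queue.push(async () => {
  // e.g. record a freshly seen node into a bucket
});
await queue.drained(); // now safe to assert on the resulting state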
expect(rNode2Nodes).toContain(nodeId1); + expect(rNode2Nodes).toContain(nodeId2); + expect(node1Nodes).toContain(nodeIdR1); + expect(node1Nodes).toContain(nodeIdR2); + expect(node1Nodes).toContain(nodeId2); + expect(node2Nodes).toContain(nodeIdR1); + expect(node2Nodes).toContain(nodeIdR2); + expect(node2Nodes).toContain(nodeId1); + } finally { + mockedPingNode.mockRestore(); + logger.setLevel(LogLevel.WARN); + await node1?.stop(); + await node1?.destroy(); + await node2?.stop(); + await node2?.destroy(); + } + }, + global.defaultTimeout * 2, + ); }); diff --git a/tests/nodes/NodeConnectionManager.termination.test.ts b/tests/nodes/NodeConnectionManager.termination.test.ts index 3fa14f66c6..86598e78c8 100644 --- a/tests/nodes/NodeConnectionManager.termination.test.ts +++ b/tests/nodes/NodeConnectionManager.termination.test.ts @@ -1,6 +1,8 @@ import type { AddressInfo } from 'net'; import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port, TLSConfig } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; +import type Queue from '@/nodes/Queue'; import net from 'net'; import fs from 'fs'; import path from 'path'; @@ -85,6 +87,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeEach(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -244,10 +247,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Attempt a connection await expect( @@ -284,10 +288,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Attempt a connection const resultP = nodeConnectionManager.withConnF(dummyNodeId, async () => { @@ -327,10 +332,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Attempt a connection const connectionAttemptP = nodeConnectionManager.withConnF( @@ -370,10 +376,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -427,10 +434,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -506,10 +514,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as 
Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -578,10 +587,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -599,7 +609,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { const firstConnection = firstConnAndLock?.connection; // Resolves if the shutdownCallback was called - const gen = await nodeConnectionManager.withConnG( + const gen = nodeConnectionManager.withConnG( agentNodeId, async function* (): AsyncGenerator { // Throw an error here @@ -655,10 +665,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -732,10 +743,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; diff --git a/tests/nodes/NodeConnectionManager.timeout.test.ts b/tests/nodes/NodeConnectionManager.timeout.test.ts index 7b93b596da..3f73a1a395 100644 --- a/tests/nodes/NodeConnectionManager.timeout.test.ts +++ b/tests/nodes/NodeConnectionManager.timeout.test.ts @@ -1,5 +1,7 @@ import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; +import type Queue from '@/nodes/Queue'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -78,6 +80,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -186,10 +189,11 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, connTimeoutTime: 500, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connections @@ -223,10 +227,11 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, connTimeoutTime: 1000, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap
connections @@ -276,9 +281,10 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connections diff --git a/tests/nodes/NodeGraph.test.ts b/tests/nodes/NodeGraph.test.ts index 6b9eec7008..66b958716e 100644 --- a/tests/nodes/NodeGraph.test.ts +++ b/tests/nodes/NodeGraph.test.ts @@ -1,59 +1,46 @@ -import type { Host, Port } from '@/network/types'; -import type { NodeAddress, NodeData, NodeId } from '@/nodes/types'; +import type { + NodeId, + NodeData, + NodeAddress, + NodeBucket, + NodeBucketIndex, +} from '@/nodes/types'; import os from 'os'; import path from 'path'; import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; -import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; -import * as nodesErrors from '@/nodes/errors'; import KeyManager from '@/keys/KeyManager'; import * as keysUtils from '@/keys/utils'; -import Proxy from '@/network/Proxy'; import * as nodesUtils from '@/nodes/utils'; -import Sigchain from '@/sigchain/Sigchain'; -import * as nodesTestUtils from './utils'; +import * as nodesErrors from '@/nodes/errors'; +import * as utils from '@/utils'; +import * as testNodesUtils from './utils'; +import * as testUtils from '../utils'; describe(`${NodeGraph.name} test`, () => { - const localHost = '127.0.0.1' as Host; - const port = 0 as Port; const password = 'password'; - let nodeGraph: NodeGraph; - let nodeId: NodeId; - - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, - ]); - const dummyNode = nodesUtils.decodeNodeId( - 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', - )!; - - const logger = new Logger(`${NodeGraph.name} test`, LogLevel.ERROR, [ + const logger = new Logger(`${NodeGraph.name} test`, LogLevel.WARN, [ new StreamHandler(), ]); - let proxy: Proxy; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; let dataDir: string; let keyManager: KeyManager; + let dbKey: Buffer; + let dbPath: string; let db: DB; - let nodeConnectionManager: NodeConnectionManager; - let sigchain: Sigchain; - - const hostGen = (i: number) => `${i}.${i}.${i}.${i}` as Host; - - const mockedGenerateDeterministicKeyPair = jest.spyOn( - keysUtils, - 'generateDeterministicKeyPair', - ); - - beforeEach(async () => { - mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { - return keysUtils.generateKeyPair(bits); - }); - + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValue(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValue(globalKeyPair); dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -63,559 +50,1002 @@ describe(`${NodeGraph.name} test`, () => { keysPath, logger, }); - proxy = new Proxy({ - authToken: 'auth', - logger: logger, - }); - await proxy.start({ - serverHost: localHost, - serverPort: port, - tlsConfig: { - 
keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, - certChainPem: await keyManager.getRootCertChainPem(), - }, + dbKey = await keysUtils.generateKey(); + dbPath = `${dataDir}/db`; + }); + afterAll(async () => { + await keyManager.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, }); - const dbPath = `${dataDir}/db`; + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + beforeEach(async () => { db = await DB.createDB({ dbPath, logger, crypto: { - key: keyManager.dbKey, + key: dbKey, ops: { encrypt: keysUtils.encryptWithKey, decrypt: keysUtils.decryptWithKey, }, }, }); - sigchain = await Sigchain.createSigchain({ - keyManager: keyManager, - db: db, - logger: logger, - }); - nodeGraph = await NodeGraph.createNodeGraph({ + }); + afterEach(async () => { + await db.stop(); + await db.destroy(); + }); + test('get, set and unset node IDs', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, logger, }); - nodeConnectionManager = new NodeConnectionManager({ - keyManager: keyManager, - nodeGraph: nodeGraph, - proxy: proxy, - logger: logger, - }); - await nodeConnectionManager.start(); - // Retrieve the NodeGraph reference from NodeManager - nodeId = keyManager.getNodeId(); - }); + let nodeId1: NodeId; + do { + nodeId1 = testNodesUtils.generateRandomNodeId(); + } while (nodeId1.equals(keyManager.getNodeId())); + let nodeId2: NodeId; + do { + nodeId2 = testNodesUtils.generateRandomNodeId(); + } while (nodeId2.equals(keyManager.getNodeId())); - afterEach(async () => { - await db.stop(); - await sigchain.stop(); - await nodeConnectionManager.stop(); - await nodeGraph.stop(); - await keyManager.stop(); - await proxy.stop(); - await fs.promises.rm(dataDir, { - force: true, - recursive: true, + await nodeGraph.setNode(nodeId1, { + host: '10.0.0.1', + port: 1234, + } as NodeAddress); + const nodeData1 = await nodeGraph.getNode(nodeId1); + expect(nodeData1).toStrictEqual({ + address: { + host: '10.0.0.1', + port: 1234, + }, + lastUpdated: expect.any(Number), }); + await utils.sleep(1000); + await nodeGraph.setNode(nodeId2, { + host: 'abc.com', + port: 8978, + } as NodeAddress); + const nodeData2 = await nodeGraph.getNode(nodeId2); + expect(nodeData2).toStrictEqual({ + address: { + host: 'abc.com', + port: 8978, + }, + lastUpdated: expect.any(Number), + }); + expect(nodeData2!.lastUpdated > nodeData1!.lastUpdated).toBe(true); + const nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); + expect(nodes).toHaveLength(2); + expect(nodes).toContainEqual([ + nodeId1, + { + address: { + host: '10.0.0.1', + port: 1234, + }, + lastUpdated: expect.any(Number), + }, + ]); + expect(nodes).toContainEqual([ + nodeId2, + { + address: { + host: 'abc.com', + port: 8978, + }, + lastUpdated: expect.any(Number), + }, + ]); + await nodeGraph.unsetNode(nodeId1); + expect(await nodeGraph.getNode(nodeId1)).toBeUndefined(); + expect(await utils.asyncIterableArray(nodeGraph.getNodes())).toStrictEqual([ + [ + nodeId2, + { + address: { + host: 'abc.com', + port: 8978, + }, + lastUpdated: expect.any(Number), + }, + ], + ]); + await nodeGraph.unsetNode(nodeId2); + await nodeGraph.stop(); }); - - test('NodeGraph readiness', async () => { - const nodeGraph2 = await NodeGraph.createNodeGraph({ + test('get all nodes', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, logger, }); - // @ts-ignore - await expect(nodeGraph2.destroy()).rejects.toThrow( - nodesErrors.ErrorNodeGraphRunning, - ); - 
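The rewritten assertions below order nodes with nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId). Under the usual Kademlia convention, which these tests appear to follow (256 buckets for 32-byte IDs, bucket 255 for a first-bit difference), a node belongs to the bucket of the highest bit at which its ID differs from our own. A sketch of that computation, offered as an assumption since the util's source is not part of this diff:

function bucketIndex(ownId: Uint8Array, nodeId: Uint8Array): number {
  for (let i = 0; i < ownId.length; i++) {
    const xor = ownId[i] ^ nodeId[i];
    if (xor !== 0) {
      // Offset of the highest set bit within this byte (0 = most significant)
      const bit = 7 - Math.floor(Math.log2(xor));
      // The first differing bit overall decides the bucket: 255 down to 0
      return (ownId.length - i) * 8 - bit - 1;
    }
  }
  // Mirrors ErrorNodeGraphSameNodeId below: a node never buckets its own ID
  throw new RangeError('node ID equals own node ID');
}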
// Should be a noop - await nodeGraph2.start(); - await nodeGraph2.stop(); - await nodeGraph2.destroy(); - await expect(async () => { - await nodeGraph2.start(); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphDestroyed); - await expect(async () => { - await nodeGraph2.getBucket(0); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - await expect(async () => { - await nodeGraph2.getBucket(0); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - }); - test('knows node (true and false case)', async () => { - // Known node - const nodeAddress1: NodeAddress = { - host: '127.0.0.1' as Host, - port: 11111 as Port, - }; - await nodeGraph.setNode(nodeId1, nodeAddress1); - expect(await nodeGraph.knowsNode(nodeId1)).toBeTruthy(); - - // Unknown node - expect(await nodeGraph.knowsNode(dummyNode)).toBeFalsy(); - }); - test('finds correct node address', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Get node address - const foundAddress = await nodeGraph.getNode(newNode2Id); - expect(foundAddress).toEqual({ host: '227.1.1.1', port: 4567 }); - }); - test('unable to find node address', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Get node address (of non-existent node) - const foundAddress = await nodeGraph.getNode(dummyNode); - expect(foundAddress).toBeUndefined(); - }); - test('adds a single node into a bucket', async () => { - // New node added - const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Check new node is in retrieved bucket from database - // bucketIndex = 1 as "NODEID1" XOR "NODEID2" = 3 - const bucket = await nodeGraph.getBucket(1); - expect(bucket).toBeDefined(); - expect(bucket![newNode2Id]).toEqual({ - address: { host: '227.1.1.1', port: 4567 }, - lastUpdated: expect.any(Date), + let nodeIds = Array.from({ length: 25 }, () => { + return testNodesUtils.generateRandomNodeId(); }); - }); - test('adds multiple nodes into the same bucket', async () => { - // Add 3 new nodes into bucket 4 - const bucketIndex = 4; - const newNode1Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 0, + nodeIds = nodeIds.filter( + (nodeId) => !nodeId.equals(keyManager.getNodeId()), ); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - const newNode2Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 1, + let bucketIndexes: Array<NodeBucketIndex>; + let nodes: NodeBucket; + nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); + expect(nodes).toHaveLength(0); + for (const nodeId of nodeIds) { + await utils.sleep(100); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: 55555, + } as NodeAddress); + } + nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); + expect(nodes).toHaveLength(25); + // Sorted by bucket indexes ascending + bucketIndexes = nodes.map(([nodeId]) => + nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId), ); - const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const newNode3Id =
nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 2, + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] <= bucketIndex; + }), + ).toBe(true); + // Sorted by bucket indexes ascending explicitly + nodes = await utils.asyncIterableArray(nodeGraph.getNodes('asc')); + bucketIndexes = nodes.map(([nodeId]) => + nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId), + ); + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] <= bucketIndex; + }), + ).toBe(true); + nodes = await utils.asyncIterableArray(nodeGraph.getNodes('desc')); + expect(nodes).toHaveLength(25); + // Sorted by bucket indexes descending + bucketIndexes = nodes.map(([nodeId]) => + nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId), ); - const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; - await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4 - const bucket = await nodeGraph.getBucket(4); - expect(bucket).toBeDefined(); - if (!bucket) fail('bucket should be defined, letting TS know'); - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] >= bucketIndex; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('setting same node ID throws error', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, }); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), + await expect( + nodeGraph.setNode(keyManager.getNodeId(), { + host: '127.0.0.1', + port: 55555, + } as NodeAddress), + ).rejects.toThrow(nodesErrors.ErrorNodeGraphSameNodeId); + await nodeGraph.stop(); + }); + test('get bucket with 1 node', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), + let nodeId: NodeId; + do { + nodeId = testNodesUtils.generateRandomNodeId(); + } while (nodeId.equals(keyManager.getNodeId())); + // Set one node + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: 55555, + } as NodeAddress); + const bucketIndex = nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId); + const bucket = await nodeGraph.getBucket(bucketIndex); + expect(bucket).toHaveLength(1); + expect(bucket[0]).toStrictEqual([ + nodeId, + { + address: { + host: '127.0.0.1', + port: 55555, + }, + lastUpdated: expect.any(Number), + }, + ]); + expect(await nodeGraph.getBucketMeta(bucketIndex)).toStrictEqual({ + count: 1, }); - }); - test('adds a single node into different buckets', async () => { - // New node for bucket 3 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 3); - const newNode1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - // New node for bucket 255 (the highest possible bucket) - const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); - const newNode2Address = { host: '2.2.2.2', port: 2222 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const bucket3 = await nodeGraph.getBucket(3); - const bucket351 = await nodeGraph.getBucket(255); - if (bucket3 && bucket351) { - expect(bucket3[newNode1Id]).toEqual({ - address: { host: '1.1.1.1', port: 
1111 }, - lastUpdated: expect.any(Date), - }); - expect(bucket351[newNode2Id]).toEqual({ - address: { host: '2.2.2.2', port: 2222 }, - lastUpdated: expect.any(Date), - }); + // Adjacent bucket should be empty + let bucketIndex_: number; + if (bucketIndex >= nodeId.length * 8 - 1) { + bucketIndex_ = bucketIndex - 1; + } else if (bucketIndex === 0) { + bucketIndex_ = bucketIndex + 1; } else { - // Should be unreachable - fail('Bucket undefined'); + bucketIndex_ = bucketIndex + 1; } + expect(await nodeGraph.getBucket(bucketIndex_)).toHaveLength(0); + expect(await nodeGraph.getBucketMeta(bucketIndex_)).toStrictEqual({ + count: 0, + }); + await nodeGraph.stop(); }); - test('deletes a single node (and removes bucket)', async () => { - // New node for bucket 2 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - // Check the bucket is there first - const bucket = await nodeGraph.getBucket(2); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - - // Delete the node - await nodeGraph.unsetNode(newNode1Id); - // Check bucket no longer exists - const newBucket = await nodeGraph.getBucket(2); - expect(newBucket).toBeUndefined(); - }); - test('deletes a single node (and retains remainder of bucket)', async () => { - // Add 3 new nodes into bucket 4 - const bucketIndex = 4; - const newNode1Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 0, - ); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - const newNode2Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 1, + test('get bucket with multiple nodes', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + // Contiguous node IDs starting from 0 + let nodeIds = Array.from({ length: 25 }, (_, i) => + IdInternal.create( + utils.bigInt2Bytes(BigInt(i), keyManager.getNodeId().byteLength), + ), ); - const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const newNode3Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 2, + nodeIds = nodeIds.filter( + (nodeId) => !nodeId.equals(keyManager.getNodeId()), ); - const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; - await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4 - const bucket = await nodeGraph.getBucket(bucketIndex); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); + for (const nodeId of nodeIds) { + await utils.sleep(100); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: 55555, + } as NodeAddress); } - - // Delete the node - await nodeGraph.unsetNode(newNode1Id); - // Check node no longer exists in the bucket - const newBucket = await 
nodeGraph.getBucket(bucketIndex); - if (newBucket) { - expect(newBucket[newNode1Id]).toBeUndefined(); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); + // Use first and last buckets because node IDs may be split between buckets + const bucketIndexFirst = nodesUtils.bucketIndex( + keyManager.getNodeId(), + nodeIds[0], + ); + const bucketIndexLast = nodesUtils.bucketIndex( + keyManager.getNodeId(), + nodeIds[nodeIds.length - 1], + ); + const bucketFirst = await nodeGraph.getBucket(bucketIndexFirst); + const bucketLast = await nodeGraph.getBucket(bucketIndexLast); + let bucket: NodeBucket; + let bucketIndex: NodeBucketIndex; + if (bucketFirst.length >= bucketLast.length) { + bucket = bucketFirst; + bucketIndex = bucketIndexFirst; } else { - // Should be unreachable - fail('New bucket undefined'); + bucket = bucketLast; + bucketIndex = bucketIndexLast; } - }); - test('enforces k-bucket size, removing least active node when a new node is discovered', async () => { - // Add k nodes to the database (importantly, they all go into the same bucket) - const bucketIndex = 59; - // Keep a record of the first node ID that we added - const firstNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, + expect(bucket.length > 1).toBe(true); + let bucketNodeIds = bucket.map(([nodeId]) => nodeId); + // The node IDs must be sorted lexicographically + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) < 1; + }), + ).toBe(true); + // Sort by node ID asc + bucket = await nodeGraph.getBucket(bucketIndex, 'nodeId', 'asc'); + bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) < 0; + }), + ).toBe(true); + // Sort by node ID desc + bucket = await nodeGraph.getBucket(bucketIndex, 'nodeId', 'desc'); + bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) > 0; + }), + ).toBe(true); + // Sort by distance asc + bucket = await nodeGraph.getBucket(bucketIndex, 'distance', 'asc'); + let bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), + ); + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] <= distance; + }), + ).toBe(true); + // Sort by distance desc + bucket = await nodeGraph.getBucket(bucketIndex, 'distance', 'desc'); + bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), ); - for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { - // Add the current node ID - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode( - nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, i), - nodeAddress, + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] >= distance; + }), + ).toBe(true); + // Sort by lastUpdated asc + bucket = await nodeGraph.getBucket(bucketIndex, 'lastUpdated', 'asc'); + let bucketLastUpdateds = bucket.map(([, nodeData]) => nodeData.lastUpdated); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] <= lastUpdated; + }), + ).toBe(true); + bucket = await 
nodeGraph.getBucket(bucketIndex, 'lastUpdated', 'desc'); + bucketLastUpdateds = bucket.map(([, nodeData]) => nodeData.lastUpdated); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] >= lastUpdated; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get all buckets', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 50; i++) { + await utils.sleep(50); + await nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + let bucketIndex_ = -1; + // Ascending order + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'nodeId', + 'asc', + )) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) < 0; + }), + ).toBe(true); + } + // There must have been at least 1 bucket + expect(bucketIndex_).not.toBe(-1); + // Descending order + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'nodeId', + 'desc', + )) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) > 0; + }), + ).toBe(true); + } + expect(bucketIndex_).not.toBe(keyManager.getNodeId().length * 8); + // Distance ascending order + // Lower distance buckets first + bucketIndex_ = -1; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'distance', + 'asc', + )) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), ); - // Increment the current node ID + // It's the LAST bucket that fails this + expect( + bucketDistances.slice(1).every((distance, i) => { + 
return bucketDistances[i] <= distance; + }), + ).toBe(true); } - // All of these nodes are in bucket 59 - const originalBucket = await nodeGraph.getBucket(bucketIndex); - if (originalBucket) { - expect(Object.keys(originalBucket).length).toBe( - nodeGraph.maxNodesPerBucket, + // Distance descending order + // Higher distance buckets first + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'distance', + 'desc', + )) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), ); - } else { - // Should be unreachable - fail('Bucket undefined'); + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] >= distance; + }), + ).toBe(true); } - - // Attempt to add a new node into this full bucket (increment the last node - // ID that was added) - const newNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - nodeGraph.maxNodesPerBucket + 1, - ); - const newNodeAddress = { host: '0.0.0.1' as Host, port: 1234 as Port }; - await nodeGraph.setNode(newNodeId, newNodeAddress); - - const finalBucket = await nodeGraph.getBucket(bucketIndex); - if (finalBucket) { - // We should still have a full bucket (but no more) - expect(Object.keys(finalBucket).length).toEqual( - nodeGraph.maxNodesPerBucket, + // Last updated ascending order + // Bucket index is ascending + bucketIndex_ = -1; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'lastUpdated', + 'asc', + )) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketLastUpdateds = bucket.map( + ([, nodeData]) => nodeData.lastUpdated, ); - // Ensure that this new node is in the bucket - expect(finalBucket[newNodeId]).toEqual({ - address: newNodeAddress, - lastUpdated: expect.any(Date), - }); - // NODEID1 should have been removed from this bucket (as this was the least active) - // The first node added should have been removed from this bucket (as this - // was the least active, purely because it was inserted first) - expect(finalBucket[firstNodeId]).toBeUndefined(); - } else { - // Should be unreachable - fail('Bucket undefined'); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] <= lastUpdated; + }), + ).toBe(true); } - }); - test('enforces k-bucket size, retaining all nodes if adding a pre-existing node', async () => { - // Add k nodes to the database (importantly, they all go into the same bucket) - const bucketIndex = 
59; - const currNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - ); - // Keep a record of the first node ID that we added - // const firstNodeId = currNodeId; - let increment = 1; - for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { - // Add the current node ID - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode( - nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, increment), - nodeAddress, + // Last updated descending order + // Bucket index is descending + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'lastUpdated', + 'desc', + )) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketLastUpdateds = bucket.map( + ([, nodeData]) => nodeData.lastUpdated, ); - // Increment the current node ID - skip for the last one to keep currNodeId - // as the last added node ID - if (i !== nodeGraph.maxNodesPerBucket) { - increment++; + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] >= lastUpdated; + }), + ).toBe(true); + } + await nodeGraph.stop(); + }); + test('reset buckets', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 100; i++) { + await nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const buckets0 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + // Reset the buckets according to the new node ID + // Note that this should normally be only executed when the key manager NodeID changes + // This means methods that use the KeyManager's node ID cannot be used here in this test + const nodeIdNew1 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew1); + const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets1.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets1) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); } } - // All of these nodes are in bucket 59 - const originalBucket = await nodeGraph.getBucket(bucketIndex); - if (originalBucket) { - expect(Object.keys(originalBucket).length).toBe( - nodeGraph.maxNodesPerBucket, + expect(buckets1).not.toStrictEqual(buckets0); + // Resetting again should change the space + const nodeIdNew2 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew2); + const buckets2 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets2.length > 0).toBe(true); + for (const 
[bucketIndex, bucket] of buckets2) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew2, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + expect(buckets2).not.toStrictEqual(buckets1); + // Resetting to the same NodeId results in the same bucket structure + await nodeGraph.resetBuckets(nodeIdNew2); + const buckets3 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets3).toStrictEqual(buckets2); + // Resetting to an existing NodeId + const nodeIdExisting = buckets3[0][1][0][0]; + let nodeIdExistingFound = false; + await nodeGraph.resetBuckets(nodeIdExisting); + const buckets4 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets4.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets4) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + if (nodeId.equals(nodeIdExisting)) { + nodeIdExistingFound = true; + } + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdExisting, nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + expect(buckets4).not.toStrictEqual(buckets3); + // The existing node ID should not be put into the NodeGraph + expect(nodeIdExistingFound).toBe(false); + await nodeGraph.stop(); + }); + test('reset buckets is persistent', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 100; i++) { + await nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const nodeIdNew1 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew1); + await nodeGraph.stop(); + await nodeGraph.start(); + const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets1.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets1) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + const nodeIdNew2 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew2); + await nodeGraph.stop(); + await nodeGraph.start(); + const buckets2 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets2.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets2) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew2, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 
0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + expect(buckets2).not.toStrictEqual(buckets1); + await nodeGraph.stop(); + }); + test('get closest nodes, 40 nodes lower than target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 50 + i, + i, ); - } else { - // Should be unreachable - fail('Bucket undefined'); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId, 20); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // If we tried to re-add the first node, it would simply remove the original - // first node, as this is the "least active" - // We instead want to check that we don't mistakenly delete a node if we're - // updating an existing one - // So, re-add the last node - const newLastAddress: NodeAddress = { - host: '30.30.30.30' as Host, - port: 30 as Port, - }; - await nodeGraph.setNode(currNodeId, newLastAddress); - - const finalBucket = await nodeGraph.getBucket(bucketIndex); - if (finalBucket) { - // We should still have a full bucket - expect(Object.keys(finalBucket).length).toEqual( - nodeGraph.maxNodesPerBucket, + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 15 nodes lower than target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 50 + i, + i, ); - // Ensure that this new node is in the bucket - expect(finalBucket[currNodeId]).toEqual({ - address: newLastAddress, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); } - }); - test('retrieves all buckets (in expected lexicographic order)', async () => { - // Bucket 0 is expected to never have any nodes (as nodeId XOR 0 = nodeId) - // Bucket 1 (minimum): - - const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); - const node1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; - await nodeGraph.setNode(node1Id, node1Address); - - // Bucket 4 (multiple nodes in 1 bucket): - const node41Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4); - const node41Address = { host: '41.41.41.41', 
port: 4141 } as NodeAddress; - await nodeGraph.setNode(node41Id, node41Address); - const node42Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4, 1); - const node42Address = { host: '42.42.42.42', port: 4242 } as NodeAddress; - await nodeGraph.setNode(node42Id, node42Address); - - // Bucket 10 (lexicographic ordering - should appear after 2): - const node10Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 10); - const node10Address = { host: '10.10.10.10', port: 1010 } as NodeAddress; - await nodeGraph.setNode(node10Id, node10Address); + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // Bucket 255 (maximum): - const node255Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); - const node255Address = { - host: '255.255.255.255', - port: 255, - } as NodeAddress; - await nodeGraph.setNode(node255Id, node255Address); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 10 nodes lower than target, 30 nodes above, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 90 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - const buckets = await nodeGraph.getAllBuckets(); - expect(buckets.length).toBe(4); - // Buckets should be returned in lexicographic ordering (using hex keys to - // ensure the bucket indexes are in numberical order) - expect(buckets).toEqual([ - { - [node1Id]: { - address: { host: '1.1.1.1', port: 1111 }, - lastUpdated: expect.any(String), - }, - }, - { - [node41Id]: { - address: { host: '41.41.41.41', port: 4141 }, - lastUpdated: expect.any(String), - }, - [node42Id]: { - address: { host: '42.42.42.42', port: 4242 }, - lastUpdated: expect.any(String), - }, - }, - { - [node10Id]: { - address: { host: '10.10.10.10', port: 1010 }, - lastUpdated: expect.any(String), - }, - }, - { - [node255Id]: { - address: { host: '255.255.255.255', port: 255 }, - lastUpdated: expect.any(String), - }, - }, - ]); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + 
closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); }); - test( - 'refreshes buckets', - async () => { - const initialNodes: Record = {}; - // Generate and add some nodes - for (let i = 1; i < 255; i += 20) { - const newNodeId = nodesTestUtils.generateNodeIdForBucket( - keyManager.getNodeId(), - i, - ); - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode(newNodeId, nodeAddress); - initialNodes[newNodeId] = { - id: newNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance( - keyManager.getNodeId(), - newNodeId, - ), - }; - } + test('get closest nodes, 10 nodes lower than target, 30 nodes above, take 5', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 90 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId, 5); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // Renew the keypair - await keyManager.renewRootKeyPair('newPassword'); - // Reset the test's node ID state - nodeId = keyManager.getNodeId(); - // Refresh the buckets - await nodeGraph.refreshBuckets(); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 5 nodes lower than target, 10 nodes above, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 95 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // Get all the new buckets, and expect that each node is in the correct bucket - const newBuckets = await nodeGraph.getAllBuckets(); - let nodeCount = 0; - for (const b of newBuckets) { - for (const n of Object.keys(b)) { - const nodeId = IdInternal.fromString(n); - // Check that it was a node in 
the original DB - expect(initialNodes[nodeId]).toBeDefined(); - // Check it's in the correct bucket - const expectedIndex = nodesUtils.calculateBucketIndex( - keyManager.getNodeId(), - nodeId, - ); - const expectedBucket = await nodeGraph.getBucket(expectedIndex); - expect(expectedBucket).toBeDefined(); - expect(expectedBucket![nodeId]).toBeDefined(); - // Check it has the correct address - expect(b[nodeId].address).toEqual(initialNodes[nodeId].address); - nodeCount++; - } - } - // We had less than k (20) nodes, so we expect that all nodes will be re-added - // If we had more than k nodes, we may lose some of them (because the nodes - // may be re-added to newly full buckets) - expect(Object.keys(initialNodes).length).toEqual(nodeCount); - }, - global.defaultTimeout * 4, - ); - test('updates node', async () => { - // New node added - const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); - const node1Address = { host: '1.1.1.1', port: 1 } as NodeAddress; - await nodeGraph.setNode(node1Id, node1Address); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 40 nodes above target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 101 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // Check new node is in retrieved bucket from database - const bucket = await nodeGraph.getBucket(2); - const time1 = bucket![node1Id].lastUpdated; + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 15 nodes above target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 101 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + 
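// Verification pattern shared by all of the getClosestNodes tests in this
// file: independently sort every inserted node by XOR distance to the
// target, check that the query result is a prefix of that sorted list,
// then check that the returned distances are strictly ascending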
nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // Update node and check that time is later - const newNode1Address = { host: '2.2.2.2', port: 2 } as NodeAddress; - await nodeGraph.updateNode(node1Id, newNode1Address); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, no nodes, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - const bucket2 = await nodeGraph.getBucket(2); - const time2 = bucket2![node1Id].lastUpdated; - expect(bucket2![node1Id].address).toEqual(newNode1Address); - expect(time1 < time2).toBeTruthy(); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); }); }); diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index 0ac96ec27a..66bd40999e 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -7,6 +7,7 @@ import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import UTP from 'utp-native'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import * as keysUtils from '@/keys/utils'; @@ -16,8 +17,12 @@ import NodeManager from '@/nodes/NodeManager'; import Proxy from '@/network/Proxy'; import Sigchain from '@/sigchain/Sigchain'; import * as claimsUtils from '@/claims/utils'; -import { promisify, sleep } from '@/utils'; +import { promise, promisify, sleep } from '@/utils'; import * as nodesUtils from '@/nodes/utils'; +import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import * as nodesErrors from '@/nodes/errors'; +import * as nodesTestUtils from './utils'; +import { generateNodeIdForBucket } from './utils'; describe(`${NodeManager.name} test`, () => { const password = 'password'; @@ -26,6 +31,7 @@ describe(`${NodeManager.name} test`, () => { ]); let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let proxy: Proxy; let keyManager: KeyManager; @@ -37,14 +43,22 @@ describe(`${NodeManager.name} test`, () => { const serverHost = '::1' as Host; const externalHost = '127.0.0.1' as Host; + const localhost = '127.0.0.1' as Host; + const port = 55556 as Port; const 
serverPort = 0 as Port; const externalPort = 0 as Port; const mockedGenerateDeterministicKeyPair = jest.spyOn( keysUtils, 'generateDeterministicKeyPair', ); + const mockedPingNode = jest.fn(); // Jest.spyOn(NodeManager.prototype, 'pingNode'); + const dummyNodeConnectionManager = { + pingNode: mockedPingNode, + } as unknown as NodeConnectionManager; beforeEach(async () => { + mockedPingNode.mockClear(); + mockedPingNode.mockImplementation(async (_) => true); mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { return keysUtils.generateKeyPair(bits); }); @@ -99,16 +113,20 @@ describe(`${NodeManager.name} test`, () => { keyManager, logger, }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, + queue, proxy, logger, }); - await nodeConnectionManager.start(); }); afterEach(async () => { + mockedPingNode.mockClear(); + mockedPingNode.mockImplementation(async (_) => true); await nodeConnectionManager.stop(); + await queue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await sigchain.stop(); @@ -129,6 +147,7 @@ describe(`${NodeManager.name} test`, () => { 'pings node', async () => { let server: PolykeyAgent | undefined; + let nodeManager: NodeManager | undefined; try { server = await PolykeyAgent.createPolykeyAgent({ password: 'password', @@ -148,14 +167,17 @@ describe(`${NodeManager.name} test`, () => { }; await nodeGraph.setNode(serverNodeId, serverNodeAddress); - const nodeManager = new NodeManager({ + nodeManager = new NodeManager({ db, sigchain, keyManager, nodeGraph, nodeConnectionManager, + queue, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // Set server node offline await server.stop(); @@ -192,6 +214,7 @@ describe(`${NodeManager.name} test`, () => { expect(active3).toBe(false); } finally { // Clean up + await nodeManager?.stop(); await server?.stop(); await server?.destroy(); } @@ -200,6 +223,7 @@ describe(`${NodeManager.name} test`, () => { ); // Ping needs to timeout (takes 20 seconds + setup + pulldown) test('getPublicKey', async () => { let server: PolykeyAgent | undefined; + let nodeManager: NodeManager | undefined; try { server = await PolykeyAgent.createPolykeyAgent({ password: 'password', @@ -219,14 +243,17 @@ describe(`${NodeManager.name} test`, () => { }; await nodeGraph.setNode(serverNodeId, serverNodeAddress); - const nodeManager = new NodeManager({ + nodeManager = new NodeManager({ db, sigchain, keyManager, nodeGraph, nodeConnectionManager, + queue, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // We want to get the public key of the server const key = await nodeManager.getPublicKey(serverNodeId); @@ -234,6 +261,7 @@ describe(`${NodeManager.name} test`, () => { expect(key).toEqual(expectedKey); } finally { // Clean up + await nodeManager?.stop(); await server?.stop(); await server?.destroy(); } @@ -401,26 +429,661 @@ describe(`${NodeManager.name} test`, () => { } }); test('can request chain data', async () => { - // Cross signing claims - await y.nodeManager.claimNode(xNodeId); + let nodeManager: NodeManager | undefined; + try { + // Cross signing claims + await y.nodeManager.claimNode(xNodeId); - const nodeManager = new NodeManager({ - db, - sigchain, - keyManager, - nodeGraph, - nodeConnectionManager, - logger, + nodeManager = new NodeManager({ + db, + sigchain, + keyManager, + nodeGraph, + nodeConnectionManager, + queue, + logger, + }); + await nodeManager.start(); + await 
nodeConnectionManager.start({ nodeManager }); + + await nodeGraph.setNode(xNodeId, xNodeAddress); + + // We want to get the public key of the server + const chainData = JSON.stringify( + await nodeManager.requestChainData(xNodeId), + ); + expect(chainData).toContain(nodesUtils.encodeNodeId(xNodeId)); + expect(chainData).toContain(nodesUtils.encodeNodeId(yNodeId)); + } finally { + await nodeManager?.stop(); + } + }); + }); + test('should add a node when bucket has room', async () => { + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + await nodeManager.setNode(nodeId, {} as NodeAddress); + + // Checking bucket + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(1); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should update a node if node exists', async () => { + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + await nodeManager.setNode(nodeId, { + host: '' as Host, + port: 11111 as Port, }); - await nodeGraph.setNode(xNodeId, xNodeAddress); + const nodeData = (await nodeGraph.getNode(nodeId))!; + await sleep(1100); - // We want to get the public key of the server - const chainData = JSON.stringify( - await nodeManager.requestChainData(xNodeId), + // Should update the node + await nodeManager.setNode(nodeId, { + host: '' as Host, + port: 22222 as Port, + }); + + const newNodeData = (await nodeGraph.getNode(nodeId))!; + expect(newNodeData.address.port).not.toEqual(nodeData.address.port); + expect(newNodeData.lastUpdated).not.toEqual(nodeData.lastUpdated); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should not add node if bucket is full and old node is alive', async () => { + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(true); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); 
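// getOldestNode appears to return the bucket's least recently updated
// entries; the Kademlia-style rule exercised below pings that node first:
// with the ping mocked to succeed the newcomer is dropped and the old
// entry's lastUpdated is refreshed, while a failed ping would evict the
// old entry instead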
+ const oldestNode = await nodeGraph.getNode(oldestNodeId!); + // Waiting for a second to tick over + await sleep(1500); + // Adding a new node with bucket full + await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); + // Bucket still contains max nodes + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); + // New node was not added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeUndefined(); + // Oldest node was updated + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew!.lastUpdated).not.toEqual(oldestNode!.lastUpdated); + nodeManagerPingMock.mockRestore(); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should add node if bucket is full, old node is alive and force is set', async () => { + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(true); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); + // Adding a new node with bucket full + await nodeManager.setNode( + nodeId, + { port: 55555 } as NodeAddress, + false, + true, + ); + // Bucket still contains max nodes + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); + // New node was added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeDefined(); + // Oldest node was removed + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew).toBeUndefined(); + nodeManagerPingMock.mockRestore(); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should add node if bucket is full and old node is dead', async () => { + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(false); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); + // Adding a new node with bucket full + await 
nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); + // New node was added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeDefined(); + // Oldest node was removed + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew).toBeUndefined(); + nodeManagerPingMock.mockRestore(); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should add node when an incoming connection is established', async () => { + let server: PolykeyAgent | undefined; + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + server = await PolykeyAgent.createPolykeyAgent({ + password: 'password', + nodePath: path.join(dataDir, 'server'), + keysConfig: { + rootKeyPairBits: 2048, + }, + networkConfig: { + proxyHost: localhost, + }, + logger: logger, + }); + const serverNodeId = server.keyManager.getNodeId(); + const serverNodeAddress: NodeAddress = { + host: server.proxy.getProxyHost(), + port: server.proxy.getProxyPort(), + }; + await nodeGraph.setNode(serverNodeId, serverNodeAddress); + + const expectedHost = proxy.getProxyHost(); + const expectedPort = proxy.getProxyPort(); + const expectedNodeId = keyManager.getNodeId(); + + const nodeData = await server.nodeGraph.getNode(expectedNodeId); + expect(nodeData).toBeUndefined(); + + // Now we want to connect to the server by making an echo request. + await nodeConnectionManager.withConnF(serverNodeId, async (conn) => { + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello')); + }); + + const nodeData2 = await server.nodeGraph.getNode(expectedNodeId); + expect(nodeData2).toBeDefined(); + expect(nodeData2?.address.host).toEqual(expectedHost); + expect(nodeData2?.address.port).toEqual(expectedPort); + } finally { + // Clean up + await server?.stop(); + await server?.destroy(); + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should not add nodes to full bucket if pings succeed', async () => { + mockedPingNode.mockImplementation(async (_) => true); + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Helpers + const listBucket = async (bucketIndex: number) => { + const bucket = await nodeManager.getBucket(bucketIndex); + return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); + }; + + // Pings succeed, node not added + mockedPingNode.mockImplementation(async (_) => true); + const newNode = generateNodeIdForBucket(nodeId, 100, 21); + await nodeManager.setNode(newNode, address); + expect(await listBucket(100)).not.toContain( + nodesUtils.encodeNodeId(newNode), + ); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + });
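A rough sketch, not part of this patch, of the queue contract the surrounding tests rely on (only `start()`, `stop()`, and `drained()` are exercised here); `QueueSketch` is a hypothetical stand-in, not the real `Queue` implementation:

class QueueSketch {
  protected tail: Promise<void> = Promise.resolve();
  public push(task: () => Promise<void>): void {
    // Chain tasks so they run strictly one at a time; swallow rejections so
    // one failing task cannot stall the tasks queued behind it
    this.tail = this.tail.then(task).catch(() => {});
  }
  public async drained(): Promise<void> {
    // Settles once every task pushed so far has finished
    await this.tail;
  }
}

+ test('should add nodes to full bucket if pings 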
fail', async () => { + mockedPingNode.mockImplementation(async (_) => true); + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + logger, + }); + await queue.start(); + await nodeManager.start(); + try { + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Helpers + const listBucket = async (bucketIndex: number) => { + const bucket = await nodeManager.getBucket(bucketIndex); + return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); + }; + + // Pings fail, new nodes get added + mockedPingNode.mockImplementation(async (_) => false); + const newNode1 = generateNodeIdForBucket(nodeId, 100, 22); + const newNode2 = generateNodeIdForBucket(nodeId, 100, 23); + const newNode3 = generateNodeIdForBucket(nodeId, 100, 24); + await nodeManager.setNode(newNode1, address); + await nodeManager.setNode(newNode2, address); + await nodeManager.setNode(newNode3, address); + await queue.drained(); + const list = await listBucket(100); + expect(list).toContain(nodesUtils.encodeNodeId(newNode1)); + expect(list).toContain(nodesUtils.encodeNodeId(newNode2)); + expect(list).toContain(nodesUtils.encodeNodeId(newNode3)); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should not block when bucket is full', async () => { + const tempNodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + mockedPingNode.mockImplementation(async (_) => true); + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph: tempNodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + logger, + }); + await queue.start(); + await nodeManager.start(); + try { + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Set node does not block + const delayPing = promise(); + mockedPingNode.mockImplementation(async (_) => { + await delayPing.p; + return true; + }); + const newNode4 = generateNodeIdForBucket(nodeId, 100, 25); + // Set manually to non-blocking + await expect( + nodeManager.setNode(newNode4, address, false), + ).resolves.toBeUndefined(); + delayPing.resolveP(); + await queue.drained(); + } finally { + await nodeManager.stop(); + await queue.stop(); + await tempNodeGraph.stop(); + await tempNodeGraph.destroy(); + } + }); + test('should block when blocking is set to true', async () => { + mockedPingNode.mockImplementation(async (_) => true); + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + logger, + }); + await queue.start(); + await nodeManager.start(); + try { + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a 
bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Set node can block + mockedPingNode.mockClear(); + mockedPingNode.mockImplementation(async () => true); + const newNode5 = generateNodeIdForBucket(nodeId, 100, 25); + await expect( + nodeManager.setNode(newNode5, address, true), + ).resolves.toBeUndefined(); + expect(mockedPingNode).toBeCalled(); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should update deadline when updating a bucket', async () => { + const refreshBucketTimeout = 100000; + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, + }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + try { + mockRefreshBucket.mockImplementation(async () => {}); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + // @ts-ignore: kidnap map + const deadlineMap = nodeManager.refreshBucketDeadlineMap; + // Getting starting value + const bucket = 0; + const startingDeadline = deadlineMap.get(bucket); + const nodeId = nodesTestUtils.generateNodeIdForBucket( + keyManager.getNodeId(), + bucket, ); - expect(chainData).toContain(nodesUtils.encodeNodeId(xNodeId)); - expect(chainData).toContain(nodesUtils.encodeNodeId(yNodeId)); + await sleep(1000); + await nodeManager.setNode(nodeId, {} as NodeAddress); + // Deadline should be updated + const newDeadline = deadlineMap.get(bucket); + expect(newDeadline).not.toEqual(startingDeadline); + } finally { + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should add buckets to the queue when exceeding deadline', async () => { + const refreshBucketTimeout = 100; + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + const mockRefreshBucketQueueAdd = jest.spyOn( + NodeManager.prototype, + 'refreshBucketQueueAdd', + ); + try { + mockRefreshBucket.mockImplementation(async () => {}); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + // Getting starting value + expect(mockRefreshBucketQueueAdd).toHaveBeenCalledTimes(0); + await sleep(200); + expect(mockRefreshBucketQueueAdd).toHaveBeenCalledTimes(256); + } finally { + mockRefreshBucketQueueAdd.mockRestore(); + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should digest queue to refresh buckets', async () => { + const refreshBucketTimeout = 1000000; + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, + }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ 
nodeManager }); + mockRefreshBucket.mockImplementation(async () => {}); + nodeManager.refreshBucketQueueAdd(1); + nodeManager.refreshBucketQueueAdd(2); + nodeManager.refreshBucketQueueAdd(3); + nodeManager.refreshBucketQueueAdd(4); + nodeManager.refreshBucketQueueAdd(5); + await nodeManager.refreshBucketQueueDrained(); + expect(mockRefreshBucket).toHaveBeenCalledTimes(5); + + // Add buckets to queue + // check if refresh buckets was called + } finally { + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should abort refreshBucket queue when stopping', async () => { + const refreshBucketTimeout = 1000000; + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, + }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + mockRefreshBucket.mockImplementation( + async (bucket, options: { signal?: AbortSignal } = {}) => { + const { signal } = { ...options }; + const prom = promise(); + signal?.addEventListener('abort', () => + prom.rejectP(new nodesErrors.ErrorNodeAborted()), + ); + await prom.p; + }, + ); + nodeManager.refreshBucketQueueAdd(1); + nodeManager.refreshBucketQueueAdd(2); + nodeManager.refreshBucketQueueAdd(3); + nodeManager.refreshBucketQueueAdd(4); + nodeManager.refreshBucketQueueAdd(5); + await nodeManager.stop(); + } finally { + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + await queue.stop(); + } }); }); diff --git a/tests/nodes/utils.test.ts b/tests/nodes/utils.test.ts index ee1aeadc46..0d962f963e 100644 --- a/tests/nodes/utils.test.ts +++ b/tests/nodes/utils.test.ts @@ -1,48 +1,69 @@ import type { NodeId } from '@/nodes/types'; +import os from 'os'; +import path from 'path'; +import fs from 'fs'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import lexi from 'lexicographic-integer'; import { IdInternal } from '@matrixai/id'; +import { DB } from '@matrixai/db'; import * as nodesUtils from '@/nodes/utils'; +import * as keysUtils from '@/keys/utils'; +import * as utils from '@/utils'; +import * as testNodesUtils from './utils'; -describe('Nodes utils', () => { - test('basic distance calculation', async () => { - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 23, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - ]); - - const distance = nodesUtils.calculateDistance(nodeId1, nodeId2); - expect(distance).toEqual(316912758671486456376015716356n); +describe('nodes/utils', () => { + const logger = new Logger(`nodes/utils test`, LogLevel.WARN, [ + new StreamHandler(), + ]); + let dataDir: string; + let db: DB; + beforeEach(async () => { + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const dbKey = await keysUtils.generateKey(); + const dbPath = `${dataDir}/db`; + db = await DB.createDB({ + dbPath, + logger, + crypto: { + key: dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); }); - test('calculates correct first bucket (bucket 0)', async () => { - // "1" XOR "0" = 
distance of 1 - // Therefore, bucket 0 - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); - expect(bucketIndex).toBe(0); + afterEach(async () => { + await db.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); }); - test('calculates correct arbitrary bucket (bucket 63)', async () => { - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 255, 0, 0, 0, 0, 0, 0, 0, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); - expect(bucketIndex).toBe(63); + test('calculating bucket index from the same node ID', () => { + const nodeId1 = IdInternal.create([0]); + const nodeId2 = IdInternal.create([0]); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + expect(distance).toBe(0n); + expect(() => nodesUtils.bucketIndex(nodeId1, nodeId2)).toThrow(RangeError); + }); + test('calculating bucket index 0', () => { + // Distance is calculated based on XOR operation + // 1 ^ 0 == 1 + // Distance of 1 is bucket 0 + const nodeId1 = IdInternal.create([1]); + const nodeId2 = IdInternal.create([0]); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); + expect(distance).toBe(1n); + expect(bucketIndex).toBe(0); + // Bucket index invariant: 2^i <= distance < 2^(i + 1) + expect(2 ** bucketIndex <= distance).toBe(true); + expect(distance < 2 ** (bucketIndex + 1)).toBe(true); }); - test('calculates correct last bucket (bucket 255)', async () => { + test('calculating bucket index 255', () => { const nodeId1 = IdInternal.create([ 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); @@ -51,7 +72,121 @@ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); expect(bucketIndex).toBe(255); + // Bucket index invariant: 2^i <= distance < 2^(i + 1) + expect(2 ** bucketIndex <= distance).toBe(true); + expect(distance < 2 ** (bucketIndex + 1)).toBe(true); + }); + test('calculating bucket index randomly', () => { + for (let i = 0; i < 1000; i++) { + const nodeId1 = testNodesUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); + if (nodeId1.equals(nodeId2)) { + continue; + } + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); + // Bucket index invariant: 2^i <= distance < 2^(i + 1) + expect(2 ** bucketIndex <= distance).toBe(true); + expect(distance < 2 ** (bucketIndex + 1)).toBe(true); + } + });
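A minimal sketch, not part of this patch, of why the `2^i <= distance < 2^(i + 1)` invariant asserted above must hold: the bucket index is simply the position of the XOR distance's highest set bit. `bucketIndexOf` is a hypothetical name for illustration, not the real `nodesUtils.bucketIndex`:

function bucketIndexOf(distance: bigint): number {
  if (distance <= 0n) throw new RangeError('Distance must be positive');
  // floor(log2(distance)), i.e. the index of the highest set bit
  return distance.toString(2).length - 1;
}
// bucketIndexOf(1n) === 0, bucketIndexOf(255n) === 7, bucketIndexOf(256n) === 8

+ test('parse NodeGraph buckets db key', async () => { + const bucketsDbPath = ['buckets']; + const data: Array<{ + bucketIndex: number; + bucketKey: string; + nodeId: NodeId; + key: Buffer; + }> = []; + for (let i = 0; i < 1000; i++) { + const bucketIndex = 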
Math.floor(Math.random() * (255 + 1)); + const bucketKey = nodesUtils.bucketKey(bucketIndex); + const nodeId = testNodesUtils.generateRandomNodeId(); + data.push({ + bucketIndex, + bucketKey, + nodeId, + key: Buffer.concat([Buffer.from(bucketKey), nodeId]), + }); + await db.put( + ['buckets', bucketKey, nodesUtils.bucketDbKey(nodeId)], + null, + ); + } + // LevelDB will store keys in lexicographic order + // Use the key property as a concatenated buffer of the bucket key and node ID + data.sort((a, b) => Buffer.compare(a.key, b.key)); + let i = 0; + + for await (const [key] of db.iterator({}, bucketsDbPath)) { + const { bucketIndex, bucketKey, nodeId } = nodesUtils.parseBucketsDbKey( + key as Array<Buffer>, + ); + expect(bucketIndex).toBe(data[i].bucketIndex); + expect(bucketKey).toBe(data[i].bucketKey); + expect(nodeId.equals(data[i].nodeId)).toBe(true); + i++; + } + }); + test('parse NodeGraph lastUpdated buckets db key', async () => { + const lastUpdatedDbPath = ['lastUpdated']; + const data: Array<{ + bucketIndex: number; + bucketKey: string; + lastUpdated: number; + nodeId: NodeId; + key: Buffer; + }> = []; + for (let i = 0; i < 1000; i++) { + const bucketIndex = Math.floor(Math.random() * (255 + 1)); + const bucketKey = lexi.pack(bucketIndex, 'hex'); + const lastUpdated = utils.getUnixtime(); + const nodeId = testNodesUtils.generateRandomNodeId(); + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + const lastUpdatedKey = nodesUtils.lastUpdatedKey(lastUpdated); + data.push({ + bucketIndex, + bucketKey, + lastUpdated, + nodeId, + key: Buffer.concat([Buffer.from(bucketKey), lastUpdatedKey, nodeIdKey]), + }); + await db.put(['lastUpdated', bucketKey, lastUpdatedKey, nodeIdKey], null); + } + // LevelDB will store keys in lexicographic order + // Use the key property as a concatenated buffer of + // the bucket key and last updated and node ID + data.sort((a, b) => Buffer.compare(a.key, b.key)); + let i = 0; + for await (const [key] of db.iterator({}, lastUpdatedDbPath)) { + const { bucketIndex, bucketKey, lastUpdated, nodeId } = + nodesUtils.parseLastUpdatedBucketsDbKey(key as Array<Buffer>); + expect(bucketIndex).toBe(data[i].bucketIndex); + expect(bucketKey).toBe(data[i].bucketKey); + expect(lastUpdated).toBe(data[i].lastUpdated); + expect(nodeId.equals(data[i].nodeId)).toBe(true); + i++; + } + }); + test('should generate random distance for a bucket', async () => { + // Const baseNodeId = testNodesUtils.generateRandomNodeId(); + const zeroNodeId = IdInternal.fromBuffer(Buffer.alloc(32, 0)); + for (let i = 0; i < 255; i++) { + const randomDistance = nodesUtils.generateRandomDistanceForBucket(i); + expect(nodesUtils.bucketIndex(zeroNodeId, randomDistance)).toEqual(i); + } + }); + test('should generate random NodeId for a bucket', async () => { + const baseNodeId = testNodesUtils.generateRandomNodeId(); + for (let i = 0; i < 255; i++) { + const randomDistance = nodesUtils.generateRandomNodeIdForBucket( + baseNodeId, + i, + ); + expect(nodesUtils.bucketIndex(baseNodeId, randomDistance)).toEqual(i); + } }); });
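A small sketch, not part of this patch, of the ordering property the two parse tests above rely on: `lexi.pack` encodes integers so that lexicographic comparison of the packed form agrees with numeric comparison, which is what keeps the concatenated `(bucketKey, lastUpdatedKey, nodeIdKey)` buffers iterating back in `(bucketIndex, lastUpdated, nodeId)` order:

import lexi from 'lexicographic-integer';
// Plain decimal strings do not sort numerically: '10' < '2' lexicographically
console.log('10' < '2'); // true
// lexi-packed hex strings preserve numeric order under string comparison
console.log(lexi.pack(2, 'hex') < lexi.pack(10, 'hex')); // true
console.log(lexi.pack(10, 'hex') < lexi.pack(300, 'hex')); // true

diff --git a/tests/nodes/utils.ts b/tests/nodes/utils.ts index fca9ad53bf..e6c603e14f 100644 --- a/tests/nodes/utils.ts +++ b/tests/nodes/utils.ts @@ -1,9 +1,27 @@ import type { NodeId, NodeAddress } from '@/nodes/types'; - import type PolykeyAgent from '@/PolykeyAgent'; import { IdInternal } from '@matrixai/id'; +import * as keysUtils from '@/keys/utils'; import { bigInt2Bytes } from '@/utils'; +/** + * Generate random `NodeId` + * If `readable` is `true`, then it will generate a `NodeId` where + 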
* its binary string form will only contain hex characters + * However the `NodeId` will not be uniformly random as it will not cover + * the full space of possible node IDs + * Prefer to keep `readable` `false` if possible to ensure tests are robust + */ +function generateRandomNodeId(readable: boolean = false): NodeId { + if (readable) { + const random = keysUtils.getRandomBytesSync(16).toString('hex'); + return IdInternal.fromString(random); + } else { + const random = keysUtils.getRandomBytesSync(32); + return IdInternal.fromBuffer(random); + } +} + /** * Generate a deterministic NodeId for a specific bucket given an existing NodeId * This requires solving the bucket index (`i`) and distance equation: @@ -61,4 +79,4 @@ async function nodesConnect(localNode: PolykeyAgent, remoteNode: PolykeyAgent) { } as NodeAddress); } -export { generateNodeIdForBucket, nodesConnect }; +export { generateRandomNodeId, generateNodeIdForBucket, nodesConnect }; diff --git a/tests/notifications/NotificationsManager.test.ts b/tests/notifications/NotificationsManager.test.ts index 37be01f569..e2095f1916 100644 --- a/tests/notifications/NotificationsManager.test.ts +++ b/tests/notifications/NotificationsManager.test.ts @@ -8,6 +8,7 @@ import path from 'path'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import ACL from '@/acl/ACL'; import Sigchain from '@/sigchain/Sigchain'; @@ -50,6 +51,7 @@ describe('NotificationsManager', () => { let acl: ACL; let db: DB; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let keyManager: KeyManager; @@ -112,21 +114,26 @@ describe('NotificationsManager', () => { keyManager, logger, }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ nodeGraph, keyManager, proxy, + queue, logger, }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, sigchain, nodeConnectionManager, nodeGraph, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // Set up node for receiving notifications receiver = await PolykeyAgent.createPolykeyAgent({ password: password, @@ -146,7 +153,9 @@ describe('NotificationsManager', () => { }, global.defaultTimeout); afterAll(async () => { await receiver.stop(); + await queue.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/notifications/utils.test.ts b/tests/notifications/utils.test.ts index 5a3b8a617b..fa6373e380 100644 --- a/tests/notifications/utils.test.ts +++ b/tests/notifications/utils.test.ts @@ -2,16 +2,15 @@ import type { Notification, NotificationData } from '@/notifications/types'; import type { VaultActions, VaultName } from '@/vaults/types'; import { createPublicKey } from 'crypto'; import { EmbeddedJWK, jwtVerify, exportJWK } from 'jose'; - import * as keysUtils from '@/keys/utils'; import * as notificationsUtils from '@/notifications/utils'; import * as notificationsErrors from '@/notifications/errors'; import * as vaultsUtils from '@/vaults/utils'; import * as nodesUtils from '@/nodes/utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Notifications utils', () => { - const nodeId = 
testUtils.generateRandomNodeId(); + const nodeId = testNodesUtils.generateRandomNodeId(); const nodeIdEncoded = nodesUtils.encodeNodeId(nodeId); const vaultId = vaultsUtils.generateVaultId(); const vaultIdEncoded = vaultsUtils.encodeVaultId(vaultId); @@ -206,7 +205,7 @@ describe('Notifications utils', () => { }); test('validates correct notifications', async () => { - const nodeIdOther = testUtils.generateRandomNodeId(); + const nodeIdOther = testNodesUtils.generateRandomNodeId(); const nodeIdOtherEncoded = nodesUtils.encodeNodeId(nodeIdOther); const generalNotification: Notification = { data: { diff --git a/tests/sigchain/Sigchain.test.ts b/tests/sigchain/Sigchain.test.ts index e53a4c67fc..a3bbfb1937 100644 --- a/tests/sigchain/Sigchain.test.ts +++ b/tests/sigchain/Sigchain.test.ts @@ -13,6 +13,7 @@ import * as sigchainErrors from '@/sigchain/errors'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Sigchain', () => { const logger = new Logger('Sigchain Test', LogLevel.WARN, [ @@ -20,25 +21,25 @@ describe('Sigchain', () => { ]); const password = 'password'; const srcNodeIdEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeId2Encoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeId3Encoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdAEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdBEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdCEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdDEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); let mockedGenerateKeyPair: jest.SpyInstance; @@ -236,7 +237,7 @@ describe('Sigchain', () => { expect(verified2).toBe(true); // Check the hash of the previous claim is correct - const verifiedHash = await claimsUtils.verifyHashOfClaim( + const verifiedHash = claimsUtils.verifyHashOfClaim( claim1, decoded2.payload.hPrev as string, ); @@ -344,7 +345,9 @@ describe('Sigchain', () => { // Add 10 claims for (let i = 1; i <= 5; i++) { - const node2 = nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()); + const node2 = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); node2s.push(node2); const nodeLink: ClaimData = { type: 'node', @@ -393,7 +396,9 @@ describe('Sigchain', () => { for (let i = 1; i <= 30; i++) { // If even, add a node link if (i % 2 === 0) { - const node2 = nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()); + const node2 = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); nodes[i] = node2; const nodeLink: ClaimData = { type: 'node', diff --git a/tests/status/Status.test.ts b/tests/status/Status.test.ts index 311f89a11c..0b0744002d 100644 --- a/tests/status/Status.test.ts +++ b/tests/status/Status.test.ts @@ -6,15 +6,15 @@ import path from 'path'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import config from '@/config'; import { Status, errors as statusErrors } from '@/status'; -import * as testUtils from '../utils'; +import * as 
testNodesUtils from '../nodes/utils'; describe('Status', () => { const logger = new Logger(`${Status.name} Test`, LogLevel.WARN, [ new StreamHandler(), ]); - const nodeId1 = testUtils.generateRandomNodeId(); - const nodeId2 = testUtils.generateRandomNodeId(); - const nodeId3 = testUtils.generateRandomNodeId(); + const nodeId1 = testNodesUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); + const nodeId3 = testNodesUtils.generateRandomNodeId(); let dataDir: string; beforeEach(async () => { dataDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'status-test-')); diff --git a/tests/utils.ts b/tests/utils.ts index 311743565c..3ac9a74996 100644 --- a/tests/utils.ts +++ b/tests/utils.ts @@ -1,6 +1,6 @@ -import type { StatusLive } from '@/status/types'; -import type { NodeId } from '@/nodes/types'; import type { Host } from '@/network/types'; +import type { NodeId } from '@/nodes/types'; +import type { StatusLive } from '@/status/types'; import path from 'path'; import fs from 'fs'; import lock from 'fd-lock'; @@ -71,24 +71,25 @@ async function setupGlobalKeypair() { } } -/** - * Setup the global agent - * Use this in beforeAll, and use the closeGlobalAgent in afterAll - * This is expected to be executed by multiple worker processes - * Uses a references directory as a reference count - * Uses fd-lock to serialise access - * This means all test modules using this will be serialised - * Any beforeAll must use globalThis.maxTimeout - * Tips for usage: - * * Do not restart this global agent - * * Ensure client-side side-effects are removed at the end of each test - * * Ensure server-side side-effects are removed at the end of each test - */ +// FIXME: what is going on here? is this getting removed? +// /** +// * Setup the global agent +// * Use this in beforeAll, and use the closeGlobalAgent in afterAll +// * This is expected to be executed by multiple worker processes +// * Uses a references directory as a reference count +// * Uses fd-lock to serialise access +// * This means all test modules using this will be serialised +// * Any beforeAll must use globalThis.maxTimeout +// * Tips for usage: +// * * Do not restart this global agent +// * * Ensure client-side side-effects are removed at the end of each test +// * * Ensure server-side side-effects are removed at the end of each test +// */ async function setupGlobalAgent( logger: Logger = new Logger(setupGlobalAgent.name, LogLevel.WARN, [ new StreamHandler(), ]), -) { +): Promise { const globalAgentPassword = 'password'; const globalAgentDir = path.join(globalThis.dataDir, 'agent'); // The references directory will act like our reference count diff --git a/tests/vaults/VaultInternal.test.ts b/tests/vaults/VaultInternal.test.ts index 86c283baf4..d95ae1c2ca 100644 --- a/tests/vaults/VaultInternal.test.ts +++ b/tests/vaults/VaultInternal.test.ts @@ -15,7 +15,7 @@ import * as vaultsErrors from '@/vaults/errors'; import { sleep } from '@/utils'; import * as keysUtils from '@/keys/utils'; import * as vaultsUtils from '@/vaults/utils'; -import * as testsUtils from '../utils'; +import * as nodeTestUtils from '../nodes/utils'; jest.mock('@/keys/utils', () => ({ ...jest.requireActual('@/keys/utils'), @@ -39,7 +39,7 @@ describe('VaultInternal', () => { const fakeKeyManager = { getNodeId: () => { - return testsUtils.generateRandomNodeId(); + return nodeTestUtils.generateRandomNodeId(); }, } as KeyManager; const secret1 = { name: 'secret-1', content: 'secret-content-1' }; @@ -668,7 +668,7 @@ describe('VaultInternal', () 
=> { await efs.writeFile(secret2.name, secret2.content); }); const commit = (await vault.log())[0].commitId; - const gen = await vault.readG(async function* (efs): AsyncGenerator { + const gen = vault.readG(async function* (efs): AsyncGenerator { yield expect((await efs.readFile(secret1.name)).toString()).toEqual( secret1.content, ); @@ -678,60 +678,64 @@ describe('VaultInternal', () => { expect(log).toHaveLength(2); expect(log[0].commitId).toStrictEqual(commit); }); - test('garbage collection', async () => { - await vault.writeF(async (efs) => { - await efs.writeFile(secret1.name, secret1.content); - }); - await vault.writeF(async (efs) => { - await efs.writeFile(secret2.name, secret2.content); - }); - await vault.writeF(async (efs) => { - await efs.writeFile(secret3.name, secret3.content); - }); - // @ts-ignore: kidnap efs - const vaultEfs = vault.efs; - // @ts-ignore: kidnap efs - const vaultEfsData = vault.efsVault; - const quickCommit = async (ref: string, secret: string) => { - await vaultEfsData.writeFile(secret, secret); - await git.add({ - fs: vaultEfs, - dir: vault.vaultDataDir, - gitdir: vault.vaultGitDir, - filepath: secret, + test( + 'garbage collection', + async () => { + await vault.writeF(async (efs) => { + await efs.writeFile(secret1.name, secret1.content); }); - return await git.commit({ - fs: vaultEfs, - dir: vault.vaultDataDir, - gitdir: vault.vaultGitDir, - author: { - name: 'test', - email: 'test', - }, - message: 'test', - ref: ref, + await vault.writeF(async (efs) => { + await efs.writeFile(secret2.name, secret2.content); }); - }; - const log = await vault.log(); - let num = 5; - const refs: string[] = []; - for (const logElement of log) { - refs.push(await quickCommit(logElement.commitId, `secret-${num++}`)); - } - // @ts-ignore - await vault.garbageCollectGitObjects(); - - for (const ref of refs) { - await expect( - git.checkout({ + await vault.writeF(async (efs) => { + await efs.writeFile(secret3.name, secret3.content); + }); + // @ts-ignore: kidnap efs + const vaultEfs = vault.efs; + // @ts-ignore: kidnap efs + const vaultEfsData = vault.efsVault; + const quickCommit = async (ref: string, secret: string) => { + await vaultEfsData.writeFile(secret, secret); + await git.add({ fs: vaultEfs, dir: vault.vaultDataDir, gitdir: vault.vaultGitDir, - ref, - }), - ).rejects.toThrow(git.Errors.CommitNotFetchedError); - } - }); + filepath: secret, + }); + return await git.commit({ + fs: vaultEfs, + dir: vault.vaultDataDir, + gitdir: vault.vaultGitDir, + author: { + name: 'test', + email: 'test', + }, + message: 'test', + ref: ref, + }); + }; + const log = await vault.log(); + let num = 5; + const refs: string[] = []; + for (const logElement of log) { + refs.push(await quickCommit(logElement.commitId, `secret-${num++}`)); + } + // @ts-ignore + await vault.garbageCollectGitObjects(); + + for (const ref of refs) { + await expect( + git.checkout({ + fs: vaultEfs, + dir: vault.vaultDataDir, + gitdir: vault.vaultGitDir, + ref, + }), + ).rejects.toThrow(git.Errors.CommitNotFetchedError); + } + }, + global.defaultTimeout * 2, + ); // Locking tests const waitDelay = 200; test('writeF respects read and write locking', async () => { diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index e4ed618aa8..e57495cb93 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -7,6 +7,8 @@ import type { } from '@/vaults/types'; import type NotificationsManager from '@/notifications/NotificationsManager'; import type { Host, Port, 
TLSConfig } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; +import type Queue from '@/nodes/Queue'; import fs from 'fs'; import os from 'os'; import path from 'path'; @@ -30,7 +32,7 @@ import * as vaultsUtils from '@/vaults/utils'; import * as keysUtils from '@/keys/utils'; import { sleep } from '@/utils'; import VaultInternal from '@/vaults/VaultInternal'; -import * as testsUtils from '../utils'; +import * as nodeTestUtils from '../nodes/utils'; import { expectRemoteError } from '../utils'; const mockedGenerateDeterministicKeyPair = jest @@ -65,7 +67,7 @@ describe('VaultManager', () => { let db: DB; // We only ever use this to get NodeId, No need to create a whole one - const nodeId = testsUtils.generateRandomNodeId(); + const nodeId = nodeTestUtils.generateRandomNodeId(); const dummyKeyManager = { getNodeId: () => nodeId, } as KeyManager; @@ -183,7 +185,7 @@ describe('VaultManager', () => { await vaultManager?.destroy(); } }, - global.defaultTimeout * 2, + global.defaultTimeout * 4, ); test('can rename a vault', async () => { const vaultManager = await VaultManager.createVaultManager({ @@ -276,49 +278,53 @@ describe('VaultManager', () => { await vaultManager?.destroy(); } }); - test('able to read and load existing metadata', async () => { - const vaultManager = await VaultManager.createVaultManager({ - vaultsPath, - keyManager: dummyKeyManager, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, - db, - logger: logger.getChild(VaultManager.name), - }); - try { - const vaultNames = [ - 'Vault1', - 'Vault2', - 'Vault3', - 'Vault4', - 'Vault5', - 'Vault6', - 'Vault7', - 'Vault8', - 'Vault9', - 'Vault10', - ]; - for (const vaultName of vaultNames) { - await vaultManager.createVault(vaultName as VaultName); - } - const vaults = await vaultManager.listVaults(); - const vaultId = vaults.get('Vault1' as VaultName) as VaultId; - expect(vaultId).not.toBeUndefined(); - await vaultManager.stop(); - await vaultManager.start(); - const restartedVaultNames: Array = []; - const vaultList = await vaultManager.listVaults(); - vaultList.forEach((_, vaultName) => { - restartedVaultNames.push(vaultName); + test( + 'able to read and load existing metadata', + async () => { + const vaultManager = await VaultManager.createVaultManager({ + vaultsPath, + keyManager: dummyKeyManager, + gestaltGraph: {} as GestaltGraph, + nodeConnectionManager: {} as NodeConnectionManager, + acl: {} as ACL, + notificationsManager: {} as NotificationsManager, + db, + logger: logger.getChild(VaultManager.name), }); - expect(restartedVaultNames.sort()).toEqual(vaultNames.sort()); - } finally { - await vaultManager?.stop(); - await vaultManager?.destroy(); - } - }); + try { + const vaultNames = [ + 'Vault1', + 'Vault2', + 'Vault3', + 'Vault4', + 'Vault5', + 'Vault6', + 'Vault7', + 'Vault8', + 'Vault9', + 'Vault10', + ]; + for (const vaultName of vaultNames) { + await vaultManager.createVault(vaultName as VaultName); + } + const vaults = await vaultManager.listVaults(); + const vaultId = vaults.get('Vault1' as VaultName) as VaultId; + expect(vaultId).not.toBeUndefined(); + await vaultManager.stop(); + await vaultManager.start(); + const restartedVaultNames: Array = []; + const vaultList = await vaultManager.listVaults(); + vaultList.forEach((_, vaultName) => { + restartedVaultNames.push(vaultName); + }); + expect(restartedVaultNames.sort()).toEqual(vaultNames.sort()); + } finally { + await 
vaultManager?.stop(); + await vaultManager?.destroy(); + } + }, + global.defaultTimeout * 2, + ); test('cannot concurrently create vaults with the same name', async () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, @@ -493,7 +499,7 @@ describe('VaultManager', () => { logger: logger.getChild('Remote Keynode 1'), nodePath: path.join(allDataDir, 'remoteKeynode1'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, }, }); remoteKeynode1Id = remoteKeynode1.keyManager.getNodeId(); @@ -503,7 +509,7 @@ describe('VaultManager', () => { logger: logger.getChild('Remote Keynode 2'), nodePath: path.join(allDataDir, 'remoteKeynode2'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, }, }); remoteKeynode2Id = remoteKeynode2.keyManager.getNodeId(); @@ -579,9 +585,12 @@ describe('VaultManager', () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, logger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ + nodeManager: { setNode: jest.fn() } as unknown as NodeManager, + }); await nodeGraph.setNode(remoteKeynode1Id, { host: remoteKeynode1.proxy.getProxyHost(), @@ -879,91 +888,95 @@ describe('VaultManager', () => { await vaultManager?.destroy(); } }); - test('can pull a cloned vault', async () => { - const vaultManager = await VaultManager.createVaultManager({ - vaultsPath, - keyManager: dummyKeyManager, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, - db, - logger: logger.getChild(VaultManager.name), - }); - try { - // Creating some state at the remote - await remoteKeynode1.vaultManager.withVaults( - [remoteVaultId], - async (vault) => { - await vault.writeF(async (efs) => { - await efs.writeFile('secret-1', 'secret1'); - }); - }, - ); - - // Setting permissions - await remoteKeynode1.gestaltGraph.setNode({ - id: localNodeIdEncoded, - chain: {}, + test( + 'can pull a cloned vault', + async () => { + const vaultManager = await VaultManager.createVaultManager({ + vaultsPath, + keyManager: dummyKeyManager, + gestaltGraph: {} as GestaltGraph, + nodeConnectionManager, + acl: {} as ACL, + notificationsManager: {} as NotificationsManager, + db, + logger: logger.getChild(VaultManager.name), }); - await remoteKeynode1.gestaltGraph.setGestaltActionByNode( - localNodeId, - 'scan', - ); - await remoteKeynode1.acl.setVaultAction( - remoteVaultId, - localNodeId, - 'clone', - ); - await remoteKeynode1.acl.setVaultAction( - remoteVaultId, - localNodeId, - 'pull', - ); + try { + // Creating some state at the remote + await remoteKeynode1.vaultManager.withVaults( + [remoteVaultId], + async (vault) => { + await vault.writeF(async (efs) => { + await efs.writeFile('secret-1', 'secret1'); + }); + }, + ); - await vaultManager.cloneVault(remoteKeynode1Id, vaultName); - const vaultId = await vaultManager.getVaultId(vaultName); - if (vaultId === undefined) fail('VaultId is not found.'); - await vaultManager.withVaults([vaultId], async (vaultClone) => { - return await vaultClone.readF(async (efs) => { - const file = await efs.readFile('secret-1', { encoding: 'utf8' }); - const secretsList = await efs.readdir('.'); - expect(file).toBe('secret1'); - expect(secretsList).toContain('secret-1'); - expect(secretsList).not.toContain('secret-2'); + // Setting permissions + await remoteKeynode1.gestaltGraph.setNode({ + id: localNodeIdEncoded, + chain: {}, }); - }); + await remoteKeynode1.gestaltGraph.setGestaltActionByNode( + localNodeId, + 
'scan', + ); + await remoteKeynode1.acl.setVaultAction( + remoteVaultId, + localNodeId, + 'clone', + ); + await remoteKeynode1.acl.setVaultAction( + remoteVaultId, + localNodeId, + 'pull', + ); - // Creating new history - await remoteKeynode1.vaultManager.withVaults( - [remoteVaultId], - async (vault) => { - await vault.writeF(async (efs) => { - await efs.writeFile('secret-2', 'secret2'); + await vaultManager.cloneVault(remoteKeynode1Id, vaultName); + const vaultId = await vaultManager.getVaultId(vaultName); + if (vaultId === undefined) fail('VaultId is not found.'); + await vaultManager.withVaults([vaultId], async (vaultClone) => { + return await vaultClone.readF(async (efs) => { + const file = await efs.readFile('secret-1', { encoding: 'utf8' }); + const secretsList = await efs.readdir('.'); + expect(file).toBe('secret1'); + expect(secretsList).toContain('secret-1'); + expect(secretsList).not.toContain('secret-2'); }); - }, - ); + }); - // Pulling vault - await vaultManager.pullVault({ - vaultId: vaultId, - }); + // Creating new history + await remoteKeynode1.vaultManager.withVaults( + [remoteVaultId], + async (vault) => { + await vault.writeF(async (efs) => { + await efs.writeFile('secret-2', 'secret2'); + }); + }, + ); - // Should have new data - await vaultManager.withVaults([vaultId], async (vaultClone) => { - return await vaultClone.readF(async (efs) => { - const file = await efs.readFile('secret-1', { encoding: 'utf8' }); - const secretsList = await efs.readdir('.'); - expect(file).toBe('secret1'); - expect(secretsList).toContain('secret-1'); - expect(secretsList).toContain('secret-2'); + // Pulling vault + await vaultManager.pullVault({ + vaultId: vaultId, }); - }); - } finally { - await vaultManager?.stop(); - await vaultManager?.destroy(); - } - }); + + // Should have new data + await vaultManager.withVaults([vaultId], async (vaultClone) => { + return await vaultClone.readF(async (efs) => { + const file = await efs.readFile('secret-1', { encoding: 'utf8' }); + const secretsList = await efs.readdir('.'); + expect(file).toBe('secret1'); + expect(secretsList).toContain('secret-1'); + expect(secretsList).toContain('secret-2'); + }); + }); + } finally { + await vaultManager?.stop(); + await vaultManager?.destroy(); + } + }, + global.defaultTimeout * 2, + ); test( 'manage pulling from different remotes', async () => { @@ -1100,78 +1113,82 @@ describe('VaultManager', () => { }, global.failedConnectionTimeout, ); - test('able to recover metadata after complex operations', async () => { - const vaultManager = await VaultManager.createVaultManager({ - vaultsPath, - keyManager: dummyKeyManager, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, - db, - logger: logger.getChild(VaultManager.name), - }); - try { - const vaultNames = ['Vault1', 'Vault2', 'Vault3', 'Vault4', 'Vault5']; - const alteredVaultNames = [ - 'Vault1', - 'Vault2', - 'Vault3', - 'Vault6', - 'Vault10', - ]; - for (const vaultName of vaultNames) { - await vaultManager.createVault(vaultName as VaultName); - } - const v5 = await vaultManager.getVaultId('Vault5' as VaultName); - expect(v5).not.toBeUndefined(); - await vaultManager.destroyVault(v5!); - const v4 = await vaultManager.getVaultId('Vault4' as VaultName); - expect(v4).toBeTruthy(); - await vaultManager.renameVault(v4!, 'Vault10' as VaultName); - const v6 = await vaultManager.createVault('Vault6' as VaultName); - - await vaultManager.withVaults([v6], async (vault6) => { - await 
vault6.writeF(async (efs) => { - await efs.writeFile('reloaded', 'reload'); - }); + test( + 'able to recover metadata after complex operations', + async () => { + const vaultManager = await VaultManager.createVaultManager({ + vaultsPath, + keyManager: dummyKeyManager, + gestaltGraph: {} as GestaltGraph, + nodeConnectionManager, + acl: {} as ACL, + notificationsManager: {} as NotificationsManager, + db, + logger: logger.getChild(VaultManager.name), }); - - const vn: Array<string> = []; - (await vaultManager.listVaults()).forEach((_, vaultName) => - vn.push(vaultName), - ); - expect(vn.sort()).toEqual(alteredVaultNames.sort()); - await vaultManager.stop(); - await vaultManager.start(); - await vaultManager.createVault('Vault7' as VaultName); - - const v10 = await vaultManager.getVaultId('Vault10' as VaultName); - expect(v10).not.toBeUndefined(); - alteredVaultNames.push('Vault7'); - expect((await vaultManager.listVaults()).size).toEqual( - alteredVaultNames.length, - ); - const vnAltered: Array<string> = []; - (await vaultManager.listVaults()).forEach((_, vaultName) => - vnAltered.push(vaultName), - ); - expect(vnAltered.sort()).toEqual(alteredVaultNames.sort()); - const file = await vaultManager.withVaults( - [v6], - async (reloadedVault) => { - return await reloadedVault.readF(async (efs) => { - return await efs.readFile('reloaded', { encoding: 'utf8' }); + try { + const vaultNames = ['Vault1', 'Vault2', 'Vault3', 'Vault4', 'Vault5']; + const alteredVaultNames = [ + 'Vault1', + 'Vault2', + 'Vault3', + 'Vault6', + 'Vault10', + ]; + for (const vaultName of vaultNames) { + await vaultManager.createVault(vaultName as VaultName); + } + const v5 = await vaultManager.getVaultId('Vault5' as VaultName); + expect(v5).not.toBeUndefined(); + await vaultManager.destroyVault(v5!); + const v4 = await vaultManager.getVaultId('Vault4' as VaultName); + expect(v4).toBeTruthy(); + await vaultManager.renameVault(v4!, 'Vault10' as VaultName); + const v6 = await vaultManager.createVault('Vault6' as VaultName); + + await vaultManager.withVaults([v6], async (vault6) => { + await vault6.writeF(async (efs) => { + await efs.writeFile('reloaded', 'reload'); }); - }, - ); + }); - expect(file).toBe('reload'); - } finally { - await vaultManager?.stop(); - await vaultManager?.destroy(); - } - }); + const vn: Array<string> = []; + (await vaultManager.listVaults()).forEach((_, vaultName) => + vn.push(vaultName), + ); + expect(vn.sort()).toEqual(alteredVaultNames.sort()); + await vaultManager.stop(); + await vaultManager.start(); + await vaultManager.createVault('Vault7' as VaultName); + + const v10 = await vaultManager.getVaultId('Vault10' as VaultName); + expect(v10).not.toBeUndefined(); + alteredVaultNames.push('Vault7'); + expect((await vaultManager.listVaults()).size).toEqual( + alteredVaultNames.length, + ); + const vnAltered: Array<string> = []; + (await vaultManager.listVaults()).forEach((_, vaultName) => + vnAltered.push(vaultName), + ); + expect(vnAltered.sort()).toEqual(alteredVaultNames.sort()); + const file = await vaultManager.withVaults( + [v6], + async (reloadedVault) => { + return await reloadedVault.readF(async (efs) => { + return await efs.readFile('reloaded', { encoding: 'utf8' }); + }); + }, + ); + + expect(file).toBe('reload'); + } finally { + await vaultManager?.stop(); + await vaultManager?.destroy(); + } + }, + global.defaultTimeout * 2, + ); test('throw when trying to commit to a cloned vault', async () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, @@ -1394,8 +1411,8 @@ describe('VaultManager', ()
=> { }); try { // Setting up state - const nodeId1 = testsUtils.generateRandomNodeId(); - const nodeId2 = testsUtils.generateRandomNodeId(); + const nodeId1 = nodeTestUtils.generateRandomNodeId(); + const nodeId2 = nodeTestUtils.generateRandomNodeId(); await gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(nodeId1), chain: {}, @@ -1454,7 +1471,7 @@ describe('VaultManager', () => { password: 'password', nodePath: path.join(dataDir, 'remoteNode'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, }, logger, }); @@ -1494,9 +1511,12 @@ describe('VaultManager', () => { logger, nodeGraph, proxy, + queue: {} as Queue, connConnectTime: 1000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ + nodeManager: { setNode: jest.fn() } as unknown as NodeManager, + }); const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyManager, diff --git a/tests/vaults/VaultOps.test.ts b/tests/vaults/VaultOps.test.ts index 81e061cd3f..2152a567d7 100644 --- a/tests/vaults/VaultOps.test.ts +++ b/tests/vaults/VaultOps.test.ts @@ -14,6 +14,7 @@ import * as vaultOps from '@/vaults/VaultOps'; import * as vaultsUtils from '@/vaults/utils'; import * as keysUtils from '@/keys/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('VaultOps', () => { const logger = new Logger('VaultOps', LogLevel.WARN, [new StreamHandler()]); @@ -27,7 +28,7 @@ describe('VaultOps', () => { let vaultsDbPath: LevelPath; const dummyKeyManager = { getNodeId: () => { - return testUtils.generateRandomNodeId(); + return testNodesUtils.generateRandomNodeId(); }, } as KeyManager; @@ -354,7 +355,7 @@ describe('VaultOps', () => { expect( (await vaultOps.getSecret(vault, '.hidingSecret')).toString(), ).toStrictEqual('change_contents'); - await expect( + expect( ( await vaultOps.getSecret(vault, '.hidingDir/.hiddenInSecret') ).toString(),
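A note on the recurring `await` removals in the test diffs above: the newly enabled `@typescript-eslint/await-thenable` rule rejects awaiting values that are not thenables. `vault.readG(...)` returns an async generator rather than a Promise, so awaiting the call was a no-op and is now a lint error. A minimal sketch of the distinction, using a hypothetical readG-style method (not the real Vault interface):

// Sketch only: a readG-like API that returns an AsyncGenerator synchronously.
// Awaiting the readG call itself would trip @typescript-eslint/await-thenable,
// because an AsyncGenerator is not a thenable.
class SketchVault {
  public readG<T>(g: () => AsyncGenerator<T>): AsyncGenerator<T> {
    return g(); // nothing asynchronous happens until the generator is driven
  }
}

async function demo(): Promise<void> {
  const vault = new SketchVault();
  const gen = vault.readG(async function* () {
    yield 'secret-content';
  });
  // The real promises live in the iteration steps, so await belongs here:
  for await (const value of gen) {
    console.log(value);
  }
}

void demo();

The same reasoning applies to the VaultOps change at the end of this diff: a synchronous `expect(...)` chain is a plain matcher object, not a Promise, so `await expect(...)` was reduced to `expect(...)`.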
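The other repeated edit is mechanical: slow tests are rewrapped so that Jest's optional third argument, a per-test timeout in milliseconds, can be passed, scaled from the project-wide `global.defaultTimeout`. A sketch of the shape (the 20 s value is an assumption standing in for the project's configured default):

// Jest's test(name, fn, timeout) signature: the third argument overrides the
// default timeout for this single test instead of raising it suite-wide.
const defaultTimeout = 20_000; // assumed stand-in for global.defaultTimeout

test(
  'slow vault operation',
  async () => {
    // long-running setup and assertions go here
  },
  defaultTimeout * 2, // networked vault tests get double the budget
);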
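Finally, the dependency changes explain the `{} as Queue` and `jest.fn()` stubs: `NodeConnectionManager` now takes a `Queue` at construction and a `NodeManager` at `start()`, and tests that never exercise those code paths can satisfy the types with empty casts. A pared-down sketch of that stubbing pattern (the shapes below are simplified stand-ins, not the real interfaces):

// Simplified stand-ins for the real classes; both types are assumptions.
type Queue = { push: (task: () => Promise<void>) => void };
type NodeManager = { setNode: (...args: Array<unknown>) => Promise<void> };

class SketchConnectionManager {
  protected queue: Queue;
  public constructor({ queue }: { queue: Queue }) {
    this.queue = queue;
  }
  public async start({ nodeManager }: { nodeManager: NodeManager }) {
    // Imitates a start() that consumes its NodeManager; a recording stub
    // is enough when the test never opens real connections.
    await nodeManager.setNode();
  }
}

test('start accepts a stubbed NodeManager', async () => {
  // The empty casts type-check only because the exercised paths never
  // touch the stubbed dependencies; this mirrors the pattern in the diff.
  const manager = new SketchConnectionManager({ queue: {} as Queue });
  await manager.start({
    nodeManager: { setNode: jest.fn() } as unknown as NodeManager,
  });
});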