diff --git a/.circleci/config.yml b/.circleci/config.yml index a45f7132e4c..14ee8b04356 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -224,7 +224,7 @@ jobs: test-contracts: docker: - - image: circleci/node:10.16.3 + - image: circleci/node:16 - image: trufflesuite/ganache-cli:latest command: ['--port=8555', '-a', '100', '-l', '8000000'] steps: @@ -265,7 +265,7 @@ jobs: # uses medium (2vcpu/4gb) by default resource_class: medium docker: - - image: circleci/node:14.17.3 + - image: circleci/node:16 - image: trufflesuite/ganache-cli:latest command: ['--port=8546', '-a', '50', '-l', '8000000'] steps: @@ -340,8 +340,9 @@ jobs: export redisPort=6379 export spOwnerWallet='yes' export isCIBuild=true - npm run test:unit - npm run test:ci + npm run test:coverage:ci + - coveralls/upload: + path_to_lcov: ./creator-node/coverage/lcov.info test-discovery-provider: docker: @@ -636,6 +637,9 @@ jobs: parameters: repo: type: string + logspout-tag: + type: string + default: "" steps: - checkout - setup_remote_docker: @@ -644,10 +648,11 @@ jobs: name: Get tag or latest command: | # order of precedence for IMAGE tag is: - # 1. CIRCLE_TAG if defined, - # 2. Branch name if 'hotfix' in branch name (branch name passed in via $CIRCLE_BRANCH) - # 3. 'latest', which is the default - echo "export IMAGE_TAG=`[ $CIRCLE_TAG ] && echo $(echo $CIRCLE_TAG | cut -d@ -f3) || [[ "$CIRCLE_BRANCH" =~ (hotfix) ]] && echo $CIRCLE_BRANCH || echo "latest" `" | tee -a $BASH_ENV + # 1. $(head -n1 logspout/Dockerfile) if << parameters.logspout-tag >> defined, + # 2. CIRCLE_TAG if defined, + # 3. Branch name if 'hotfix' in branch name (branch name passed in via $CIRCLE_BRANCH) + # 4. 'latest', which is the default + echo "export IMAGE_TAG=`[ << parameters.logspout-tag >> ] && echo $(head -n1 logspout/Dockerfile | cut -f 2 -d ':') || [ $CIRCLE_TAG ] && echo $(echo $CIRCLE_TAG | cut -d@ -f3) || [[ "$CIRCLE_BRANCH" =~ (hotfix) ]] && echo $CIRCLE_BRANCH || echo "latest" `" | tee -a $BASH_ENV - run: name: Docker login command: | @@ -656,7 +661,8 @@ jobs: name: Docker build << parameters.repo >> command: | cd << parameters.repo >> - docker build -t audius/<< parameters.repo >>:$IMAGE_TAG -t audius/<< parameters.repo>>:$(git rev-parse HEAD) --build-arg git_sha=$(git rev-parse HEAD) --build-arg audius_loggly_disable=$audius_loggly_disable --build-arg audius_loggly_token=$audius_loggly_token --build-arg audius_loggly_tags=$audius_loggly_tags --build-arg BUILD_NUM=$CIRCLE_BUILD_NUM . + audius_loggly_token_b64=$(echo ${audius_loggly_token} | base64) + docker build -t audius/<< parameters.repo >>:$IMAGE_TAG -t audius/<< parameters.repo>>:$(git rev-parse HEAD) --build-arg git_sha=$(git rev-parse HEAD) --build-arg audius_loggly_disable=$audius_loggly_disable --build-arg audius_loggly_token=$audius_loggly_token_b64 --build-arg audius_loggly_tags=$audius_loggly_tags --build-arg BUILD_NUM=$CIRCLE_BUILD_NUM . 
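The tag-resolution one-liner above is dense, so here is the same four-step precedence restated as a small JavaScript sketch (an editorial aside, not part of this diff; the function name and the example tag formats are illustrative assumptions):

```js
// Restatement of the IMAGE_TAG precedence implemented by the shell one-liner above.
function resolveImageTag({ logspoutTag, dockerfileFirstLine, circleTag, circleBranch }) {
  // 1. Tag from the logspout Dockerfile's first line, e.g. "FROM gliderlabs/logspout:v3.2.13"
  if (logspoutTag) return dockerfileFirstLine.split(':')[1] // mirrors `cut -f 2 -d ':'`
  // 2. Version segment of CIRCLE_TAG, e.g. "@audius/creator-node@0.3.59" -> "0.3.59"
  if (circleTag) return circleTag.split('@')[2] // mirrors `cut -d@ -f3`
  // 3. Branch name when it contains 'hotfix'
  if (/hotfix/.test(circleBranch)) return circleBranch
  // 4. Default
  return 'latest'
}
```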
docker image prune --filter label=prune=true --filter label=build=$CIRCLE_BUILD_NUM --force - run: name: Docker push << parameters.repo >> @@ -732,6 +738,15 @@ workflows: name: build-identity-service repo: identity-service + - hold-build-logspout: + type: approval + - docker-build-and-push: + name: build-logspout + repo: logspout + logspout-tag: "true" + requires: + - hold-build-logspout + - test-solana-programs: name: test-solana-programs - test-solana-programs-anchor: diff --git a/contracts/scripts/call-function.sh b/contracts/scripts/call-function.sh index 1fd76ab146f..b86be23c12d 100755 --- a/contracts/scripts/call-function.sh +++ b/contracts/scripts/call-function.sh @@ -5,6 +5,6 @@ set -e # Usage: call-function.sh contractName functionName -node_modules/.bin/truffle console << EOF +node_modules/.bin/truffle console <<EOF $1.deployed().then((instance) => instance.$2.call()).then((value) => console.log("Function returned: " + value)) EOF diff --git a/contracts/scripts/circleci-test.sh b/contracts/scripts/circleci-test.sh index 631f2f428bb..81d15ef0514 100755 --- a/contracts/scripts/circleci-test.sh +++ b/contracts/scripts/circleci-test.sh @@ -13,16 +13,12 @@ sh ./scripts/lint.sh printf '\nSTART Truffle tests:\n\n' -if [ $# -eq 0 ] - then - node_modules/.bin/truffle test --network test_local -elif [ $1 == '--verbose-rpc' ] && [ $# -eq 1 ] - then - node_modules/.bin/truffle test --verbose-rpc --network test_local -elif [ $1 == '--verbose-rpc' ] && [ $# -eq 2 ] - then - node_modules/.bin/truffle test --verbose-rpc $2 --network test_local +if [ $# -eq 0 ]; then + node_modules/.bin/truffle test --network test_local +elif [ $1 == '--verbose-rpc' ] && [ $# -eq 1 ]; then + node_modules/.bin/truffle test --verbose-rpc --network test_local +elif [ $1 == '--verbose-rpc' ] && [ $# -eq 2 ]; then + node_modules/.bin/truffle test --verbose-rpc $2 --network test_local else node_modules/.bin/truffle test $1 --network test_local fi - diff --git a/contracts/scripts/lint.sh b/contracts/scripts/lint.sh index 64255c8f517..35fb4e8f530 100755 --- a/contracts/scripts/lint.sh +++ b/contracts/scripts/lint.sh @@ -17,5 +17,3 @@ printf 'START Solium lint' npm run solidity-lint printf '\nEND Solium lint' printline - - diff --git a/contracts/scripts/truffle-test.sh b/contracts/scripts/truffle-test.sh index c37427b0a73..3470b19ef15 100755 --- a/contracts/scripts/truffle-test.sh +++ b/contracts/scripts/truffle-test.sh @@ -12,7 +12,7 @@ COMMENT # start ganache container, # kill old container instance if tear down did not complete -if docker ps | grep 'audius_ganache_cli_test' > /dev/null; then +if docker ps | grep 'audius_ganache_cli_test' >/dev/null; then # killing the container seems to be faster than restarting printf 'Remove old containers and build artifacts\n' docker rm -f audius_ganache_cli_test @@ -31,15 +31,12 @@ docker run --name audius_ganache_cli_test -d -p 8555:8545 trufflesuite/ganache-c ./scripts/lint.sh # run truffle tests -if [ $# -eq 0 ] - then - node_modules/.bin/truffle test --network=test_local -elif [ $1 == '--verbose-rpc' ] && [ $# -eq 1 ] - then - node_modules/.bin/truffle test --network=test_local --verbose-rpc -elif [ $1 == '--verbose-rpc' ] && [ $# -eq 2 ] - then - node_modules/.bin/truffle test --network=test_local --verbose-rpc $2 +if [ $# -eq 0 ]; then + node_modules/.bin/truffle test --network=test_local +elif [ $1 == '--verbose-rpc' ] && [ $# -eq 1 ]; then + node_modules/.bin/truffle test --network=test_local --verbose-rpc +elif [ $1 == '--verbose-rpc' ] && [ $# -eq 2 ]; then + node_modules/.bin/truffle test 
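For context on the `call-function.sh` hunk above: the heredoc body fed to `truffle console` is itself JavaScript. With hypothetical arguments (`Registry` for `$1`, `getContract` for `$2` — example names, not from this PR), the templated expression expands to:

```js
// What `./scripts/call-function.sh Registry getContract` would evaluate inside truffle console
Registry.deployed()
  .then((instance) => instance.getContract.call())
  .then((value) => console.log('Function returned: ' + value))
```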
--network=test_local --verbose-rpc $2 else node_modules/.bin/truffle test --network=test_local $1 fi diff --git a/creator-node/.version.json b/creator-node/.version.json index 86ce5792cc3..868ba84ccf6 100644 --- a/creator-node/.version.json +++ b/creator-node/.version.json @@ -1,4 +1,4 @@ { - "version": "0.3.58", + "version": "0.3.59", "service": "content-node" } \ No newline at end of file diff --git a/creator-node/default-config.json b/creator-node/default-config.json index b542b3acd5f..c08f750f21d 100644 --- a/creator-node/default-config.json +++ b/creator-node/default-config.json @@ -31,7 +31,7 @@ "ethWallets": "", "spOwnerWalletIndex": 1, "spOwnerWallet": "", - "discoveryProviderWhitelist": "http://dn1_web-server_1:5000", + "discoveryProviderWhitelist": "", "identityService": "http://audius-identity-service_identity-service_1:7000", "dataRegistryAddress": "", "dataProviderUrl": "http://docker.for.mac.localhost:8545", diff --git a/creator-node/package-lock.json b/creator-node/package-lock.json index e2f781a8d63..89fb6ce0825 100644 --- a/creator-node/package-lock.json +++ b/creator-node/package-lock.json @@ -5023,6 +5023,19 @@ "vary": "^1" } }, + "coveralls": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/coveralls/-/coveralls-3.1.1.tgz", + "integrity": "sha512-+dxnG2NHncSD1NrqbSM3dn/lE57O6Qf/koe9+I7c+wzkqRmEvcp0kgJdxKInzYzkICKkFMZsX3Vct3++tsF9ww==", + "dev": true, + "requires": { + "js-yaml": "^3.13.1", + "lcov-parse": "^1.0.0", + "log-driver": "^1.2.7", + "minimist": "^1.2.5", + "request": "^2.88.2" + } + }, "crc-32": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/crc-32/-/crc-32-1.2.2.tgz", @@ -9472,6 +9485,12 @@ "package-json": "^4.0.0" } }, + "lcov-parse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lcov-parse/-/lcov-parse-1.0.0.tgz", + "integrity": "sha1-6w1GtUER68VhrLTECO+TY73I9+A=", + "dev": true + }, "levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -9581,6 +9600,12 @@ "integrity": "sha1-WjUNoLERO4N+z//VgSy+WNbq4ZM=", "dev": true }, + "log-driver": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/log-driver/-/log-driver-1.2.7.tgz", + "integrity": "sha512-U7KCmLdqsGHBLeWqYlFA0V0Sl6P08EE1ZrmA9cxjUE0WVqT9qnyVDPz1kzpFEP0jdJuFnasWIfSd7fsaNXkpbg==", + "dev": true + }, "lolex": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/lolex/-/lolex-4.2.0.tgz", diff --git a/creator-node/package.json b/creator-node/package.json index 31779a29cbe..c463ade71ea 100644 --- a/creator-node/package.json +++ b/creator-node/package.json @@ -10,8 +10,8 @@ "test:local:teardown": "./scripts/run-tests.sh standalone_creator teardown", "test:teardown": "./scripts/run-tests.sh teardown", "test:unit": "./scripts/run-tests.sh unit_test", - "coverage": "nyc npm run test", - "report": "nyc report --reporter=html", + "test:coverage": "nyc --reporter=lcov --reporter=text npm run test", + "test:coverage:ci": "nyc --reporter=lcov npm run test:ci && nyc report --reporter=text-lcov | coveralls", "lint:fix": "eslint --fix --ext=js,ts src", "lint": "eslint --ext=js,ts src" }, @@ -75,6 +75,7 @@ "@typescript-eslint/parser": "^5.5.0", "chai": "4.3.6", "chai-as-promised": "^7.1.1", + "coveralls": "3.1.1", "eslint": "^7.32.0", "eslint-config-standard": "^16.0.3", "eslint-plugin-import": "^2.25.3", @@ -84,7 +85,7 @@ "mocha": "^5.2.0", "nock": "^13.1.0", "nodemon": "^1.19.4", - "nyc": "^15.0.0", + "nyc": "15.1.0", "prettier": "^2.5.1", "proxyquire": "^2.1.3", "sequelize-cli": "^5.3.0", diff --git 
a/creator-node/scripts/start.sh b/creator-node/scripts/start.sh index 7f480c2573a..e8a3185c7dd 100644 --- a/creator-node/scripts/start.sh +++ b/creator-node/scripts/start.sh @@ -7,13 +7,18 @@ if [[ "$WAIT_HOSTS" != "" ]]; then /usr/bin/wait fi -if [[ -z "$logglyDisable" ]]; then - if [[ -n "$logglyToken" ]]; then - logglyTags=$(echo $logglyTags | python3 -c "print(' '.join(f'tag=\\\\\"{i}\\\\\"' for i in input().split(',')))") - mkdir -p /var/spool/rsyslog - mkdir -p /etc/rsyslog.d - sed -i '1s|^|$MaxMessageSize 64k\n|' /etc/rsyslog.conf - cat >/etc/rsyslog.d/22-loggly.conf <<EOF + cat >/etc/rsyslog.d/22-loggly.conf <<EOF next() } catch (error) { - console.error('HandleResponse', error) + genericLogger.error('HandleResponse', error) next(error) } } diff --git a/creator-node/src/blacklistManager.js b/creator-node/src/blacklistManager.js index 22bfbafd48f..4044442b74e 100644 --- a/creator-node/src/blacklistManager.js +++ b/creator-node/src/blacklistManager.js @@ -719,12 +719,12 @@ class BlacklistManager { } }) stream.on('end', function () { - console.log( + logger.info( `Done deleting ${REDIS_MAP_BLACKLIST_SEGMENTCID_TO_TRACKID_KEY} entries` ) }) stream.on('error', function (e) { - console.error( + logger.error( `Could not delete ${REDIS_MAP_BLACKLIST_SEGMENTCID_TO_TRACKID_KEY} entries: ${e.toString()}` ) }) diff --git a/creator-node/src/redis.js b/creator-node/src/redis.js index 752c47323be..516bf1294a9 100644 --- a/creator-node/src/redis.js +++ b/creator-node/src/redis.js @@ -1,4 +1,5 @@ const config = require('./config.js') +const { logger } = require('./logging') const Redis = require('ioredis') const redisClient = new Redis(config.get('redisPort'), config.get('redisHost')) @@ -6,24 +7,24 @@ const redisClient = new Redis(config.get('redisPort'), config.get('redisHost')) const EXPIRATION = 60 * 60 * 2 // 2 hours in seconds class RedisLock { static async setLock(key, expiration = EXPIRATION) { - console.log(`SETTING LOCK ${key}`) + logger.info(`SETTING LOCK ${key}`) // set allows you to set an optional expire param return redisClient.set(key, true, 'EX', expiration) } static async getLock(key) { - console.log(`GETTING LOCK ${key}`) + logger.info(`GETTING LOCK ${key}`) return redisClient.get(key) } static async acquireLock(key, expiration = EXPIRATION) { - console.log(`SETTING LOCK IF NOT EXISTS ${key}`) + logger.info(`SETTING LOCK IF NOT EXISTS ${key}`) const response = await redisClient.set(key, true, 'NX', 'EX', expiration) return !!response } static async removeLock(key) { - console.log(`DELETING LOCK ${key}`) + logger.info(`DELETING LOCK ${key}`) return redisClient.del(key) } } diff --git a/creator-node/src/reqLimiter.js b/creator-node/src/reqLimiter.js index 4ffaddc0847..78c8383d7e3 100644 --- a/creator-node/src/reqLimiter.js +++ b/creator-node/src/reqLimiter.js @@ -1,6 +1,7 @@ const express = require('express') const rateLimit = require('express-rate-limit') const config = require('./config.js') +const { logger } = require('./logging') const RedisStore = require('rate-limit-redis') const client = require('./redis.js') const { verifyRequesterIsValidSP } = require('./apiSigning.js') @@ -9,7 +10,7 @@ let endpointRateLimits = {} try { endpointRateLimits = JSON.parse(config.get('endpointRateLimits')) } catch (e) { - console.error('Failed to parse endpointRateLimits!') + logger.error('Failed to parse endpointRateLimits!') } // Key generator for rate limiter that rate limits based on unique IP diff --git a/creator-node/src/routes/files.js b/creator-node/src/routes/files.js index dccf66f51a7..d6a10730931 
100644 --- a/creator-node/src/routes/files.js +++ b/creator-node/src/routes/files.js @@ -133,7 +133,7 @@ const logGetCIDDecisionTree = (decisionTree, req) => { try { req.logger.info(`[getCID] Decision Tree: ${JSON.stringify(decisionTree)}`) } catch (e) { - console.error(`[getCID] Decision Tree - Failed to print: ${e.message}`) + req.logger.error(`[getCID] Decision Tree - Failed to print: ${e.message}`) } } diff --git a/creator-node/src/services/TrustedNotifierManager.js b/creator-node/src/services/TrustedNotifierManager.js index ca1787e6e94..deec1ba8c43 100644 --- a/creator-node/src/services/TrustedNotifierManager.js +++ b/creator-node/src/services/TrustedNotifierManager.js @@ -64,7 +64,7 @@ class TrustedNotifierManager { this.trustedNotifierData = this.trustedNotifierChainData } catch (e) { - console.error(e) + this.logError(`Failed to initialize: ${e}`) } } diff --git a/creator-node/src/services/stateMachineManager/stateMachineConstants.js b/creator-node/src/services/stateMachineManager/stateMachineConstants.js index 234042fd42e..56fa3d3cd49 100644 --- a/creator-node/src/services/stateMachineManager/stateMachineConstants.js +++ b/creator-node/src/services/stateMachineManager/stateMachineConstants.js @@ -1,12 +1,12 @@ module.exports = { // Max number of completed/failed jobs to keep in redis QUEUE_HISTORY: 500, - // Name of StateMonitoringQueue - STATE_MONITORING_QUEUE_NAME: 'state-monitoring-queue', // Max millis to run a StateMonitoringQueue job for before marking it as stalled (1 hour) STATE_MONITORING_QUEUE_MAX_JOB_RUNTIME_MS: 1000 * 60 * 60, // Millis to delay starting the first job in the StateMonitoringQueue (30 seconds) STATE_MONITORING_QUEUE_INIT_DELAY_MS: 1000 * 30, + // Max millis to run a StateReconciliationQueue job for before marking it as stalled (1 hour) + STATE_RECONCILIATION_QUEUE_MAX_JOB_RUNTIME_MS: 1000 * 60 * 60, // Millis to timeout request for getting users who have a node as their primary/secondary (60 seconds) GET_NODE_USERS_TIMEOUT_MS: 1000 * 60, // Millis to forcibly cancel getNodeUsers request if axios timeout doesn't work (70 seconds) @@ -21,6 +21,22 @@ module.exports = { MAX_USER_BATCH_CLOCK_FETCH_RETRIES: 5, // Number of users to process in each batch when calculating reconfigs and syncs AGGREGATE_RECONFIG_AND_POTENTIAL_SYNC_OPS_BATCH_SIZE: 500, + QUEUE_NAMES: Object.freeze({ + // Name of StateMonitoringQueue + STATE_MONITORING: 'state-monitoring-queue', + // Name of StateReconciliationQueue + STATE_RECONCILIATION: 'state-reconciliation-queue' + }), + JOB_NAMES: Object.freeze({ + // Name of job in reconciliation queue that handles executing a manual sync on this node + HANDLE_MANUAL_SYNC_REQUEST: 'handle-manual-sync-request', + // Name of job in reconciliation queue that handles executing a recurring sync on this node + HANDLE_RECURRING_SYNC_REQUEST: 'handle-recurring-sync-request', + // Name of job in reconciliation queue that issues an outgoing sync request to this node or to another node + ISSUE_SYNC_REQUEST: 'issue-sync-request', + // Name of job in reconciliation queue that executes a reconfiguration of a user's replica set when it's unhealthy + UPDATE_REPLICA_SET: 'update-replica-set' + }), // Modes used in issuing a reconfig. Each successive mode is a superset of the mode prior. // The `key` of the reconfig states is used to identify the current reconfig mode. 
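The frozen `QUEUE_NAMES`/`JOB_NAMES` maps above feed Bull's named-job API, where a job enqueued under a name is handled only by the processor registered for that same name. A minimal sketch of that pairing (assumed Bull usage for illustration, not code from this PR; the redis host/port are placeholders):

```js
const BullQueue = require('bull')
// Path assumed for illustration
const { QUEUE_NAMES, JOB_NAMES } = require('./stateMachineConstants')

const queue = new BullQueue(QUEUE_NAMES.STATE_RECONCILIATION, {
  redis: { host: 'localhost', port: 6379 }
})
// Processor only runs for jobs added under the matching name
queue.process(JOB_NAMES.ISSUE_SYNC_REQUEST, 1 /** concurrency */, async (job) => job.data)
queue.add(JOB_NAMES.ISSUE_SYNC_REQUEST, { users: [] })
```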
// The `value` of the reconfig states is used in the superset logic of determining which type of diff --git a/creator-node/src/services/stateMachineManager/stateMonitoring/StateMonitoringQueue.js b/creator-node/src/services/stateMachineManager/stateMonitoring/StateMonitoringQueue.js index c70c136a447..267a30ebfc4 100644 --- a/creator-node/src/services/stateMachineManager/stateMonitoring/StateMonitoringQueue.js +++ b/creator-node/src/services/stateMachineManager/stateMonitoring/StateMonitoringQueue.js @@ -4,13 +4,13 @@ const _ = require('lodash') const config = require('../../../config') const { QUEUE_HISTORY, - STATE_MONITORING_QUEUE_NAME, + QUEUE_NAMES, STATE_MONITORING_QUEUE_MAX_JOB_RUNTIME_MS, STATE_MONITORING_QUEUE_INIT_DELAY_MS } = require('../stateMachineConstants') const { logger } = require('../../../logging') const { getLatestUserIdFromDiscovery } = require('./stateMonitoringUtils') -const processStateMonitoringJob = require('./processStateMonitoringJob') +const processStateMonitoringJob = require('./monitorState.jobProcessor') /** * Handles setup and lifecycle management (adding and processing jobs) @@ -23,12 +23,12 @@ class StateMonitoringQueue { config.get('redisHost'), config.get('redisPort') ) - this.registerQueueEventHandlers({ + this.registerQueueEventHandlersAndJobProcessor({ queue: this.queue, jobSuccessCallback: this.enqueueJobAfterSuccess, - jobFailureCallback: this.enqueueJobAfterFailure + jobFailureCallback: this.enqueueJobAfterFailure, + processJob: this.processJob.bind(this) }) - this.registerQueueJobProcessor(this.queue) await this.startQueue( this.queue, @@ -55,7 +55,7 @@ class StateMonitoringQueue { makeQueue(redisHost, redisPort) { // Settings config from https://github.com/OptimalBits/bull/blob/develop/REFERENCE.md#advanced-settings - return new BullQueue(STATE_MONITORING_QUEUE_NAME, { + return new BullQueue(QUEUE_NAMES.STATE_MONITORING, { redis: { host: redisHost, port: redisPort @@ -83,11 +83,13 @@ class StateMonitoringQueue { * @param {Object} params.queue the queue to register events for * @param {Function} params.jobSuccessCallback the function to call when a job succeeds * @param {Function} params.jobFailureCallback the function to call when a job fails + * @param {Function} params.processJob the function to call when processing a job from the queue */ - registerQueueEventHandlers({ + registerQueueEventHandlersAndJobProcessor({ queue, jobSuccessCallback, - jobFailureCallback + jobFailureCallback, + processJob }) { // Add handlers for logging queue.on('global:waiting', (jobId) => { @@ -127,6 +129,9 @@ class StateMonitoringQueue { ) jobFailureCallback(queue, job) }) + + // Register the logic that gets executed to process each new job from the queue + queue.process(1 /** concurrency */, processJob) } /** @@ -173,55 +178,42 @@ class StateMonitoringQueue { }) } - /** - * Registers the logic that gets executed to process each new job from the queue. 
- * @param {Object} queue the StateMonitoringQueue to consume jobs from - */ - registerQueueJobProcessor(queue) { - // Initialize queue job processor (aka consumer) - queue.process(1 /** concurrency */, async (job) => { - const { - id: jobId, - data: { - lastProcessedUserId, - discoveryNodeEndpoint, - moduloBase, - currentModuloSlice - } - } = job - - try { - this.log(`New job details: jobId=${jobId}, job=${JSON.stringify(job)}`) - } catch (e) { - this.logError(`Failed to log details for jobId=${jobId}: ${e}`) + async processJob(job) { + const { + id: jobId, + data: { + lastProcessedUserId, + discoveryNodeEndpoint, + moduloBase, + currentModuloSlice } + } = job + this.log(`New job details: jobId=${jobId}, job=${JSON.stringify(job)}`) - // Default results of this job will be passed to the next job, so default to failure - let result = { + // Default results of this job will be passed to the next job, so default to failure + let result = { + lastProcessedUserId, + jobFailed: true, + moduloBase, + currentModuloSlice + } + try { + // TODO: Wire up metrics + // await redis.set('stateMachineQueueLatestJobStart', Date.now()) + result = await processStateMonitoringJob( + jobId, lastProcessedUserId, - jobFailed: true, + discoveryNodeEndpoint, moduloBase, currentModuloSlice - } - try { - // TODO: Wire up metrics - // await redis.set('stateMachineQueueLatestJobStart', Date.now()) - result = await processStateMonitoringJob( - jobId, - lastProcessedUserId, - discoveryNodeEndpoint, - moduloBase, - currentModuloSlice - ) - // TODO: Wire up metrics - // await redis.set('stateMachineQueueLatestJobSuccess', Date.now()) - } catch (e) { - this.logError(`Error processing jobId ${jobId}: ${e}`) - console.log(e.stack) - } + ) + // TODO: Wire up metrics + // await redis.set('stateMachineQueueLatestJobSuccess', Date.now()) + } catch (e) { + this.logError(`Error processing jobId ${jobId}: ${e}`) + } - return result - }) + return result } /** diff --git a/creator-node/src/services/stateMachineManager/stateMonitoring/processStateMonitoringJob.js b/creator-node/src/services/stateMachineManager/stateMonitoring/monitorState.jobProcessor.js similarity index 82% rename from creator-node/src/services/stateMachineManager/stateMonitoring/processStateMonitoringJob.js rename to creator-node/src/services/stateMachineManager/stateMonitoring/monitorState.jobProcessor.js index 30c3ca4bbdf..7a646f05587 100644 --- a/creator-node/src/services/stateMachineManager/stateMonitoring/processStateMonitoringJob.js +++ b/creator-node/src/services/stateMachineManager/stateMonitoring/monitorState.jobProcessor.js @@ -1,12 +1,10 @@ const config = require('../../../config') const { logger } = require('../../../logging') const NodeHealthManager = require('../CNodeHealthManager') -const NodeToSpIdManager = require('../CNodeToSpIdMapManager') const { getNodeUsers, buildReplicaSetNodesToUserWalletsMap, - computeUserSecondarySyncSuccessRatesMap, - aggregateReconfigAndPotentialSyncOps + computeUserSecondarySyncSuccessRatesMap } = require('./stateMonitoringUtils') const { retrieveClockStatusesForUsersAcrossReplicaSet @@ -18,21 +16,29 @@ const THIS_CNODE_ENDPOINT = config.get('creatorNodeEndpoint') /** * Processes a job to monitor the current state of `USERS_PER_JOB` users. - * Returns the syncs and replica set updates that are required for these users. + * Returns state data for the slice of users processed and the Content Nodes affiliated with them. 
* @param {number} jobId the id of the job being run * @param {number} lastProcessedUserId the highest ID of the user that was most recently processed * @param {string} discoveryNodeEndpoint the IP address / URL of a Discovery Node to make requests to * @param {number} moduloBase (DEPRECATED) * @param {number} currentModuloSlice (DEPRECATED) - * @return {Object} { lastProcessedUserId (number), jobFailed (boolean) } + * @return {Object} { + * lastProcessedUserId (number), + * jobFailed (boolean), + * users (array of objects), + * unhealthyPeers (set of content node endpoint strings), + * replicaSetNodesToUserClockStatusesMap (object), + * userSecondarySyncMetricsMap (object) + * } */ -const processStateMonitoringJob = async ( +module.exports = async function ( jobId, lastProcessedUserId, discoveryNodeEndpoint, moduloBase, // TODO: Remove. https://linear.app/audius/issue/CON-146/clean-up-modulo-slicing-after-all-dns-update-to-support-pagination currentModuloSlice // TODO: Remove. https://linear.app/audius/issue/CON-146/clean-up-modulo-slicing-after-all-dns-update-to-support-pagination -) => { +) { // Record all stages of this function along with associated information for use in logging const decisionTree = [] _addToDecisionTree(decisionTree, jobId, 'BEGIN processStateMonitoringJob', { @@ -45,12 +51,15 @@ }) let jobFailed = false - let nodeUsers = [] + let users = [] + let unhealthyPeers = new Set() + let replicaSetNodesToUserClockStatusesMap = {} + let userSecondarySyncMetricsMap = {} // New DN versions support pagination, so we fall back to modulo slicing for old versions // TODO: Remove modulo supports once all DNs update to include https://github.com/AudiusProject/audius-protocol/pull/3071 try { try { - nodeUsers = await getNodeUsers( + users = await getNodeUsers( discoveryNodeEndpoint, THIS_CNODE_ENDPOINT, lastProcessedUserId, USERS_PER_JOB ) // Backwards compatibility -- DN will return all users if it doesn't have pagination. // In that case, we have to manually paginate the full set of users // TODO: Remove. 
https://linear.app/audius/issue/CON-146/clean-up-modulo-slicing-after-all-dns-update-to-support-pagination - if (nodeUsers.length > USERS_PER_JOB) { - nodeUsers = sliceUsers(nodeUsers, moduloBase, currentModuloSlice) + if (users.length > USERS_PER_JOB) { + users = sliceUsers(users, moduloBase, currentModuloSlice) } _addToDecisionTree( decisionTree, jobId, 'getNodeUsers and sliceUsers Success', - { nodeUsersLength: nodeUsers?.length } + { usersLength: users?.length } ) } catch (e) { // Make the next job try again instead of looping back to userId 0 - nodeUsers = [{ user_id: lastProcessedUserId }] + users = [{ user_id: lastProcessedUserId }] _addToDecisionTree( decisionTree, @@ -85,9 +94,8 @@ const processStateMonitoringJob = async ( ) } - let unhealthyPeers try { - unhealthyPeers = await NodeHealthManager.getUnhealthyPeers(nodeUsers) + unhealthyPeers = await NodeHealthManager.getUnhealthyPeers(users) _addToDecisionTree(decisionTree, jobId, 'getUnhealthyPeers Success', { unhealthyPeerSetLength: unhealthyPeers?.size, unhealthyPeers: Array.from(unhealthyPeers) @@ -106,7 +114,7 @@ const processStateMonitoringJob = async ( // Build map of const replicaSetNodesToUserWalletsMap = - buildReplicaSetNodesToUserWalletsMap(nodeUsers) + buildReplicaSetNodesToUserWalletsMap(users) _addToDecisionTree( decisionTree, jobId, @@ -117,7 +125,6 @@ const processStateMonitoringJob = async ( ) // Retrieve clock statuses for all users and their current replica sets - let replicaSetNodesToUserClockStatusesMap try { // Set mapping of replica endpoint to (mapping of wallet to clock value) const clockStatusResp = @@ -151,10 +158,9 @@ const processStateMonitoringJob = async ( } // Retrieve success metrics for all users syncing to their secondaries - let userSecondarySyncMetricsMap = {} try { userSecondarySyncMetricsMap = - await computeUserSecondarySyncSuccessRatesMap(nodeUsers) + await computeUserSecondarySyncSuccessRatesMap(users) _addToDecisionTree( decisionTree, jobId, @@ -176,25 +182,6 @@ const processStateMonitoringJob = async ( 'processStateMonitoringJob computeUserSecondarySyncSuccessRatesMap Error' ) } - - // Find sync requests that need to be issued and ReplicaSets that need to be updated - const { requiredUpdateReplicaSetOps, potentialSyncRequests } = - await aggregateReconfigAndPotentialSyncOps( - nodeUsers, - unhealthyPeers, - userSecondarySyncMetricsMap, - NodeToSpIdManager.getCNodeEndpointToSpIdMap(), - THIS_CNODE_ENDPOINT - ) - _addToDecisionTree( - decisionTree, - jobId, - 'Build requiredUpdateReplicaSetOps and potentialSyncRequests arrays', - { - requiredUpdateReplicaSetOpsLength: requiredUpdateReplicaSetOps?.length, - potentialSyncRequestsLength: potentialSyncRequests?.length - } - ) } catch (e) { logger.info(`processStateMonitoringJob ERROR: ${e.toString()}`) jobFailed = true @@ -206,12 +193,16 @@ const processStateMonitoringJob = async ( } // The next job should start processing where this one ended or loop back around to the first user - const lastProcessedUser = nodeUsers[nodeUsers.length - 1] || { + const lastProcessedUser = users[users.length - 1] || { user_id: 0 } return { lastProcessedUserId: lastProcessedUser?.user_id || 0, - jobFailed + jobFailed, + users, + unhealthyPeers, + replicaSetNodesToUserClockStatusesMap, + userSecondarySyncMetricsMap } } @@ -275,5 +266,3 @@ const sliceUsers = (nodeUsers, moduloBase, currentModuloSlice) => { (nodeUser) => nodeUser.user_id % moduloBase === currentModuloSlice ) } - -module.exports = processStateMonitoringJob diff --git 
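To make the deprecated modulo fallback in `sliceUsers` concrete: with `moduloBase = 3` and `currentModuloSlice = 1`, only users whose `user_id % 3 === 1` survive the slice. A quick worked example (values are illustrative):

```js
const nodeUsers = [1, 2, 3, 4, 5, 6].map((id) => ({ user_id: id }))
const moduloBase = 3
const currentModuloSlice = 1
const slice = nodeUsers.filter((u) => u.user_id % moduloBase === currentModuloSlice)
console.log(slice.map((u) => u.user_id)) // [1, 4]
```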
a/creator-node/src/services/stateMachineManager/stateReconciliation/StateReconciliationQueue.js b/creator-node/src/services/stateMachineManager/stateReconciliation/StateReconciliationQueue.js index 31ac887cddf..e92bfc485b6 100644 --- a/creator-node/src/services/stateMachineManager/stateReconciliation/StateReconciliationQueue.js +++ b/creator-node/src/services/stateMachineManager/stateReconciliation/StateReconciliationQueue.js @@ -1,5 +1,270 @@ +const BullQueue = require('bull') +const _ = require('lodash') + +const config = require('../../../config') +const { + QUEUE_HISTORY, + QUEUE_NAMES, + JOB_NAMES, + STATE_RECONCILIATION_QUEUE_MAX_JOB_RUNTIME_MS, + SyncType +} = require('../stateMachineConstants') +const { logger } = require('../../../logging') +const issueSyncRequestJobProcessor = require('./issueSyncRequest.jobProcessor') +const handleSyncRequestJobProcessor = require('./handleSyncRequest.jobProcessor') +const updateReplicaSetJobProcessor = require('./updateReplicaSet.jobProcessor') + /** * Handles setup and lifecycle management (adding and processing jobs) - * of the queue that issues syncs and replica set updates. + * of the queue with jobs for: + * - issuing sync requests to nodes (this can be other nodes or this node) + * - executing syncs from these requests + * - updating user's replica sets when one or more nodes in their replica set becomes unhealthy */ -class StateReconciliationQueue {} +class StateReconciliationQueue { + async init() { + this.queue = this.makeQueue( + config.get('redisHost'), + config.get('redisPort') + ) + this.registerQueueEventHandlersAndJobProcessor({ + queue: this.queue, + processManualSync: this.processManualSyncJob.bind(this), + processRecurringSync: this.processRecurringSyncJob.bind(this), + processIssueSyncRequests: this.processIssueSyncRequestsJob.bind(this), + processUpdateReplicaSets: this.processUpdateReplicaSetsJob.bind(this) + }) + + // Clear any old state if redis was running but the rest of the server restarted + await this.queue.obliterate({ force: true }) + } + + logDebug(msg) { + logger.debug(`StateReconciliationQueue DEBUG: ${msg}`) + } + + log(msg) { + logger.info(`StateReconciliationQueue: ${msg}`) + } + + logWarn(msg) { + logger.warn(`StateReconciliationQueue WARNING: ${msg}`) + } + + logError(msg) { + logger.error(`StateReconciliationQueue ERROR: ${msg}`) + } + + makeQueue(redisHost, redisPort) { + // Settings config from https://github.com/OptimalBits/bull/blob/develop/REFERENCE.md#advanced-settings + return new BullQueue(QUEUE_NAMES.STATE_RECONCILIATION, { + redis: { + host: redisHost, + port: redisPort + }, + defaultJobOptions: { + removeOnComplete: QUEUE_HISTORY, + removeOnFail: QUEUE_HISTORY + }, + settings: { + // Should be sufficiently larger than expected job runtime + lockDuration: STATE_RECONCILIATION_QUEUE_MAX_JOB_RUNTIME_MS, + // We never want to re-process stalled jobs + maxStalledCount: 0 + } + }) + } + + /** + * Registers event handlers for logging and job success/failure. 
+ * @param {Object} params.queue the queue to register events for + * @param {Function} params.processManualSync the function to call when processing a manual sync job from the queue + * @param {Function} params.processRecurringSync the function to call when processing a recurring sync job from the queue + * @param {Function} params.processIssueSyncRequests the function to call when processing an issue-sync-requests job from the queue + * @param {Function} params.processUpdateReplicaSets the function to call when processing an update-replica-set job from the queue + */ + registerQueueEventHandlersAndJobProcessor({ + queue, + processManualSync, + processRecurringSync, + processIssueSyncRequests, + processUpdateReplicaSets + }) { + // Add handlers for logging + queue.on('global:waiting', (jobId) => { + this.log(`Queue Job Waiting - ID ${jobId}`) + }) + queue.on('global:active', (jobId, jobPromise) => { + this.log(`Queue Job Active - ID ${jobId}`) + }) + queue.on('global:lock-extension-failed', (jobId, err) => { + this.logError( + `Queue Job Lock Extension Failed - ID ${jobId} - Error ${err}` + ) + }) + queue.on('global:stalled', (jobId) => { + this.logError(`Queue Job Stalled - ID ${jobId}`) + }) + queue.on('global:error', (error) => { + this.logError(`Queue Job Error - ${error}`) + }) + + // Add handlers for when a job fails to complete (or completes with an error) or successfully completes + queue.on('completed', (job, result) => { + this.log( + `Queue Job Completed - ID ${job?.id} - Result ${JSON.stringify( + result + )}` + ) + }) + queue.on('failed', (job, err) => { + this.logError(`Queue Job Failed - ID ${job?.id} - Error ${err}`) + }) + + // Register the logic that gets executed to process each new job from the queue + queue.process( + JOB_NAMES.HANDLE_MANUAL_SYNC_REQUEST, + config.get('maxManualRequestSyncJobConcurrency'), + processManualSync + ) + queue.process( + JOB_NAMES.HANDLE_RECURRING_SYNC_REQUEST, + config.get('maxRecurringRequestSyncJobConcurrency'), + processRecurringSync + ) + queue.process( + JOB_NAMES.ISSUE_SYNC_REQUEST, + 1 /** concurrency */, + processIssueSyncRequests + ) + queue.process( + JOB_NAMES.UPDATE_REPLICA_SET, + 1 /** concurrency */, + processUpdateReplicaSets + ) + } + + async processManualSyncJob(job) { + const { id: jobId, data: syncRequestParameters } = job + + this.log( + `New manual sync job details: jobId=${jobId}, job=${JSON.stringify(job)}` + ) + + let result = {} + try { + result = await handleSyncRequestJobProcessor( + jobId, + SyncType.Manual, + syncRequestParameters + ) + } catch (error) { + this.logError(`Error processing manual sync jobId ${jobId}: ${error}`) + result = { error } + } + + return result + } + + async processRecurringSyncJob(job) { + const { id: jobId, data: syncRequestParameters } = job + + this.log( + `New recurring sync job details: jobId=${jobId}, job=${JSON.stringify( + job + )}` + ) + + let result = {} + try { + result = await handleSyncRequestJobProcessor( + jobId, + SyncType.Recurring, + syncRequestParameters + ) + } catch (error) { + this.logError(`Error processing recurring sync jobId ${jobId}: ${error}`) + result = { error } + } + + return result + } + + async processIssueSyncRequestsJob(job) { + const { + id: jobId, + data: { + users, + unhealthyPeers, + userSecondarySyncMetricsMap, + 
replicaSetNodesToUserClockStatusesMap + } + } = job + + this.log( + `New ${ + JOB_NAMES.ISSUE_SYNC_REQUEST + } job details: jobId=${jobId}, job=${JSON.stringify(job)}` + ) + + let result = {} + try { + result = await issueSyncRequestJobProcessor( + jobId, + users, + unhealthyPeers, + userSecondarySyncMetricsMap, + replicaSetNodesToUserClockStatusesMap + ) + } catch (error) { + this.logError( + `Error processing ${JOB_NAMES.ISSUE_SYNC_REQUEST} jobId ${jobId}: ${error}` + ) + result = { error } + } + + return result + } + + async processUpdateReplicaSetsJob(job) { + const { + id: jobId, + data: { + users, + unhealthyPeers, + userSecondarySyncMetricsMap, + replicaSetNodesToUserWalletsMap, + replicaSetNodesToUserClockStatusesMap + } + } = job + + this.log( + `New ${ + JOB_NAMES.UPDATE_REPLICA_SET + } job details: jobId=${jobId}, job=${JSON.stringify(job)}` + ) + + let result = {} + try { + result = await updateReplicaSetJobProcessor( + jobId, + users, + unhealthyPeers, + userSecondarySyncMetricsMap, + replicaSetNodesToUserWalletsMap, + replicaSetNodesToUserClockStatusesMap + ) + } catch (error) { + this.logError( + `Error processing ${JOB_NAMES.UPDATE_REPLICA_SET} jobId ${jobId}: ${error}` + ) + result = { error } + } + + return result + } +} + +module.exports = StateReconciliationQueue diff --git a/creator-node/src/services/stateMachineManager/stateReconciliation/handleSyncRequest.jobProcessor.js b/creator-node/src/services/stateMachineManager/stateReconciliation/handleSyncRequest.jobProcessor.js new file mode 100644 index 00000000000..2f2c944c2df --- /dev/null +++ b/creator-node/src/services/stateMachineManager/stateReconciliation/handleSyncRequest.jobProcessor.js @@ -0,0 +1,10 @@ +/** + * Processes a job to execute a manual or recurring sync (determined by syncType param). + * @param {number} jobId the id of the job being run + * @param {string} syncType the type of sync (manual or recurring) + * @param {Object} syncRequestParameters axios params to make the sync request. Shape: { baseURL, url, method, data } + */ +module.exports = async function (jobId, syncType, syncRequestParameters) { + // TODO: Copy from snapback's `processSyncOperation()` + return {} +} diff --git a/creator-node/src/services/stateMachineManager/stateReconciliation/issueSyncRequest.jobProcessor.js b/creator-node/src/services/stateMachineManager/stateReconciliation/issueSyncRequest.jobProcessor.js new file mode 100644 index 00000000000..5ae9ddfba5c --- /dev/null +++ b/creator-node/src/services/stateMachineManager/stateReconciliation/issueSyncRequest.jobProcessor.js @@ -0,0 +1,37 @@ +/** + * Processes a job to issue a sync request for a user. + * Only issues request to a healthy secondary when this node is the user's primary and has a clock value + * greater than the secondary's clock value. 
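One design note on the `process*Job` wrappers above: with Bull, a processor that throws is recorded as failed, while one that returns a value is recorded as completed; by catching and returning `{ error }`, these processors keep every attempt (and its error) in the completed-job history capped by `QUEUE_HISTORY`. A distilled sketch of the pattern (not PR code):

```js
// Error-envelope pattern used by the process*Job methods above
async function runJob(processor, job) {
  try {
    return await processor(job) // a return value -> Bull marks the job completed
  } catch (error) {
    return { error } // returning instead of rethrowing avoids Bull's 'failed' state
  }
}
```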
+ * + * @param {number} jobId the id of the job being run + * @param {Object[]} users users to find mismatched clock values for and, as necessary, issue requests to sync data from this node to the user's secondary/secondaries + * @param {Set} unhealthyPeers the set of unhealthy content nodes that one or more `users` has as their primary or secondary + * @param {Object} userSecondarySyncMetricsMap mapping of each secondary node to the success metrics the user has had syncing to it + * @param {Object} replicaSetNodesToUserClockStatusesMap map(replica set node => map(userWallet => clockValue)) + * @returns {Object} data about which sync requests were successfully enqueued and which failed + */ +module.exports = async function ( + jobId, + users, // TODO: This will change to be for a single user + unhealthyPeers, + userSecondarySyncMetricsMap, + replicaSetNodesToUserClockStatusesMap +) { + // TODO: Extract the part of snapback's _aggregateOps() code that + // finds potentialSyncRequests *into the monitoring queue*, and then + // pass it here (update params here) so that this can copy snapback's `issueSyncRequestsToSecondaries()` + + /** + * TODO: Instead of how snapback currently has issueSyncRequestsToSecondaries() return: { + * syncRequestsRequired + * syncRequestsEnqueued, + * enqueueSyncRequestErrors + * } + * + * make this job return more informative data: { + * syncReqsSuccessfullyEnqueued + * syncReqsFailedToEnqueue + * } + */ + return {} +} diff --git a/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.js b/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.js new file mode 100644 index 00000000000..2a92f0b50e5 --- /dev/null +++ b/creator-node/src/services/stateMachineManager/stateReconciliation/updateReplicaSet.jobProcessor.js @@ -0,0 +1,23 @@ +/** + * Updates replica sets of a user who has one or more unhealthy nodes as their primary or secondaries. 
+ * @param {number} jobId the id of the job being run + * @param {Object} users { primary, secondary1, secondary2, primarySpID, secondary1SpID, secondary2SpID, user_id, wallet } + * @param {Set} unhealthyPeers set of unhealthy peers + * @param {string (wallet): Object{ string (secondary endpoint): Object{ successRate: number (0-1), successCount: number, failureCount: number }}} userSecondarySyncMetricsMap mapping of user's wallet (string) to metrics for their sync success to secondaries + * @param {Object} replicaSetNodesToUserWalletsMap map(replica set node => array of user wallets) + * @param {Object} replicaSetNodesToUserClockStatusesMap map(replica set node => map(userWallet => clockValue)) + */ +module.exports = async function ( + jobId, + users, // TODO: This will be updated to only run for one user + unhealthyPeers, + userSecondarySyncMetricsMap, + replicaSetNodesToUserWalletsMap, + replicaSetNodesToUserClockStatusesMap +) { + // TODO: Move snapback's `_aggregateOps` (decouple from the sync part of this function), `autoSelectCreatorNodes`, `determineNewReplicaSet`, and `issueUpdateReplicaSetOp` steps + // into the *monitoring queue* and then make them output a single reconfig to execute here + + // TODO: Return data about updated replica set + return {} +} diff --git a/creator-node/src/snapbackSM/snapbackSM.js b/creator-node/src/snapbackSM/snapbackSM.js index dd7fae24986..8094dec31d5 100644 --- a/creator-node/src/snapbackSM/snapbackSM.js +++ b/creator-node/src/snapbackSM/snapbackSM.js @@ -202,14 +202,8 @@ class SnapbackSM { this.logError(`recurringSyncQueue Job Stalled - ID ${jobId}`) }) - // PeerSetManager instance to determine the peer set and its health state - this.peerSetManager = new PeerSetManager({ - discoveryProviderEndpoint: - audiusLibs.discoveryProvider.discoveryProviderEndpoint, - creatorNodeEndpoint: this.endpoint - }) - this.updateEnabledReconfigModesSet() + this.inittedJobProcessors = false } /** @@ -223,6 +217,13 @@ await this.manualSyncQueue.empty() await this.recurringSyncQueue.empty() + // PeerSetManager instance to determine the peer set and its health state + this.peerSetManager = new PeerSetManager({ + discoveryProviderEndpoint: + this.audiusLibs.discoveryProvider.discoveryProviderEndpoint, + creatorNodeEndpoint: this.endpoint + }) + // SyncDeDuplicator ensure a sync for a (syncType, userWallet, secondaryEndpoint) tuple is only enqueued once this.syncDeDuplicator = new SyncDeDuplicator() /** * Initialize all queue processors */ - // Initialize stateMachineQueue job processor (aka consumer) - this.stateMachineQueue.process(1 /** concurrency */, async (job) => { - this.log('StateMachineQueue: Consuming new job...') - const { id: jobId } = job - - try { - this.log( - `StateMachineQueue: New job details: jobId=${jobId}, job=${JSON.stringify( - job - )}` - ) - } catch (e) { - this.logError( - `StateMachineQueue: Failed to log details for jobId=${jobId}: ${e}` - ) - } - - try { - await redis.set('stateMachineQueueLatestJobStart', Date.now()) - await this.processStateMachineOperation(jobId) - await redis.set('stateMachineQueueLatestJobSuccess', Date.now()) - } catch (e) { - this.logError( - `StateMachineQueue: Processing error on jobId ${jobId}: ${e}` - ) - } - - return {} - }) + if (!this.inittedJobProcessors) { + // Initialize stateMachineQueue job processor (aka consumer) + this.stateMachineQueue.process(1 /** concurrency */, async (job) => { + this.log('StateMachineQueue: Consuming new job...') + const { id: jobId } = job - // Initialize 
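Taken together, the TODOs in these job processors describe a handoff in which the monitoring job computes state and the reconciliation queue merely executes it. A hypothetical sketch of that eventual wiring (the payload shape is taken from `processIssueSyncRequestsJob` above; the function name and queue plumbing are assumptions, not PR code):

```js
const { JOB_NAMES } = require('../stateMachineConstants') // path assumed

async function handOffToReconciliation(reconciliationQueue, monitorResult) {
  const {
    users,
    unhealthyPeers,
    userSecondarySyncMetricsMap,
    replicaSetNodesToUserClockStatusesMap
  } = monitorResult
  // Enqueue under a job name the reconciliation queue registered a processor for
  await reconciliationQueue.add(JOB_NAMES.ISSUE_SYNC_REQUEST, {
    users,
    unhealthyPeers,
    userSecondarySyncMetricsMap,
    replicaSetNodesToUserClockStatusesMap
  })
}
```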
manualSyncQueue job processor - this.manualSyncQueue.process( - this.MaxManualRequestSyncJobConcurrency, - async (job) => { try { - await this.processSyncOperation(job, SyncType.Manual) + this.log( + `StateMachineQueue: New job details: jobId=${jobId}, job=${JSON.stringify( + job + )}` + ) } catch (e) { - this.logError(`ManualSyncQueue processing error: ${e}`) + this.logError( + `StateMachineQueue: Failed to log details for jobId=${jobId}: ${e}` + ) } - } - ) - // Initialize recurringSyncQueue job processor - this.recurringSyncQueue.process( - this.MaxRecurringRequestSyncJobConcurrency, - async (job) => { try { - await this.processSyncOperation(job, SyncType.Recurring) + await redis.set('stateMachineQueueLatestJobStart', Date.now()) + await this.processStateMachineOperation(jobId) + await redis.set('stateMachineQueueLatestJobSuccess', Date.now()) } catch (e) { - this.logError(`RecurringSyncQueue processing error ${e}`) + this.logError( + `StateMachineQueue: Processing error on jobId ${jobId}: ${e}` + ) } - } - ) + + return {} + }) + + // Initialize manualSyncQueue job processor + this.manualSyncQueue.process( + this.MaxManualRequestSyncJobConcurrency, + async (job) => { + try { + await this.processSyncOperation(job, SyncType.Manual) + } catch (e) { + this.logError(`ManualSyncQueue processing error: ${e}`) + } + } + ) + + // Initialize recurringSyncQueue job processor + this.recurringSyncQueue.process( + this.MaxRecurringRequestSyncJobConcurrency, + async (job) => { + try { + await this.processSyncOperation(job, SyncType.Recurring) + } catch (e) { + this.logError(`RecurringSyncQueue processing error ${e}`) + } + } + ) + this.inittedJobProcessors = true + } // Start at a random userId to avoid biased processing of early users const latestUserId = await this.getLatestUserId() diff --git a/creator-node/src/utils.js b/creator-node/src/utils.js index ee1b9679f8f..2827a2b5883 100644 --- a/creator-node/src/utils.js +++ b/creator-node/src/utils.js @@ -24,7 +24,7 @@ class Utils { static async timeout(ms, log = true) { if (log) { - console.log(`starting timeout of ${ms}`) + genericLogger.info(`starting timeout of ${ms}`) } return new Promise((resolve) => setTimeout(resolve, ms)) } diff --git a/creator-node/test/CNodeHealthManager.test.js b/creator-node/test/CNodeHealthManager.test.js new file mode 100644 index 00000000000..609572a994e --- /dev/null +++ b/creator-node/test/CNodeHealthManager.test.js @@ -0,0 +1,506 @@ +/* eslint-disable no-unused-expressions */ +const nock = require('nock') +const chai = require('chai') +const sinon = require('sinon') +const { expect } = chai +chai.use(require('sinon-chai')) +chai.use(require('chai-as-promised')) +const proxyquire = require('proxyquire') + +const CNodeHealthManager = require('../src/services/stateMachineManager/CNodeHealthManager') +const config = require('../src/config') +const Utils = require('../src/utils') + +describe('test CNodeHealthManager -- getUnhealthyPeers()', function () { + let sandbox + beforeEach(function () { + sandbox = sinon.createSandbox() + }) + + afterEach(function () { + sandbox.restore() + }) + + const healthyCn1 = 'http://healthy_cn1.co' + const healthyCn2 = 'http://healthy_cn2.co' + const unhealthyCn3 = 'http://unhealthy_cn3.co' + const unhealthyCn4 = 'http://unhealthy_cn4.co' + const healthyNodes = [healthyCn1, healthyCn2] + const unhealthyNodes = [unhealthyCn3, unhealthyCn4] + const users = [ + { + user_id: 1, + wallet: 'wallet1', + primary: healthyCn1, + secondary1: unhealthyCn3, + secondary2: unhealthyCn3 + }, + { + 
user_id: 2, + wallet: 'wallet2', + primary: unhealthyCn4, + secondary1: healthyCn2, + secondary2: healthyCn1 + } + ] + const thisContentNodeEndpoint = 'http://healthy_cn1.co' + + it('returns all unhealthy nodes with default performSimpleCheck', async function () { + // Stub functions that getUnhealthyPeers() will call + sandbox + .stub(CNodeHealthManager, '_computeContentNodePeerSet') + .returns(new Set([...healthyNodes, ...unhealthyNodes])) + const isNodeHealthyStub = sandbox.stub(CNodeHealthManager, 'isNodeHealthy') + isNodeHealthyStub.withArgs(sinon.match.in(healthyNodes)).resolves(true) + isNodeHealthyStub.withArgs(sinon.match.in(unhealthyNodes)).resolves(false) + + // Verify that the correct unhealthy peers are returned + return expect( + CNodeHealthManager.getUnhealthyPeers(users, thisContentNodeEndpoint) + ).to.eventually.be.fulfilled.and.deep.equal(new Set(unhealthyNodes)) + }) + + it('returns all unhealthy nodes when performSimpleCheck=true', async function () { + // Stub functions that getUnhealthyPeers() will call + sandbox + .stub(CNodeHealthManager, '_computeContentNodePeerSet') + .returns(new Set([...healthyNodes, ...unhealthyNodes])) + const isNodeHealthyStub = sandbox.stub(CNodeHealthManager, 'isNodeHealthy') + isNodeHealthyStub.withArgs(sinon.match.in(healthyNodes)).resolves(true) + isNodeHealthyStub.withArgs(sinon.match.in(unhealthyNodes)).resolves(false) + + // Verify that the correct unhealthy peers are returned + return expect( + CNodeHealthManager.getUnhealthyPeers(users, thisContentNodeEndpoint, true) + ).to.eventually.be.fulfilled.and.deep.equal(new Set(unhealthyNodes)) + }) + + it('returns all unhealthy nodes when performSimpleCheck=false', async function () { + // Stub functions that getUnhealthyPeers() will call + sandbox + .stub(CNodeHealthManager, '_computeContentNodePeerSet') + .returns(new Set([...healthyNodes, ...unhealthyNodes])) + const isNodeHealthyStub = sandbox.stub(CNodeHealthManager, 'isNodeHealthy') + isNodeHealthyStub.withArgs(sinon.match.in(healthyNodes)).resolves(true) + isNodeHealthyStub.withArgs(sinon.match.in(unhealthyNodes)).resolves(false) + + // Verify that the correct unhealthy peers are returned + return expect( + CNodeHealthManager.getUnhealthyPeers( + users, + thisContentNodeEndpoint, + false + ) + ).to.eventually.be.fulfilled.and.deep.equal(new Set(unhealthyNodes)) + }) +}) + +describe('test CNodeHealthManager -- isNodeHealthy()', function () { + let sandbox + beforeEach(function () { + sandbox = sinon.createSandbox() + }) + + afterEach(function () { + sandbox.restore() + }) + + it('returns true when health check passes with performSimpleCheck=true', async function () { + // Stub functions that isNodeHealthy() will call + const node = 'http://some_content_node.co' + const verboseHealthCheckResp = { healthy: true } + const queryVerboseHealthCheckStub = sandbox + .stub(CNodeHealthManager, 'queryVerboseHealthCheck') + .resolves(verboseHealthCheckResp) + const determinePeerHealthStub = sandbox.stub( + CNodeHealthManager, + 'determinePeerHealth' + ) + const logErrorStub = sandbox.stub(CNodeHealthManager, 'logError') + + // Verify that only the simple check was performed and returned healthy + const isHealthy = await CNodeHealthManager.isNodeHealthy(node, true) + expect(isHealthy).to.be.true + expect(queryVerboseHealthCheckStub).to.have.been.calledOnceWithExactly(node) + expect(determinePeerHealthStub).to.not.have.been.called + expect(logErrorStub).to.not.have.been.called + }) + + it('returns false when health check fails with 
performSimpleCheck=true', async function () { + // Stub functions that isNodeHealthy() will call + const node = 'http://some_content_node.co' + const error = new Error('test error') + const queryVerboseHealthCheckStub = sandbox + .stub(CNodeHealthManager, 'queryVerboseHealthCheck') + .rejects(error) + const determinePeerHealthStub = sandbox.stub( + CNodeHealthManager, + 'determinePeerHealth' + ) + const logErrorStub = sandbox.stub(CNodeHealthManager, 'logError') + + // Verify that determinePeerHealth is not called because the health + // check throwing an error causes the function to return false + const isHealthy = await CNodeHealthManager.isNodeHealthy(node, true) + expect(isHealthy).to.be.false + expect(queryVerboseHealthCheckStub).to.have.been.calledOnceWithExactly(node) + expect(determinePeerHealthStub).to.not.have.been.called + expect(logErrorStub).to.have.been.calledOnceWithExactly( + `isNodeHealthy() peer=${node} is unhealthy: ${error.toString()}` + ) + }) + + it('returns false when health check fails with performSimpleCheck=false', async function () { + // Stub functions that isNodeHealthy() will call + const node = 'http://some_content_node.co' + const error = new Error('test error') + const queryVerboseHealthCheckStub = sandbox + .stub(CNodeHealthManager, 'queryVerboseHealthCheck') + .rejects(error) + const determinePeerHealthStub = sandbox.stub( + CNodeHealthManager, + 'determinePeerHealth' + ) + const logErrorStub = sandbox.stub(CNodeHealthManager, 'logError') + + // Verify that determinePeerHealth is not called because the health + // check throwing an error causes the function to return false + const isHealthy = await CNodeHealthManager.isNodeHealthy(node, false) + expect(isHealthy).to.be.false + expect(queryVerboseHealthCheckStub).to.have.been.calledOnceWithExactly(node) + expect(determinePeerHealthStub).to.not.have.been.called + expect(logErrorStub).to.have.been.calledOnceWithExactly( + `isNodeHealthy() peer=${node} is unhealthy: ${error.toString()}` + ) + }) + + it('returns false when determinePeerHealth throws with performSimpleCheck=false', async function () { + // Stub functions that isNodeHealthy() will call + const node = 'http://some_content_node.co' + const determinePeerHealthError = new Error('test determinePeerHealthError') + const verboseHealthCheckResp = { healthy: false } + const queryVerboseHealthCheckStub = sandbox + .stub(CNodeHealthManager, 'queryVerboseHealthCheck') + .resolves(verboseHealthCheckResp) + const determinePeerHealthStub = sandbox + .stub(CNodeHealthManager, 'determinePeerHealth') + .throws(determinePeerHealthError) + const logErrorStub = sandbox.stub(CNodeHealthManager, 'logError') + + // Verify that determinePeerHealth throwing causes the function to return false + const isHealthy = await CNodeHealthManager.isNodeHealthy(node, false) + expect(isHealthy).to.be.false + expect(queryVerboseHealthCheckStub).to.have.been.calledOnceWithExactly(node) + expect(determinePeerHealthStub).to.have.been.calledOnceWithExactly( + verboseHealthCheckResp + ) + expect(logErrorStub).to.have.been.calledOnceWithExactly( + `isNodeHealthy() peer=${node} is unhealthy: ${determinePeerHealthError.toString()}` + ) + }) + + it("returns true when determinePeerHealth doesn't throw with performSimpleCheck=false", async function () { + // Stub functions that isNodeHealthy() will call + const node = 'http://some_content_node.co' + const verboseHealthCheckResp = { healthy: true } + const queryVerboseHealthCheckStub = sandbox + .stub(CNodeHealthManager, 
'queryVerboseHealthCheck') + .resolves(verboseHealthCheckResp) + const determinePeerHealthStub = sandbox.stub( + CNodeHealthManager, + 'determinePeerHealth' + ) + const logErrorStub = sandbox.stub(CNodeHealthManager, 'logError') + + // Verify that only the simple check was performed and returned healthy + const isHealthy = await CNodeHealthManager.isNodeHealthy(node, false) + expect(isHealthy).to.be.true + expect(queryVerboseHealthCheckStub).to.have.been.calledOnceWithExactly(node) + expect(determinePeerHealthStub).to.have.been.calledOnceWithExactly( + verboseHealthCheckResp + ) + expect(logErrorStub).to.not.have.been.called + }) + + it("returns true when determinePeerHealth doesn't throw with default performSimpleCheck", async function () { + // Stub functions that isNodeHealthy() will call + const node = 'http://some_content_node.co' + const verboseHealthCheckResp = { healthy: true } + const queryVerboseHealthCheckStub = sandbox + .stub(CNodeHealthManager, 'queryVerboseHealthCheck') + .resolves(verboseHealthCheckResp) + const determinePeerHealthStub = sandbox.stub( + CNodeHealthManager, + 'determinePeerHealth' + ) + const logErrorStub = sandbox.stub(CNodeHealthManager, 'logError') + + // Verify that only the simple check was performed and returned healthy + const isHealthy = await CNodeHealthManager.isNodeHealthy(node) + expect(isHealthy).to.be.true + expect(queryVerboseHealthCheckStub).to.have.been.calledOnceWithExactly(node) + expect(determinePeerHealthStub).to.have.been.calledOnceWithExactly( + verboseHealthCheckResp + ) + expect(logErrorStub).to.not.have.been.called + }) +}) + +describe('test CNodeHealthManager -- queryVerboseHealthCheck()', function () { + let sandbox + beforeEach(function () { + sandbox = sinon.createSandbox() + nock.disableNetConnect() + }) + + afterEach(function () { + sandbox.restore() + nock.cleanAll() + nock.enableNetConnect() + }) + + it('returns successful response', async function () { + const endpoint = 'http://healthy_cn.co' + const verboseHealthResp = { + healthy: 'true', + verboseData: 'data' + } + nock(endpoint) + .get('/health_check/verbose') + .reply(200, { data: verboseHealthResp }) + + await CNodeHealthManager.queryVerboseHealthCheck(endpoint) + expect(nock.isDone()).to.be.true + }) +}) + +describe('test CNodeHealthManager -- determinePeerHealth()', function () { + // Set config vars for health thresholds + const minimumStoragePathSize = 100 + const minimumMemoryAvailable = 100 + const maxFileDescriptorsAllocatedPercentage = 50 + const minimumDailySyncCount = 3 + const minimumRollingSyncCount = 5 + const minimumSuccessfulSyncCountPercentage = 50 + config.set('minimumStoragePathSize', minimumStoragePathSize) + config.set('minimumMemoryAvailable', minimumMemoryAvailable) + config.set( + 'maxFileDescriptorsAllocatedPercentage', + maxFileDescriptorsAllocatedPercentage + ) + config.set('minimumDailySyncCount', minimumDailySyncCount) + config.set('minimumRollingSyncCount', minimumRollingSyncCount) + config.set( + 'minimumSuccessfulSyncCountPercentage', + minimumSuccessfulSyncCountPercentage + ) + + function determinePeerHealth(verboseHealthCheckResp) { + const CNodeHealthManagerMock = proxyquire( + '../src/services/stateMachineManager/CNodeHealthManager.js', + { + './../../config': config + } + ) + CNodeHealthManagerMock.determinePeerHealth(verboseHealthCheckResp) + } + + it("doesn't throw if all data is healthy (empty data counts as healthy)", function () { + expect(() => determinePeerHealth({})).to.not.throw() + }) + + it('throws when low 
on storage space', function () { + const storagePathSize = 1000 + const storagePathUsed = 990 + const verboseHealthCheckResp = { + storagePathSize, + storagePathUsed + } + expect(() => determinePeerHealth(verboseHealthCheckResp)).to.throw( + `Almost out of storage=${ + storagePathSize - storagePathUsed + }bytes remaining. Minimum storage required=${minimumStoragePathSize}bytes` + ) + }) + + it('throws when low on memory', function () { + const usedMemory = 90 + const totalMemory = 100 + const verboseHealthCheckResp = { + usedMemory, + totalMemory + } + expect(() => determinePeerHealth(verboseHealthCheckResp)).to.throw( + `Running low on memory=${ + totalMemory - usedMemory + }bytes remaining. Minimum memory required=${minimumMemoryAvailable}bytes` + ) + }) + + it('throws when low on file descriptor space', function () { + const allocatedFileDescriptors = 99 + const maxFileDescriptors = 100 + const verboseHealthCheckResp = { + allocatedFileDescriptors, + maxFileDescriptors + } + expect(() => determinePeerHealth(verboseHealthCheckResp)).to.throw( + `Running low on file descriptors availability=${ + (allocatedFileDescriptors / maxFileDescriptors) * 100 + }% used. Max file descriptors allocated percentage allowed=${maxFileDescriptorsAllocatedPercentage}%` + ) + }) + + it('throws when historical sync success rate for today is below threshold', function () { + const dailySyncSuccessCount = 1 + const dailySyncFailCount = 9 + const verboseHealthCheckResp = { + dailySyncSuccessCount, + dailySyncFailCount + } + expect(() => determinePeerHealth(verboseHealthCheckResp)).to.throw( + `Latest daily sync data shows that this node fails at a high rate of syncs. Successful syncs=${dailySyncSuccessCount} || Failed syncs=${dailySyncFailCount}. Minimum successful sync percentage=${minimumSuccessfulSyncCountPercentage}%` + ) + }) + + it('throws when historical sync success rate for rolling 30-day window is below threshold', function () { + const thirtyDayRollingSyncSuccessCount = 1 + const thirtyDayRollingSyncFailCount = 9 + const verboseHealthCheckResp = { + thirtyDayRollingSyncSuccessCount, + thirtyDayRollingSyncFailCount + } + expect(() => determinePeerHealth(verboseHealthCheckResp)).to.throw( + `Rolling sync data shows that this node fails at a high rate of syncs. Successful syncs=${thirtyDayRollingSyncSuccessCount} || Failed syncs=${thirtyDayRollingSyncFailCount}. 
Minimum successful sync percentage=${minimumSuccessfulSyncCountPercentage}%` + ) + }) +}) + +describe('test CNodeHealthManager -- isPrimaryHealthy()', function () { + let sandbox + beforeEach(function () { + sandbox = sinon.createSandbox() + }) + + afterEach(function () { + sandbox.restore() + }) + + const primary = 'http://cn1.co' + const gracePeriodSeconds = 1 + config.set('maxNumberSecondsPrimaryRemainsUnhealthy', 1) + + it('returns true (healthy) when healthy', async function () { + // Make isNodeHealthy return true + const isNodeHealthyStub = sandbox + .stub(CNodeHealthManager, 'isNodeHealthy') + .resolves(true) + + const isHealthy = await CNodeHealthManager.isPrimaryHealthy(primary) + expect(isHealthy).to.be.true + expect(isNodeHealthyStub).to.have.been.calledOnceWithExactly(primary, true) + }) + + it('returns true when health check fails during grace period, then false when grace period ends, then true when health check starts passing again', async function () { + // Mock CNodeHealthManager to use the config with our shorter grace period + const CNodeHealthManagerMock = proxyquire( + '../src/services/stateMachineManager/CNodeHealthManager.js', + { + './../../config': config + } + ) + // Make isNodeHealthy return false + const isNodeHealthyStub = sandbox + .stub(CNodeHealthManagerMock, 'isNodeHealthy') + .resolves(false) + + // Verify that the node is marked as healthy during the grace period (even though it's unhealthy) + const isHealthy = await CNodeHealthManagerMock.isPrimaryHealthy(primary) + expect(isHealthy).to.be.true + const isHealthyDuringGracePeriod = + await CNodeHealthManagerMock.isPrimaryHealthy(primary) + expect(isHealthyDuringGracePeriod).to.be.true + + // Verify that the node is unhealthy after the grace period ends + await Utils.timeout(gracePeriodSeconds * 1000 + 1) + const isHealthyAfterGracePeriod = + await CNodeHealthManagerMock.isPrimaryHealthy(primary) + expect(isHealthyAfterGracePeriod).to.be.false + + // Verify that the node is healthy when the health check starts passing again + isNodeHealthyStub.returns(true) + const isHealthyWhenIsHealthCheckPassesAgain = + await CNodeHealthManagerMock.isPrimaryHealthy(primary) + expect(isHealthyWhenIsHealthCheckPassesAgain).to.be.true + }) +}) + +describe('test CNodeHealthManager -- _computeContentNodePeerSet()', function () { + it('returns correct set of content nodes, filtering out empty endpoints and self', function () { + const thisContentNode = 'http://thisContentNode.co' + const users = [ + { + primary: thisContentNode, + secondary1: 'http://cn1.co', + secondary2: 'http://cn2.co' + }, + { + primary: thisContentNode, + secondary1: 'http://cn3.co', + secondary2: 'http://cn4.co' + }, + { + primary: 'http://cn5.co', + secondary1: 'http://cn2.co', + secondary2: '' + } + ] + expect( + CNodeHealthManager._computeContentNodePeerSet(users, thisContentNode) + ).to.have.all.keys( + 'http://cn1.co', + 'http://cn2.co', + 'http://cn3.co', + 'http://cn4.co', + 'http://cn5.co' + ) + }) +}) + +describe('test CNodeHealthManager logger', function () { + let sandbox + beforeEach(function () { + sandbox = sinon.createSandbox() + }) + + afterEach(function () { + sandbox.restore() + }) + + it('logs info and error with common prefix', function () { + // Initialize CNodeHealthManager with stubbed logger + const logInfoStub = sandbox.stub() + const logErrorStub = sandbox.stub() + const CNodeHealthManagerMock = proxyquire( + '../src/services/stateMachineManager/CNodeHealthManager.js', + { + './../../logging': { + logger: { + info: 
logInfoStub, + error: logErrorStub + } + } + } + ) + + // Verify that each log function passes the correct message to the logger + CNodeHealthManagerMock.log('test info msg') + expect(logInfoStub).to.have.been.calledOnceWithExactly( + 'CNodeHealthManager: test info msg' + ) + CNodeHealthManagerMock.logError('test error msg') + expect(logErrorStub).to.have.been.calledOnceWithExactly( + 'CNodeHealthManager ERROR: test error msg' + ) + }) +}) diff --git a/creator-node/test/StateMonitoringQueue.test.js b/creator-node/test/StateMonitoringQueue.test.js index bd33368597e..7a9892034c7 100644 --- a/creator-node/test/StateMonitoringQueue.test.js +++ b/creator-node/test/StateMonitoringQueue.test.js @@ -11,12 +11,12 @@ const BullQueue = require('bull') const config = require('../src/config') const StateMonitoringQueue = require('../src/services/stateMachineManager/stateMonitoring/StateMonitoringQueue') const { - STATE_MONITORING_QUEUE_NAME + QUEUE_NAMES } = require('../src/services/stateMachineManager/stateMachineConstants') const { getApp } = require('./lib/app') const { getLibsMock } = require('./lib/libsMock') -describe('test StateMonitoringQueue initialization and logging', function () { +describe('test StateMonitoringQueue initialization, logging, and events', function () { let server, sandbox beforeEach(async function () { const appInfo = await getApp(getLibsMock()) @@ -36,18 +36,24 @@ describe('test StateMonitoringQueue initialization and logging', function () { }) it('creates the queue and registers its event handlers', async function () { - // Initialize StateMonitoringQueue and spy on its registerQueueEventHandlers function - sandbox.spy(StateMonitoringQueue.prototype, 'registerQueueEventHandlers') + // Initialize StateMonitoringQueue and spy on its registerQueueEventHandlersAndJobProcessor function + sandbox.spy( + StateMonitoringQueue.prototype, + 'registerQueueEventHandlersAndJobProcessor' + ) const stateMonitoringQueue = new StateMonitoringQueue(config) await stateMonitoringQueue.init(getLibsMock()) // Verify that the queue was successfully initialized and that its event listeners were registered expect(stateMonitoringQueue.queue).to.exist.and.to.be.instanceOf(BullQueue) - expect(stateMonitoringQueue.registerQueueEventHandlers).to.have.been - .calledOnce - expect(stateMonitoringQueue.registerQueueEventHandlers.getCall(0).args[0]) + expect(stateMonitoringQueue.registerQueueEventHandlersAndJobProcessor).to + .have.been.calledOnce + expect( + stateMonitoringQueue.registerQueueEventHandlersAndJobProcessor.getCall(0) + .args[0] + ) .to.have.property('queue') - .that.has.deep.property('name', STATE_MONITORING_QUEUE_NAME) + .that.has.deep.property('name', QUEUE_NAMES.STATE_MONITORING) }) it('kicks off an initial job when initting', async function () { @@ -95,6 +101,81 @@ describe('test StateMonitoringQueue initialization and logging', function () { .be.fulfilled.and.be.empty }) + it('processes jobs with expected data and returns the expected results', async function () { + // Mock StateMonitoringQueue to have processStateMonitoringJob return dummy data + const expectedResult = { jobFailed: false, test: 'test' } + const processStateMonitoringJobStub = sandbox + .stub() + .resolves(expectedResult) + const MockStateMonitoringQueue = proxyquire( + '../src/services/stateMachineManager/stateMonitoring/StateMonitoringQueue.js', + { + './monitorState.jobProcessor': processStateMonitoringJobStub + } + ) + + // Verify that StateMonitoringQueue returns our dummy data + const job = { + id: 9, + data: { + 
lastProcessedUserId: 2, + discoveryNodeEndpoint: 'http://test_endpoint.co', + moduloBase: 1, + currentModuloSlice: 2 + } + } + await expect( + new MockStateMonitoringQueue().processJob(job) + ).to.eventually.be.fulfilled.and.deep.equal(expectedResult) + expect(processStateMonitoringJobStub).to.have.been.calledOnceWithExactly( + job.id, + job.data.lastProcessedUserId, + job.data.discoveryNodeEndpoint, + job.data.moduloBase, + job.data.currentModuloSlice + ) + }) + + it('returns default result when processing a job fails or logging fails', async function () { + // Mock StateMonitoringQueue to have processStateMonitoringJob reject the promise + const logErrorStub = sandbox.stub() + const processStateMonitoringJobStub = sandbox.stub().rejects('test error') + const MockStateMonitoringQueue = proxyquire( + '../src/services/stateMachineManager/stateMonitoring/StateMonitoringQueue.js', + { + './monitorState.jobProcessor': processStateMonitoringJobStub, + './../../../logging': { + logger: { + error: logErrorStub, + info: sandbox.stub() + } + } + } + ) + + // Verify that StateMonitoringQueue throws and returns default data + const job = { + id: 9, + data: { + lastProcessedUserId: 2, + discoveryNodeEndpoint: 'http://test_endpoint.co', + moduloBase: 1, + currentModuloSlice: 2 + } + } + await expect( + new MockStateMonitoringQueue().processJob(job) + ).to.eventually.be.fulfilled.and.deep.equal({ + lastProcessedUserId: job.data.lastProcessedUserId, + moduloBase: job.data.moduloBase, + currentModuloSlice: job.data.currentModuloSlice, + jobFailed: true + }) + expect(logErrorStub).to.have.been.calledOnceWithExactly( + `StateMonitoringQueue ERROR: Error processing jobId ${job.id}: test error` + ) + }) + it('logs debug, info, warning, and error', function () { // Initialize StateMonitoringQueue with stubbed logger const logDebugStub = sandbox.stub() diff --git a/creator-node/test/processStateMonitoringJob.test.js b/creator-node/test/monitorState.jobProcessor.test.js similarity index 77% rename from creator-node/test/processStateMonitoringJob.test.js rename to creator-node/test/monitorState.jobProcessor.test.js index 42ae19cb05c..fa39e177ddd 100644 --- a/creator-node/test/processStateMonitoringJob.test.js +++ b/creator-node/test/monitorState.jobProcessor.test.js @@ -10,18 +10,16 @@ const _ = require('lodash') const config = require('../src/config') const { getApp } = require('./lib/app') const { getLibsMock } = require('./lib/libsMock') -const NodeHealthManager = require('../src/services/stateMachineManager/CNodeHealthManager') -const CNodeToSpIdMapManager = require('../src/services/stateMachineManager/CNodeToSpIdMapManager') -describe('test processStateMonitoringJob', function () { +describe('test monitorState job processor', function () { let server, sandbox, + originalContentNodeEndpoint, getNodeUsersStub, getUnhealthyPeersStub, buildReplicaSetNodesToUserWalletsMapStub, retrieveClockStatusesForUsersAcrossReplicaSetStub, computeUserSecondarySyncSuccessRatesMapStub, - aggregateReconfigAndPotentialSyncOpsStub, getCNodeEndpointToSpIdMapStub beforeEach(async function () { const appInfo = await getApp(getLibsMock()) @@ -29,18 +27,19 @@ describe('test processStateMonitoringJob', function () { server = appInfo.server sandbox = sinon.createSandbox() config.set('spID', 1) + originalContentNodeEndpoint = config.get('creatorNodeEndpoint') }) afterEach(async function () { await server.close() sandbox.restore() + config.set('creatorNodeEndpoint', originalContentNodeEndpoint) getNodeUsersStub = null getUnhealthyPeersStub 
= null buildReplicaSetNodesToUserWalletsMapStub = null retrieveClockStatusesForUsersAcrossReplicaSetStub = null computeUserSecondarySyncSuccessRatesMapStub = null - aggregateReconfigAndPotentialSyncOpsStub = null getCNodeEndpointToSpIdMapStub = null }) @@ -64,12 +63,6 @@ describe('test processStateMonitoringJob', function () { unhealthyPeers: RETRIEVE_CLOCK_STATUS_EXTRA_UNHEALTHY_PEERS } const USER_SECONDARY_SYNC_SUCCESS_RATES_MAP = { dummyMap: 'dummyMap' } - const REQUIRED_UPDATE_REPLICA_SET_OPS = [] - const POTENTIAL_SYNC_REQUESTS = [] - const AGGREGATE_RECONFIG_AND_POTENTIAL_SYNC_OPS = { - requiredUpdateReplicaSetOps: REQUIRED_UPDATE_REPLICA_SET_OPS, - potentialSyncRequests: POTENTIAL_SYNC_REQUESTS - } const CNODE_ENDPOINT_TO_SP_ID_MAP = { dummyCNodeMap: 'dummyCNodeMap' } // Return processStateMonitoringJob with each step stubbed to return @@ -80,7 +73,6 @@ describe('test processStateMonitoringJob', function () { replicaSetNodesToUserWalletsMap = REPLICA_SET_NODES_TO_USER_WALLETS_MAP, retrieveClockStatusesForUsersAcrossReplicaSetResp = RETRIEVE_CLOCK_STATUSES_FOR_USERS_ACROSS_REPLICA_SET_RESP, userSecondarySyncSuccessRatesMap = USER_SECONDARY_SYNC_SUCCESS_RATES_MAP, - aggregateReconfigAndPotentialSyncOps = AGGREGATE_RECONFIG_AND_POTENTIAL_SYNC_OPS, cNodeEndpointToSpIdMap = CNODE_ENDPOINT_TO_SP_ID_MAP }) { // Make the stubs return the given params if they're not already set @@ -105,23 +97,15 @@ describe('test processStateMonitoringJob', function () { .stub() .resolves(userSecondarySyncSuccessRatesMap) } - if (!aggregateReconfigAndPotentialSyncOpsStub) { - aggregateReconfigAndPotentialSyncOpsStub = sandbox - .stub() - .resolves(aggregateReconfigAndPotentialSyncOps) - } if (!getCNodeEndpointToSpIdMapStub) { getCNodeEndpointToSpIdMapStub = sandbox .stub() .returns(cNodeEndpointToSpIdMap) } - // Make processStateMonitoringJob.js's imports return our stubs - NodeHealthManager.getUnhealthyPeers = getUnhealthyPeersStub - CNodeToSpIdMapManager.getCNodeEndpointToSpIdMap = - getCNodeEndpointToSpIdMapStub + // Make monitorState.jobProcessor.js's imports return our stubs return proxyquire( - '../src/services/stateMachineManager/stateMonitoring/processStateMonitoringJob.js', + '../src/services/stateMachineManager/stateMonitoring/monitorState.jobProcessor.js', { '../../../config': config, './stateMonitoringUtils': { @@ -129,12 +113,14 @@ describe('test processStateMonitoringJob', function () { buildReplicaSetNodesToUserWalletsMap: buildReplicaSetNodesToUserWalletsMapStub, computeUserSecondarySyncSuccessRatesMap: - computeUserSecondarySyncSuccessRatesMapStub, - aggregateReconfigAndPotentialSyncOps: - aggregateReconfigAndPotentialSyncOpsStub + computeUserSecondarySyncSuccessRatesMapStub + }, + '../CNodeHealthManager': { + getUnhealthyPeers: getUnhealthyPeersStub + }, + '../CNodeToSpIdMapManager': { + getCNodeEndpointToSpIdMap: getCNodeEndpointToSpIdMapStub }, - '../CNodeHealthManager': NodeHealthManager, - '../CNodeToSpIdMapManager': CNodeToSpIdMapManager, '../stateMachineUtils': { retrieveClockStatusesForUsersAcrossReplicaSet: retrieveClockStatusesForUsersAcrossReplicaSetStub @@ -189,7 +175,11 @@ describe('test processStateMonitoringJob', function () { }) ).to.eventually.be.fulfilled.and.deep.equal({ lastProcessedUserId: lastProcessedUserId + numUsersToProcess - 1, - jobFailed: false + jobFailed: false, + users, + unhealthyPeers: UNHEALTHY_PEERS, + replicaSetNodesToUserClockStatusesMap: REPLICAS_TO_USER_CLOCK_STATUS_MAP, + userSecondarySyncMetricsMap: USER_SECONDARY_SYNC_SUCCESS_RATES_MAP }) }) @@ 
-208,11 +198,15 @@ describe('test processStateMonitoringJob', function () { ) expect(jobResult).to.deep.equal({ lastProcessedUserId: 0, - jobFailed: false + jobFailed: false, + users: [], + unhealthyPeers: UNHEALTHY_PEERS, + replicaSetNodesToUserClockStatusesMap: REPLICAS_TO_USER_CLOCK_STATUS_MAP, + userSecondarySyncMetricsMap: USER_SECONDARY_SYNC_SUCCESS_RATES_MAP }) }) - it('should call the steps and return required syncs/updates without throwing', async function () { + it('should call the steps and return state data for users slice without throwing', async function () { // Run processStateMonitoringJob with each step succeeding const jobResult = await processStateMonitoringJob({}) @@ -233,57 +227,13 @@ describe('test processStateMonitoringJob', function () { expect( computeUserSecondarySyncSuccessRatesMapStub ).to.have.been.calledOnceWithExactly(USERS) - expect( - aggregateReconfigAndPotentialSyncOpsStub - ).to.have.been.calledOnceWithExactly( - USERS, - UNHEALTHY_PEERS, - USER_SECONDARY_SYNC_SUCCESS_RATES_MAP, - CNODE_ENDPOINT_TO_SP_ID_MAP, - CONTENT_NODE_ENDPOINT - ) - expect(jobResult).to.deep.equal({ - lastProcessedUserId: USER_ID, - jobFailed: false - }) - }) - - it('should return without throwing when aggregateReconfigAndPotentialSyncOps throws an error', async function () { - // Run processStateMonitoringJob with each step succeeding except aggregateReconfigAndPotentialSyncOpsStub - aggregateReconfigAndPotentialSyncOpsStub = sandbox - .stub() - .rejects('test unexpected error') - const jobResult = await processStateMonitoringJob({}) - - // Verify that aggregateReconfigAndPotentialSyncOpsStub fails and steps before it succeed - expect(getNodeUsersStub).to.have.been.calledOnceWithExactly( - DISCOVERY_NODE_ENDPOINT, - CONTENT_NODE_ENDPOINT, - LAST_PROCESSED_USER_ID, - NUM_USERS_TO_PROCESS - ) - expect(getUnhealthyPeersStub).to.have.been.calledOnceWithExactly(USERS) - expect( - buildReplicaSetNodesToUserWalletsMapStub - ).to.have.been.calledOnceWithExactly(USERS) - expect( - retrieveClockStatusesForUsersAcrossReplicaSetStub - ).to.have.been.calledOnceWithExactly(REPLICA_SET_NODES_TO_USER_WALLETS_MAP) - expect( - computeUserSecondarySyncSuccessRatesMapStub - ).to.have.been.calledOnceWithExactly(USERS) - expect( - aggregateReconfigAndPotentialSyncOpsStub - ).to.have.been.calledOnceWithExactly( - USERS, - UNHEALTHY_PEERS, - USER_SECONDARY_SYNC_SUCCESS_RATES_MAP, - CNODE_ENDPOINT_TO_SP_ID_MAP, - CONTENT_NODE_ENDPOINT - ) expect(jobResult).to.deep.equal({ lastProcessedUserId: USER_ID, - jobFailed: true + jobFailed: false, + users: USERS, + unhealthyPeers: UNHEALTHY_PEERS, + replicaSetNodesToUserClockStatusesMap: REPLICAS_TO_USER_CLOCK_STATUS_MAP, + userSecondarySyncMetricsMap: USER_SECONDARY_SYNC_SUCCESS_RATES_MAP }) }) @@ -311,10 +261,13 @@ describe('test processStateMonitoringJob', function () { expect( computeUserSecondarySyncSuccessRatesMapStub ).to.have.been.calledOnceWithExactly(USERS) - expect(aggregateReconfigAndPotentialSyncOpsStub).to.not.have.been.called expect(jobResult).to.deep.equal({ lastProcessedUserId: USER_ID, - jobFailed: true + jobFailed: true, + users: USERS, + unhealthyPeers: UNHEALTHY_PEERS, + replicaSetNodesToUserClockStatusesMap: REPLICAS_TO_USER_CLOCK_STATUS_MAP, + userSecondarySyncMetricsMap: {} }) }) @@ -340,10 +293,13 @@ describe('test processStateMonitoringJob', function () { retrieveClockStatusesForUsersAcrossReplicaSetStub ).to.have.been.calledOnceWithExactly(REPLICA_SET_NODES_TO_USER_WALLETS_MAP) 
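(For readers tracking the assertions in these hunks: the reworked monitorState job no longer returns precomputed sync/reconfig ops, only raw state data for the users slice. A sketch of the result shape as implied by the tests, in TypeScript for readability; the field names come from the test constants above, while the nested value types are inferred rather than taken from the source.)

```ts
// Result shape of the monitorState job processor implied by the assertions.
// Inferred from the tests; production code may type these fields differently.
type MonitorStateJobResult = {
  lastProcessedUserId: number
  jobFailed: boolean
  users: Array<{ user_id: number }>
  unhealthyPeers: Set<string>
  // content node endpoint -> (wallet -> clock value), assumed nesting
  replicaSetNodesToUserClockStatusesMap: Record<string, Record<string, number>>
  // wallet -> secondary sync success metrics, assumed nesting
  userSecondarySyncMetricsMap: Record<string, unknown>
}
```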
expect(computeUserSecondarySyncSuccessRatesMapStub).to.not.have.been.called - expect(aggregateReconfigAndPotentialSyncOpsStub).to.not.have.been.called expect(jobResult).to.deep.equal({ lastProcessedUserId: USER_ID, - jobFailed: true + jobFailed: true, + users: USERS, + unhealthyPeers: UNHEALTHY_PEERS, + replicaSetNodesToUserClockStatusesMap: {}, + userSecondarySyncMetricsMap: {} }) }) @@ -368,10 +324,13 @@ describe('test processStateMonitoringJob', function () { expect(retrieveClockStatusesForUsersAcrossReplicaSetStub).to.not.have.been .called expect(computeUserSecondarySyncSuccessRatesMapStub).to.not.have.been.called - expect(aggregateReconfigAndPotentialSyncOpsStub).to.not.have.been.called expect(jobResult).to.deep.equal({ lastProcessedUserId: USER_ID, - jobFailed: true + jobFailed: true, + users: USERS, + unhealthyPeers: UNHEALTHY_PEERS, + replicaSetNodesToUserClockStatusesMap: {}, + userSecondarySyncMetricsMap: {} }) }) @@ -392,10 +351,13 @@ describe('test processStateMonitoringJob', function () { expect(retrieveClockStatusesForUsersAcrossReplicaSetStub).to.not.have.been .called expect(computeUserSecondarySyncSuccessRatesMapStub).to.not.have.been.called - expect(aggregateReconfigAndPotentialSyncOpsStub).to.not.have.been.called expect(jobResult).to.deep.equal({ lastProcessedUserId: USER_ID, - jobFailed: true + jobFailed: true, + users: USERS, + unhealthyPeers: new Set(), + replicaSetNodesToUserClockStatusesMap: {}, + userSecondarySyncMetricsMap: {} }) }) @@ -416,10 +378,13 @@ describe('test processStateMonitoringJob', function () { expect(retrieveClockStatusesForUsersAcrossReplicaSetStub).to.not.have.been .called expect(computeUserSecondarySyncSuccessRatesMapStub).to.not.have.been.called - expect(aggregateReconfigAndPotentialSyncOpsStub).to.not.have.been.called expect(jobResult).to.deep.equal({ lastProcessedUserId: LAST_PROCESSED_USER_ID, - jobFailed: true + jobFailed: true, + users: [{ user_id: LAST_PROCESSED_USER_ID }], + unhealthyPeers: new Set(), + replicaSetNodesToUserClockStatusesMap: {}, + userSecondarySyncMetricsMap: {} }) }) }) diff --git a/creator-node/test/snapbackSM.test.js b/creator-node/test/snapbackSM.test.js index 07bf485a482..43b34f0d92d 100644 --- a/creator-node/test/snapbackSM.test.js +++ b/creator-node/test/snapbackSM.test.js @@ -471,6 +471,9 @@ describe('test SnapbackSM -- issueUpdateReplicaSetOp', function () { } ) const snapback = new mockSnapback(nodeConfig, getLibsMock()) + snapback.getLatestUserId = async () => { return 0 } + snapback.inittedJobProcessors = true + await snapback.init() const newReplicaSet = { newPrimary: constants.primaryEndpoint, @@ -540,6 +543,9 @@ describe('test SnapbackSM -- aggregateReconfigAndPotentialSyncOps', function () it('if the self node is the secondary and a primary spId is different from what is on chain, issue reconfig', async function () { const snapback = new SnapbackSM(nodeConfig, getLibsMock()) + snapback.getLatestUserId = async () => { return 0 } + snapback.inittedJobProcessors = true + await snapback.init() // Mock that one of the nodes got reregistered from spId 3 to spId 4 snapback.peerSetManager.endpointToSPIdMap = { @@ -586,6 +592,9 @@ describe('test SnapbackSM -- aggregateReconfigAndPotentialSyncOps', function () it('if the self node is the primary and a secondary spId is different from what is on chain, issue reconfig', async function () { const snapback = new SnapbackSM(nodeConfig, getLibsMock()) + snapback.inittedJobProcessors = true + snapback.getLatestUserId = async () => { return 0 } + await snapback.init() // 
Mock that one of the nodes got reregistered from spId 3 to spId 4 snapback.peerSetManager.endpointToSPIdMap = { @@ -632,6 +641,9 @@ describe('test SnapbackSM -- aggregateReconfigAndPotentialSyncOps', function () it('if the self node (primary) is the same as the SP with a different spId, do not issue reconfig', async function () { const snapback = new SnapbackSM(nodeConfig, getLibsMock()) + snapback.inittedJobProcessors = true + snapback.getLatestUserId = async () => { return 0 } + await snapback.init() // Mock that one of the nodes got reregistered from spId 3 to spId 4 snapback.peerSetManager.endpointToSPIdMap = { @@ -680,6 +692,9 @@ describe('test SnapbackSM -- aggregateReconfigAndPotentialSyncOps', function () it('if the self node (secondary) is the same as the SP with a different spId, do not issue reconfig', async function () { const snapback = new SnapbackSM(nodeConfig, getLibsMock()) + snapback.inittedJobProcessors = true + snapback.getLatestUserId = async () => { return 0 } + await snapback.init() // Mock that one of the nodes got reregistered from spId 3 to spId 4 snapback.peerSetManager.endpointToSPIdMap = { @@ -725,6 +740,9 @@ describe('test SnapbackSM -- aggregateReconfigAndPotentialSyncOps', function () it('if any replica set node is not in the map, issue reconfig', async function () { const snapback = new SnapbackSM(nodeConfig, getLibsMock()) + snapback.inittedJobProcessors = true + snapback.getLatestUserId = async () => { return 0 } + await snapback.init() // Mock the deregistered node to not have any spId snapback.peerSetManager.endpointToSPIdMap = { @@ -769,6 +787,9 @@ describe('test SnapbackSM -- aggregateReconfigAndPotentialSyncOps', function () it('if the self node (primary) and 1 secondary are healthy but not the other secondary, issue reconfig for the unhealthy secondary', async function () { const snapback = new SnapbackSM(nodeConfig, getLibsMock()) + snapback.inittedJobProcessors = true + snapback.getLatestUserId = async () => { return 0 } + await snapback.init() snapback.peerSetManager.endpointToSPIdMap = { 'http://some_healthy_primary.co': 1, @@ -819,6 +840,9 @@ describe('test SnapbackSM -- aggregateReconfigAndPotentialSyncOps', function () nodeConfig.set('minimumFailedSyncRequestsBeforeReconfig', 5) nodeConfig.set('minimumSecondaryUserSyncSuccessPercent', 25) const snapback = new SnapbackSM(nodeConfig, getLibsMock()) + snapback.inittedJobProcessors = true + snapback.getLatestUserId = async () => { return 0 } + await snapback.init() snapback.peerSetManager.endpointToSPIdMap = { 'http://some_healthy_primary.co': 1, @@ -1411,6 +1435,9 @@ describe('test SnapbackSM -- processStateMachineOperation', function () { } ) const snapback = new mockSnapback(nodeConfig, getLibsMock()) + snapback.inittedJobProcessors = true + snapback.getLatestUserId = async () => { return 0 } + await snapback.init() snapback.endpoint = 'http://healthyCn1.co' snapback.retrieveClockStatusesForUsersAcrossReplicaSet = retrieveClockStatusesForUsersAcrossReplicaSetStub snapback.computeUserSecondarySyncSuccessRatesMap = computeUserSecondarySyncSuccessRatesMapStub @@ -1470,6 +1497,9 @@ describe('test SnapbackSM -- additionalSyncIsRequired', function () { ) const snapback = new mockSnapback(nodeConfig, getLibsMock()) + snapback.inittedJobProcessors = true + snapback.getLatestUserId = async () => { return 0 } + await snapback.init() snapback._retrieveClockValueForUserFromReplica = retrieveClockValueForUserFromReplicaStub retrieveClockValueForUserFromReplicaStub.resolves(finalSecondaryClockValue) 
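(Aside on the stubbing pattern in this hunk: sinon's `onCall(0)` override on the next line takes precedence over the general `resolves()` value set above, so the first poll of the secondary sees the initial clock value and every later poll sees the final one, simulating a secondary that catches up mid-sync. A minimal standalone sketch:)

```ts
import sinon from 'sinon'

// resolves(x) sets the default; onCall(0) overrides the first call only
const clockStub = sinon.stub()
clockStub.resolves(10) // stands in for finalSecondaryClockValue
clockStub.onCall(0).resolves(5) // stands in for initialSecondaryClockValue

async function demo() {
  console.log(await clockStub()) // 5  -> secondary still behind
  console.log(await clockStub()) // 10 -> secondary has caught up
}
demo()
```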
retrieveClockValueForUserFromReplicaStub.onCall(0).resolves(initialSecondaryClockValue) @@ -1516,6 +1546,9 @@ describe('test SnapbackSM -- additionalSyncIsRequired', function () { ) const snapback = new mockSnapback(nodeConfig, getLibsMock()) + snapback.inittedJobProcessors = true + snapback.getLatestUserId = async () => { return 0 } + await snapback.init() snapback._retrieveClockValueForUserFromReplica = retrieveClockValueForUserFromReplicaStub retrieveClockValueForUserFromReplicaStub.resolves(finalSecondaryClockValue) @@ -1632,6 +1665,9 @@ describe('test SnapbackSM -- computeUserSecondarySyncSuccessRatesMap', function } const snapback = new SnapbackSM(nodeConfig, getLibsMock()) + snapback.inittedJobProcessors = true + snapback.getLatestUserId = async () => { return 0 } + await snapback.init() const userSecondarySyncMetricsMap = await snapback.computeUserSecondarySyncSuccessRatesMap(nodeUsers) expect(userSecondarySyncMetricsMap).to.deep.equal(expectedUserSecondarySyncMetricsMap) diff --git a/discovery-provider/.test.env b/discovery-provider/.test.env index c0b7161145e..6b605d15e11 100644 --- a/discovery-provider/.test.env +++ b/discovery-provider/.test.env @@ -1,4 +1,7 @@ # TODO: dummy ganache keys for local setup; should wire with dynamically generated keys audius_delegate_owner_wallet=0x1D9c77BcfBfa66D37390BF2335f0140979a6122B audius_delegate_private_key=0x3873ed01bfb13621f9301487cc61326580614a5b99f3c33cf39c6f9da3a19cad -audius_solana_rewards_manager_account=8MzNUaBHskteN7poTrZG5wgSNSbXQwieMDB4wk9fgB7f \ No newline at end of file +audius_solana_rewards_manager_account=8MzNUaBHskteN7poTrZG5wgSNSbXQwieMDB4wk9fgB7f + +audius_elasticsearch_url=http://localhost:9200 +audius_db_url=postgresql+psycopg2://postgres:postgres@localhost:5432/test_audius_discovery \ No newline at end of file diff --git a/discovery-provider/.version.json b/discovery-provider/.version.json index 41fe2883a50..8cbb4cfcb8c 100644 --- a/discovery-provider/.version.json +++ b/discovery-provider/.version.json @@ -1,4 +1,4 @@ { - "version": "0.3.58", + "version": "0.3.59", "service": "discovery-node" } \ No newline at end of file diff --git a/discovery-provider/compose/docker-compose.backend.yml b/discovery-provider/compose/docker-compose.backend.yml index a57ca7e3a8b..3e87ed06e56 100644 --- a/discovery-provider/compose/docker-compose.backend.yml +++ b/discovery-provider/compose/docker-compose.backend.yml @@ -13,6 +13,9 @@ services: - audius_redis_url=redis://${COMPOSE_PROJECT_NAME}_redis-server_1:6379/00 - audius_db_url=postgresql+psycopg2://postgres:postgres@${COMPOSE_PROJECT_NAME}_discovery-provider-db_1:5432/audius_discovery - audius_db_url_read_replica=postgresql+psycopg2://postgres:postgres@${COMPOSE_PROJECT_NAME}_discovery-provider-db_1:5432/audius_discovery + - audius_elasticsearch_url=http://elasticsearch:9200 + - audius_elasticsearch_run_indexer=true + - audius_elasticsearch_search_enabled=true - audius_delegate_owner_wallet=${audius_delegate_owner_wallet} - audius_delegate_private_key=${audius_delegate_private_key} - audius_ipfs_host=${COMPOSE_PROJECT_NAME}-ipfs-node diff --git a/discovery-provider/compose/env/commonEnv.sh b/discovery-provider/compose/env/commonEnv.sh index 0ab329e7ecf..d52dc58178f 100755 --- a/discovery-provider/compose/env/commonEnv.sh +++ b/discovery-provider/compose/env/commonEnv.sh @@ -3,3 +3,4 @@ export audius_delegate_private_key=AUDIUS_DELEGATE_PRIVATE_KEY echo $audius_delegate_owner_wallet echo $audius_delegate_private_key export COMPOSE_HTTP_TIMEOUT=200 +export 
audius_discprov_loglevel_flask=DEBUG diff --git a/discovery-provider/default_config.ini b/discovery-provider/default_config.ini index f59e92b4387..5a2711646d5 100644 --- a/discovery-provider/default_config.ini +++ b/discovery-provider/default_config.ini @@ -1,6 +1,6 @@ [discprov] start_block = 0x0 -loglevel_flask = DEBUG +loglevel_flask = INFO ; do not configure the log level here as this gets overridden by celery lib during setup ; set log level via command line in docker yml files instead ; loglevel_celery = INFO diff --git a/discovery-provider/es-indexer/README.md b/discovery-provider/es-indexer/README.md index 44f64b9699b..4fb40cf356f 100644 --- a/discovery-provider/es-indexer/README.md +++ b/discovery-provider/es-indexer/README.md @@ -21,6 +21,22 @@ If you are adding a new denormalization (attaching data from a related model), t - For "catchup" mode this is the `checkpointSql` function. See UserIndexer or TrackIndexer for an example - For listen / notify mode, this is the handler code in `listen.ts` +When working on mapping changes, I might put code like this at the top of the `main()` function in `main.ts`: + +```ts +await new Promise((r) => setTimeout(r, 100)) // don't ask... will fix haha +await indexer.playlists.createIndex({ drop: true }) +await indexer.playlists.catchup() +process.exit(0) +``` + +and then run: + +``` +source .env +npm run dev +``` + ## How it works The program attempts to avoid any gaps by doing a "catchup" on boot... when complete it switches to processing "batches", which are events collected from postgres LISTEN / NOTIFY. @@ -43,8 +59,7 @@ Check the "elasticsearch" health info in the `/health_check?verbose=true` endpoint. (instructions for sandbox3... subject to change): Use Kibana: -Uncomment the kibana container and restart discovery-provider. - +Uncomment the kibana container and restart discovery-provider. 
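If Kibana is not handy, the same inspection can be done with a short script against the ES client (a sketch, assuming `audius_elasticsearch_url` points at the cluster as configured elsewhere in this diff):

```ts
import { Client } from '@elastic/elasticsearch'

// assumes audius_elasticsearch_url is set, e.g. http://localhost:9200
const es = new Client({ node: process.env.audius_elasticsearch_url })

async function main() {
  // per-index doc counts and sizes (roughly what you'd eyeball in Kibana)
  console.table(await es.cat.indices({ format: 'json' }))
  // which aliases point at which concrete indexes after cutover
  console.table(await es.cat.aliases({ format: 'json' }))
}
main()
```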
List indices: diff --git a/discovery-provider/es-indexer/package.json b/discovery-provider/es-indexer/package.json index e3d5e3dd6c7..19a729ada9d 100644 --- a/discovery-provider/es-indexer/package.json +++ b/discovery-provider/es-indexer/package.json @@ -6,7 +6,7 @@ "dev": "ts-node src/main.ts", "nuke": "ts-node nuke.ts", "start": "tsc && pm2-runtime build/src/main.js --restart-delay=3000", - "test": "echo \"Error: no test specified\" && exit 1" + "test": "tsc --noEmit" }, "keywords": [], "author": "", diff --git a/discovery-provider/es-indexer/src/conn.ts b/discovery-provider/es-indexer/src/conn.ts index 5659758207c..f347f4ca25a 100644 --- a/discovery-provider/es-indexer/src/conn.ts +++ b/discovery-provider/es-indexer/src/conn.ts @@ -43,6 +43,14 @@ export async function waitForHealthyCluster() { ) } +export async function ensureSaneCluterSettings() { + return dialEs().cluster.putSettings({ + persistent: { + 'action.auto_create_index': false, + }, + }) +} + /** * Gets the max(blocknumber) from elasticsearch indexes * Used for incremental indexing to understand "where we were" so we can load new data from postgres diff --git a/discovery-provider/es-indexer/src/indexNames.ts b/discovery-provider/es-indexer/src/indexNames.ts index 697ea9c2e5e..eaf16e1f735 100644 --- a/discovery-provider/es-indexer/src/indexNames.ts +++ b/discovery-provider/es-indexer/src/indexNames.ts @@ -1,7 +1,7 @@ export const indexNames = { - playlists: 'playlists2', + playlists: 'playlists6', reposts: 'reposts2', saves: 'saves2', - tracks: 'tracks2', - users: 'users2', + tracks: 'tracks6', + users: 'users6', } diff --git a/discovery-provider/es-indexer/src/indexers/BaseIndexer.ts b/discovery-provider/es-indexer/src/indexers/BaseIndexer.ts index c08ce528326..faee4d1a79d 100644 --- a/discovery-provider/es-indexer/src/indexers/BaseIndexer.ts +++ b/discovery-provider/es-indexer/src/indexers/BaseIndexer.ts @@ -44,6 +44,12 @@ export abstract class BaseIndexer { } } + async refreshIndex() { + const { es, logger, indexName } = this + logger.info('refreshing index: ' + indexName) + await es.indices.refresh({ index: indexName }) + } + async cutoverAlias() { const { es, logger, indexName, tableName } = this @@ -173,7 +179,7 @@ export abstract class BaseIndexer { ]) } - async withBatch(rows: Array<RowType>) {} + async withBatch(rows: Array<RowType>) { } - withRow(row: RowType) {} + withRow(row: RowType) { } } diff --git a/discovery-provider/es-indexer/src/indexers/PlaylistIndexer.ts b/discovery-provider/es-indexer/src/indexers/PlaylistIndexer.ts index 6244a1eaaf0..d8d03ae8017 100644 --- a/discovery-provider/es-indexer/src/indexers/PlaylistIndexer.ts +++ b/discovery-provider/es-indexer/src/indexers/PlaylistIndexer.ts @@ -1,10 +1,15 @@ import { IndicesCreateRequest } from '@elastic/elasticsearch/lib/api/types' -import { keyBy } from 'lodash' +import { keyBy, merge } from 'lodash' import { dialPg } from '../conn' import { indexNames } from '../indexNames' import { BlocknumberCheckpoint } from '../types/blocknumber_checkpoint' import { PlaylistDoc } from '../types/docs' import { BaseIndexer } from './BaseIndexer' +import { + sharedIndexSettings, + standardSuggest, + standardText, +} from './sharedIndexSettings' export class PlaylistIndexer extends BaseIndexer { tableName = 'playlists' @@ -13,13 +18,13 @@ export class PlaylistIndexer extends BaseIndexer { mapping: IndicesCreateRequest = { index: indexNames.playlists, - settings: { + settings: merge(sharedIndexSettings, { index: { number_of_shards: 1, number_of_replicas: 0, refresh_interval: '5s', }, - }, 
+ }), mappings: { dynamic: false, properties: { @@ -30,9 +35,37 @@ export class PlaylistIndexer extends BaseIndexer { is_album: { type: 'boolean' }, is_private: { type: 'boolean' }, is_delete: { type: 'boolean' }, - playlist_name: { type: 'text' }, + suggest: standardSuggest, + playlist_name: { + type: 'keyword', + fields: { + searchable: standardText, + }, + }, 'playlist_contents.track_ids.track': { type: 'keyword' }, + user: { + properties: { + handle: { + type: 'keyword', + fields: { + searchable: standardText, + }, + }, + name: { + type: 'keyword', + fields: { + searchable: standardText, + }, + }, + location: { type: 'keyword' }, + follower_count: { type: 'integer' }, + is_verified: { type: 'boolean' }, + created_at: { type: 'date' }, + updated_at: { type: 'date' }, + }, + }, + // saves saved_by: { type: 'keyword' }, save_count: { type: 'integer' }, @@ -58,7 +91,17 @@ export class PlaylistIndexer extends BaseIndexer { return ` -- etl playlists select - *, + playlists.*, + + json_build_object( + 'handle', users.handle, + 'name', users.name, + 'location', users.location, + 'follower_count', follower_count, + 'is_verified', users.is_verified, + 'created_at', users.created_at, + 'updated_at', users.updated_at + ) as user, array( select user_id @@ -83,7 +126,11 @@ export class PlaylistIndexer extends BaseIndexer { ) as saved_by from playlists - where is_current = true + join users on playlist_owner_id = user_id + left join aggregate_user on users.user_id = aggregate_user.user_id + where + playlists.is_current + AND users.is_current ` } @@ -132,6 +179,9 @@ export class PlaylistIndexer extends BaseIndexer { } withRow(row: PlaylistDoc) { + row.suggest = [row.playlist_name, row.user.handle, row.user.name] + .filter((x) => x) + .join(' ') row.repost_count = row.reposted_by.length row.save_count = row.saved_by.length } diff --git a/discovery-provider/es-indexer/src/indexers/TrackIndexer.ts b/discovery-provider/es-indexer/src/indexers/TrackIndexer.ts index f05bb78c0de..0e9eb7ec2f2 100644 --- a/discovery-provider/es-indexer/src/indexers/TrackIndexer.ts +++ b/discovery-provider/es-indexer/src/indexers/TrackIndexer.ts @@ -1,8 +1,14 @@ import { IndicesCreateRequest } from '@elastic/elasticsearch/lib/api/types' +import { merge } from 'lodash' import { indexNames } from '../indexNames' import { BlocknumberCheckpoint } from '../types/blocknumber_checkpoint' import { TrackDoc } from '../types/docs' import { BaseIndexer } from './BaseIndexer' +import { + sharedIndexSettings, + standardSuggest, + standardText, +} from './sharedIndexSettings' export class TrackIndexer extends BaseIndexer { tableName = 'tracks' @@ -12,44 +18,55 @@ export class TrackIndexer extends BaseIndexer { mapping: IndicesCreateRequest = { index: indexNames.tracks, - settings: { + settings: merge(sharedIndexSettings, { + analysis: { + tokenizer: { + comma_tokenizer: { + // @ts-ignore - es client typings lagging + type: 'simple_pattern_split', + pattern: ',', + }, + }, + analyzer: { + // @ts-ignore - es client typings lagging + comma_analyzer: { + tokenizer: 'comma_tokenizer', + filter: ['lowercase', 'asciifolding'], + }, + }, + }, index: { number_of_shards: 1, number_of_replicas: 0, refresh_interval: '5s', - - analysis: { - normalizer: { - lower_ascii: { - type: 'custom', - char_filter: [], - filter: ['lowercase', 'asciifolding'], - }, - }, - }, }, - }, + }), mappings: { dynamic: false, properties: { blocknumber: { type: 'integer' }, owner_id: { type: 'keyword' }, created_at: { type: 'date' }, + updated_at: { type: 'date' }, permalink: { 
type: 'keyword' }, route_id: { type: 'keyword' }, routes: { type: 'keyword' }, - title: { type: 'text' }, - description: { type: 'text' }, + title: { + type: 'keyword', + fields: { + searchable: standardText, + }, + }, length: { type: 'integer' }, tags: { - type: 'keyword', - normalizer: 'lower_ascii', + type: 'text', + analyzer: 'comma_analyzer', }, genre: { type: 'keyword' }, mood: { type: 'keyword' }, is_delete: { type: 'boolean' }, is_unlisted: { type: 'boolean' }, - is_downloadable: { type: 'boolean' }, + downloadable: { type: 'boolean' }, // saves saved_by: { type: 'keyword' }, @@ -58,13 +75,27 @@ export class TrackIndexer extends BaseIndexer { reposted_by: { type: 'keyword' }, repost_count: { type: 'integer' }, - artist: { + suggest: standardSuggest, + + user: { properties: { - handle: { type: 'keyword' }, + handle: { + type: 'keyword', + fields: { + searchable: standardText, + }, + }, + name: { + type: 'keyword', + fields: { + searchable: standardText, + }, + }, location: { type: 'keyword' }, - name: { type: 'text' }, // should it be keyword with a `searchable` treatment? follower_count: { type: 'integer' }, is_verified: { type: 'boolean' }, + created_at: { type: 'date' }, + updated_at: { type: 'date' }, }, }, @@ -85,19 +116,19 @@ export class TrackIndexer extends BaseIndexer { -- etl tracks select tracks.*, - (tracks.download->>'is_downloadable')::boolean as is_downloadable, + (tracks.download->>'is_downloadable')::boolean as downloadable, coalesce(aggregate_plays.count, 0) as play_count, json_build_object( 'handle', users.handle, 'name', users.name, 'location', users.location, - 'follower_count', coalesce(follower_count, 0), - 'is_verified', is_verified, - 'balance', balance, - 'associated_wallets_balance', associated_wallets_balance - ) as artist, - + 'follower_count', follower_count, + 'is_verified', users.is_verified, + 'created_at', users.created_at, + 'updated_at', users.updated_at + ) as user, + array( select slug from track_routes r @@ -131,7 +162,6 @@ export class TrackIndexer extends BaseIndexer { from tracks join users on owner_id = user_id left join aggregate_user on users.user_id = aggregate_user.user_id - left join user_balances on users.user_id = user_balances.user_id left join aggregate_plays on tracks.track_id = aggregate_plays.play_item_id WHERE tracks.is_current = true AND users.is_current = true @@ -153,16 +183,18 @@ export class TrackIndexer extends BaseIndexer { } withRow(row: TrackDoc) { - row.tags = row.tags ? 
row.tags.split(',') : [] + row.suggest = [row.title, row.user.handle, row.user.name] + .filter((x) => x) + .join(' ') + row.tags = row.tags row.repost_count = row.reposted_by.length - row.save_count = row.saved_by.length - - row.length = Math.ceil( + row.favorite_count = row.saved_by.length + row.duration = Math.ceil( row.track_segments.reduce((acc, s) => acc + parseFloat(s.duration), 0) ) // permalink const currentRoute = row.routes[row.routes.length - 1] - row.permalink = `/${row.artist.handle}/${currentRoute}` + row.permalink = `/${row.user.handle}/${currentRoute}` } } diff --git a/discovery-provider/es-indexer/src/indexers/UserIndexer.ts b/discovery-provider/es-indexer/src/indexers/UserIndexer.ts index ca8060ca8c8..d4024edfce5 100644 --- a/discovery-provider/es-indexer/src/indexers/UserIndexer.ts +++ b/discovery-provider/es-indexer/src/indexers/UserIndexer.ts @@ -1,10 +1,15 @@ import { IndicesCreateRequest } from '@elastic/elasticsearch/lib/api/types' -import { groupBy, keyBy } from 'lodash' +import { groupBy, keyBy, merge } from 'lodash' import { dialPg } from '../conn' import { indexNames } from '../indexNames' import { BlocknumberCheckpoint } from '../types/blocknumber_checkpoint' import { UserDoc } from '../types/docs' import { BaseIndexer } from './BaseIndexer' +import { + sharedIndexSettings, + standardSuggest, + standardText, +} from './sharedIndexSettings' export class UserIndexer extends BaseIndexer { tableName = 'users' @@ -13,25 +18,35 @@ export class UserIndexer extends BaseIndexer { mapping: IndicesCreateRequest = { index: indexNames.users, - settings: { + settings: merge(sharedIndexSettings, { index: { number_of_shards: 1, number_of_replicas: 0, refresh_interval: '5s', }, - }, + }), mappings: { dynamic: false, properties: { blocknumber: { type: 'integer' }, created_at: { type: 'date' }, wallet: { type: 'keyword' }, - handle: { type: 'keyword' }, // should have a "searchable" treatment - name: { type: 'text' }, // default should be keyword, with a searchable treatment + suggest: standardSuggest, + handle: { + type: 'keyword', + fields: { + searchable: standardText, + }, + }, + name: { + type: 'keyword', + fields: { + searchable: standardText, + }, + }, is_creator: { type: 'boolean' }, is_verified: { type: 'boolean' }, is_deactivated: { type: 'boolean' }, - bio: { type: 'text' }, location: { type: 'keyword' }, // following @@ -58,21 +73,26 @@ export class UserIndexer extends BaseIndexer { -- etl users select users.*, - user_balances.balance, - user_balances.associated_wallets_balance, - user_balances.waudio, + coalesce(user_balances.balance, '0') as balance, + coalesce(user_balances.associated_wallets_balance, '0') as associated_wallets_balance, + coalesce(user_balances.waudio, '0') as waudio, + coalesce(user_balances.waudio, '0') as waudio_balance, -- do we need both waudio and waudio_balance user_balances.associated_sol_wallets_balance, + user_bank_accounts.bank_account as spl_wallet, coalesce(track_count, 0) as track_count, coalesce(playlist_count, 0) as playlist_count, coalesce(album_count, 0) as album_count, coalesce(follower_count, 0) as follower_count, coalesce(following_count, 0) as following_count, coalesce(repost_count, 0) as repost_count, - coalesce(track_save_count, 0) as track_save_count + coalesce(track_save_count, 0) as track_save_count, + coalesce(supporter_count, 0) as supporter_count, + coalesce(supporting_count, 0) as supporting_count from users left join aggregate_user on users.user_id = aggregate_user.user_id left join user_balances on users.user_id = 
user_balances.user_id + left join user_bank_accounts on users.wallet = user_bank_accounts.ethereum_address where is_current = true ` @@ -107,6 +127,7 @@ export class UserIndexer extends BaseIndexer { } withRow(row: UserDoc) { + row.suggest = [row.handle, row.name].filter((x) => x).join(' ') row.following_count = row.following_ids.length } diff --git a/discovery-provider/es-indexer/src/indexers/sharedIndexSettings.ts b/discovery-provider/es-indexer/src/indexers/sharedIndexSettings.ts new file mode 100644 index 00000000000..d10d32bc03b --- /dev/null +++ b/discovery-provider/es-indexer/src/indexers/sharedIndexSettings.ts @@ -0,0 +1,26 @@ +import { + IndicesIndexSettings, + MappingProperty, +} from '@elastic/elasticsearch/lib/api/types' + +export const sharedIndexSettings: IndicesIndexSettings = { + analysis: { + analyzer: { + standard_asciifolding: { + type: 'custom', + tokenizer: 'standard', + filter: ['asciifolding', 'lowercase'], + }, + }, + }, +} + +export const standardSuggest: MappingProperty = { + type: 'search_as_you_type', + analyzer: 'standard_asciifolding', +} + +export const standardText: MappingProperty = { + type: 'text', + analyzer: 'standard_asciifolding', +} diff --git a/discovery-provider/es-indexer/src/main.ts b/discovery-provider/es-indexer/src/main.ts index 9d28e66590b..81670c28ca8 100644 --- a/discovery-provider/es-indexer/src/main.ts +++ b/discovery-provider/es-indexer/src/main.ts @@ -7,7 +7,11 @@ import { UserIndexer } from './indexers/UserIndexer' import { PendingUpdates, startListener, takePending } from './listener' import { logger } from './logger' import { setupTriggers } from './setup' -import { getBlocknumberCheckpoints, waitForHealthyCluster } from './conn' +import { + ensureSaneCluterSettings, + getBlocknumberCheckpoints, + waitForHealthyCluster, +} from './conn' export const indexer = { playlists: new PlaylistIndexer(), @@ -31,6 +35,7 @@ async function processPending(pending: PendingUpdates) { async function start() { const health = await waitForHealthyCluster() logger.info(health, 'booting') + await ensureSaneCluterSettings() // create indexes const indexers = Object.values(indexer) @@ -45,6 +50,10 @@ async function start() { logger.info(checkpoints, 'catchup from blocknumbers') await Promise.all(Object.values(indexer).map((i) => i.catchup(checkpoints))) + // refresh indexes before cutting over + logger.info(checkpoints, 'refreshing indexes') + await Promise.all(Object.values(indexer).map((i) => i.refreshIndex())) + // cutover aliases logger.info('catchup done... 
cutting over aliases') await Promise.all(indexers.map((ix) => ix.cutoverAlias())) diff --git a/discovery-provider/es-indexer/src/types/db.ts b/discovery-provider/es-indexer/src/types/db.ts index 7a9cf5ff2d4..1b87c690557 100644 --- a/discovery-provider/es-indexer/src/types/db.ts +++ b/discovery-provider/es-indexer/src/types/db.ts @@ -400,7 +400,7 @@ export interface TrackRow { is_delete: boolean owner_id: number title?: string | null - length?: number | null + duration?: number | null cover_art?: string | null tags?: string | null genre?: string | null diff --git a/discovery-provider/es-indexer/src/types/docs.ts b/discovery-provider/es-indexer/src/types/docs.ts index e57ed68b70b..1d5c4222c9e 100644 --- a/discovery-provider/es-indexer/src/types/docs.ts +++ b/discovery-provider/es-indexer/src/types/docs.ts @@ -7,16 +7,28 @@ import { UserRow, } from './db' +export type EntityUserDoc = { + handle: string + name: string + location: string + follower_count: number + created_at: Date + updated_at: Date +} + export type PlaylistDoc = PlaylistRow & { + suggest: string tracks: TrackDoc[] save_count: number saved_by: number[] repost_count: number reposted_by: number[] total_play_count: number + user: EntityUserDoc } export type UserDoc = UserRow & { + suggest: string tracks: TrackRow[] track_count: number following_ids: number[] @@ -24,21 +36,17 @@ export type UserDoc = UserRow & { } export type TrackDoc = TrackRow & { + suggest: string reposted_by: number[] saved_by: number[] routes: string[] permalink: string - tags: any // todo: is it a string or a string[]? + tags: string // comma separated repost_count: number - save_count: number + favorite_count: number play_count: any // todo: is it a string or number? pg returns string - - artist: { - handle: string - name: string - location: string - follower_count: number - } + downloadable: boolean + user: EntityUserDoc } export type RepostDoc = RepostRow & { diff --git a/discovery-provider/flake8_plugins/flask_decorator_plugin/visitor_helpers.py b/discovery-provider/flake8_plugins/flask_decorator_plugin/visitor_helpers.py index 45f713e4a85..34d9cbbd31b 100644 --- a/discovery-provider/flake8_plugins/flask_decorator_plugin/visitor_helpers.py +++ b/discovery-provider/flake8_plugins/flask_decorator_plugin/visitor_helpers.py @@ -23,7 +23,7 @@ def is_route_decorator(decorator: ast.Call): return isinstance(decorator.func, ast.Attribute) and decorator.func.attr == "route" -route_parser_regex = re.compile(".*<[^:]*:?(.*)>.*") +route_parser_regex = re.compile("<[^:]*:?([^>]*)>") def is_route_decorator_documented(route_decorator: ast.Call): diff --git a/discovery-provider/integration_tests/conftest.py b/discovery-provider/integration_tests/conftest.py index 57f9001d2c4..879faec680f 100644 --- a/discovery-provider/integration_tests/conftest.py +++ b/discovery-provider/integration_tests/conftest.py @@ -1,6 +1,7 @@ from __future__ import absolute_import import os +from contextlib import contextmanager import alembic import alembic.config @@ -16,7 +17,7 @@ from src.utils.redis_connection import get_redis from web3 import HTTPProvider, Web3 -DB_URL = "postgresql+psycopg2://postgres:postgres@localhost/test_audius_discovery" +DB_URL = "postgresql+psycopg2://postgres:postgres@localhost:5432/test_audius_discovery" TEST_BROKER_URL = "redis://localhost:5379/0" ENGINE_ARGS_LITERAL = '{ \ "pool_size": 10, \ @@ -36,8 +37,8 @@ } -@pytest.fixture -def app(): +@contextmanager +def app_impl(): # Drop DB, ensuring migration performed at start if database_exists(DB_URL): 
drop_database(DB_URL) @@ -65,6 +66,18 @@ def app(): yield discovery_provider_app +@pytest.fixture +def app(): + with app_impl() as app: + yield app + + +@pytest.fixture(scope="module") +def app_module(): + with app_impl() as app: + yield app + + @pytest.fixture(scope="session") def celery_config(): return { diff --git a/discovery-provider/integration_tests/queries/test_search.py b/discovery-provider/integration_tests/queries/test_search.py index 500977cf86d..ef774844ab3 100644 --- a/discovery-provider/integration_tests/queries/test_search.py +++ b/discovery-provider/integration_tests/queries/test_search.py @@ -1,6 +1,10 @@ +import os +import subprocess from datetime import datetime +import pytest from src.models import Block, Follow, Playlist, Save, SaveType, Track, User, UserBalance +from src.queries.search_es import search_es_full from src.queries.search_queries import ( playlist_search_query, track_search_query, @@ -11,7 +15,11 @@ from src.utils.db_session import get_db -def setup_search(db): +@pytest.fixture(autouse=True, scope="module") +def setup_search(app_module): + with app_module.app_context(): + db = get_db() + # Import app so that it'll run migrations against the db now = datetime.now() blocks = [ @@ -143,7 +151,7 @@ def setup_search(db): is_album=False, is_private=False, playlist_name="playlist 1", - playlist_contents={"track_ids": [{"track": 1}]}, + playlist_contents={"track_ids": [{"track": 1, "time": 1}]}, is_current=True, is_delete=False, updated_at=now, @@ -157,7 +165,7 @@ def setup_search(db): is_album=True, is_private=False, playlist_name="album 1", - playlist_contents={"track_ids": [{"track": 2}]}, + playlist_contents={"track_ids": [{"track": 2, "time": 2}]}, is_current=True, is_delete=False, updated_at=now, @@ -243,156 +251,373 @@ def setup_search(db): session.execute("REFRESH MATERIALIZED VIEW playlist_lexeme_dict;") session.execute("REFRESH MATERIALIZED VIEW album_lexeme_dict;") + try: + output = subprocess.run( + ["npm", "run", "dev"], + env=os.environ, + capture_output=True, + text=True, + cwd="es-indexer", + timeout=5, + ) + raise Exception( + f"Elasticsearch indexing stopped: {output.stderr}. 
With env: {os.environ}" + ) + except subprocess.TimeoutExpired as timeout: + if "catchup done" not in timeout.output.decode("utf-8"): + raise Exception("Elasticsearch failed to index") + -def test_get_tracks_external(app): +def test_get_tracks_external(app_module): """Tests we get all tracks, including downloaded""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = track_search_query(session, "the track", 10, 0, False, None, False) assert len(res["all"]) == 2 assert len(res["saved"]) == 0 + search_args = { + "is_auto_complete": False, + "kind": "tracks", + "query": "the track", + "current_user_id": None, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + + assert len(es_res["tracks"]) == 2 + -def test_get_autocomplete_tracks(app): +def test_get_autocomplete_tracks(app_module): """Tests we get all tracks with autocomplete""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = track_search_query(session, "the track", 10, 0, True, None, False) assert len(res["all"]) == 2 assert len(res["saved"]) == 0 + search_args = { + "is_auto_complete": True, + "kind": "tracks", + "query": "the track", + "current_user_id": None, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + + assert len(es_res["tracks"]) == 2 -def test_get_tracks_internal(app): + +def test_get_tracks_internal(app_module): """Tests we get all tracks when a user is logged in""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = track_search_query(session, "the track", 10, 0, False, 1, False) assert len(res["all"]) == 2 assert len(res["saved"]) == 1 + search_args = { + "is_auto_complete": False, + "kind": "tracks", + "query": "the track", + "current_user_id": 1, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + + assert len(es_res["tracks"]) == 2 + assert len(es_res["saved_tracks"]) == 1 -def test_get_downloadable_tracks(app): + +def test_get_downloadable_tracks(app_module): """Tests we get only downloadable results""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = track_search_query(session, "the track", 10, 0, False, None, True) assert len(res["all"]) == 1 assert len(res["saved"]) == 0 + search_args = { + "is_auto_complete": False, + "kind": "tracks", + "query": "the track", + "current_user_id": None, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": True, + } + es_res = search_es_full(search_args) -def test_get_external_users(app): + assert len(es_res["tracks"]) == 1 + assert len(es_res["saved_tracks"]) == 0 + + +def test_get_external_users(app_module): """Tests we get all users""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = user_search_query(session, "user", 10, 0, False, None) assert len(res["all"]) == 2 assert len(res["followed"]) == 0 + search_args = { + "is_auto_complete": False, + "kind": "users", + "query": "user", + "current_user_id": None, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = 
search_es_full(search_args) + + assert len(es_res["users"]) == 2 + assert len(es_res["followed_users"]) == 0 + -def test_get_autocomplete_users(app): +def test_get_autocomplete_users(app_module): """Tests we get all users with autocomplete""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = user_search_query(session, "user", 10, 0, True, None) assert len(res["all"]) == 2 assert len(res["followed"]) == 0 + search_args = { + "is_auto_complete": True, + "kind": "users", + "query": "user", + "current_user_id": None, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + + assert len(es_res["users"]) == 2 + assert len(es_res["followed_users"]) == 0 + -def test_get_internal_users(app): +def test_get_internal_users(app_module): """Tests we get all users when a user is logged in""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = user_search_query(session, "user", 10, 0, False, 2) assert len(res["all"]) == 2 assert len(res["followed"]) == 1 + search_args = { + "is_auto_complete": False, + "kind": "users", + "query": "user", + "current_user_id": 2, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + + assert len(es_res["users"]) == 2 + assert len(es_res["followed_users"]) == 1 -def test_get_internal_users_no_following(app): + +def test_get_internal_users_no_following(app_module): """Tests we get all users for a user that doesn't follow anyone""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = user_search_query(session, "user", 10, 0, False, 1) assert len(res["all"]) == 2 assert len(res["followed"]) == 0 - -def test_get_external_playlists(app): + search_args = { + "is_auto_complete": False, + "kind": "users", + "query": "user", + "current_user_id": 1, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + assert len(es_res["users"]) == 2 + assert len(es_res["followed_users"]) == 0 + + +def test_get_external_playlists(app_module): """Tests we get all playlists""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = playlist_search_query(session, "playlist", 10, 0, False, False, None) assert len(res["all"]) == 1 assert len(res["saved"]) == 0 - -def test_get_autocomplete_playlists(app): + search_args = { + "is_auto_complete": False, + "kind": "playlists", + "query": "playlist", + "current_user_id": None, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + assert len(es_res["playlists"]) == 1 + assert len(es_res["saved_playlists"]) == 0 + + +def test_get_autocomplete_playlists(app_module): """Tests we get all tracks with autocomplete""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = playlist_search_query(session, "playlist", 10, 0, False, True, None) assert len(res["all"]) == 1 assert len(res["saved"]) == 0 - -def test_get_internal_playlists(app): + search_args = { + "is_auto_complete": True, + "kind": "playlists", + "query": "playlist", + "current_user_id": None, + "with_users": 
True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + assert len(es_res["playlists"]) == 1 + assert len(es_res["saved_playlists"]) == 0 + + +def test_get_internal_playlists(app_module): """Tests we get playlists when a user is logged in""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = playlist_search_query(session, "playlist", 10, 0, False, False, 1) assert len(res["all"]) == 1 assert len(res["saved"]) == 1 - -def test_get_external_albums(app): + search_args = { + "is_auto_complete": False, + "kind": "playlists", + "query": "playlist", + "current_user_id": 1, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + assert len(es_res["playlists"]) == 1 + assert len(es_res["saved_playlists"]) == 1 + + +def test_get_external_albums(app_module): """Tests we get all albums""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = playlist_search_query(session, "album", 10, 0, True, False, None) assert len(res["all"]) == 1 assert len(res["saved"]) == 0 - -def test_get_autocomplete_albums(app): + search_args = { + "is_auto_complete": False, + "kind": "albums", + "query": "album", + "current_user_id": None, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + assert len(es_res["albums"]) == 1 + assert len(es_res["saved_albums"]) == 0 + + +def test_get_autocomplete_albums(app_module): """Tests we get all albums with autocomplete""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = playlist_search_query(session, "album", 10, 0, True, True, None) assert len(res["all"]) == 1 assert len(res["saved"]) == 0 - -def test_get_internal_albums(app): + search_args = { + "is_auto_complete": True, + "kind": "albums", + "query": "album", + "current_user_id": None, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + assert len(es_res["albums"]) == 1 + assert len(es_res["saved_albums"]) == 0 + + +def test_get_internal_albums(app_module): """Tests we get albums when a user is logged in""" - with app.app_context(): + with app_module.app_context(): db = get_db() - setup_search(db) + with db.scoped_session() as session: res = playlist_search_query(session, "album", 10, 0, True, False, 1) assert len(res["all"]) == 1 assert len(res["saved"]) == 1 + + search_args = { + "is_auto_complete": True, + "kind": "albums", + "query": "album", + "current_user_id": 1, + "with_users": True, + "limit": 10, + "offset": 0, + "only_downloadable": False, + } + es_res = search_es_full(search_args) + assert len(es_res["albums"]) == 1 + assert len(es_res["saved_albums"]) == 1 diff --git a/discovery-provider/integration_tests/tasks/test_es_indexer.py b/discovery-provider/integration_tests/tasks/test_es_indexer.py index a623284f645..89f90fe81d4 100644 --- a/discovery-provider/integration_tests/tasks/test_es_indexer.py +++ b/discovery-provider/integration_tests/tasks/test_es_indexer.py @@ -5,16 +5,12 @@ import pytest from elasticsearch import Elasticsearch -from integration_tests.conftest import DB_URL from integration_tests.utils import populate_mock_db from src.utils.db_session import get_db logger = 
logging.getLogger(__name__) -os.environ["audius_db_url"] = DB_URL -os.environ["audius_elasticsearch_url"] = "http://localhost:9200" -os.environ["audius_elasticsearch_run_indexer"] = "true" -esclient = Elasticsearch("http://localhost:9200") +esclient = Elasticsearch(os.environ["audius_elasticsearch_url"]) basic_entities = { "aggregate_plays": [{"play_item_id": 1, "count": 1}], diff --git a/discovery-provider/scripts/search_quality.py b/discovery-provider/scripts/search_quality.py new file mode 100644 index 00000000000..abf1b353401 --- /dev/null +++ b/discovery-provider/scripts/search_quality.py @@ -0,0 +1,174 @@ +from src.queries.search_es import search_es_full + + +def test_search(args): + print("\n\n==========", args) + found = search_es_full(args) + search_type = args.get("kind", "all") + + def print_entity(title, entities): + if not entities: + return + print(f"\n[ {title} ]") + for entity in entities: + print( + " ", + [ + entity["user"]["handle"], + entity["user"]["name"], + entity.get("title") or entity.get("playlist_name"), + f"{entity['repost_count']} reposts", + f"{entity['user']['follower_count']} followers", + f"{entity.get('_score')} score", + ], + ) + + def print_users(title, users): + if not users: + return + print(f"\n[ {title} ]") + for user in users: + print( + " ", + [ + user["handle"], + user["name"], + f"{user.get('follower_count')} followers", + f"{user.get('is_verified')} verified", + f"{user.get('_score')} score", + ], + ) + + if search_type == "tracks" or search_type == "all": + print_entity("tracks", found["tracks"]) + print_entity("saved tracks", found["saved_tracks"]) + if search_type == "users" or search_type == "all": + print_users("users", found["users"]) + print_users("followed_users", found["followed_users"]) + if search_type == "playlists" or search_type == "all": + print_entity("playlists", found["playlists"]) + print_entity("saved_playlists", found["saved_playlists"]) + if search_type == "albums" or search_type == "all": + print_entity("albums", found["albums"]) + print_entity("saved_albums", found["saved_albums"]) + + +test_search({"query": "space fm lido", "limit": 3, "kind": "tracks"}) + +test_search({"query": "issac solo", "limit": 3, "kind": "users"}) # misspell + + +test_search( + { + "query": "the cycle of change", + "limit": 4, + "is_auto_complete": True, + } +) + + +test_search( + { + "query": "isaac pho", + "limit": 4, + "is_auto_complete": True, + } +) + +test_search( + { + "query": "RAC wat", + "limit": 4, + "current_user_id": 1, + "is_auto_complete": True, + } +) +test_search( + { + "query": "RAC water", + "limit": 4, + "current_user_id": 1, + "is_auto_complete": False, + } +) + +test_search( + { + "query": "deadmau", + "limit": 4, + "current_user_id": 1, + "is_auto_complete": False, + } +) + +# should have disclosure at the top +test_search( + { + "query": "waterfal", + "limit": 10, + "current_user_id": 1, + "is_auto_complete": True, + } +) + +test_search( + { + "query": "closer 2 u ray", + "limit": 4, + "current_user_id": 1, + "is_auto_complete": True, + } +) +test_search( + { + "query": "raymont", + "limit": 4, + "current_user_id": 1, + "is_auto_complete": True, + } +) + +test_search( + { + "query": "low", + "limit": 4, + "current_user_id": 14, + "is_auto_complete": True, + } +) + +test_search( + { + "query": "stereosteve guitar", + "limit": 4, + "current_user_id": 1, + "is_auto_complete": True, + } +) + +test_search( + { + "query": "skrillex", + "limit": 4, + "current_user_id": 1, + "is_auto_complete": True, + } +) + +test_search( + { + 
"query": "camo", + "limit": 4, + "is_auto_complete": True, + } +) + +test_search( + { + "query": "zouai", + "limit": 4, + "is_auto_complete": True, + } +) + +print("\n\n") diff --git a/discovery-provider/scripts/start.sh b/discovery-provider/scripts/start.sh index 48220fdfddd..78051cd6d1d 100755 --- a/discovery-provider/scripts/start.sh +++ b/discovery-provider/scripts/start.sh @@ -1,12 +1,18 @@ #!/bin/bash set -e -mkdir -p /var/log -mkdir -p /var/spool/rsyslog -mkdir -p /etc/rsyslog.d - -if [[ -z "$audius_loggly_disable" ]]; then - if [[ -n "$audius_loggly_token" ]]; then +# enable rsyslog if not explicitly disabled by audius-docker-compose +: "${audius_enable_rsyslog:=true}" + +# $audius_enable_rsyslog should be true +if $audius_enable_rsyslog; then + mkdir -p /var/log + mkdir -p /var/spool/rsyslog + mkdir -p /etc/rsyslog.d + + # $logglyDisable should be empty/null + # $logglyToken should be a nonzero length string + if [[ -z "$audius_loggly_disable" && -n "$audius_loggly_token" ]]; then # use regex to extract domain in url (source: https://stackoverflow.com/a/2506635/8674706) audius_discprov_hostname=$(echo $audius_discprov_url | sed -e 's/[^/]*\/\/\([^@]*@\)\?\([^:/]*\).*/\2/') @@ -34,15 +40,14 @@ if [[ -z "$audius_loggly_disable" ]]; then \$ActionResumeRetryCount -1 # infinite retries if host is down template(name="LogglyFormat" type="string" - string="<%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %app-name% %procid% %msgid% [$audius_loggly_token@41058 $audius_loggly_tags] %msg%\n") +string="<%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %app-name% %procid% %msgid% [$audius_loggly_token@41058 $audius_loggly_tags] %msg%\n") # Send messages to Loggly over TCP using the template. action(type="omfwd" protocol="tcp" target="logs-01.loggly.com" port="514" template="LogglyFormat") EOF fi -fi -cat >/etc/rsyslog.d/20-file.conf </etc/rsyslog.d/20-file.conf < tail -f /var/log/discprov-server.log - # docker exec -it tail -f /var/log/discprov-worker.log - # docker exec -it tail -f /var/log/discprov-beat.log - ./scripts/dev-server.sh 2>&1 | tee >(logger -t server) & + audius_service=server ./scripts/dev-server.sh 2>&1 | tee >(logger -t server) & if [[ "$audius_no_workers" != "true" ]] && [[ "$audius_no_workers" != "1" ]]; then - watchmedo auto-restart --directory ./ --pattern=*.py --recursive -- celery -A src.worker.celery worker --loglevel $audius_discprov_loglevel 2>&1 | tee >(logger -t worker) & - celery -A src.worker.celery beat --loglevel $audius_discprov_loglevel 2>&1 | tee >(logger -t beat) & + audius_service=worker watchmedo auto-restart --directory ./ --pattern=*.py --recursive -- celery -A src.worker.celery worker --loglevel $audius_discprov_loglevel 2>&1 | tee >(logger -t worker) & + audius_service=beat celery -A src.worker.celery beat --loglevel $audius_discprov_loglevel 2>&1 | tee >(logger -t beat) & fi else - ./scripts/prod-server.sh 2>&1 | tee >(logger -t server) & + audius_service=server ./scripts/prod-server.sh 2>&1 | tee >(logger -t server) & if [[ "$audius_no_workers" != "true" ]] && [[ "$audius_no_workers" != "1" ]]; then - celery -A src.worker.celery worker --loglevel $audius_discprov_loglevel 2>&1 | tee >(logger -t worker) & - celery -A src.worker.celery beat --loglevel $audius_discprov_loglevel 2>&1 | tee >(logger -t beat) & + audius_service=worker celery -A src.worker.celery worker --loglevel $audius_discprov_loglevel 2>&1 | tee >(logger -t worker) & + audius_service=beat celery -A src.worker.celery beat --loglevel $audius_discprov_loglevel 
2>&1 | tee >(logger -t beat) & fi fi diff --git a/discovery-provider/src/api/v1/helpers.py b/discovery-provider/src/api/v1/helpers.py index ec006f1df43..d3e520fcaf7 100644 --- a/discovery-provider/src/api/v1/helpers.py +++ b/discovery-provider/src/api/v1/helpers.py @@ -98,6 +98,27 @@ def add_user_artwork(user): return user +# Helpers +def extend_search(resp): + if "users" in resp: + resp["users"] = list(map(extend_user, resp["users"])) + if "followed_users" in resp: + resp["followed_users"] = list(map(extend_user, resp["followed_users"])) + if "tracks" in resp: + resp["tracks"] = list(map(extend_track, resp["tracks"])) + if "saved_tracks" in resp: + resp["saved_tracks"] = list(map(extend_track, resp["saved_tracks"])) + if "playlists" in resp: + resp["playlists"] = list(map(extend_playlist, resp["playlists"])) + if "saved_playlists" in resp: + resp["saved_playlists"] = list(map(extend_playlist, resp["saved_playlists"])) + if "albums" in resp: + resp["albums"] = list(map(extend_playlist, resp["albums"])) + if "saved_albums" in resp: + resp["saved_albums"] = list(map(extend_playlist, resp["saved_albums"])) + return resp + + def extend_user(user, current_user_id=None): user_id = encode_int_id(user["user_id"]) user["id"] = user_id diff --git a/discovery-provider/src/api/v1/playlists.py b/discovery-provider/src/api/v1/playlists.py index 4585155da06..172c4591524 100644 --- a/discovery-provider/src/api/v1/playlists.py +++ b/discovery-provider/src/api/v1/playlists.py @@ -185,8 +185,7 @@ def get(self): "offset": 0, } response = search(search_args) - playlists = list(map(extend_playlist, response["playlists"])) - return success_response(playlists) + return success_response(response) top_parser = pagination_parser.copy() diff --git a/discovery-provider/src/api/v1/reactions.py b/discovery-provider/src/api/v1/reactions.py index f90e410cdf5..a0b11426d2c 100644 --- a/discovery-provider/src/api/v1/reactions.py +++ b/discovery-provider/src/api/v1/reactions.py @@ -18,7 +18,7 @@ "type", required=False, description="The type of reactions for which to query." 
) get_reactions_parser.add_argument( - "tx_signatures", + "reacted_to_ids", required=True, action="split", description="The `reacted_to` transaction id(s) of the reactions in question.", @@ -34,7 +34,7 @@ class BulkReactions(Resource): @record_metrics @ns.doc( id="Bulk get Reactions", - description="Gets reactions by transaction_id and type", + description="Gets reactions by reacted_to_id and type", responses={200: "Success", 400: "Bad request", 500: "Server error"}, ) @ns.expect(get_reactions_parser) @@ -42,9 +42,9 @@ class BulkReactions(Resource): @cache(ttl_sec=5) def get(self): args = get_reactions_parser.parse_args() - tx_ids, type = args.get("tx_signatures"), args.get("type") + reacted_to_ids, type = args.get("reacted_to_ids"), args.get("type") db = get_db_read_replica() with db.scoped_session() as session: - reactions = get_reactions(session, tx_ids, type) + reactions = get_reactions(session, reacted_to_ids, type) reactions = list(map(extend_reaction, reactions)) return success_response(reactions) diff --git a/discovery-provider/src/api/v1/search.py b/discovery-provider/src/api/v1/search.py index 5245e2a5ca5..22d4f273c05 100644 --- a/discovery-provider/src/api/v1/search.py +++ b/discovery-provider/src/api/v1/search.py @@ -1,10 +1,7 @@ -import logging # pylint: disable=C0302 +import logging from flask_restx import Namespace, Resource, fields from src.api.v1.helpers import ( - extend_playlist, - extend_track, - extend_user, format_limit, format_offset, full_search_parser, @@ -23,27 +20,6 @@ full_ns = Namespace("search", description="Full search operations") -# Helpers -def extend_search(resp): - if "users" in resp: - resp["users"] = list(map(extend_user, resp["users"])) - if "followed_users" in resp: - resp["followed_users"] = list(map(extend_user, resp["followed_users"])) - if "tracks" in resp: - resp["tracks"] = list(map(extend_track, resp["tracks"])) - if "saved_tracks" in resp: - resp["saved_tracks"] = list(map(extend_track, resp["saved_tracks"])) - if "playlists" in resp: - resp["playlists"] = list(map(extend_playlist, resp["playlists"])) - if "saved_playlists" in resp: - resp["saved_playlists"] = list(map(extend_playlist, resp["saved_playlists"])) - if "albums" in resp: - resp["albums"] = list(map(extend_playlist, resp["albums"])) - if "saved_albums" in resp: - resp["saved_albums"] = list(map(extend_playlist, resp["saved_albums"])) - return resp - - search_full_response = make_full_response( "search_full_response", full_ns, fields.Nested(search_model) ) @@ -76,8 +52,6 @@ def get(self): "only_downloadable": False, } resp = search(search_args) - resp = extend_search(resp) - return success_response(resp) @@ -117,6 +91,4 @@ def get(self): "only_downloadable": False, } resp = search(search_args) - resp = extend_search(resp) - return success_response(resp) diff --git a/discovery-provider/src/api/v1/tracks.py b/discovery-provider/src/api/v1/tracks.py index 4ad108b2262..3de5dcce139 100644 --- a/discovery-provider/src/api/v1/tracks.py +++ b/discovery-provider/src/api/v1/tracks.py @@ -430,9 +430,7 @@ def get(self): "only_downloadable": args["only_downloadable"], } response = search(search_args) - tracks = response["tracks"] - tracks = list(map(extend_track, tracks)) - return success_response(tracks) + return success_response(response) # Trending diff --git a/discovery-provider/src/api/v1/users.py b/discovery-provider/src/api/v1/users.py index d842df2274a..e21acd746f2 100644 --- a/discovery-provider/src/api/v1/users.py +++ b/discovery-provider/src/api/v1/users.py @@ -588,9 +588,7 @@ def 
get(self): "offset": 0, } response = search(search_args) - users = response["users"] - users = list(map(extend_user, users)) - return success_response(users) + return success_response(response) followers_response = make_full_response( @@ -948,7 +946,7 @@ def get(self, id: str): class GetSupporters(Resource): @record_metrics @ns.doc( - id="""Get User Supporters""", + id="""Get Supporters""", description="""Gets the supporters of the given user""", params={"id": "A User ID"}, ) @@ -972,17 +970,17 @@ def get(self, id: str): @full_ns.route("/<string:id>/supporters") class FullGetSupporters(Resource): @record_metrics - @ns.doc( - id="""Get User Supporters""", + @full_ns.doc( + id="""Get Supporters""", description="""Gets the supporters of the given user""", params={"id": "A User ID"}, ) - @ns.expect(pagination_with_current_user_parser) - @ns.marshal_with(full_get_supporters_response) + @full_ns.expect(pagination_with_current_user_parser) + @full_ns.marshal_with(full_get_supporters_response) @cache(ttl_sec=5) def get(self, id: str): - args = pagination_parser.parse_args() - decoded_id = decode_with_abort(id, ns) + args = pagination_with_current_user_parser.parse_args() + decoded_id = decode_with_abort(id, full_ns) current_user_id = get_current_user_id(args) args["user_id"] = decoded_id args["current_user_id"] = current_user_id @@ -991,16 +989,47 @@ def get(self, id: str): return success_response(support) +full_get_supporter_response = make_full_response( + "full_get_supporter", full_ns, fields.Nested(supporter_response_full) +) + + +@full_ns.route("/<string:id>/supporters/<string:supporter_user_id>") +class FullGetSupporter(Resource): + @record_metrics + @full_ns.doc( + id="""Get Supporter""", + description="""Gets the specified supporter of the given user""", + params={"id": "A User ID", "supporter_user_id": "A User ID of a supporter"}, + ) + @full_ns.expect(current_user_parser) + @full_ns.marshal_with(full_get_supporter_response) + @cache(ttl_sec=5) + def get(self, id: str, supporter_user_id: str): + args = current_user_parser.parse_args() + decoded_id = decode_with_abort(id, full_ns) + current_user_id = get_current_user_id(args) + decoded_supporter_user_id = decode_with_abort(supporter_user_id, full_ns) + args["user_id"] = decoded_id + args["current_user_id"] = current_user_id + args["supporter_user_id"] = decoded_supporter_user_id + support = get_support_received_by_user(args) + support = list(map(extend_supporter, support)) + if not support: + abort_not_found(supporter_user_id, full_ns) + return success_response(support[0]) + + get_supporting_response = make_response( "get_supporting", ns, fields.List(fields.Nested(supporting_response)) ) @ns.route("/<string:id>/supporting") -class GetSupporting(Resource): +class GetSupportings(Resource): @record_metrics @ns.doc( - id="""Get User Supporting""", + id="""Get Supportings""", description="""Gets the users that the given user supports""", params={"id": "A User ID"}, ) @@ -1022,10 +1051,10 @@ def get(self, id: str): @full_ns.route("/<string:id>/supporting") -class FullGetSupporting(Resource): +class FullGetSupportings(Resource): @record_metrics @full_ns.doc( - id="""Get User Supporting""", + id="""Get Supportings""", description="""Gets the users that the given user supports""", params={"id": "A User ID"}, ) @@ -1043,6 +1072,40 @@ def get(self, id: str): return success_response(support) +full_get_supporting_response = make_full_response( + "full_get_supporting", full_ns, fields.Nested(supporting_response_full) +) + + +@full_ns.route("/<string:id>/supporting/<string:supported_user_id>") +class FullGetSupporting(Resource): + @record_metrics +
@full_ns.doc( + id="""Get Supporting""", + description="""Gets the support from the given user to the supported user""", + params={ + "id": "A User ID", + "supported_user_id": "A User ID of a supported user", + }, + ) + @full_ns.expect(current_user_parser) + @full_ns.marshal_with(full_get_supporting_response) + @cache(ttl_sec=5) + def get(self, id: str, supported_user_id: str): + args = current_user_parser.parse_args() + decoded_id = decode_with_abort(id, full_ns) + current_user_id = get_current_user_id(args) + decoded_supported_user_id = decode_with_abort(supported_user_id, full_ns) + args["user_id"] = decoded_id + args["current_user_id"] = current_user_id + args["supported_user_id"] = decoded_supported_user_id + support = get_support_sent_by_user(args) + support = list(map(extend_supporting, support)) + if not support: + abort_not_found(decoded_id, full_ns) + return success_response(support[0]) + + verify_token_response = make_response( "verify_token", ns, fields.List(fields.Nested(decoded_user_token)) ) diff --git a/discovery-provider/src/queries/get_feed_es.py b/discovery-provider/src/queries/get_feed_es.py index a1668f61f9b..ad1407e6975 100644 --- a/discovery-provider/src/queries/get_feed_es.py +++ b/discovery-provider/src/queries/get_feed_es.py @@ -9,8 +9,8 @@ ES_USERS, esclient, pluck_hits, - popuate_user_metadata_es, populate_track_or_playlist_metadata_es, + populate_user_metadata_es, ) @@ -22,21 +22,6 @@ def get_feed_es(args, limit=10): mdsl = [] - def following_ids_terms_lookup(field): - """ - does a "terms lookup" to query a field - with the user_ids that the current user follows - """ - return { - "terms": { - field: { - "index": ES_USERS, - "id": current_user_id, - "path": "following_ids", - }, - } - } - if load_reposts: mdsl.extend( [ @@ -45,7 +30,7 @@ def following_ids_terms_lookup(field): "query": { "bool": { "must": [ - following_ids_terms_lookup("user_id"), + following_ids_terms_lookup(current_user_id, "user_id"), {"term": {"is_delete": False}}, {"range": {"created_at": {"gte": "now-30d"}}}, ] @@ -74,7 +59,7 @@ def following_ids_terms_lookup(field): "query": { "bool": { "must": [ - following_ids_terms_lookup("owner_id"), + following_ids_terms_lookup(current_user_id, "owner_id"), {"term": {"is_unlisted": False}}, {"term": {"is_delete": False}}, ], @@ -89,7 +74,9 @@ def following_ids_terms_lookup(field): "query": { "bool": { "must": [ - following_ids_terms_lookup("playlist_owner_id"), + following_ids_terms_lookup( + current_user_id, "playlist_owner_id" + ), {"term": {"is_private": False}}, {"term": {"is_delete": False}}, ] @@ -214,10 +201,10 @@ def following_ids_terms_lookup(field): user_list = esclient.mget(index=ES_USERS, ids=user_id_list) user_by_id = {d["_id"]: d["_source"] for d in user_list["docs"] if d["found"]} - # popuate_user_metadata_es: + # populate_user_metadata_es: current_user = user_by_id.pop(str(current_user_id)) for id, user in user_by_id.items(): - user_by_id[id] = popuate_user_metadata_es(user, current_user) + user_by_id[id] = populate_user_metadata_es(user, current_user) for item in sorted_feed: # GOTCHA: es ids must be strings, but our ids are ints... 
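# --- Reviewer note (not part of the patch): the refactored
# following_ids_terms_lookup above relies on Elasticsearch's "terms lookup",
# which reads the follower list from a stored user document at query time
# instead of shipping it in every request. A minimal sketch under stated
# assumptions: the index names "users"/"tracks" stand in for ES_USERS/ES_TRACKS,
# and the local URL stands in for the configured esclient.
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")


def tracks_from_followed_owners(current_user_id: int, limit: int = 10):
    # Per the GOTCHA above: ES document ids are strings, so the lookup id
    # must be str(current_user_id), not the raw int.
    query = {
        "bool": {
            "must": [
                {
                    "terms": {
                        "owner_id": {
                            "index": "users",  # index holding the user doc
                            "id": str(current_user_id),  # whose list to read
                            "path": "following_ids",  # field with followed ids
                        }
                    }
                },
                {"term": {"is_delete": False}},
            ]
        }
    }
    found = es.search(index="tracks", query=query, size=limit)
    return [h["_source"] for h in found["hits"]["hits"]]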
@@ -229,17 +216,52 @@ def following_ids_terms_lookup(field): # really it should use an aggregation with top hits # to bucket ~3 saves / reposts per item item_keys = [i["item_key"] for i in sorted_feed] + + (follow_saves, follow_reposts) = fetch_followed_saves_and_reposts( + current_user_id, item_keys, limit * 20 + ) + + for item in sorted_feed: + item["followee_reposts"] = follow_reposts[item["item_key"]] + item["followee_saves"] = follow_saves[item["item_key"]] + + # populate metadata + remove extra fields from items + sorted_feed = [ + populate_track_or_playlist_metadata_es(item, current_user) + for item in sorted_feed + ] + + return sorted_feed[0:limit] + + +def following_ids_terms_lookup(current_user_id, field): + """ + does a "terms lookup" to query a field + with the user_ids that the current user follows + """ + return { + "terms": { + field: { + "index": ES_USERS, + "id": str(current_user_id), + "path": "following_ids", + }, + } + } + + +def fetch_followed_saves_and_reposts(current_user_id, item_keys, limit): save_repost_query = { "query": { "bool": { "must": [ - following_ids_terms_lookup("user_id"), + following_ids_terms_lookup(current_user_id, "user_id"), {"terms": {"item_key": item_keys}}, {"term": {"is_delete": False}}, ] } }, - "size": limit * 20, # how mutch to overfetch? + "size": limit * 20, # how much to overfetch? "sort": {"created_at": "desc"}, } mdsl = [ @@ -260,17 +282,7 @@ def following_ids_terms_lookup(field): for s in saves: follow_saves[s["item_key"]].append(s) - for item in sorted_feed: - item["followee_reposts"] = follow_reposts[item["item_key"]] - item["followee_saves"] = follow_saves[item["item_key"]] - - # populate metadata + remove extra fields from items - sorted_feed = [ - populate_track_or_playlist_metadata_es(item, current_user) - for item in sorted_feed - ] - - return sorted_feed[0:limit] + return (follow_saves, follow_reposts) def item_key(item): @@ -280,5 +292,7 @@ def item_key(item): if item["is_album"]: return "album:" + str(item["playlist_id"]) return "playlist:" + str(item["playlist_id"]) + elif "user_id" in item: + return "user:" + str(item["user_id"]) else: raise Exception("item_key unknown type") diff --git a/discovery-provider/src/queries/get_support_for_user.py b/discovery-provider/src/queries/get_support_for_user.py index e11c3d02579..844ac0d4aa7 100644 --- a/discovery-provider/src/queries/get_support_for_user.py +++ b/discovery-provider/src/queries/get_support_for_user.py @@ -1,10 +1,14 @@ +import logging from typing import Any, Dict, List, Tuple, TypedDict -from sqlalchemy import Integer, column, text +from sqlalchemy import func +from sqlalchemy.orm import aliased from src.models import AggregateUserTips -from src.queries.query_helpers import get_users_by_id +from src.queries.query_helpers import get_users_by_id, paginate_query from src.utils.db_session import get_db_read_replica +logger = logging.getLogger(__name__) + class SupportResponse(TypedDict): rank: int @@ -27,43 +31,85 @@ def query_result_to_support_response( ] -sql_support_received = text( - """ -SELECT - RANK() OVER (ORDER BY amount DESC) AS rank - , sender_user_id - , receiver_user_id - , amount -FROM aggregate_user_tips -WHERE receiver_user_id = :receiver_user_id -ORDER BY amount DESC -LIMIT :limit -OFFSET :offset; -""" -).columns( - column("rank", Integer), - AggregateUserTips.sender_user_id, - AggregateUserTips.receiver_user_id, - AggregateUserTips.amount, -) +# Without supporter_user_id: +# ---------------------------- +# SELECT +# rank() OVER ( +# ORDER BY +# 
aggregate_user_tips.amount DESC +# ) AS rank, +# aggregate_user_tips.sender_user_id AS aggregate_user_tips_sender_user_id, +# aggregate_user_tips.receiver_user_id AS aggregate_user_tips_receiver_user_id, +# aggregate_user_tips.amount AS aggregate_user_tips_amount +# FROM +# aggregate_user_tips +# WHERE +# aggregate_user_tips.receiver_user_id = %(receiver_user_id_1) s +# ORDER BY +# aggregate_user_tips.amount DESC, aggregate_user_tips.sender_user_id ASC +# LIMIT +# %(param_1) s OFFSET %(param_2) s + + +# With supporter_user_id: +# ---------------------------- +# WITH rankings AS ( +# SELECT +# rank() OVER ( +# ORDER BY +# aggregate_user_tips.amount DESC +# ) AS rank, +# aggregate_user_tips.sender_user_id AS sender_user_id, +# aggregate_user_tips.receiver_user_id AS receiver_user_id, +# aggregate_user_tips.amount AS amount +# FROM +# aggregate_user_tips +# WHERE +# aggregate_user_tips.receiver_user_id = %(receiver_user_id_1) s +# ) +# SELECT +# rankings.rank AS rankings_rank, +# rankings.sender_user_id AS rankings_sender_user_id, +# rankings.receiver_user_id AS rankings_receiver_user_id, +# rankings.amount AS rankings_amount +# FROM +# rankings +# WHERE +# rankings.sender_user_id = %(sender_user_id_1) s def get_support_received_by_user(args) -> List[SupportResponse]: support: List[SupportResponse] = [] receiver_user_id = args.get("user_id") current_user_id = args.get("current_user_id") - limit = args.get("limit", 100) - offset = args.get("offset", 0) + supporter_user_id = args.get("supporter_user_id", None) db = get_db_read_replica() with db.scoped_session() as session: - query = ( - session.query("rank", AggregateUserTips) - .from_statement(sql_support_received) - .params(receiver_user_id=receiver_user_id, limit=limit, offset=offset) - ) - rows: List[Tuple[int, AggregateUserTips]] = query.all() + query = session.query( + func.rank().over(order_by=AggregateUserTips.amount.desc()).label("rank"), + AggregateUserTips, + ).filter(AggregateUserTips.receiver_user_id == receiver_user_id) + + # Filter to supporter we care about after ranking + if supporter_user_id is not None: + rankings = query.cte(name="rankings") + RankingsAggregateUserTips = aliased( + AggregateUserTips, rankings, name="aliased_rankings_tips" + ) + query = ( + session.query(rankings.c.rank, RankingsAggregateUserTips) + .select_from(rankings) + .filter(RankingsAggregateUserTips.sender_user_id == supporter_user_id) + ) + # Only paginate if not looking for single supporter + else: + query = query.order_by( + AggregateUserTips.amount.desc(), AggregateUserTips.sender_user_id.asc() + ) + query = paginate_query(query) + rows: List[Tuple[int, AggregateUserTips]] = query.all() user_ids = [row[1].sender_user_id for row in rows] users = get_users_by_id(session, user_ids, current_user_id) @@ -71,48 +117,123 @@ def get_support_received_by_user(args) -> List[SupportResponse]: return support -sql_support_sent = text( - """ -SELECT rank, sender_user_id, receiver_user_id, amount -FROM ( - SELECT - RANK() OVER (PARTITION BY B.receiver_user_id ORDER BY B.amount DESC) AS rank - , B.sender_user_id - , B.receiver_user_id - , B.amount - FROM aggregate_user_tips A - JOIN aggregate_user_tips B ON A.receiver_user_id = B.receiver_user_id - WHERE A.sender_user_id = :sender_user_id -) rankings -WHERE sender_user_id = :sender_user_id -ORDER BY amount DESC, receiver_user_id ASC -LIMIT :limit -OFFSET :offset; -""" -).columns( - column("rank", Integer), - AggregateUserTips.sender_user_id, - AggregateUserTips.receiver_user_id, - AggregateUserTips.amount, -) +# 
Without supported_user_id: +# ---------------------------- +# SELECT +# rankings.rank AS rankings_rank, +# rankings.sender_user_id AS rankings_sender_user_id, +# rankings.receiver_user_id AS rankings_receiver_user_id, +# rankings.amount AS rankings_amount +# FROM +# ( +# SELECT +# rank() OVER ( +# PARTITION BY joined_aggregate_tips.receiver_user_id +# ORDER BY joined_aggregate_tips.amount DESC +# ) AS rank, +# joined_aggregate_tips.sender_user_id AS sender_user_id, +# joined_aggregate_tips.receiver_user_id AS receiver_user_id, +# joined_aggregate_tips.amount AS amount +# FROM +# aggregate_user_tips +# JOIN +# aggregate_user_tips AS joined_aggregate_tips +# ON joined_aggregate_tips.receiver_user_id = aggregate_user_tips.receiver_user_id +# WHERE +# aggregate_user_tips.sender_user_id = % (sender_user_id_1)s +# ) +# AS rankings +# WHERE +# rankings.sender_user_id = % (sender_user_id_2)s +# ORDER BY +# rankings.amount DESC, +# rankings.receiver_user_id ASC LIMIT % (param_1)s OFFSET % (param_2)s + + +# With supported_user_id: +# ---------------------------- +# SELECT +# rankings.rank AS rankings_rank, +# rankings.sender_user_id AS rankings_sender_user_id, +# rankings.receiver_user_id AS rankings_receiver_user_id, +# rankings.amount AS rankings_amount +# FROM +# ( +# SELECT +# rank() OVER ( +# PARTITION BY joined_aggregate_tips.receiver_user_id +# ORDER BY joined_aggregate_tips.amount DESC +# ) AS rank, +# joined_aggregate_tips.sender_user_id AS sender_user_id, +# joined_aggregate_tips.receiver_user_id AS receiver_user_id, +# joined_aggregate_tips.amount AS amount +# FROM +# aggregate_user_tips +# JOIN +# aggregate_user_tips AS joined_aggregate_tips +# ON joined_aggregate_tips.receiver_user_id = aggregate_user_tips.receiver_user_id +# WHERE +# aggregate_user_tips.sender_user_id = % (sender_user_id_1)s +# AND aggregate_user_tips.receiver_user_id = % (receiver_user_id_1)s +# ) +# AS rankings +# WHERE +# rankings.sender_user_id = % (sender_user_id_2)s def get_support_sent_by_user(args) -> List[SupportResponse]: support: List[SupportResponse] = [] sender_user_id = args.get("user_id") current_user_id = args.get("current_user_id") - limit = args.get("limit") - offset = args.get("offset") + supported_user_id = args.get("supported_user_id", None) db = get_db_read_replica() with db.scoped_session() as session: + AggregateUserTipsB = aliased(AggregateUserTips, name="joined_aggregate_tips") query = ( - session.query("rank", AggregateUserTips) - .from_statement(sql_support_sent) - .params(sender_user_id=sender_user_id, limit=limit, offset=offset) + session.query( + func.rank() + .over( + partition_by=AggregateUserTipsB.receiver_user_id, + order_by=AggregateUserTipsB.amount.desc(), + ) + .label("rank"), + AggregateUserTipsB, + ) + .select_from(AggregateUserTips) + .join( + AggregateUserTipsB, + AggregateUserTipsB.receiver_user_id + == AggregateUserTips.receiver_user_id, + ) + .filter(AggregateUserTips.sender_user_id == sender_user_id) ) - rows: List[Tuple[int, AggregateUserTips]] = query.all() + # Filter to the receiver we care about early + if supported_user_id is not None: + query = query.filter( + AggregateUserTips.receiver_user_id == supported_user_id + ) + + subquery = query.subquery(name="rankings") + AggregateUserTipsAlias = aliased( + AggregateUserTips, subquery, name="aggregate_user_tips_alias" + ) + query = ( + session.query(subquery.c.rank, AggregateUserTipsAlias) + .select_from(subquery) + .filter(AggregateUserTipsAlias.sender_user_id == sender_user_id) + ) + + # Only paginate if not looking for 
single supporting + if supported_user_id is None: + query = query.order_by( + AggregateUserTipsAlias.amount.desc(), + AggregateUserTipsAlias.receiver_user_id.asc(), + ) + query = paginate_query(query) + + rows: List[Tuple[int, AggregateUserTips]] = query.all() user_ids = [row[1].receiver_user_id for row in rows] users = get_users_by_id(session, user_ids, current_user_id) diff --git a/discovery-provider/src/queries/get_tracks.py b/discovery-provider/src/queries/get_tracks.py index 68efd930606..75c2db5f42e 100644 --- a/discovery-provider/src/queries/get_tracks.py +++ b/discovery-provider/src/queries/get_tracks.py @@ -209,7 +209,8 @@ def get_tracks_and_ids(): for track in tracks: if track["user"][0]["is_deactivated"] or track["is_delete"]: track["track_segments"] = [] - track["download"]["cid"] = None + if track["download"] is not None: + track["download"]["cid"] = None tracks = populate_track_metadata(session, track_ids, tracks, current_user_id) diff --git a/discovery-provider/src/queries/notifications.py b/discovery-provider/src/queries/notifications.py index 8c2c195a544..24e58712e23 100644 --- a/discovery-provider/src/queries/notifications.py +++ b/discovery-provider/src/queries/notifications.py @@ -41,6 +41,7 @@ latest_sol_listen_count_milestones_slot_key, latest_sol_rewards_manager_slot_key, ) +from src.utils.spl_audio import to_wei_string logger = logging.getLogger(__name__) bp = Blueprint("notifications", __name__) @@ -1155,13 +1156,15 @@ def solana_notifications(): const.solana_notification_metadata: { const.notification_entity_id: user_tip.sender_user_id, const.notification_entity_type: "user", - const.solana_notification_tip_amount: str(user_tip.amount), + const.solana_notification_tip_amount: to_wei_string( + user_tip.amount + ), const.solana_notification_tip_signature: user_tip.signature, }, } ) - reaction_results: List[Reaction] = ( + reaction_results: List[Tuple[Reaction, int]] = ( session.query(Reaction, User.user_id) .join(User, User.wallet == Reaction.sender_wallet) .filter( @@ -1172,8 +1175,20 @@ def solana_notifications(): .all() ) + # Get tips associated with a given reaction + tip_signatures = [ + e.reacted_to for (e, _) in reaction_results if e.reaction_type == "tip" + ] + reaction_tips: List[UserTip] = ( + session.query(UserTip).filter(UserTip.signature.in_(tip_signatures)) + ).all() + tips_map = {e.signature: e for e in reaction_tips} + reactions = [] for (reaction, user_id) in reaction_results: + tip = tips_map[reaction.reacted_to] + if not tip: + continue reactions.append( { const.solana_notification_type: const.solana_notification_type_reaction, @@ -1182,7 +1197,13 @@ def solana_notifications(): const.solana_notification_metadata: { const.solana_notification_reaction_type: reaction.reaction_type, const.solana_notification_reaction_reaction_value: reaction.reaction_value, - const.solana_notification_reaction_reacted_to: reaction.reacted_to, + const.solana_notification_reaction_reacted_to_entity: { + const.solana_notification_tip_signature: tip.signature, + const.solana_notification_tip_amount: to_wei_string( + tip.amount + ), + const.solana_notification_tip_sender_id: tip.sender_user_id, + }, }, } ) diff --git a/discovery-provider/src/queries/reactions.py b/discovery-provider/src/queries/reactions.py index 901176480be..1948e737635 100644 --- a/discovery-provider/src/queries/reactions.py +++ b/discovery-provider/src/queries/reactions.py @@ -1,5 +1,6 @@ -from typing import List, Optional, Tuple, TypedDict +from typing import List, Optional, TypedDict +from sqlalchemy 
import desc from sqlalchemy.orm.session import Session from src.models.models import User from src.models.reaction import Reaction @@ -13,19 +14,22 @@ class ReactionResponse(TypedDict): def get_reactions( - session: Session, transaction_ids: List[str], type: Optional[str] + session: Session, reacted_to_ids: List[str], type: Optional[str] ) -> List[ReactionResponse]: - filters = [Reaction.reacted_to.in_(transaction_ids), User.is_current == True] + filters = [Reaction.reacted_to.in_(reacted_to_ids), User.is_current == True] if type: filters.append(Reaction.reaction_type == type) - results: List[Tuple[Reaction, int]] = ( + r: Reaction + user_id: int + r, user_id = ( session.query(Reaction, User.user_id) .join(User, User.wallet == Reaction.sender_wallet) .filter( *filters, ) - .all() + .order_by(desc(Reaction.slot)) + .first() ) return [ @@ -35,5 +39,4 @@ def get_reactions( "reacted_to": r.reacted_to, "sender_user_id": user_id, } - for (r, user_id) in results ] diff --git a/discovery-provider/src/queries/response_name_constants.py b/discovery-provider/src/queries/response_name_constants.py index 4a81fc9130c..9bfbd4c8931 100644 --- a/discovery-provider/src/queries/response_name_constants.py +++ b/discovery-provider/src/queries/response_name_constants.py @@ -126,10 +126,11 @@ solana_notification_tip_rank = "rank" solana_notification_tip_amount = "amount" solana_notification_tip_signature = "tx_signature" +solana_notification_tip_sender_id = "tip_sender_id" solana_notification_reaction_type = "reaction_type" solana_notification_reaction_type_tip = "tip" -solana_notification_reaction_reacted_to = "reacted_to" +solana_notification_reaction_reacted_to_entity = "reacted_to_entity" solana_notification_reaction_reaction_value = "reaction_value" diff --git a/discovery-provider/src/queries/search_es.py b/discovery-provider/src/queries/search_es.py new file mode 100644 index 00000000000..470bfb56800 --- /dev/null +++ b/discovery-provider/src/queries/search_es.py @@ -0,0 +1,420 @@ +import logging +from typing import Any, Dict + +from src.api.v1.helpers import ( + extend_favorite, + extend_playlist, + extend_repost, + extend_track, + extend_user, +) +from src.queries.get_feed_es import fetch_followed_saves_and_reposts, item_key +from src.utils.elasticdsl import ( + ES_PLAYLISTS, + ES_TRACKS, + ES_USERS, + esclient, + pluck_hits, + populate_track_or_playlist_metadata_es, + populate_user_metadata_es, +) + +logger = logging.getLogger(__name__) + + +def search_es_full(args: dict): + if not esclient: + raise Exception("esclient is None") + + search_str = args.get("query") + current_user_id = args.get("current_user_id") + limit = args.get("limit", 10) + offset = args.get("offset", 0) + search_type = args.get("kind", "all") + only_downloadable = args.get("only_downloadable") + do_tracks = search_type == "all" or search_type == "tracks" + do_users = search_type == "all" or search_type == "users" + do_playlists = search_type == "all" or search_type == "playlists" + do_albums = search_type == "all" or search_type == "albums" + + mdsl: Any = [] + + # Scoring Summary + # Query score * Function score multiplier + # Query score = boosted on text similarity, verified artists, personalization (current user saved or reposted or followed) + # Function score multiplier = popularity (repost count) + + # tracks + if do_tracks: + mdsl.extend( + [ + {"index": ES_TRACKS}, + track_dsl( + search_str, + current_user_id, + must_saved=False, + only_downloadable=only_downloadable, + ), + ] + ) + + # saved tracks + if current_user_id: + 
mdsl.extend( + [ + {"index": ES_TRACKS}, + track_dsl( + search_str, + current_user_id, + must_saved=True, + only_downloadable=only_downloadable, + ), + ] + ) + + # users + if do_users: + mdsl.extend( + [ + {"index": ES_USERS}, + user_dsl(search_str, current_user_id), + ] + ) + if current_user_id: + mdsl.extend( + [ + {"index": ES_USERS}, + user_dsl(search_str, current_user_id, True), + ] + ) + + # playlists + if do_playlists: + mdsl.extend( + [ + {"index": ES_PLAYLISTS}, + playlist_dsl(search_str, current_user_id), + ] + ) + + # saved playlists + if current_user_id: + mdsl.extend( + [ + {"index": ES_PLAYLISTS}, + playlist_dsl(search_str, current_user_id, True), + ] + ) + + # albums + if do_albums: + mdsl.extend( + [ + {"index": ES_PLAYLISTS}, + album_dsl(search_str, current_user_id), + ] + ) + # saved albums + if current_user_id: + mdsl.extend( + [ + {"index": ES_PLAYLISTS}, + album_dsl(search_str, current_user_id, True), + ] + ) + + # add size and limit with some + # over-fetching for sake of drop_copycats + index_name = "" + for dsl in mdsl: + if "index" in dsl: + index_name = dsl["index"] + continue + dsl["size"] = limit + dsl["from"] = offset + if index_name == ES_USERS: + dsl["size"] = limit + 5 + + mfound = esclient.msearch(searches=mdsl) + + response: Dict = { + "tracks": [], + "saved_tracks": [], + "users": [], + "followed_users": [], + "playlists": [], + "saved_playlists": [], + "albums": [], + "saved_albums": [], + } + + if do_tracks: + response["tracks"] = pluck_hits(mfound["responses"].pop(0)) + if current_user_id: + response["saved_tracks"] = pluck_hits(mfound["responses"].pop(0)) + + if do_users: + response["users"] = pluck_hits(mfound["responses"].pop(0)) + if current_user_id: + response["followed_users"] = pluck_hits(mfound["responses"].pop(0)) + + if do_playlists: + response["playlists"] = pluck_hits(mfound["responses"].pop(0)) + if current_user_id: + response["saved_playlists"] = pluck_hits(mfound["responses"].pop(0)) + + if do_albums: + response["albums"] = pluck_hits(mfound["responses"].pop(0)) + if current_user_id: + response["saved_albums"] = pluck_hits(mfound["responses"].pop(0)) + + # hydrate users, saves, reposts + item_keys = [] + user_ids = set() + if current_user_id: + user_ids.add(current_user_id) + + # collect keys for fetching + for k in [ + "tracks", + "saved_tracks", + "playlists", + "saved_playlists", + "albums", + "saved_albums", + ]: + for item in response[k]: + item_keys.append(item_key(item)) + user_ids.add(item.get("owner_id", item.get("playlist_owner_id"))) + + # fetch users + users_by_id = {} + current_user = None + + if user_ids: + users_mget = esclient.mget(index=ES_USERS, ids=list(user_ids)) + users_by_id = {d["_id"]: d["_source"] for d in users_mget["docs"] if d["found"]} + if current_user_id: + current_user = users_by_id.get(str(current_user_id)) + for id, user in users_by_id.items(): + users_by_id[id] = populate_user_metadata_es(user, current_user) + + # fetch followed saves + reposts + # TODO: instead of limit param (20) should do an agg to get 3 saves / reposts per item_key + (follow_saves, follow_reposts) = fetch_followed_saves_and_reposts( + current_user_id, item_keys, 20 + ) + + # tracks: finalize + for k in ["tracks", "saved_tracks"]: + tracks = response[k] + hydrate_user(tracks, users_by_id) + hydrate_saves_reposts(tracks, follow_saves, follow_reposts) + response[k] = transform_tracks(tracks, users_by_id, current_user) + + # users: finalize + for k in ["users", "followed_users"]: + users = drop_copycats(response[k]) + users = 
users[:limit] + response[k] = [ + extend_user(populate_user_metadata_es(user, current_user)) for user in users + ] + + # playlists: finalize + for k in ["playlists", "saved_playlists"]: + playlists = response[k] + hydrate_saves_reposts(playlists, follow_saves, follow_reposts) + hydrate_user(playlists, users_by_id) + response[k] = [ + extend_playlist(populate_track_or_playlist_metadata_es(item, current_user)) + for item in playlists + ] + + return response + + +def base_match(search_str: str, operator="or"): + return [ + { + "multi_match": { + "query": search_str, + "fields": [ + "suggest", + "suggest._2gram", + "suggest._3gram", + ], + "operator": operator, + "type": "bool_prefix", + "fuzziness": "AUTO", + } + } + ] + + +def be_saved(current_user_id): + return {"term": {"saved_by": {"value": current_user_id, "boost": 1.2}}} + + +def be_reposted(current_user_id): + return {"term": {"reposted_by": {"value": current_user_id, "boost": 1.2}}} + + +def be_followed(current_user_id): + return { + "terms": { + "_id": { + "index": ES_USERS, + "id": str(current_user_id), + "path": "following_ids", + }, + } + } + + +def personalize_dsl(dsl, current_user_id, must_saved): + if current_user_id and must_saved: + dsl["must"].append(be_saved(current_user_id)) + + if current_user_id: + dsl["should"].append(be_saved(current_user_id)) + dsl["should"].append(be_reposted(current_user_id)) + + +def default_function_score(dsl, ranking_field): + return { + "query": { + "function_score": { + "query": {"bool": dsl}, + "functions": [ + { + "field_value_factor": { + "field": ranking_field, + "modifier": "ln2p", + } + }, + ], + } + }, + } + + +def track_dsl(search_str, current_user_id, must_saved=False, only_downloadable=False): + dsl = { + "must": [ + *base_match(search_str), + {"term": {"is_unlisted": {"value": False}}}, + {"term": {"is_delete": False}}, + ], + "must_not": [ + {"exists": {"field": "stem_of"}}, + ], + "should": [ + *base_match(search_str, operator="and"), + ], + } + + if only_downloadable: + dsl["must"].append({"term": {"downloadable": {"value": True}}}) + + personalize_dsl(dsl, current_user_id, must_saved) + return default_function_score(dsl, "repost_count") + + +def user_dsl(search_str, current_user_id, must_saved=False): + dsl = { + "must": [ + *base_match(search_str), + {"term": {"is_deactivated": {"value": False}}}, + ], + "must_not": [], + "should": [ + *base_match(search_str, operator="and"), + {"term": {"is_verified": {"value": True}}}, + ], + } + + if current_user_id and must_saved: + dsl["must"].append(be_followed(current_user_id)) + + if current_user_id: + dsl["should"].append(be_followed(current_user_id)) + + return default_function_score(dsl, "follower_count") + + +def base_playlist_dsl(search_str, is_album): + return { + "must": [ + *base_match(search_str), + {"term": {"is_private": {"value": False}}}, + {"term": {"is_delete": False}}, + {"term": {"is_album": {"value": is_album}}}, + ], + "should": [ + *base_match(search_str, operator="and"), + {"term": {"is_verified": {"value": True}}}, + ], + } + + +def playlist_dsl(search_str, current_user_id, must_saved=False): + dsl = base_playlist_dsl(search_str, False) + personalize_dsl(dsl, current_user_id, must_saved) + return default_function_score(dsl, "repost_count") + + +def album_dsl(search_str, current_user_id, must_saved=False): + dsl = base_playlist_dsl(search_str, True) + personalize_dsl(dsl, current_user_id, must_saved) + return default_function_score(dsl, "repost_count") + + +def drop_copycats(users): + """Filters out users with copy 
cat names. + e.g. if a verified deadmau5 is in the result set + filter out all non-verified users with same name. + """ + reserved = set() + for user in users: + if user["is_verified"]: + reserved.add(lower_ascii_name(user["name"])) + + filtered = [] + for user in users: + if not user["is_verified"] and lower_ascii_name(user["name"]) in reserved: + continue + filtered.append(user) + return filtered + + +def lower_ascii_name(name): + if not name: + return "" + n = name.lower() + n = n.encode("ascii", "ignore") + return n.decode() + + +def hydrate_user(items, users_by_id): + for item in items: + uid = str(item.get("owner_id", item.get("playlist_owner_id"))) + user = users_by_id.get(uid) + if user: + item["user"] = user + + +def hydrate_saves_reposts(items, follow_saves, follow_reposts): + for item in items: + ik = item_key(item) + item["followee_reposts"] = [extend_repost(r) for r in follow_reposts[ik]] + item["followee_favorites"] = [extend_favorite(x) for x in follow_saves[ik]] + + +def transform_tracks(tracks, users_by_id, current_user): + tracks_out = [] + for track in tracks: + track = populate_track_or_playlist_metadata_es(track, current_user) + track = extend_track(track) + tracks_out.append(track) + + return tracks_out diff --git a/discovery-provider/src/queries/search_queries.py b/discovery-provider/src/queries/search_queries.py index ea1079a2ed9..28c13239f73 100644 --- a/discovery-provider/src/queries/search_queries.py +++ b/discovery-provider/src/queries/search_queries.py @@ -1,11 +1,13 @@ import concurrent.futures import logging # pylint: disable=C0302 +import os from enum import Enum from functools import cmp_to_key import sqlalchemy from flask import Blueprint, request from src import api_helpers, exceptions +from src.api.v1.helpers import extend_search from src.models import Follow, RepostType, Save, SaveType from src.queries import response_name_constants from src.queries.get_unpopulated_playlists import get_unpopulated_playlists @@ -33,6 +35,7 @@ user_handle_exact_match_boost, user_name_weight, ) +from src.queries.search_es import search_es_full from src.queries.search_track_tags import search_track_tags from src.queries.search_user_tags import search_user_tags from src.utils.db_session import get_db_read_replica @@ -262,6 +265,14 @@ def search(args): """Perform a search. 
`args` should contain `is_auto_complete`, `query`, `kind`, `current_user_id`, and `only_downloadable` """ + + if os.getenv("audius_elasticsearch_search_enabled"): + try: + resp = search_es_full(args) + return resp + except Exception as e: + logger.error(f"Elasticsearch error: {e}") + search_str = args.get("query") # when creating query table, we substitute this too @@ -357,7 +368,7 @@ def submit_and_add(search_type): if user_id is not None: user = users[user_id] result["user"] = user - return results + return extend_search(results) def track_search_query( diff --git a/discovery-provider/src/tasks/index_materialized_views.py b/discovery-provider/src/tasks/index_materialized_views.py index 6b9c6aaf6bf..95be0b958d7 100644 --- a/discovery-provider/src/tasks/index_materialized_views.py +++ b/discovery-provider/src/tasks/index_materialized_views.py @@ -1,4 +1,5 @@ import logging +import os import time from src.tasks.celery_app import celery @@ -11,12 +12,18 @@ def update_views(self, db): with db.scoped_session() as session: start_time = time.time() + if os.getenv("audius_elasticsearch_search_enabled"): + session.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY tag_track_user") + logger.info( + f"index_materialized_views.py | Finished updating tag_track_user in: {time.time() - start_time} sec." + ) + return + logger.info("index_materialized_views.py | Updating materialized views") session.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY user_lexeme_dict") session.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY track_lexeme_dict") session.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY playlist_lexeme_dict") session.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY album_lexeme_dict") - session.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY tag_track_user") logger.info( f"index_materialized_views.py | Finished updating materialized views in: {time.time() - start_time} sec." 
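# --- Reviewer note (not part of the patch): search() in search_queries.py above
# gates Elasticsearch behind an env flag and falls back to the Postgres lexeme
# search on any error. A minimal sketch of that rollout pattern; the helper name
# search_with_fallback is hypothetical, while the env var and logging mirror the
# patch.
import logging
import os

logger = logging.getLogger(__name__)


def search_with_fallback(args, es_search, pg_search):
    # os.getenv returns None when the flag is unset, so any non-empty value
    # enables the Elasticsearch path -- the same truthiness check the patch uses.
    if os.getenv("audius_elasticsearch_search_enabled"):
        try:
            return es_search(args)
        except Exception as e:
            logger.error(f"Elasticsearch error: {e}")  # fall through to Postgres
    return pg_search(args)


# Design note: the same flag drives index_materialized_views.py above. Once ES
# serves search, only tag_track_user still needs refreshing, because the
# *_lexeme_dict materialized views exist solely for the Postgres search path.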
diff --git a/discovery-provider/src/tasks/index_solana_plays.py b/discovery-provider/src/tasks/index_solana_plays.py index ce6d89f8132..61d6066ae8f 100644 --- a/discovery-provider/src/tasks/index_solana_plays.py +++ b/discovery-provider/src/tasks/index_solana_plays.py @@ -632,7 +632,7 @@ def process_solana_plays(solana_client_manager: SolanaClientManager, redis: Redi ) raise e - if last_tx: + if last_tx and transaction_signatures: redis.set(latest_sol_plays_slot_key, last_tx["slot"]) elif latest_global_slot is not None: redis.set(latest_sol_plays_slot_key, latest_global_slot) diff --git a/discovery-provider/src/utils/elasticdsl.py b/discovery-provider/src/utils/elasticdsl.py index 67985dfbfd2..976eacd2a5a 100644 --- a/discovery-provider/src/utils/elasticdsl.py +++ b/discovery-provider/src/utils/elasticdsl.py @@ -2,6 +2,7 @@ import os from elasticsearch import Elasticsearch +from src.utils.spl_audio import to_wei es_url = os.getenv("audius_elasticsearch_url") esclient = None @@ -25,7 +26,12 @@ def listify(things): def pluck_hits(found): - return [h["_source"] for h in found["hits"]["hits"]] + res = [h["_source"] for h in found["hits"]["hits"]] + + # add score for search_quality.py script + for i in range(len(found["hits"]["hits"])): + res[i]["_score"] = found["hits"]["hits"][i]["_score"] + return res def docs_and_ids(found, id_set=False): @@ -43,18 +49,38 @@ def hits_by_id(found): return {h["_id"]: h["_source"] for h in found["hits"]["hits"]} -def popuate_user_metadata_es(user, current_user): - user_following = user.get("following_ids", []) - current_user_following = current_user.get("following_ids", []) - user["does_current_user_follow"] = user["user_id"] in current_user_following - user["does_follow_current_user"] = current_user["user_id"] in user_following +def populate_user_metadata_es(user, current_user): + user["total_balance"] = str( + int(user.get("balance", "0") or "0") + + int(user.get("associated_wallets_balance", "0") or "0") + + to_wei(user.get("associated_sol_wallets_balance", "0") or 0) + + to_wei(user.get("waudio", "0") or 0) + ) + + # Mutual box on profile page will fetch the data to compute this number + # using the /v1/full/users/xyz/related?user_id=abc endpoint + # Avoid extra round trips by not computing it here + user["current_user_followee_follow_count"] = None + + if current_user: + user_following = user.get("following_ids", []) + current_user_following = current_user.get("following_ids", []) + user["does_current_user_follow"] = user["user_id"] in current_user_following + user["does_follow_current_user"] = current_user["user_id"] in user_following + else: + user["does_current_user_follow"] = False + user["does_follow_current_user"] = False return omit_indexed_fields(user) def populate_track_or_playlist_metadata_es(item, current_user): - my_id = current_user["user_id"] - item["has_current_user_reposted"] = my_id in item["reposted_by"] - item["has_current_user_saved"] = my_id in item["saved_by"] + if current_user: + my_id = current_user["user_id"] + item["has_current_user_reposted"] = my_id in item["reposted_by"] + item["has_current_user_saved"] = my_id in item["saved_by"] + else: + item["has_current_user_reposted"] = False + item["has_current_user_saved"] = False return omit_indexed_fields(item) diff --git a/discovery-provider/src/utils/helpers.py b/discovery-provider/src/utils/helpers.py index 7230318c0b3..69c6ff62d45 100644 --- a/discovery-provider/src/utils/helpers.py +++ b/discovery-provider/src/utils/helpers.py @@ -209,6 +209,10 @@ def 
tuple_to_model_dictionary(t, model): "level": "levelname", "msg": "message", "timestamp": "asctime", + "pathname": "pathname", + "funcName": "funcName", + "lineno": "lineno", + "service": os.getenv("audius_service", "default"), } formatter = JsonFormatter(log_format, ensure_ascii=False, mix_extra=True) diff --git a/eth-contracts/README.md b/eth-contracts/README.md index 54d9a67a983..110340b9422 100644 --- a/eth-contracts/README.md +++ b/eth-contracts/README.md @@ -1,7 +1,5 @@ # Audius Ethereum smart contracts -[![Coverage Status](https://coveralls.io/repos/github/AudiusProject/audius-protocol/badge.svg)](https://coveralls.io/github/AudiusProject/audius-protocol) - Audius has two sets of contracts - the one in this directory, which runs on Ethereum mainnet in production, and the one [here](https://github.com/AudiusProject/audius-protocol/tree/master/contracts) which runs on POA diff --git a/eth-contracts/scripts/lint.sh b/eth-contracts/scripts/lint.sh index 258a45ae77e..9032da615fe 100755 --- a/eth-contracts/scripts/lint.sh +++ b/eth-contracts/scripts/lint.sh @@ -2,7 +2,6 @@ set -e set -o pipefail - -if docker ps | grep 'audius_ganache_cli_eth_contracts_test' > /dev/null; then +if docker ps | grep 'audius_ganache_cli_eth_contracts_test' >/dev/null; then # killing the container seems to be faster than restarting printf 'Remove old containers and build artifacts\n' docker rm -f audius_ganache_cli_eth_contracts_test @@ -36,18 +36,14 @@ docker run --name audius_ganache_cli_eth_contracts_test -d -p 8556:8545 truffles ./node_modules/.bin/truffle compile # run truffle tests -if [ $# -eq 0 ] - then - node_modules/.bin/truffle test test/*.js --network=test_local -elif [ $1 == '--audius-random' ] && [ $# -eq 1 ] - then - node_modules/.bin/truffle test test/random/random.test.js --network=test_local -elif [ $1 == '--verbose-rpc' ] && [ $# -eq 1 ] - then - node_modules/.bin/truffle test test/*.js --network=test_local --verbose-rpc -elif [ $1 == '--verbose-rpc' ] && [ $# -eq 2 ] - then - node_modules/.bin/truffle test test/*.js --network=test_local --verbose-rpc $2 +if [ $# -eq 0 ]; then + node_modules/.bin/truffle test test/*.js --network=test_local +elif [ $1 == '--audius-random' ] && [ $# -eq 1 ]; then + node_modules/.bin/truffle test test/random/random.test.js --network=test_local +elif [ $1 == '--verbose-rpc' ] && [ $# -eq 1 ]; then + node_modules/.bin/truffle test test/*.js --network=test_local --verbose-rpc +elif [ $1 == '--verbose-rpc' ] && [ $# -eq 2 ]; then + node_modules/.bin/truffle test test/*.js --network=test_local --verbose-rpc $2 else node_modules/.bin/truffle test test/*.js --network=test_local $1 fi diff --git a/identity-service/package-lock.json b/identity-service/package-lock.json index 15fb2252d86..550cfa0a0b8 100644 --- a/identity-service/package-lock.json +++ b/identity-service/package-lock.json @@ -97,9 +97,9 @@ } }, "@audius/libs": { - "version": "1.2.117", - "resolved": "https://registry.npmjs.org/@audius/libs/-/libs-1.2.117.tgz", - "integrity": "sha512-ZtvpCI71O+jd+pLlZRegZIEXbJjn3yUoYu60JlD+Dx0LqnCo6lZ6+jtWFzhDE29HKSdx0NE9XmHKuNKFcgdP5Q==", + "version": "1.2.118", + "resolved": "https://registry.npmjs.org/@audius/libs/-/libs-1.2.118.tgz", + "integrity": "sha512-M1xDe8i9bGRIziTT4xRb2NAG+wGZ3qapSFEaMSulkTCKmXwP3AlztFt1gfuk+fgqvvn1MMXa66I3sl6H/pZe8A==", "requires": { "@audius/anchor-audius-data": "0.0.2", "@audius/hedgehog": "1.0.12", @@ -9429,25 +9429,25 @@ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==" }, "ipfs-unixfs": { - "version": "6.0.7", - "resolved":
"https://registry.npmjs.org/ipfs-unixfs/-/ipfs-unixfs-6.0.7.tgz", - "integrity": "sha512-5mKbQgvux6n5lQ+upGWWPKcoswXahdOcyGQ2SbIIRV6eBJMzxLprzKsyb0GMsg80tHX2wnNOxBKSCiSGjb+54A==", + "version": "6.0.9", + "resolved": "https://registry.npmjs.org/ipfs-unixfs/-/ipfs-unixfs-6.0.9.tgz", + "integrity": "sha512-0DQ7p0/9dRB6XCb0mVCTli33GzIzSVx5udpJuVM47tGcD+W+Bl4LsnoLswd3ggNnNEakMv1FdoFITiEnchXDqQ==", "requires": { "err-code": "^3.0.1", "protobufjs": "^6.10.2" } }, "ipfs-unixfs-importer": { - "version": "9.0.8", - "resolved": "https://registry.npmjs.org/ipfs-unixfs-importer/-/ipfs-unixfs-importer-9.0.8.tgz", - "integrity": "sha512-Vye6kLDz9zTFDbuSeggm/dUUW85im+JTs7vYK0Kk9xXmJzzIqOMZCv4fY6kVnWlQOk6z4IVntJWW6LnmjD6dkw==", + "version": "9.0.10", + "resolved": "https://registry.npmjs.org/ipfs-unixfs-importer/-/ipfs-unixfs-importer-9.0.10.tgz", + "integrity": "sha512-W+tQTVcSmXtFh7FWYWwPBGXJ1xDgREbIyI1E5JzDcimZLIyT5gGMfxR3oKPxxWj+GKMpP5ilvMQrbsPzWcm3Fw==", "requires": { "@ipld/dag-pb": "^2.0.2", "@multiformats/murmur3": "^1.0.3", "bl": "^5.0.0", "err-code": "^3.0.1", "hamt-sharding": "^2.0.0", - "interface-blockstore": "^1.0.0", + "interface-blockstore": "^2.0.3", "ipfs-unixfs": "^6.0.0", "it-all": "^1.0.5", "it-batch": "^1.0.8", @@ -9457,27 +9457,6 @@ "multiformats": "^9.4.2", "rabin-wasm": "^0.1.4", "uint8arrays": "^3.0.0" - }, - "dependencies": { - "interface-blockstore": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/interface-blockstore/-/interface-blockstore-1.0.2.tgz", - "integrity": "sha512-e8rHqaBSOsBPpSaB+wwVa9mR5ntU+t1yzXpOFC16cSKCNsV+h6n8SjekPQcdODVBN2h8t45CsOqRAnUfm1guEw==", - "requires": { - "err-code": "^3.0.1", - "interface-store": "^1.0.2", - "it-all": "^1.0.5", - "it-drain": "^1.0.4", - "it-filter": "^1.0.2", - "it-take": "^1.0.1", - "multiformats": "^9.0.4" - } - }, - "interface-store": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/interface-store/-/interface-store-1.0.2.tgz", - "integrity": "sha512-rUBLYsgoWwxuUpnQoSUr+DR/3dH3reVeIu5aOHFZK31lAexmb++kR6ZECNRgrx6WvoaM3Akdo0A7TDrqgCzZaQ==" - } } }, "is-accessor-descriptor": { @@ -10047,16 +10026,6 @@ "resolved": "https://registry.npmjs.org/it-batch/-/it-batch-1.0.9.tgz", "integrity": "sha512-7Q7HXewMhNFltTsAMdSz6luNhyhkhEtGGbYek/8Xb/GiqYMtwUmopE1ocPSiJKKp3rM4Dt045sNFoUu+KZGNyA==" }, - "it-drain": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/it-drain/-/it-drain-1.0.5.tgz", - "integrity": "sha512-r/GjkiW1bZswC04TNmUnLxa6uovme7KKwPhc+cb1hHU65E3AByypHH6Pm91WHuvqfFsm+9ws0kPtDBV3/8vmIg==" - }, - "it-filter": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/it-filter/-/it-filter-1.0.3.tgz", - "integrity": "sha512-EI3HpzUrKjTH01miLHWmhNWy3Xpbx4OXMXltgrNprL5lDpF3giVpHIouFpr5l+evXw6aOfxhnt01BIB+4VQA+w==" - }, "it-first": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/it-first/-/it-first-1.0.7.tgz", @@ -10070,11 +10039,6 @@ "it-batch": "^1.0.9" } }, - "it-take": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/it-take/-/it-take-1.0.2.tgz", - "integrity": "sha512-u7I6qhhxH7pSevcYNaMECtkvZW365ARqAIt9K+xjdK1B2WUDEjQSfETkOCT8bxFq/59LqrN3cMLUtTgmDBaygw==" - }, "jayson": { "version": "3.6.2", "resolved": "https://registry.npmjs.org/jayson/-/jayson-3.6.2.tgz", diff --git a/identity-service/package.json b/identity-service/package.json index 05c3bc71134..54f4c400c16 100644 --- a/identity-service/package.json +++ b/identity-service/package.json @@ -18,7 +18,7 @@ }, "dependencies": { "@amplitude/node": "^1.9.2", - "@audius/libs": "1.2.117", + 
"@audius/libs": "1.2.118", "@certusone/wormhole-sdk": "0.1.1", "@improbable-eng/grpc-web-node-http-transport": "^0.15.0", "@optimizely/optimizely-sdk": "^4.6.0", diff --git a/identity-service/src/app.js b/identity-service/src/app.js index 661c7c09a0a..18430dfb0ce 100644 --- a/identity-service/src/app.js +++ b/identity-service/src/app.js @@ -17,7 +17,7 @@ const NotificationProcessor = require('./notifications/index.js') const { generateWalletLockKey } = require('./relay/txRelay.js') const { generateETHWalletLockKey } = require('./relay/ethTxRelay.js') -const { SlackReporter } = require('./utils/rewardsReporter') +const { SlackReporter } = require('./utils/slackReporter') const { sendResponse, errorResponseServerError } = require('./apiHelpers') const { fetchAnnouncements } = require('./announcements') const { logger, loggingMiddleware } = require('./logging') diff --git a/identity-service/src/config.js b/identity-service/src/config.js index 778eb85faee..4add6a87289 100644 --- a/identity-service/src/config.js +++ b/identity-service/src/config.js @@ -738,6 +738,12 @@ const config = convict({ env: 'errorWormholeReporterSlackUrl', default: '' }, + verifiedUserReporterSlackUrl: { + doc: 'The slack url to post messages for new verified users', + format: String, + env: 'verifiedUserReporterSlackUrl', + default: '' + }, wormholeRPCHosts: { doc: 'Wormhole RPC Host', format: String, diff --git a/identity-service/src/routes/instagram.js b/identity-service/src/routes/instagram.js index bb148ca1464..5fc7dc7635e 100644 --- a/identity-service/src/routes/instagram.js +++ b/identity-service/src/routes/instagram.js @@ -4,6 +4,12 @@ const models = require('../models') const txRelay = require('../relay/txRelay') const { handleResponse, successResponse, errorResponseBadRequest, errorResponseServerError } = require('../apiHelpers') +const { VerifiedUserReporter } = require('../utils/verifiedUserReporter.js') + +const verifiedUserReporter = new VerifiedUserReporter({ + slackUrl: config.get('verifiedUserReporterSlackUrl'), + source: 'instagram' +}) const getInstagramURL = (username) => { const instagramProfileUrl = config.get('instagramProfileUrl') || 'https://www.instagram.com/%USERNAME%/channel/?__a=1' @@ -106,9 +112,11 @@ module.exports = function (app) { try { // Verify the user user id exists in the DB before updating it - const igUser = await models.InstagramUser.findOne({ where: { - uuid: profile.username - } }) + const igUser = await models.InstagramUser.findOne({ + where: { + uuid: profile.username + } + }) if (!igUser) throw new Error(`Could not find matching ig user in the db: ${profile.username}`) igUser.profile = profile igUser.verified = profile.is_verified || false @@ -153,6 +161,7 @@ module.exports = function (app) { gasLimit: null } await txRelay.sendTransaction(req, false, txProps, 'instagramVerified') + await verifiedUserReporter.report({ userId, handle }) } catch (e) { return errorResponseBadRequest(e) } diff --git a/identity-service/src/routes/notifications.js b/identity-service/src/routes/notifications.js index 67d35ba8308..43d431904f8 100644 --- a/identity-service/src/routes/notifications.js +++ b/identity-service/src/routes/notifications.js @@ -211,7 +211,8 @@ const formatTipSend = (notification) => ({ ...getCommonNotificationsFields(notification), type: notification.type, amount: notification.metadata.amount, - recipientId: notification.entityId + entityId: notification.entityId, + entityType: Entity.User }) const formatTipReceive = (notification) => ({ @@ -219,31 +220,35 @@ const 
formatTipReceive = (notification) => ({ type: notification.type, amount: notification.metadata.amount, reactionValue: notification.metadata.reactionValue, - senderId: notification.entityId, - tipTxSignature: notification.metadata.tipTxSignature + entityId: notification.entityId, + tipTxSignature: notification.metadata.tipTxSignature, + entityType: Entity.User }) const formatSupportingRankUp = (notification) => ({ ...getCommonNotificationsFields(notification), type: notification.type, - supportedUserId: notification.metadata.supportedUserId, - rank: notification.entityId + entityId: notification.metadata.supportedUserId, + rank: notification.entityId, + entityType: Entity.User }) const formatSupporterRankUp = (notification) => ({ ...getCommonNotificationsFields(notification), type: notification.type, - supportingUser: notification.metadata.supportingUserId, - rank: notification.entityId + entityId: notification.metadata.supportingUserId, + rank: notification.entityId, + entityType: Entity.User }) const formatReaction = (notification) => ({ ...getCommonNotificationsFields(notification), type: notification.type, - reactingUser: notification.entityId, + entityId: notification.entityId, reactionType: notification.metadata.reactionType, reactionValue: notification.metadata.reactionValue, - reactedTo: notification.metadata.reactedTo + reactedTo: notification.metadata.reactedTo, + entityType: Entity.User }) const getCommonNotificationsFields = (notification) => ({ diff --git a/identity-service/src/routes/twitter.js b/identity-service/src/routes/twitter.js index 9a515889a7b..57f661cc443 100644 --- a/identity-service/src/routes/twitter.js +++ b/identity-service/src/routes/twitter.js @@ -6,6 +6,12 @@ const uuidv4 = require('uuid/v4') const txRelay = require('../relay/txRelay') const { handleResponse, successResponse, errorResponseBadRequest } = require('../apiHelpers') +const { VerifiedUserReporter } = require('../utils/verifiedUserReporter.js') + +const verifiedUserReporter = new VerifiedUserReporter({ + slackUrl: config.get('verifiedUserReporterSlackUrl'), + source: 'twitter' +}) /** * This file contains the twitter endpoints for oauth @@ -150,6 +156,7 @@ module.exports = function (app) { gasLimit: null } await txRelay.sendTransaction(req, false, txProps, 'twitterVerified') + await verifiedUserReporter.report({ userId, handle }) } catch (e) { return errorResponseBadRequest(e) } diff --git a/identity-service/src/utils/rewardsReporter.js b/identity-service/src/utils/rewardsReporter.js index 74091c8307d..801f3ed83f4 100644 --- a/identity-service/src/utils/rewardsReporter.js +++ b/identity-service/src/utils/rewardsReporter.js @@ -1,5 +1,5 @@ -const axios = require('axios') const AnalyticsProvider = require('../analytics') +const { SlackReporter } = require('./slackReporter') const RewardEventNames = { REWARDS_CLAIM_SUCCESS: 'Rewards Claim: Success', @@ -11,33 +11,6 @@ const RewardEventNames = { REWARDS_CLAIM_BLOCKED: 'Rewards Claim: Blocked' } -class SlackReporter { - constructor ({ - slackUrl, - childLogger - }) { - this.slackUrl = slackUrl - this.childLogger = childLogger - } - - getJsonSlackMessage (obj) { - return `\`\`\` -${Object.entries(obj).map(([key, value]) => `${key}: ${value}`).join('\n')} -\`\`\`` - } - - async postToSlack ({ - message - }) { - try { - if (!this.slackUrl) return - await axios.post(this.slackUrl, { text: message }) - } catch (e) { - this.childLogger.info(`Error posting to slack in slack reporter ${e.toString()}`) - } - } -} - class RewardsReporter { constructor ({ 
successSlackUrl, @@ -183,6 +156,5 @@ class RewardsReporter { } module.exports = { - SlackReporter, RewardsReporter } diff --git a/identity-service/src/utils/slackReporter.js b/identity-service/src/utils/slackReporter.js new file mode 100644 index 00000000000..8e74141cc70 --- /dev/null +++ b/identity-service/src/utils/slackReporter.js @@ -0,0 +1,32 @@ +const axios = require('axios') + +class SlackReporter { + constructor ({ + slackUrl, + childLogger + }) { + this.slackUrl = slackUrl + this.childLogger = childLogger + } + + getJsonSlackMessage (obj) { + return `\`\`\` +${Object.entries(obj).map(([key, value]) => `${key}: ${value}`).join('\n')} +\`\`\`` + } + + async postToSlack ({ + message + }) { + try { + if (!this.slackUrl) return + await axios.post(this.slackUrl, { text: message }) + } catch (e) { + this.childLogger.info(`Error posting to slack in slack reporter ${e.toString()}`) + } + } +} + +module.exports = { + SlackReporter +} diff --git a/identity-service/src/utils/verifiedUserReporter.js b/identity-service/src/utils/verifiedUserReporter.js new file mode 100644 index 00000000000..e52e833cd40 --- /dev/null +++ b/identity-service/src/utils/verifiedUserReporter.js @@ -0,0 +1,36 @@ +const { SlackReporter } = require('./slackReporter') +const config = require('../config') + +const WEBSITE_HOST = config.get('websiteHost') + +class VerifiedUserReporter { + constructor ({ + slackUrl, + source, + childLogger = console + }) { + this.reporter = new SlackReporter({ slackUrl, childLogger }) + this.source = source + this.childLogger = childLogger + } + + async report ({ userId, handle }) { + try { + const report = { + userId, + handle, + link: `${WEBSITE_HOST}/${handle}`, + source: this.source + } + const message = this.reporter.getJsonSlackMessage(report) + await this.reporter.postToSlack({ message }) + this.childLogger.info(report, `Verified User Reporter`) + } catch (e) { + console.error(`Report failure: ${JSON.stringify(e)}`) + } + } +} + +module.exports = { + VerifiedUserReporter +} diff --git a/libs/data-contracts/signatureSchemas.js b/libs/data-contracts/signatureSchemas.ts similarity index 60% rename from libs/data-contracts/signatureSchemas.js rename to libs/data-contracts/signatureSchemas.ts index 8f87cf4f39e..fa6c64c5f7f 100644 --- a/libs/data-contracts/signatureSchemas.js +++ b/libs/data-contracts/signatureSchemas.ts @@ -7,9 +7,22 @@ * modeled off: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md */ -const domains = {} - -function getDomainData (contractName, signatureVersion, chainId, contractAddress) { +import type { + EIP712Domain, + EIP712Message, + EIP712TypedData, + EIP712TypeProperty, + EIP712Types +} from 'eth-sig-util' + +type DomainFn = (chainId: number, contractAddress: string) => EIP712Domain + +function getDomainData( + contractName: string, + signatureVersion: string, + chainId: number, + contractAddress: string +): EIP712Domain { return { name: contractName, version: signatureVersion, @@ -18,38 +31,51 @@ function getDomainData (contractName, signatureVersion, chainId, contractAddress } } -domains.getSocialFeatureFactoryDomain = function (chainId, contractAddress) { +const getSocialFeatureFactoryDomain: DomainFn = (chainId, contractAddress) => { return getDomainData('Social Feature Factory', '1', chainId, contractAddress) } -domains.getUserFactoryDomain = function (chainId, contractAddress) { +const getUserFactoryDomain: DomainFn = (chainId, contractAddress) => { return getDomainData('User Factory', '1', chainId, contractAddress) }
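Reviewer note: with signatureSchemas converted to TypeScript, `domains`, `schemas`, and `generators` become typed named exports (the export objects are assembled further down in this diff). A short usage sketch of how the pieces compose, using only APIs shown in this file (`domains.getUserFactoryDomain`, `generators.getAddUserRequestData`, `getNonce`, and the `eth-sig-util` signing call the contract clients use elsewhere in this PR); the chain id, contract address, and private key are throwaway illustration values:

```typescript
import sigUtil from 'eth-sig-util'
import { domains, generators, getNonce } from './signatureSchemas'

// Throwaway values for illustration only.
const chainId = 1337
const contractAddress = '0x0000000000000000000000000000000000000001'
const privateKey = Buffer.alloc(32, 1) // never use a constant key outside tests

// A DomainFn binds a contract name and version to a chain and address.
const domain = domains.getUserFactoryDomain(chainId, contractAddress)
// -> { name: 'User Factory', version: '1', chainId, verifyingContract }

// A generator wraps a message with its schema into EIP712TypedData.
const typedData = generators.getAddUserRequestData(
  chainId,
  contractAddress,
  'handle', // bytes16 handle
  getNonce() // '0x' plus 32 random bytes in hex
)

const sig = sigUtil.signTypedData(privateKey, { data: typedData })
console.log(domain.name, sig)
```

The `DomainFn` alias is what lets every `domains.get*Domain` helper share one signature, so `getRequestData` can accept any of them as its `domainDataFn` argument.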
-domains.getTrackFactoryDomain = function (chainId, contractAddress) { +const getTrackFactoryDomain: DomainFn = (chainId, contractAddress) => { return getDomainData('Track Factory', '1', chainId, contractAddress) } -domains.getPlaylistFactoryDomain = function (chainId, contractAddress) { +const getPlaylistFactoryDomain: DomainFn = (chainId, contractAddress) => { return getDomainData('Playlist Factory', '1', chainId, contractAddress) } -domains.getUserLibraryFactoryDomain = function (chainId, contractAddress) { +const getUserLibraryFactoryDomain: DomainFn = (chainId, contractAddress) => { return getDomainData('User Library Factory', '1', chainId, contractAddress) } -domains.getIPLDBlacklistFactoryDomain = function (chainId, contractAddress) { +const getIPLDBlacklistFactoryDomain: DomainFn = (chainId, contractAddress) => { return getDomainData('IPLD Blacklist Factory', '1', chainId, contractAddress) } -domains.getUserReplicaSetManagerDomain = function (chainId, contractAddress) { - return getDomainData('User Replica Set Manager', '1', chainId, contractAddress) +const getUserReplicaSetManagerDomain: DomainFn = (chainId, contractAddress) => { + return getDomainData( + 'User Replica Set Manager', + '1', + chainId, + contractAddress + ) } -const schemas = {} +export const domains = { + getSocialFeatureFactoryDomain, + getUserFactoryDomain, + getTrackFactoryDomain, + getPlaylistFactoryDomain, + getUserLibraryFactoryDomain, + getIPLDBlacklistFactoryDomain, + getUserReplicaSetManagerDomain +} /* contract signing domain */ -schemas.domain = [ +const domain = [ { name: 'name', type: 'string' }, { name: 'version', type: 'string' }, { name: 'chainId', type: 'uint256' }, @@ -57,33 +83,33 @@ schemas.domain = [ ] /* user factory requests */ -schemas.addUserRequest = [ +const addUserRequest = [ { name: 'handle', type: 'bytes16' }, { name: 'nonce', type: 'bytes32' } ] /* rather than having a schema type for every update op, we have a type for each unique * structure */ -schemas.updateUserBytes32 = [ +const updateUserBytes32 = [ { name: 'userId', type: 'uint' }, { name: 'newValue', type: 'bytes32' }, { name: 'nonce', type: 'bytes32' } ] -schemas.updateUserString = [ +const updateUserString = [ { name: 'userId', type: 'uint' }, { name: 'newValue', type: 'string' }, { name: 'nonce', type: 'bytes32' } ] -schemas.updateUserBool = [ +const updateUserBool = [ { name: 'userId', type: 'uint' }, { name: 'newValue', type: 'bool' }, { name: 'nonce', type: 'bytes32' } ] /* track factory requests */ -schemas.addTrackRequest = [ +const addTrackRequest = [ { name: 'trackOwnerId', type: 'uint' }, { name: 'multihashDigest', type: 'bytes32' }, { name: 'multihashHashFn', type: 'uint8' }, @@ -91,7 +117,7 @@ schemas.addTrackRequest = [ { name: 'nonce', type: 'bytes32' } ] -schemas.updateTrackRequest = [ +const updateTrackRequest = [ { name: 'trackId', type: 'uint' }, { name: 'trackOwnerId', type: 'uint' }, { name: 'multihashDigest', type: 'bytes32' }, @@ -100,37 +126,37 @@ schemas.updateTrackRequest = [ { name: 'nonce', type: 'bytes32' } ] -schemas.deleteTrackRequest = [ +const deleteTrackRequest = [ { name: 'trackId', type: 'uint' }, { name: 'nonce', type: 'bytes32' } ] /* social features */ -schemas.addTrackRepostRequest = [ +const addTrackRepostRequest = [ { name: 'userId', type: 'uint' }, { name: 'trackId', type: 'uint' }, { name: 'nonce', type: 'bytes32' } ] -schemas.deleteTrackRepostRequest = schemas.addTrackRepostRequest +const deleteTrackRepostRequest = addTrackRepostRequest -schemas.addPlaylistRepostRequest = [ +const 
addPlaylistRepostRequest = [ { name: 'userId', type: 'uint' }, { name: 'playlistId', type: 'uint' }, { name: 'nonce', type: 'bytes32' } ] -schemas.deletePlaylistRepostRequest = schemas.addPlaylistRepostRequest +const deletePlaylistRepostRequest = addPlaylistRepostRequest -schemas.userFollowRequest = [ +const userFollowRequest = [ { name: 'followerUserId', type: 'uint' }, { name: 'followeeUserId', type: 'uint' }, { name: 'nonce', type: 'bytes32' } ] -schemas.deleteUserFollowRequest = schemas.userFollowRequest +const deleteUserFollowRequest = userFollowRequest -schemas.createPlaylistRequest = [ +const createPlaylistRequest = [ { name: 'playlistOwnerId', type: 'uint' }, { name: 'playlistName', type: 'string' }, { name: 'isPrivate', type: 'bool' }, @@ -139,83 +165,83 @@ schemas.createPlaylistRequest = [ { name: 'nonce', type: 'bytes32' } ] -schemas.deletePlaylistRequest = [ +const deletePlaylistRequest = [ { name: 'playlistId', type: 'uint' }, { name: 'nonce', type: 'bytes32' } ] -schemas.addPlaylistTrackRequest = [ +const addPlaylistTrackRequest = [ { name: 'playlistId', type: 'uint' }, { name: 'addedTrackId', type: 'uint' }, { name: 'nonce', type: 'bytes32' } ] -schemas.deletePlaylistTrackRequest = [ +const deletePlaylistTrackRequest = [ { name: 'playlistId', type: 'uint' }, { name: 'deletedTrackId', type: 'uint' }, { name: 'deletedTrackTimestamp', type: 'uint' }, { name: 'nonce', type: 'bytes32' } ] -schemas.orderPlaylistTracksRequest = [ +const orderPlaylistTracksRequest = [ { name: 'playlistId', type: 'uint' }, { name: 'trackIdsHash', type: 'bytes32' }, { name: 'nonce', type: 'bytes32' } ] -schemas.updatePlaylistPrivacyRequest = [ +const updatePlaylistPrivacyRequest = [ { name: 'playlistId', type: 'uint' }, { name: 'updatedPlaylistPrivacy', type: 'bool' }, { name: 'nonce', type: 'bytes32' } ] -schemas.updatePlaylistNameRequest = [ +const updatePlaylistNameRequest = [ { name: 'playlistId', type: 'uint' }, { name: 'updatedPlaylistName', type: 'string' }, { name: 'nonce', type: 'bytes32' } ] -schemas.updatePlaylistCoverPhotoRequest = [ +const updatePlaylistCoverPhotoRequest = [ { name: 'playlistId', type: 'uint' }, { name: 'playlistImageMultihashDigest', type: 'bytes32' }, { name: 'nonce', type: 'bytes32' } ] -schemas.updatePlaylistDescriptionRequest = [ +const updatePlaylistDescriptionRequest = [ { name: 'playlistId', type: 'uint' }, { name: 'playlistDescription', type: 'string' }, { name: 'nonce', type: 'bytes32' } ] -schemas.updatePlaylistUPCRequest = [ +const updatePlaylistUPCRequest = [ { name: 'playlistId', type: 'uint' }, { name: 'playlistUPC', type: 'bytes32' }, { name: 'nonce', type: 'bytes32' } ] -schemas.trackSaveRequest = [ +const trackSaveRequest = [ { name: 'userId', type: 'uint' }, { name: 'trackId', type: 'uint' }, { name: 'nonce', type: 'bytes32' } ] -schemas.deleteTrackSaveRequest = schemas.trackSaveRequest +const deleteTrackSaveRequest = trackSaveRequest -schemas.playlistSaveRequest = [ +const playlistSaveRequest = [ { name: 'userId', type: 'uint' }, { name: 'playlistId', type: 'uint' }, { name: 'nonce', type: 'bytes32' } ] -schemas.deletePlaylistSaveRequest = schemas.playlistSaveRequest +const deletePlaylistSaveRequest = playlistSaveRequest -schemas.addIPLDBlacklist = [ +const addIPLDBlacklist = [ { name: 'multihashDigest', type: 'bytes32' }, { name: 'nonce', type: 'bytes32' } ] // User replica set manager schemas -schemas.proposeAddOrUpdateContentNode = [ +const proposeAddOrUpdateContentNode = [ { name: 'cnodeSpId', type: 'uint' }, { name: 'cnodeDelegateOwnerWallet', 
type: 'address' }, { name: 'cnodeOwnerWallet', type: 'address' }, @@ -223,7 +249,7 @@ schemas.proposeAddOrUpdateContentNode = [ { name: 'nonce', type: 'bytes32' } ] -schemas.updateReplicaSet = [ +const updateReplicaSet = [ { name: 'userId', type: 'uint' }, { name: 'primaryId', type: 'uint' }, { name: 'secondaryIdsHash', type: 'bytes32' }, @@ -232,11 +258,52 @@ { name: 'nonce', type: 'bytes32' } ] -const generators = {} +export const schemas = { + domain, + addUserRequest, + updateUserBytes32, + updateUserString, + updateUserBool, + addTrackRequest, + updateTrackRequest, + deleteTrackRequest, + addTrackRepostRequest, + deleteTrackRepostRequest, + addPlaylistRepostRequest, + deletePlaylistRepostRequest, + userFollowRequest, + deleteUserFollowRequest, + createPlaylistRequest, + deletePlaylistRequest, + addPlaylistTrackRequest, + deletePlaylistTrackRequest, + orderPlaylistTracksRequest, + updatePlaylistPrivacyRequest, + updatePlaylistNameRequest, + updatePlaylistCoverPhotoRequest, + updatePlaylistDescriptionRequest, + updatePlaylistUPCRequest, + trackSaveRequest, + deleteTrackSaveRequest, + playlistSaveRequest, + deletePlaylistSaveRequest, + addIPLDBlacklist, + proposeAddOrUpdateContentNode, + updateReplicaSet +} + +type MessageSchema = readonly EIP712TypeProperty[] -function getRequestData (domainDataFn, chainId, contractAddress, messageTypeName, messageSchema, message) { +function getRequestData( + domainDataFn: DomainFn, + chainId: number, + contractAddress: string, + messageTypeName: string, + messageSchema: MessageSchema, + message: EIP712Message +): EIP712TypedData { const domainData = domainDataFn(chainId, contractAddress) - const types = { + const types: EIP712Types = { EIP712Domain: schemas.domain } types[messageTypeName] = messageSchema @@ -249,7 +316,12 @@ } /* User Factory Generators */ -generators.getAddUserRequestData = function (chainId, contractAddress, handle, nonce) { +const getAddUserRequestData = ( + chainId: number, + contractAddress: string, + handle: string, + nonce: string +) => { const message = { handle: handle, nonce: nonce @@ -264,7 +336,15 @@ generators.getAddUserRequestData = function (chainId, contractAddress, handle, n ) } -function _getUpdateUserRequestData (chainId, contractAddress, messageTypeName, schema, userId, newValue, nonce) { +function _getUpdateUserRequestData( + chainId: number, + contractAddress: string, + messageTypeName: string, + schema: MessageSchema, + userId: number, + newValue: unknown, + nonce: string +) { const message = { userId: userId, newValue: newValue, @@ -280,7 +360,21 @@ function _getUpdateUserRequestData (chainId, contractAddress, messageTypeName, s ) } -generators.getUpdateUserMultihashRequestData = function (chainId, contractAddress, userId, newValue, nonce) { +export type UserUpdateRequestFn = ( + chainId: number, + contractAddress: string, + userId: number, + newValue: unknown, + nonce: string +) => EIP712TypedData + +const getUpdateUserMultihashRequestData: UserUpdateRequestFn = ( + chainId, + contractAddress, + userId, + newValue, + nonce +) => { return _getUpdateUserRequestData( chainId, contractAddress, @@ -292,7 +386,13 @@ generators.getUpdateUserMultihashRequestData = function (chainId, contractAddres ) } -generators.getUpdateUserNameRequestData = function (chainId, contractAddress, userId, newValue, nonce) { +const getUpdateUserNameRequestData: UserUpdateRequestFn = ( + chainId, + contractAddress, + userId, +
newValue, + nonce +) => { return _getUpdateUserRequestData( chainId, contractAddress, @@ -304,7 +404,13 @@ generators.getUpdateUserNameRequestData = function (chainId, contractAddress, us ) } -generators.getUpdateUserLocationRequestData = function (chainId, contractAddress, userId, newValue, nonce) { +const getUpdateUserLocationRequestData: UserUpdateRequestFn = ( + chainId, + contractAddress, + userId, + newValue, + nonce +) => { return _getUpdateUserRequestData( chainId, contractAddress, @@ -316,7 +422,13 @@ generators.getUpdateUserLocationRequestData = function (chainId, contractAddress ) } -generators.getUpdateUserProfilePhotoRequestData = function (chainId, contractAddress, userId, newValue, nonce) { +const getUpdateUserProfilePhotoRequestData: UserUpdateRequestFn = ( + chainId, + contractAddress, + userId, + newValue, + nonce +) => { return _getUpdateUserRequestData( chainId, contractAddress, @@ -328,7 +440,13 @@ generators.getUpdateUserProfilePhotoRequestData = function (chainId, contractAdd ) } -generators.getUpdateUserCoverPhotoRequestData = function (chainId, contractAddress, userId, newValue, nonce) { +const getUpdateUserCoverPhotoRequestData: UserUpdateRequestFn = ( + chainId, + contractAddress, + userId, + newValue, + nonce +) => { return _getUpdateUserRequestData( chainId, contractAddress, @@ -340,7 +458,13 @@ generators.getUpdateUserCoverPhotoRequestData = function (chainId, contractAddre ) } -generators.getUpdateUserBioRequestData = function (chainId, contractAddress, userId, newValue, nonce) { +const getUpdateUserBioRequestData: UserUpdateRequestFn = ( + chainId, + contractAddress, + userId, + newValue, + nonce +) => { return _getUpdateUserRequestData( chainId, contractAddress, @@ -352,7 +476,13 @@ generators.getUpdateUserBioRequestData = function (chainId, contractAddress, use ) } -generators.getUpdateUserCreatorNodeRequestData = function (chainId, contractAddress, userId, newValue, nonce) { +const getUpdateUserCreatorNodeRequestData: UserUpdateRequestFn = ( + chainId, + contractAddress, + userId, + newValue, + nonce +) => { return _getUpdateUserRequestData( chainId, contractAddress, @@ -364,7 +494,13 @@ generators.getUpdateUserCreatorNodeRequestData = function (chainId, contractAddr ) } -generators.getUpdateUserCreatorRequestData = function (chainId, contractAddress, userId, newValue, nonce) { +const getUpdateUserCreatorRequestData: UserUpdateRequestFn = ( + chainId, + contractAddress, + userId, + newValue, + nonce +) => { return _getUpdateUserRequestData( chainId, contractAddress, @@ -376,7 +512,13 @@ generators.getUpdateUserCreatorRequestData = function (chainId, contractAddress, ) } -generators.getUpdateUserVerifiedRequestData = function (chainId, contractAddress, userId, newValue, nonce) { +const getUpdateUserVerifiedRequestData: UserUpdateRequestFn = ( + chainId, + contractAddress, + userId, + newValue, + nonce +) => { return _getUpdateUserRequestData( chainId, contractAddress, @@ -389,7 +531,15 @@ generators.getUpdateUserVerifiedRequestData = function (chainId, contractAddress } /* Track Factory Generators */ -generators.getAddTrackRequestData = function (chainId, contractAddress, trackOwnerId, multihashDigest, multihashHashFn, multihashSize, nonce) { +const getAddTrackRequestData = ( + chainId: number, + contractAddress: string, + trackOwnerId: number, + multihashDigest: string, + multihashHashFn: number, + multihashSize: number, + nonce: string +) => { const message = { trackOwnerId: trackOwnerId, multihashDigest: multihashDigest, @@ -407,7 +557,16 @@ 
generators.getAddTrackRequestData = function (chainId, contractAddress, trackOwn ) } -generators.getUpdateTrackRequestData = function (chainId, contractAddress, trackId, trackOwnerId, multihashDigest, multihashHashFn, multihashSize, nonce) { +const getUpdateTrackRequestData = ( + chainId: number, + contractAddress: string, + trackId: number, + trackOwnerId: number, + multihashDigest: string, + multihashHashFn: number, + multihashSize: number, + nonce: string +) => { const message = { trackId: trackId, trackOwnerId: trackOwnerId, @@ -426,7 +585,12 @@ generators.getUpdateTrackRequestData = function (chainId, contractAddress, track ) } -generators.getDeleteTrackRequestData = function (chainId, contractAddress, trackId, nonce) { +const getDeleteTrackRequestData = ( + chainId: number, + contractAddress: string, + trackId: number, + nonce: string +) => { const message = { trackId: trackId, nonce: nonce @@ -442,7 +606,13 @@ generators.getDeleteTrackRequestData = function (chainId, contractAddress, track } /* Social Feature Factory Generators */ -generators.getAddTrackRepostRequestData = function (chainId, contractAddress, userId, trackId, nonce) { +const getAddTrackRepostRequestData = ( + chainId: number, + contractAddress: string, + userId: number, + trackId: number, + nonce: string +) => { const message = { userId: userId, trackId: trackId, @@ -458,7 +628,13 @@ generators.getAddTrackRepostRequestData = function (chainId, contractAddress, us ) } -generators.getDeleteTrackRepostRequestData = function (chainId, contractAddress, userId, trackId, nonce) { +const getDeleteTrackRepostRequestData = ( + chainId: number, + contractAddress: string, + userId: number, + trackId: number, + nonce: string +) => { const message = { userId: userId, trackId: trackId, @@ -474,7 +650,13 @@ generators.getDeleteTrackRepostRequestData = function (chainId, contractAddress, ) } -generators.getAddPlaylistRepostRequestData = function (chainId, contractAddress, userId, playlistId, nonce) { +const getAddPlaylistRepostRequestData = ( + chainId: number, + contractAddress: string, + userId: number, + playlistId: number, + nonce: string +) => { const message = { userId: userId, playlistId: playlistId, @@ -490,7 +672,13 @@ generators.getAddPlaylistRepostRequestData = function (chainId, contractAddress, ) } -generators.getDeletePlaylistRepostRequestData = function (chainId, contractAddress, userId, playlistId, nonce) { +const getDeletePlaylistRepostRequestData = ( + chainId: number, + contractAddress: string, + userId: number, + playlistId: number, + nonce: string +) => { const message = { userId: userId, playlistId: playlistId, @@ -506,7 +694,13 @@ generators.getDeletePlaylistRepostRequestData = function (chainId, contractAddre ) } -generators.getUserFollowRequestData = function (chainId, contractAddress, followerUserId, followeeUserId, nonce) { +const getUserFollowRequestData = ( + chainId: number, + contractAddress: string, + followerUserId: number, + followeeUserId: number, + nonce: string +) => { const message = { followerUserId: followerUserId, followeeUserId: followeeUserId, @@ -522,7 +716,13 @@ generators.getUserFollowRequestData = function (chainId, contractAddress, follow ) } -generators.getDeleteUserFollowRequestData = function (chainId, contractAddress, followerUserId, followeeUserId, nonce) { +const getDeleteUserFollowRequestData = ( + chainId: number, + contractAddress: string, + followerUserId: number, + followeeUserId: number, + nonce: string +) => { const message = { followerUserId: followerUserId, 
followeeUserId: followeeUserId, @@ -538,7 +738,13 @@ generators.getDeleteUserFollowRequestData = function (chainId, contractAddress, ) } -generators.getTrackSaveRequestData = function (chainId, contractAddress, userId, trackId, nonce) { +const getTrackSaveRequestData = ( + chainId: number, + contractAddress: string, + userId: number, + trackId: number, + nonce: string +) => { const message = { userId: userId, trackId: trackId, @@ -555,7 +761,13 @@ generators.getTrackSaveRequestData = function (chainId, contractAddress, userId, ) } -generators.getDeleteTrackSaveRequestData = function (chainId, contractAddress, userId, trackId, nonce) { +const getDeleteTrackSaveRequestData = ( + chainId: number, + contractAddress: string, + userId: number, + trackId: number, + nonce: string +) => { const message = { userId: userId, trackId: trackId, @@ -572,7 +784,13 @@ generators.getDeleteTrackSaveRequestData = function (chainId, contractAddress, u ) } -generators.getPlaylistSaveRequestData = function (chainId, contractAddress, userId, playlistId, nonce) { +const getPlaylistSaveRequestData = ( + chainId: number, + contractAddress: string, + userId: number, + playlistId: number, + nonce: string +) => { const message = { userId: userId, playlistId: playlistId, @@ -589,7 +807,13 @@ generators.getPlaylistSaveRequestData = function (chainId, contractAddress, user ) } -generators.getDeletePlaylistSaveRequestData = function (chainId, contractAddress, userId, playlistId, nonce) { +const getDeletePlaylistSaveRequestData = ( + chainId: number, + contractAddress: string, + userId: number, + playlistId: number, + nonce: string +) => { const message = { userId: userId, playlistId: playlistId, @@ -611,7 +835,16 @@ generators.getDeletePlaylistSaveRequestData = function (chainId, contractAddress /* NOTE: Ensure the value for trackIds hash is generated using the following snippet prior to calling this generator function: * web3New.utils.soliditySha3(web3New.eth.abi.encodeParameter('uint[]', trackIds)) */ -generators.getCreatePlaylistRequestData = function (chainId, contractAddress, playlistOwnerId, playlistName, isPrivate, isAlbum, trackIdsHash, nonce) { +const getCreatePlaylistRequestData = ( + chainId: number, + contractAddress: string, + playlistOwnerId: number, + playlistName: string, + isPrivate: boolean, + isAlbum: boolean, + trackIdsHash: string | null, + nonce: string +) => { const message = { playlistOwnerId: playlistOwnerId, playlistName: playlistName, @@ -631,7 +864,12 @@ generators.getCreatePlaylistRequestData = function (chainId, contractAddress, pl ) } -generators.getDeletePlaylistRequestData = function (chainId, contractAddress, playlistId, nonce) { +const getDeletePlaylistRequestData = ( + chainId: number, + contractAddress: string, + playlistId: number, + nonce: string +) => { const message = { playlistId: playlistId, nonce: nonce @@ -646,7 +884,13 @@ generators.getDeletePlaylistRequestData = function (chainId, contractAddress, pl ) } -generators.getAddPlaylistTrackRequestData = function (chainId, contractAddress, playlistId, addedTrackId, nonce) { +const getAddPlaylistTrackRequestData = ( + chainId: number, + contractAddress: string, + playlistId: number, + addedTrackId: number, + nonce: string +) => { const message = { playlistId: playlistId, addedTrackId: addedTrackId, @@ -663,7 +907,14 @@ generators.getAddPlaylistTrackRequestData = function (chainId, contractAddress, ) } -generators.getDeletePlaylistTrackRequestData = function (chainId, contractAddress, playlistId, deletedTrackId, deletedTrackTimestamp, 
nonce) { +const getDeletePlaylistTrackRequestData = ( + chainId: number, + contractAddress: string, + playlistId: number, + deletedTrackId: number, + deletedTrackTimestamp: number, + nonce: string +) => { const message = { playlistId: playlistId, deletedTrackId: deletedTrackId, @@ -681,7 +932,13 @@ generators.getDeletePlaylistTrackRequestData = function (chainId, contractAddres ) } -generators.getOrderPlaylistTracksRequestData = function (chainId, contractAddress, playlistId, trackIdsHash, nonce) { +const getOrderPlaylistTracksRequestData = ( + chainId: number, + contractAddress: string, + playlistId: number, + trackIdsHash: string | null, + nonce: string +) => { const message = { playlistId: playlistId, trackIdsHash: trackIdsHash, @@ -698,7 +955,13 @@ generators.getOrderPlaylistTracksRequestData = function (chainId, contractAddres ) } -generators.getUpdatePlaylistNameRequestData = function (chainId, contractAddress, playlistId, updatedPlaylistName, nonce) { +const getUpdatePlaylistNameRequestData = ( + chainId: number, + contractAddress: string, + playlistId: number, + updatedPlaylistName: string, + nonce: string +) => { const message = { playlistId: playlistId, updatedPlaylistName: updatedPlaylistName, @@ -715,7 +978,13 @@ generators.getUpdatePlaylistNameRequestData = function (chainId, contractAddress ) } -generators.getUpdatePlaylistPrivacyRequestData = function (chainId, contractAddress, playlistId, updatedPlaylistPrivacy, nonce) { +const getUpdatePlaylistPrivacyRequestData = ( + chainId: number, + contractAddress: string, + playlistId: number, + updatedPlaylistPrivacy: string, + nonce: string +) => { const message = { playlistId: playlistId, updatedPlaylistPrivacy: updatedPlaylistPrivacy, @@ -732,7 +1001,13 @@ generators.getUpdatePlaylistPrivacyRequestData = function (chainId, contractAddr ) } -generators.getUpdatePlaylistCoverPhotoRequestData = function (chainId, contractAddress, playlistId, playlistImageMultihashDigest, nonce) { +const getUpdatePlaylistCoverPhotoRequestData = ( + chainId: number, + contractAddress: string, + playlistId: number, + playlistImageMultihashDigest: string, + nonce: string +) => { const message = { playlistId: playlistId, playlistImageMultihashDigest: playlistImageMultihashDigest, @@ -745,10 +1020,17 @@ generators.getUpdatePlaylistCoverPhotoRequestData = function (chainId, contractA contractAddress, 'UpdatePlaylistCoverPhotoRequest', schemas.updatePlaylistCoverPhotoRequest, - message) + message + ) } -generators.getUpdatePlaylistUPCRequestData = function (chainId, contractAddress, playlistId, playlistUPC, nonce) { +const getUpdatePlaylistUPCRequestData = ( + chainId: number, + contractAddress: string, + playlistId: number, + playlistUPC: string, + nonce: string +) => { const message = { playlistId: playlistId, playlistUPC: playlistUPC, @@ -761,10 +1043,17 @@ generators.getUpdatePlaylistUPCRequestData = function (chainId, contractAddress, contractAddress, 'UpdatePlaylistUPCRequest', schemas.updatePlaylistUPCRequest, - message) + message + ) } -generators.getUpdatePlaylistDescriptionRequestData = function (chainId, contractAddress, playlistId, playlistDescription, nonce) { +const getUpdatePlaylistDescriptionRequestData = ( + chainId: number, + contractAddress: string, + playlistId: number, + playlistDescription: string, + nonce: string +) => { const message = { playlistId: playlistId, playlistDescription: playlistDescription, @@ -777,10 +1066,16 @@ generators.getUpdatePlaylistDescriptionRequestData = function (chainId, contract contractAddress, 
'UpdatePlaylistDescriptionRequest', schemas.updatePlaylistDescriptionRequest, - message) + message + ) } -generators.addIPLDToBlacklistRequestData = function (chainId, contractAddress, multihashDigest, nonce) { +const addIPLDToBlacklistRequestData = ( + chainId: number, + contractAddress: string, + multihashDigest: string, + nonce: string +) => { const message = { multihashDigest: multihashDigest, nonce: nonce @@ -796,15 +1091,15 @@ generators.addIPLDToBlacklistRequestData = function (chainId, contractAddress, m } /* User Replica Set Manager Generators */ -generators.getProposeAddOrUpdateContentNodeRequestData = function ( - chainId, - contractAddress, - cnodeSpId, - cnodeDelegateOwnerWallet, - cnodeOwnerWallet, - proposerSpId, - nonce -) { +const getProposeAddOrUpdateContentNodeRequestData = ( + chainId: number, + contractAddress: string, + cnodeSpId: number, + cnodeDelegateOwnerWallet: string, + cnodeOwnerWallet: string, + proposerSpId: number, + nonce: string +) => { const message = { cnodeSpId, cnodeDelegateOwnerWallet, @@ -822,16 +1117,16 @@ generators.getProposeAddOrUpdateContentNodeRequestData = function ( ) } -generators.getUpdateReplicaSetRequestData = function ( - chainId, - contractAddress, - userId, - primaryId, - secondaryIdsHash, - oldPrimaryId, - oldSecondaryIdsHash, - nonce -) { +const getUpdateReplicaSetRequestData = ( + chainId: number, + contractAddress: string, + userId: number, + primaryId: number, + secondaryIdsHash: string | null, + oldPrimaryId: number, + oldSecondaryIdsHash: string | null, + nonce: string +) => { const message = { userId, primaryId, @@ -850,12 +1145,53 @@ generators.getUpdateReplicaSetRequestData = function ( ) } +export const generators = { + getUpdateUserMultihashRequestData, + getAddUserRequestData, + getUpdateUserNameRequestData, + getUpdateUserLocationRequestData, + getUpdateUserProfilePhotoRequestData, + getUpdateUserCoverPhotoRequestData, + getUpdateUserBioRequestData, + getUpdateUserCreatorNodeRequestData, + getUpdateUserCreatorRequestData, + getUpdateUserVerifiedRequestData, + getAddTrackRequestData, + getUpdateTrackRequestData, + getDeleteTrackRequestData, + getAddTrackRepostRequestData, + getDeleteTrackRepostRequestData, + getAddPlaylistRepostRequestData, + getDeletePlaylistRepostRequestData, + getUserFollowRequestData, + getDeleteUserFollowRequestData, + getTrackSaveRequestData, + getDeleteTrackSaveRequestData, + getPlaylistSaveRequestData, + getDeletePlaylistSaveRequestData, + getCreatePlaylistRequestData, + getDeletePlaylistRequestData, + getAddPlaylistTrackRequestData, + getDeletePlaylistTrackRequestData, + getOrderPlaylistTracksRequestData, + getUpdatePlaylistNameRequestData, + getUpdatePlaylistPrivacyRequestData, + getUpdatePlaylistCoverPhotoRequestData, + getUpdatePlaylistUPCRequestData, + getUpdatePlaylistDescriptionRequestData, + addIPLDToBlacklistRequestData, + getProposeAddOrUpdateContentNodeRequestData, + getUpdateReplicaSetRequestData +} + +type NodeCrypto = { randomBytes: (size: number) => Buffer } + /** Return a secure random hex string of nChar length in a browser-compatible way * Taken from https://stackoverflow.com/questions/37378237/how-to-generate-a-random-token-of-32-bit-in-javascript */ -function browserRandomHash (nChar) { +function browserRandomHash(nChar: number) { // convert number of characters to number of bytes - const nBytes = Math.ceil(nChar = (+nChar || 8) / 2) + const nBytes = Math.ceil((nChar = (+nChar || 8) / 2)) // create a typed array of that many bytes const u = new Uint8Array(nBytes) @@ -864,7 
+1200,7 @@ function browserRandomHash (nChar) { window.crypto.getRandomValues(u) // convert it to an Array of Strings (e.g. '01', 'AF', ..) - const zpad = function (str) { + const zpad = function (str: string) { return '00'.slice(str.length) + str } const a = Array.prototype.map.call(u, function (x) { @@ -882,20 +1218,19 @@ function browserRandomHash (nChar) { // We need to detect whether the nodejs crypto module is available to determine how to // generate secure random numbers below -let nodeCrypto +let nodeCrypto: NodeCrypto | null + try { nodeCrypto = require('crypto') } catch (e) { nodeCrypto = null } -function getNonce () { +export function getNonce() { // detect whether we are in browser or in nodejs, and use the correct csprng if (typeof window === 'undefined' || window === null) { - return '0x' + nodeCrypto.randomBytes(32).toString('hex') + return '0x' + (nodeCrypto as NodeCrypto).randomBytes(32).toString('hex') } else { return '0x' + browserRandomHash(64) } } - -module.exports = { domains, schemas, generators, getNonce } diff --git a/libs/package-lock.json b/libs/package-lock.json index afce0645ce6..d12bea09078 100644 --- a/libs/package-lock.json +++ b/libs/package-lock.json @@ -1,6 +1,6 @@ { "name": "@audius/libs", - "version": "1.2.117", + "version": "1.2.118", "lockfileVersion": 1, "requires": true, "dependencies": { @@ -4803,11 +4803,20 @@ "resolved": "https://registry.npmjs.org/borsh/-/borsh-0.4.0.tgz", "integrity": "sha512-aX6qtLya3K0AkT66CmYWCCDr77qsE9arV05OmdFpmat9qu8Pg9J5tBUPDztAW5fNh/d/MyVG/OYziP52Ndzx1g==", "requires": { + "@types/bn.js": "^4.11.5", "bn.js": "^5.0.0", "bs58": "^4.0.0", "text-encoding-utf-8": "^1.0.2" }, "dependencies": { + "@types/bn.js": { + "version": "4.11.6", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-4.11.6.tgz", + "integrity": "sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg==", + "requires": { + "@types/node": "*" + } + }, "bn.js": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/bn.js/-/bn.js-5.2.0.tgz", @@ -6709,12 +6718,23 @@ "resolved": "https://registry.npmjs.org/ethereumjs-util/-/ethereumjs-util-6.2.1.tgz", "integrity": "sha512-W2Ktez4L01Vexijrm5EB6w7dg4n/TgpoYU4avuT5T3Vmnw/eCRtiBrJfQYS/DCSvDIOLn2k57GcHdeBcgVxAqw==", "requires": { + "@types/bn.js": "^4.11.3", "bn.js": "^4.11.0", "create-hash": "^1.1.2", "elliptic": "^6.5.2", "ethereum-cryptography": "^0.1.3", "ethjs-util": "0.1.6", "rlp": "^2.2.3" + }, + "dependencies": { + "@types/bn.js": { + "version": "4.11.6", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-4.11.6.tgz", + "integrity": "sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg==", + "requires": { + "@types/node": "*" + } + } } }, "ethereumjs-wallet": { @@ -12423,6 +12443,7 @@ "resolved": "https://registry.npmjs.org/web3-core/-/web3-core-1.7.1.tgz", "integrity": "sha512-HOyDPj+4cNyeNPwgSeUkhtS0F+Pxc2obcm4oRYPW5ku6jnTO34pjaij0us+zoY3QEusR8FfAKVK1kFPZnS7Dzw==", "requires": { + "@types/bn.js": "^4.11.5", "@types/node": "^12.12.6", "bignumber.js": "^9.0.0", "web3-core-helpers": "1.7.1", @@ -12431,6 +12452,14 @@ "web3-utils": "1.7.1" }, "dependencies": { + "@types/bn.js": { + "version": "4.11.6", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-4.11.6.tgz", + "integrity": "sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg==", + "requires": { + "@types/node": "*" + } + }, "@types/node": { "version": "12.20.47", "resolved": 
"https://registry.npmjs.org/@types/node/-/node-12.20.47.tgz", @@ -12845,6 +12874,7 @@ "resolved": "https://registry.npmjs.org/web3-eth-contract/-/web3-eth-contract-1.7.1.tgz", "integrity": "sha512-HpnbkPYkVK3lOyos2SaUjCleKfbF0SP3yjw7l551rAAi5sIz/vwlEzdPWd0IHL7ouxXbO0tDn7jzWBRcD3sTbA==", "requires": { + "@types/bn.js": "^4.11.5", "web3-core": "1.7.1", "web3-core-helpers": "1.7.1", "web3-core-method": "1.7.1", @@ -12854,6 +12884,14 @@ "web3-utils": "1.7.1" }, "dependencies": { + "@types/bn.js": { + "version": "4.11.6", + "resolved": "https://registry.npmjs.org/@types/bn.js/-/bn.js-4.11.6.tgz", + "integrity": "sha512-pqr857jrp2kPuO9uRjZ3PwnJTjoQy+fcdxvBTvHm6dkmEL9q+hDD/2j/0ELOBPtPnS8LjCX0gI9nbl8lVkadpg==", + "requires": { + "@types/node": "*" + } + }, "ethereumjs-util": { "version": "7.1.4", "resolved": "https://registry.npmjs.org/ethereumjs-util/-/ethereumjs-util-7.1.4.tgz", diff --git a/libs/package.json b/libs/package.json index 151eea12d9a..c1a3fa0fc89 100644 --- a/libs/package.json +++ b/libs/package.json @@ -1,6 +1,6 @@ { "name": "@audius/libs", - "version": "1.2.117", + "version": "1.2.118", "description": "", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -62,6 +62,7 @@ "lodash": "4.17.21", "node-localstorage": "^1.3.1", "proper-url-join": "1.2.0", + "safe-buffer": "5.2.1", "secp256k1": "4.0.2", "semver": "6.3.0", "web3": "1.7.1" diff --git a/libs/src/index.js b/libs/src/index.js index 26de2fc99da..8c683ba8901 100644 --- a/libs/src/index.js +++ b/libs/src/index.js @@ -5,7 +5,7 @@ const { SolanaAudiusData } = require('./services/solanaAudiusData/index') const { Web3Manager } = require('./services/web3Manager') const { EthContracts } = require('./services/ethContracts') const SolanaWeb3Manager = require('./services/solanaWeb3Manager/index') -const AudiusContracts = require('./services/dataContracts/index') +const { AudiusContracts } = require('./services/dataContracts') const { IdentityService } = require('./services/identity') const { Comstock } = require('./services/comstock') const { Hedgehog } = require('./services/hedgehog') diff --git a/libs/src/services/contracts/ProviderSelection.ts b/libs/src/services/contracts/ProviderSelection.ts index ffecbeee385..044cbabd2e6 100644 --- a/libs/src/services/contracts/ProviderSelection.ts +++ b/libs/src/services/contracts/ProviderSelection.ts @@ -1,7 +1,7 @@ import Web3 from 'web3' import { ServiceSelection } from '../../service-selection' +import type { EthWeb3Manager } from '../ethWeb3Manager' import type { Web3Manager } from '../web3Manager' -import type { ContractClient } from './ContractClient' /** * This class provides the logic to select a healthy gateway @@ -24,7 +24,7 @@ export class ProviderSelection extends ServiceSelection { * * @param client object used for making transaction calls */ - override async select(client: ContractClient) { + override async select(client: { web3Manager: Web3Manager | EthWeb3Manager }) { const web3Manager = client.web3Manager as Web3Manager const filteredServices = this.filterOutKnownUnhealthy( await this.getServices() diff --git a/libs/src/services/creatorNode/CreatorNodeSelection.ts b/libs/src/services/creatorNode/CreatorNodeSelection.ts index dc394039a33..5a69b864128 100644 --- a/libs/src/services/creatorNode/CreatorNodeSelection.ts +++ b/libs/src/services/creatorNode/CreatorNodeSelection.ts @@ -65,7 +65,7 @@ interface Decision { export class CreatorNodeSelection extends ServiceSelection { override decisionTree: Decision[] - currentVersion: string = '' + currentVersion: string | null = '' 
ethContracts: EthContracts creatorNode: CreatorNode numberOfNodes: number @@ -381,7 +381,7 @@ export class CreatorNodeSelection extends ServiceSelection { if (resp.response) { const isUp = resp.response.status === 200 const versionIsUpToDate = this.ethContracts.hasSameMajorAndMinorVersion( - this.currentVersion, + this.currentVersion as string, resp.response.data.data.version ) const { storagePathSize, storagePathUsed, maxStorageUsedPercent } = diff --git a/libs/src/services/dataContracts/index.js b/libs/src/services/dataContracts/AudiusContracts.ts similarity index 55% rename from libs/src/services/dataContracts/index.js rename to libs/src/services/dataContracts/AudiusContracts.ts index 4f32699d495..8c170e02dd6 100644 --- a/libs/src/services/dataContracts/index.js +++ b/libs/src/services/dataContracts/AudiusContracts.ts @@ -1,25 +1,37 @@ -const { Utils } = require('../../utils') +import { Utils, Logger } from '../../utils' // load classes wrapping contracts -const RegistryClient = require('./registryClient') -const UserFactoryClient = require('./userFactoryClient') -const TrackFactoryClient = require('./trackFactoryClient') -const SocialFeatureFactoryClient = require('./socialFeatureFactoryClient') -const PlaylistFactoryClient = require('./playlistFactoryClient') -const UserLibraryFactoryClient = require('./userLibraryFactoryClient') -const IPLDBlacklistFactoryClient = require('./IPLDBlacklistFactoryClient') -const UserReplicaSetManagerClient = require('./userReplicaSetManagerClient') +import { RegistryClient } from './RegistryClient' +import { UserFactoryClient } from './UserFactoryClient' +import { TrackFactoryClient } from './TrackFactoryClient' +import { SocialFeatureFactoryClient } from './SocialFeatureFactoryClient' +import { PlaylistFactoryClient } from './PlaylistFactoryClient' +import { UserLibraryFactoryClient } from './UserLibraryFactoryClient' +import { IPLDBlacklistFactoryClient } from './IPLDBlacklistFactoryClient' +import { UserReplicaSetManagerClient } from './UserReplicaSetManagerClient' +import type { Web3Manager } from '../web3Manager' +import type { ContractClient } from '../contracts/ContractClient' // Make sure the json file exists before importing because it could silently fail // import data contract ABI's const RegistryABI = Utils.importDataContractABI('Registry.json').abi const UserFactoryABI = Utils.importDataContractABI('UserFactory.json').abi const TrackFactoryABI = Utils.importDataContractABI('TrackFactory.json').abi -const SocialFeatureFactoryABI = Utils.importDataContractABI('SocialFeatureFactory.json').abi -const PlaylistFactoryABI = Utils.importDataContractABI('PlaylistFactory.json').abi -const UserLibraryFactoryABI = Utils.importDataContractABI('UserLibraryFactory.json').abi -const IPLDBlacklistFactoryABI = Utils.importDataContractABI('IPLDBlacklistFactory.json').abi -const UserReplicaSetManagerABI = Utils.importDataContractABI('UserReplicaSetManager.json').abi +const SocialFeatureFactoryABI = Utils.importDataContractABI( + 'SocialFeatureFactory.json' +).abi +const PlaylistFactoryABI = Utils.importDataContractABI( + 'PlaylistFactory.json' +).abi +const UserLibraryFactoryABI = Utils.importDataContractABI( + 'UserLibraryFactory.json' +).abi +const IPLDBlacklistFactoryABI = Utils.importDataContractABI( + 'IPLDBlacklistFactory.json' +).abi +const UserReplicaSetManagerABI = Utils.importDataContractABI( + 'UserReplicaSetManager.json' +).abi // define contract registry keys const UserFactoryRegistryKey = 'UserFactory' @@ -30,8 +42,29 @@ const 
UserLibraryFactoryRegistryKey = 'UserLibraryFactory' const IPLDBlacklistFactoryRegistryKey = 'IPLDBlacklistFactory' const UserReplicaSetManagerRegistryKey = 'UserReplicaSetManager' -class AudiusContracts { - constructor (web3Manager, registryAddress, isServer, logger = console) { +export class AudiusContracts { + web3Manager: Web3Manager + registryAddress: string + isServer: boolean + logger: Logger + RegistryClient: RegistryClient + UserFactoryClient: UserFactoryClient + TrackFactoryClient: TrackFactoryClient + SocialFeatureFactoryClient: SocialFeatureFactoryClient + PlaylistFactoryClient: PlaylistFactoryClient + UserLibraryFactoryClient: UserLibraryFactoryClient + IPLDBlacklistFactoryClient: IPLDBlacklistFactoryClient + contractClients: ContractClient[] + UserReplicaSetManagerClient: UserReplicaSetManagerClient | undefined | null + contracts: Record<string, string> | undefined + contractAddresses: Record<string, string> | undefined + + constructor( + web3Manager: Web3Manager, + registryAddress: string, + isServer: boolean, + logger = console + ) { this.web3Manager = web3Manager this.registryAddress = registryAddress this.isServer = isServer @@ -42,7 +75,8 @@ class AudiusContracts { RegistryABI, this.registryAddress ) - this.getRegistryAddressForContract = this.getRegistryAddressForContract.bind(this) + this.getRegistryAddressForContract = + this.getRegistryAddressForContract.bind(this) this.UserFactoryClient = new UserFactoryClient( this.web3Manager, @@ -102,9 +136,11 @@ ] } - async init () { + async init() { if (this.isServer) { - await Promise.all(this.contractClients.map(client => client.init())) + await Promise.all( + this.contractClients.map(async (client) => await client.init()) + ) await this.initUserReplicaSetManagerClient() } } @@ -112,11 +148,12 @@ // Special case initialization flow for UserReplicaSetManagerClient backwards compatibility // Until the contract is deployed and added to the data contract registry, replica set // operations will flow through the existing UserFactory - async initUserReplicaSetManagerClient (selectNewEndpointOnRetry = false) { + async initUserReplicaSetManagerClient() { try { if ( this.UserReplicaSetManagerClient && - this.UserReplicaSetManagerClient._contractAddress !== '0x0000000000000000000000000000000000000000' + this.UserReplicaSetManagerClient._contractAddress !== + '0x0000000000000000000000000000000000000000' ) { return } @@ -128,17 +165,27 @@ this.getRegistryAddressForContract, this.logger ) - await this.UserReplicaSetManagerClient.init(selectNewEndpointOnRetry) - if (this.UserReplicaSetManagerClient._contractAddress === '0x0000000000000000000000000000000000000000') { - throw new Error(`Failed retrieve address for ${this.UserReplicaSetManagerClient.contractRegistryKey}`) + await this.UserReplicaSetManagerClient.init() + if ( + this.UserReplicaSetManagerClient._contractAddress === + '0x0000000000000000000000000000000000000000' + ) { + throw new Error( + `Failed to retrieve address for ${this.UserReplicaSetManagerClient.contractRegistryKey}` + ) } - const seedComplete = await this.UserReplicaSetManagerClient.getSeedComplete() + const seedComplete = + await this.UserReplicaSetManagerClient.getSeedComplete() if (!seedComplete) { throw new Error('UserReplicaSetManager pending seed operation') } } catch (e) { // Nullify failed attempt to initialize - console.log(`Failed to initialize UserReplicaSetManagerClient with error ${e.message}`) + console.log( + `Failed to initialize UserReplicaSetManagerClient with
error ${ + (e as Error).message + }` + ) this.UserReplicaSetManagerClient = null } } @@ -150,31 +197,38 @@ class AudiusContracts { * Refreshes cache if cached value is empty or zero address. * Value is empty during first time call, and zero if call is made before contract is deployed, * since Registry sets default value of all contract keys to zero address if not registered. - * @param {string} contractName registry key of contract + * @param contractName registry key of contract */ - async getRegistryAddressForContract (contractName) { + async getRegistryAddressForContract(contractName: string) { // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Object_initializer#Computed_property_names - this.contracts = this.contracts || { [this.registryAddress]: 'registry' } - this.contractAddresses = this.contractAddresses || { registry: this.registryAddress } + this.contracts = this.contracts ?? { [this.registryAddress]: 'registry' } + this.contractAddresses = this.contractAddresses ?? { + registry: this.registryAddress + } - if (!this.contractAddresses[contractName] || Utils.isZeroAddress(this.contractAddresses[contractName])) { - const address = await this.RegistryClient.getContract(contractName) + if ( + !this.contractAddresses[contractName] || + Utils.isZeroAddress(this.contractAddresses[contractName] as string) + ) { + const address = (await this.RegistryClient.getContract( + contractName + )) as string this.contracts[address] = contractName this.contractAddresses[contractName] = address } - return this.contractAddresses[contractName] + return this.contractAddresses[contractName] as string } - async getRegistryContractForAddress (address) { + async getRegistryContractForAddress(address: string) { if (!this.contracts) { throw new Error('No contracts found. Have you called init() yet?') } const contractRegistryKey = this.contracts[address] if (!contractRegistryKey) { - throw new Error(`No registry contract found for contract address ${address}`) + throw new Error( + `No registry contract found for contract address ${address}` + ) } return contractRegistryKey } } - -module.exports = AudiusContracts diff --git a/libs/src/services/dataContracts/IPLDBlacklistFactoryClient.js b/libs/src/services/dataContracts/IPLDBlacklistFactoryClient.js deleted file mode 100644 index 1ba3e27a6e1..00000000000 --- a/libs/src/services/dataContracts/IPLDBlacklistFactoryClient.js +++ /dev/null @@ -1,49 +0,0 @@ -const { ContractClient } = require('../contracts/ContractClient') -const signatureSchemas = require('../../../data-contracts/signatureSchemas') -const sigUtil = require('eth-sig-util') -const BufferSafe = require('safe-buffer').Buffer - -class IPLDBlacklistFactoryClient extends ContractClient { - async addIPLDToBlacklist (multihashDigest, privateKey = null) { - const [nonce, sig] = await this.getUpdateNonceAndSig( - signatureSchemas.generators.addIPLDToBlacklistRequestData, - multihashDigest, - privateKey - ) - const method = await this.getMethod('addIPLDToBlacklist', - multihashDigest, - nonce, - sig - ) - - const receipt = await method.send({ from: this.web3Manager.getWalletAddress(), gas: 200000 }) - return receipt - } - - /* ------- HELPERS ------- */ - - /** - * Gets a nonce and generates a signature for the given function. Private key is optional and - * will use that private key to create the signature. Otherwise the web3Manager private key - * will be used. 
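The helper documented here (deleted below in its JS form and reintroduced in the TS rewrite that follows) always runs the same sequence: mint a nonce, build an EIP-712 payload through a generator function, then sign it, either locally with `eth-sig-util` when a private key is supplied, or through `web3Manager` otherwise. A minimal sketch of just that signing branch; `signWithManager` is a stand-in for `web3Manager.signTypedData`, not the real manager wiring:

```typescript
import sigUtil, { EIP712TypedData } from 'eth-sig-util'
import { Buffer as SafeBuffer } from 'safe-buffer'

// Sketch of the two signing paths shared by the getUpdateNonceAndSig
// helpers in this diff. `signWithManager` stands in for
// web3Manager.signTypedData.
async function signUpdate(
  signatureData: EIP712TypedData,
  signWithManager: (data: EIP712TypedData) => Promise<string>,
  privateKey?: string | null
): Promise<string> {
  if (privateKey) {
    // 64-char hex string -> 32-byte key; the cast bridges safe-buffer's
    // Buffer type to Node's, exactly as the diff does
    return sigUtil.signTypedData(
      SafeBuffer.from(privateKey, 'hex') as unknown as Buffer,
      { data: signatureData }
    )
  }
  return await signWithManager(signatureData)
}
```

Signing locally lets callers that hold a wallet's key (the identity service, for instance) produce signatures directly, while the default path keeps key handling inside `web3Manager`.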
- * @param {Object} generatorFn signature scheme object function - * @param {number} userId blockchain userId - * @param {string} privateKey optional. if this is passed in, the signature will be from - * this private key. the type is a 64 character hex string - */ - async getUpdateNonceAndSig (generatorFn, multihashDigest, privateKey) { - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const signatureData = generatorFn(chainId, contractAddress, multihashDigest, nonce) - let sig - if (privateKey) { - sig = sigUtil.signTypedData(BufferSafe.from(privateKey, 'hex'), { data: signatureData }) - } else { - sig = await this.web3Manager.signTypedData(signatureData) - } - return [nonce, sig] - } -} - -module.exports = IPLDBlacklistFactoryClient diff --git a/libs/src/services/dataContracts/IPLDBlacklistFactoryClient.ts b/libs/src/services/dataContracts/IPLDBlacklistFactoryClient.ts new file mode 100644 index 00000000000..6f909313ac7 --- /dev/null +++ b/libs/src/services/dataContracts/IPLDBlacklistFactoryClient.ts @@ -0,0 +1,73 @@ +import { ContractClient } from '../contracts/ContractClient' +import * as signatureSchemas from '../../../data-contracts/signatureSchemas' +import sigUtil, { EIP712TypedData } from 'eth-sig-util' +import { Buffer as SafeBuffer } from 'safe-buffer' +import type { Web3Manager } from '../web3Manager' + +type GeneratorFn = ( + chainId: number, + contractAddress: string, + multihashDigest: string, + nonce: string +) => EIP712TypedData + +export class IPLDBlacklistFactoryClient extends ContractClient { + async addIPLDToBlacklist(multihashDigest: string, privateKey = null) { + const [nonce, sig] = await this.getUpdateNonceAndSig( + signatureSchemas.generators.addIPLDToBlacklistRequestData, + multihashDigest, + privateKey + ) + const method = await this.getMethod( + 'addIPLDToBlacklist', + multihashDigest, + nonce, + sig + ) + + const receipt = await method.send({ + from: this.web3Manager.getWalletAddress(), + gas: 200000 + }) + return receipt + } + + /* ------- HELPERS ------- */ + + /** + * Gets a nonce and generates a signature for the given function. Private key is optional and + * will use that private key to create the signature. Otherwise the web3Manager private key + * will be used. + * @param generatorFn signature scheme object function + * @param multihashDigest multihash digest to add to the blacklist + * @param privateKey optional. if this is passed in, the signature will be from + * this private key. 
the type is a 64 character hex string + */ + async getUpdateNonceAndSig( + generatorFn: GeneratorFn, + multihashDigest: string, + privateKey: string | null + ) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = generatorFn( + chainId, + contractAddress, + multihashDigest, + nonce + ) + let sig + if (privateKey) { + sig = sigUtil.signTypedData( + SafeBuffer.from(privateKey, 'hex') as unknown as Buffer, + { + data: signatureData + } + ) + } else { + sig = await (this.web3Manager as Web3Manager).signTypedData(signatureData) + } + return [nonce, sig] + } +} diff --git a/libs/src/services/dataContracts/PlaylistFactoryClient.ts b/libs/src/services/dataContracts/PlaylistFactoryClient.ts new file mode 100644 index 00000000000..3b889ff50ca --- /dev/null +++ b/libs/src/services/dataContracts/PlaylistFactoryClient.ts @@ -0,0 +1,370 @@ +import { ContractClient } from '../contracts/ContractClient' +import * as signatureSchemas from '../../../data-contracts/signatureSchemas' +import type { Web3Manager } from '../web3Manager' + +const MAX_PLAYLIST_LENGTH = 199 + +export class PlaylistFactoryClient extends ContractClient { + override web3Manager!: Web3Manager + /* ------- SETTERS ------- */ + + async createPlaylist( + userId: number, + playlistName: string, + isPrivate: boolean, + isAlbum: boolean, + trackIds: number[] + ) { + if (!Array.isArray(trackIds) || trackIds.length > MAX_PLAYLIST_LENGTH) { + throw new Error( + `Cannot create playlist - trackIds must be array with length <= ${MAX_PLAYLIST_LENGTH}` + ) + } + + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const trackIdsHash = this.web3Manager + .getWeb3() + .utils.soliditySha3( + this.web3Manager.getWeb3().eth.abi.encodeParameter('uint[]', trackIds) + ) + const signatureData = + signatureSchemas.generators.getCreatePlaylistRequestData( + chainId, + contractAddress, + userId, + playlistName, + isPrivate, + isAlbum, + trackIdsHash, + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + + const method = await this.getMethod( + 'createPlaylist', + userId, + playlistName, + isPrivate, + isAlbum, + trackIds, + nonce, + sig + ) + + const tx = await this.web3Manager.sendTransaction( + method, + this.contractRegistryKey, + contractAddress + ) + return { + playlistId: parseInt( + tx.events?.['PlaylistCreated']?.returnValues._playlistId, + 10 + ), + txReceipt: tx + } + } + + async deletePlaylist(playlistId: number) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + signatureSchemas.generators.getDeletePlaylistRequestData( + chainId, + contractAddress, + playlistId, + nonce + ) + + const sig = await this.web3Manager.signTypedData(signatureData) + const method = await this.getMethod( + 'deletePlaylist', + playlistId, + nonce, + sig + ) + + const tx = await this.web3Manager.sendTransaction( + method, + this.contractRegistryKey, + contractAddress + ) + return { + playlistId: parseInt( + tx.events?.['PlaylistDeleted']?.returnValues._playlistId, + 10 + ), + txReceipt: tx + } + } + + async addPlaylistTrack(playlistId: number, addedTrackId: number) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + 
signatureSchemas.generators.getAddPlaylistTrackRequestData( + chainId, + contractAddress, + playlistId, + addedTrackId, + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + + const method = await this.getMethod( + 'addPlaylistTrack', + playlistId, + addedTrackId, + nonce, + sig + ) + + return await this.web3Manager.sendTransaction( + method, + this.contractRegistryKey, + contractAddress + ) + } + + async deletePlaylistTrack( + playlistId: number, + deletedTrackId: number, + deletedPlaylistTimestamp: number, + retries: number + ) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + signatureSchemas.generators.getDeletePlaylistTrackRequestData( + chainId, + contractAddress, + playlistId, + deletedTrackId, + deletedPlaylistTimestamp, + nonce + ) + + const sig = await this.web3Manager.signTypedData(signatureData) + const method = await this.getMethod( + 'deletePlaylistTrack', + playlistId, + deletedTrackId, + deletedPlaylistTimestamp, + nonce, + sig + ) + + return await this.web3Manager.sendTransaction( + method, + this.contractRegistryKey, + contractAddress, + retries + ) + } + + async orderPlaylistTracks( + playlistId: number, + trackIds: number[], + retries: number + ) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const trackIdsHash = this.web3Manager + .getWeb3() + .utils.soliditySha3( + this.web3Manager.getWeb3().eth.abi.encodeParameter('uint[]', trackIds) + ) + const signatureData = + signatureSchemas.generators.getOrderPlaylistTracksRequestData( + chainId, + contractAddress, + playlistId, + trackIdsHash, + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + + const method = await this.getMethod( + 'orderPlaylistTracks', + playlistId, + trackIds, + nonce, + sig + ) + + return await this.web3Manager.sendTransaction( + method, // contractMethod + this.contractRegistryKey, + contractAddress, + retries + ) + } + + async updatePlaylistPrivacy( + playlistId: number, + updatedPlaylistPrivacy: string + ) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + signatureSchemas.generators.getUpdatePlaylistPrivacyRequestData( + chainId, + contractAddress, + playlistId, + updatedPlaylistPrivacy, + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + + const method = await this.getMethod( + 'updatePlaylistPrivacy', + playlistId, + updatedPlaylistPrivacy, + nonce, + sig + ) + + return await this.web3Manager.sendTransaction( + method, + this.contractRegistryKey, + contractAddress + ) + } + + async updatePlaylistName(playlistId: number, updatedPlaylistName: string) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + signatureSchemas.generators.getUpdatePlaylistNameRequestData( + chainId, + contractAddress, + playlistId, + updatedPlaylistName, + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + + const method = await this.getMethod( + 'updatePlaylistName', + playlistId, + updatedPlaylistName, + nonce, + sig + ) + + return await this.web3Manager.sendTransaction( + method, + this.contractRegistryKey, + contractAddress + ) + } + + async updatePlaylistCoverPhoto( + playlistId: 
number, + updatedPlaylistImageMultihashDigest: string + ) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + signatureSchemas.generators.getUpdatePlaylistCoverPhotoRequestData( + chainId, + contractAddress, + playlistId, + updatedPlaylistImageMultihashDigest, + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + + const method = await this.getMethod( + 'updatePlaylistCoverPhoto', + playlistId, + updatedPlaylistImageMultihashDigest, + nonce, + sig + ) + + return await this.web3Manager.sendTransaction( + method, + this.contractRegistryKey, + contractAddress + ) + } + + async updatePlaylistDescription( + playlistId: number, + updatedPlaylistDescription: string + ) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + signatureSchemas.generators.getUpdatePlaylistDescriptionRequestData( + chainId, + contractAddress, + playlistId, + updatedPlaylistDescription, + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + const method = await this.getMethod( + 'updatePlaylistDescription', + playlistId, + updatedPlaylistDescription, + nonce, + sig + ) + + return await this.web3Manager.sendTransaction( + method, + this.contractRegistryKey, + contractAddress + ) + } + + async updatePlaylistUPC(playlistId: number, updatedPlaylistUPC: string) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + signatureSchemas.generators.getUpdatePlaylistUPCRequestData( + chainId, + contractAddress, + playlistId, + this.web3Manager.getWeb3().utils.utf8ToHex(updatedPlaylistUPC), + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + const method = await this.getMethod( + 'updatePlaylistUPC', + playlistId, + this.web3Manager.getWeb3().utils.utf8ToHex(updatedPlaylistUPC), + nonce, + sig + ) + + return await this.web3Manager.sendTransaction( + method, + this.contractRegistryKey, + contractAddress + ) + } + + async isTrackInPlaylist(playlistId: number, trackId: number) { + const method = await this.getMethod( + 'isTrackInPlaylist', + playlistId, + trackId + ) + const result = await method.call() + return result + } +} diff --git a/libs/src/services/dataContracts/registryClient.js b/libs/src/services/dataContracts/RegistryClient.ts similarity index 51% rename from libs/src/services/dataContracts/registryClient.js rename to libs/src/services/dataContracts/RegistryClient.ts index 186f0723e39..0e0254aad66 100644 --- a/libs/src/services/dataContracts/registryClient.js +++ b/libs/src/services/dataContracts/RegistryClient.ts @@ -1,9 +1,21 @@ -const { Utils } = require('../../utils') -const { Web3Manager } = require('../web3Manager') -const { ProviderSelection } = require('../contracts/ProviderSelection') +import { ContractABI, Utils } from '../../utils' +import { Web3Manager } from '../web3Manager' +import { ProviderSelection } from '../contracts/ProviderSelection' +import type { HttpProvider } from 'web3-core' +import type { Contract } from 'web3-eth-contract' -class RegistryClient { - constructor (web3Manager, contractABI, contractAddress) { +export class RegistryClient { + web3Manager: Web3Manager + contractABI: ContractABI['abi'] + contractAddress: string + Registry: Contract + providerSelector: ProviderSelection | null + + 
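One detail worth pausing on from `createPlaylist` and `orderPlaylistTracks` above, before RegistryClient's constructor: the EIP-712 payload never signs the raw `trackIds` array. It signs a keccak hash of the array's ABI encoding, which keeps the typed-data schema fixed no matter how long the playlist is. A sketch of that hashing step; the bare `new Web3()` is a stand-in for `this.web3Manager.getWeb3()`, and no provider is needed for pure encoding:

```typescript
import Web3 from 'web3'

const web3 = new Web3()

// Hash a dynamic uint[] down to a single bytes32 for use in a fixed
// EIP-712 schema, mirroring the trackIdsHash computation in the diff
function hashTrackIds(trackIds: number[]): string {
  const encoded = web3.eth.abi.encodeParameter('uint[]', trackIds)
  const hash = web3.utils.soliditySha3(encoded)
  // soliditySha3 is typed as string | null, so guard before returning
  if (!hash) throw new Error('Failed to hash trackIds')
  return hash
}
```

Since the contract receives the full `trackIds` argument and can recompute the same hash on-chain, a signature over the hash still binds the entire array.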
constructor( + web3Manager: Web3Manager, + contractABI: ContractABI['abi'], + contractAddress: string + ) { this.web3Manager = web3Manager this.contractABI = contractABI this.contractAddress = contractAddress @@ -23,7 +35,7 @@ class RegistryClient { } } - async getContract (contractRegistryKey) { + async getContract(contractRegistryKey: string): Promise<string | undefined> { try { Utils.checkStrLen(contractRegistryKey, 32) const contract = await this.Registry.methods @@ -40,11 +52,11 @@ class RegistryClient { return } - return this.retryInit(contractRegistryKey) + return await this.retryInit(contractRegistryKey) } } - async retryInit (contractRegistryKey) { + async retryInit(contractRegistryKey: string) { try { await this.selectNewEndpoint() const web3 = this.web3Manager.getWeb3() @@ -54,28 +66,30 @@ class RegistryClient { ) return await this.getContract(contractRegistryKey) } catch (e) { - console.error(e.message) + console.error((e as Error).message) + return undefined } } - async selectNewEndpoint () { - this.providerSelector.addUnhealthy( - this.web3Manager.getWeb3().currentProvider.host - ) + async selectNewEndpoint() { + const currentHost = ( + this.web3Manager.getWeb3().currentProvider as HttpProvider + ).host + if (this.providerSelector) { + this.providerSelector.addUnhealthy(currentHost) - if ( - this.providerSelector.getUnhealthySize() === - this.providerSelector.getServicesSize() - ) { - throw new Error( - `No available, healthy providers to get contract ${JSON.stringify( - this.contractABI - )}` - ) - } + if ( + this.providerSelector.getUnhealthySize() === + this.providerSelector.getServicesSize() + ) { + throw new Error( + `No available, healthy providers to get contract ${JSON.stringify( + this.contractABI + )}` + ) + } - await this.providerSelector.select(this) + await this.providerSelector.select(this) + } } } - -module.exports = RegistryClient diff --git a/libs/src/services/dataContracts/socialFeatureFactoryClient.js b/libs/src/services/dataContracts/SocialFeatureFactoryClient.ts similarity index 52% rename from libs/src/services/dataContracts/socialFeatureFactoryClient.js rename to libs/src/services/dataContracts/SocialFeatureFactoryClient.ts index 6f09fcacccf..ebe6ae2a093 100644 --- a/libs/src/services/dataContracts/socialFeatureFactoryClient.js +++ b/libs/src/services/dataContracts/SocialFeatureFactoryClient.ts @@ -1,122 +1,135 @@ -const { ContractClient } = require('../contracts/ContractClient') -const signatureSchemas = require('../../../data-contracts/signatureSchemas') +import { ContractClient } from '../contracts/ContractClient' +import * as signatureSchemas from '../../../data-contracts/signatureSchemas' +import type { Web3Manager } from '../web3Manager' -class SocialFeatureFactoryClient extends ContractClient { - async addTrackRepost (userId, trackId) { +export class SocialFeatureFactoryClient extends ContractClient { + override web3Manager!: Web3Manager + + async addTrackRepost(userId: number, trackId: number) { // generate new track repost request const nonce = signatureSchemas.getNonce() const chainId = await this.getEthNetId() const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getAddTrackRepostRequestData( - chainId, - contractAddress, - userId, - trackId, - nonce - ) + const signatureData = + signatureSchemas.generators.getAddTrackRepostRequestData( + chainId, + contractAddress, + userId, + trackId, + nonce + ) const sig = await this.web3Manager.signTypedData(signatureData) // add new trackRepost to chain - const method = await 
this.getMethod('addTrackRepost', + const method = await this.getMethod( + 'addTrackRepost', userId, trackId, nonce, sig ) - return this.web3Manager.sendTransaction( + return await this.web3Manager.sendTransaction( method, this.contractRegistryKey, contractAddress ) } - async deleteTrackRepost (userId, trackId) { + async deleteTrackRepost(userId: number, trackId: number) { // generate new delete track repost request const nonce = signatureSchemas.getNonce() const chainId = await this.getEthNetId() const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getDeleteTrackRepostRequestData( - chainId, - contractAddress, - userId, - trackId, - nonce - ) + const signatureData = + signatureSchemas.generators.getDeleteTrackRepostRequestData( + chainId, + contractAddress, + userId, + trackId, + nonce + ) const sig = await this.web3Manager.signTypedData(signatureData) // delete trackRepost from chain - const method = await this.getMethod('deleteTrackRepost', + const method = await this.getMethod( + 'deleteTrackRepost', userId, trackId, nonce, sig ) - return this.web3Manager.sendTransaction( + return await this.web3Manager.sendTransaction( method, this.contractRegistryKey, contractAddress ) } - async addPlaylistRepost (userId, playlistId) { + async addPlaylistRepost(userId: number, playlistId: number) { // generate new playlist repost request const nonce = signatureSchemas.getNonce() const chainId = await this.getEthNetId() const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getAddPlaylistRepostRequestData( - chainId, - contractAddress, - userId, - playlistId, - nonce - ) + const signatureData = + signatureSchemas.generators.getAddPlaylistRepostRequestData( + chainId, + contractAddress, + userId, + playlistId, + nonce + ) const sig = await this.web3Manager.signTypedData(signatureData) // add new playlistRepost to chain - const method = await this.getMethod('addPlaylistRepost', + const method = await this.getMethod( + 'addPlaylistRepost', userId, playlistId, nonce, sig ) - return this.web3Manager.sendTransaction( + return await this.web3Manager.sendTransaction( method, this.contractRegistryKey, contractAddress ) } - async deletePlaylistRepost (userId, playlistId) { + async deletePlaylistRepost(userId: number, playlistId: number) { // generate new delete playlist repost request const nonce = signatureSchemas.getNonce() const chainId = await this.getEthNetId() const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getDeletePlaylistRepostRequestData( - chainId, - contractAddress, - userId, - playlistId, - nonce - ) + const signatureData = + signatureSchemas.generators.getDeletePlaylistRepostRequestData( + chainId, + contractAddress, + userId, + playlistId, + nonce + ) const sig = await this.web3Manager.signTypedData(signatureData) // delete playlistRepost from chain - const method = await this.getMethod('deletePlaylistRepost', + const method = await this.getMethod( + 'deletePlaylistRepost', userId, playlistId, nonce, sig ) - return this.web3Manager.sendTransaction( + return await this.web3Manager.sendTransaction( method, this.contractRegistryKey, contractAddress ) } - async addUserFollow (followerUserId, followeeUserId) { + async addUserFollow(followerUserId: number, followeeUserId: number) { if (followerUserId === followeeUserId) { - throw new Error(`addUserFollow - identical value provided for follower and followee ${followerUserId}`) + throw new Error( + `addUserFollow 
- identical value provided for follower and followee ${followerUserId}` + ) } // generate new UserFollow request const nonce = signatureSchemas.getNonce() @@ -132,49 +145,52 @@ class SocialFeatureFactoryClient extends ContractClient { const sig = await this.web3Manager.signTypedData(signatureData) // add new UserFollow to chain - const method = await this.getMethod('addUserFollow', + const method = await this.getMethod( + 'addUserFollow', followerUserId, followeeUserId, nonce, sig ) - return this.web3Manager.sendTransaction( + return await this.web3Manager.sendTransaction( method, this.contractRegistryKey, contractAddress ) } - async deleteUserFollow (followerUserId, followeeUserId) { + async deleteUserFollow(followerUserId: number, followeeUserId: number) { if (followerUserId === followeeUserId) { - throw new Error(`deleteUserFollow - Invalid identical value provided for follower and followee ${followerUserId}`) + throw new Error( + `deleteUserFollow - Invalid identical value provided for follower and followee ${followerUserId}` + ) } // generate new deleteUserFollow request const nonce = signatureSchemas.getNonce() const chainId = await this.getEthNetId() const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getDeleteUserFollowRequestData( - chainId, - contractAddress, - followerUserId, - followeeUserId, - nonce - ) + const signatureData = + signatureSchemas.generators.getDeleteUserFollowRequestData( + chainId, + contractAddress, + followerUserId, + followeeUserId, + nonce + ) const sig = await this.web3Manager.signTypedData(signatureData) // delete UserFollow from chain - const method = await this.getMethod('deleteUserFollow', + const method = await this.getMethod( + 'deleteUserFollow', followerUserId, followeeUserId, nonce, sig ) - return this.web3Manager.sendTransaction( + return await this.web3Manager.sendTransaction( method, this.contractRegistryKey, contractAddress ) } } - -module.exports = SocialFeatureFactoryClient diff --git a/libs/src/services/dataContracts/trackFactoryClient.js b/libs/src/services/dataContracts/TrackFactoryClient.ts similarity index 70% rename from libs/src/services/dataContracts/trackFactoryClient.js rename to libs/src/services/dataContracts/TrackFactoryClient.ts index 2df80429172..cd564bdecbe 100644 --- a/libs/src/services/dataContracts/trackFactoryClient.js +++ b/libs/src/services/dataContracts/TrackFactoryClient.ts @@ -1,10 +1,12 @@ -const { ContractClient } = require('../contracts/ContractClient') -const signatureSchemas = require('../../../data-contracts/signatureSchemas') +import { ContractClient } from '../contracts/ContractClient' +import * as signatureSchemas from '../../../data-contracts/signatureSchemas' +import type { Web3Manager } from '../web3Manager' -class TrackFactoryClient extends ContractClient { +export class TrackFactoryClient extends ContractClient { + override web3Manager!: Web3Manager /* ------- GETTERS ------- */ - async getTrack (trackId) { + async getTrack(trackId: string) { const method = await this.getMethod('getTrack', trackId) return method.call() } @@ -12,7 +14,12 @@ class TrackFactoryClient extends ContractClient { /* ------- SETTERS ------- */ /** uint _userId, bytes32 _multihashDigest, uint8 _multihashHashFn, uint8 _multihashSize */ - async addTrack (userId, multihashDigest, multihashHashFn, multihashSize) { + async addTrack( + userId: number, + multihashDigest: string, + multihashHashFn: number, + multihashSize: number + ) { const nonce = signatureSchemas.getNonce() const 
chainId = await this.getEthNetId() const contractAddress = await this.getAddress() @@ -27,7 +34,8 @@ class TrackFactoryClient extends ContractClient { ) const sig = await this.web3Manager.signTypedData(signatureData) - const method = await this.getMethod('addTrack', + const method = await this.getMethod( + 'addTrack', userId, multihashDigest, multihashHashFn, @@ -42,13 +50,19 @@ class TrackFactoryClient extends ContractClient { contractAddress ) return { - trackId: parseInt(tx.events.NewTrack.returnValues._id, 10), + trackId: parseInt(tx.events?.['NewTrack']?.returnValues._id, 10), txReceipt: tx } } /** uint _trackId, uint _trackOwnerId, bytes32 _multihashDigest, uint8 _multihashHashFn, uint8 _multihashSize */ - async updateTrack (trackId, trackOwnerId, multihashDigest, multihashHashFn, multihashSize) { + async updateTrack( + trackId: number, + trackOwnerId: number, + multihashDigest: string, + multihashHashFn: number, + multihashSize: number + ) { const nonce = signatureSchemas.getNonce() const chainId = await this.getEthNetId() const contractAddress = await this.getAddress() @@ -64,7 +78,8 @@ class TrackFactoryClient extends ContractClient { ) const sig = await this.web3Manager.signTypedData(signatureData) - const method = await this.getMethod('updateTrack', + const method = await this.getMethod( + 'updateTrack', trackId, trackOwnerId, multihashDigest, @@ -81,16 +96,15 @@ class TrackFactoryClient extends ContractClient { ) return { - trackId: parseInt(tx.events.UpdateTrack.returnValues._trackId, 10), + trackId: parseInt(tx.events?.['UpdateTrack']?.returnValues._trackId, 10), txReceipt: tx } } /** - * @param {uint} trackId - * @return {uint} deleted trackId from on-chain event log + * @return deleted trackId from on-chain event log */ - async deleteTrack (trackId) { + async deleteTrack(trackId: number) { const nonce = signatureSchemas.getNonce() const chainId = await this.getEthNetId() const contractAddress = await this.getAddress() @@ -110,10 +124,8 @@ class TrackFactoryClient extends ContractClient { contractAddress ) return { - trackId: parseInt(tx.events.TrackDeleted.returnValues._trackId, 10), + trackId: parseInt(tx.events?.['TrackDeleted']?.returnValues._trackId, 10), txReceipt: tx } } } - -module.exports = TrackFactoryClient diff --git a/libs/src/services/dataContracts/userFactoryClient.js b/libs/src/services/dataContracts/UserFactoryClient.ts similarity index 63% rename from libs/src/services/dataContracts/userFactoryClient.js rename to libs/src/services/dataContracts/UserFactoryClient.ts index 5794d0e536b..1742fad64c8 100644 --- a/libs/src/services/dataContracts/userFactoryClient.js +++ b/libs/src/services/dataContracts/UserFactoryClient.ts @@ -1,20 +1,24 @@ -const { ContractClient } = require('../contracts/ContractClient') -const signatureSchemas = require('../../../data-contracts/signatureSchemas') -const { Utils } = require('../../utils') -const sigUtil = require('eth-sig-util') -const BufferSafe = require('safe-buffer').Buffer +import { ContractClient } from '../contracts/ContractClient' +import * as signatureSchemas from '../../../data-contracts/signatureSchemas' +import type { UserUpdateRequestFn } from '../../../data-contracts/signatureSchemas' +import { Utils } from '../../utils' +import sigUtil from 'eth-sig-util' +import { Buffer as SafeBuffer } from 'safe-buffer' +import type { Web3Manager } from '../web3Manager' -class UserFactoryClient extends ContractClient { +export class UserFactoryClient extends ContractClient { + override web3Manager!: Web3Manager /* ------- 
GETTERS ------- */ - async getUser (userId) { + async getUser(userId: number) { const method = await this.getMethod('getUser', userId) return method.call() } /** valid = does not exist and meets handle requirements (defined on chain) */ - async handleIsValid (handle) { - const method = await this.getMethod('handleIsValid', + async handleIsValid(handle: string) { + const method = await this.getMethod( + 'handleIsValid', Utils.utf8ToHex(handle) ) return method.call() @@ -22,7 +26,7 @@ class UserFactoryClient extends ContractClient { /* ------- SETTERS ------- */ - async addUser (handle) { + async addUser(handle: string) { Utils.checkStrLen(handle, 16) const nonce = signatureSchemas.getNonce() @@ -36,7 +40,8 @@ class UserFactoryClient extends ContractClient { ) const sig = await this.web3Manager.signTypedData(signatureData) - const method = await this.getMethod('addUser', + const method = await this.getMethod( + 'addUser', this.web3Manager.getWalletAddress(), Utils.utf8ToHex(handle), nonce, @@ -50,17 +55,18 @@ class UserFactoryClient extends ContractClient { ) return { txReceipt: tx, - userId: parseInt(tx.events.AddUser.returnValues._userId, 10) + userId: parseInt(tx.events?.['AddUser']?.returnValues._userId, 10) } } - async updateMultihash (userId, multihashDigest) { + async updateMultihash(userId: number, multihashDigest: string) { const [nonce, sig] = await this.getUpdateNonceAndSig( signatureSchemas.generators.getUpdateUserMultihashRequestData, userId, multihashDigest ) - const method = await this.getMethod('updateMultihash', + const method = await this.getMethod( + 'updateMultihash', userId, multihashDigest, nonce, @@ -75,11 +81,12 @@ class UserFactoryClient extends ContractClient { ) return { txReceipt: tx, - multihashDigest: tx.events.UpdateMultihash.returnValues._multihashDigest + multihashDigest: + tx.events?.['UpdateMultihash']?.returnValues._multihashDigest } } - async updateName (userId, name) { + async updateName(userId: number, name: string) { Utils.checkStrLen(name, 32) const [nonce, sig] = await this.getUpdateNonceAndSig( @@ -87,7 +94,8 @@ class UserFactoryClient extends ContractClient { userId, name ) - const method = await this.getMethod('updateName', + const method = await this.getMethod( + 'updateName', userId, Utils.utf8ToHex(name), nonce, @@ -102,11 +110,11 @@ class UserFactoryClient extends ContractClient { ) return { txReceipt: tx, - name: Utils.hexToUtf8(tx.events.UpdateName.returnValues._name) + name: Utils.hexToUtf8(tx.events?.['UpdateName']?.returnValues._name) } } - async updateLocation (userId, location) { + async updateLocation(userId: number, location: string) { const maxLength = 32 Utils.checkStrLen(location, maxLength, /* minLen */ 0) @@ -115,7 +123,8 @@ class UserFactoryClient extends ContractClient { userId, location ) - const method = await this.getMethod('updateLocation', + const method = await this.getMethod( + 'updateLocation', userId, Utils.padRight(Utils.utf8ToHex(location), maxLength * 2), nonce, @@ -130,22 +139,19 @@ class UserFactoryClient extends ContractClient { ) return { txReceipt: tx, - location: Utils.hexToUtf8(tx.events.UpdateLocation.returnValues._location) + location: Utils.hexToUtf8( + tx.events?.['UpdateLocation']?.returnValues._location + ) } } - async updateBio (userId, bio) { + async updateBio(userId: number, bio: string) { const [nonce, sig] = await this.getUpdateNonceAndSig( signatureSchemas.generators.getUpdateUserBioRequestData, userId, bio ) - const method = await this.getMethod('updateBio', - userId, - bio, - nonce, - sig - ) + 
const method = await this.getMethod('updateBio', userId, bio, nonce, sig) const contractAddress = await this.getAddress() const tx = await this.web3Manager.sendTransaction( @@ -155,17 +161,21 @@ class UserFactoryClient extends ContractClient { ) return { txReceipt: tx, - bio: tx.events.UpdateBio.returnValues._bio + bio: tx.events?.['UpdateBio']?.returnValues._bio } } - async updateProfilePhoto (userId, profilePhotoMultihashDigest) { + async updateProfilePhoto( + userId: number, + profilePhotoMultihashDigest: string + ) { const [nonce, sig] = await this.getUpdateNonceAndSig( signatureSchemas.generators.getUpdateUserProfilePhotoRequestData, userId, profilePhotoMultihashDigest ) - const method = await this.getMethod('updateProfilePhoto', + const method = await this.getMethod( + 'updateProfilePhoto', userId, profilePhotoMultihashDigest, nonce, @@ -180,17 +190,19 @@ class UserFactoryClient extends ContractClient { ) return { txReceipt: tx, - profilePhotoMultihashDigest: tx.events.UpdateProfilePhoto.returnValues._profilePhotoDigest + profilePhotoMultihashDigest: + tx.events?.['UpdateProfilePhoto']?.returnValues._profilePhotoDigest } } - async updateCoverPhoto (userId, coverPhotoMultihashDigest) { + async updateCoverPhoto(userId: number, coverPhotoMultihashDigest: string) { const [nonce, sig] = await this.getUpdateNonceAndSig( signatureSchemas.generators.getUpdateUserCoverPhotoRequestData, userId, coverPhotoMultihashDigest ) - const method = await this.getMethod('updateCoverPhoto', + const method = await this.getMethod( + 'updateCoverPhoto', userId, coverPhotoMultihashDigest, nonce, @@ -205,17 +217,19 @@ class UserFactoryClient extends ContractClient { ) return { txReceipt: tx, - coverPhotoMultihashDigest: tx.events.UpdateCoverPhoto.returnValues._coverPhotoDigest + coverPhotoMultihashDigest: + tx.events?.['UpdateCoverPhoto']?.returnValues._coverPhotoDigest } } - async updateIsCreator (userId, isCreator) { + async updateIsCreator(userId: number, isCreator: boolean) { const [nonce, sig] = await this.getUpdateNonceAndSig( signatureSchemas.generators.getUpdateUserCreatorRequestData, userId, isCreator ) - const method = await this.getMethod('updateIsCreator', + const method = await this.getMethod( + 'updateIsCreator', userId, isCreator, nonce, @@ -230,7 +244,7 @@ class UserFactoryClient extends ContractClient { ) return { txReceipt: tx, - isCreator: tx.events.UpdateIsCreator.returnValues._isCreator + isCreator: tx.events?.['UpdateIsCreator']?.returnValues._isCreator } } @@ -239,11 +253,15 @@ class UserFactoryClient extends ContractClient { * the return properties are different. 
The web3 sendTransaction() function isn't called, rather * the encodedABI and contract address are returned, and the identity service can relay it * to the chain on behalf of the user - * @param {number} userId blockchain userId - * @param {Boolean} isVerified - * @param {string} privateKey 64 character hex string + * @param userId blockchain userId + * @param isVerified + * @param privateKey 64 character hex string */ - async updateIsVerified (userId, isVerified, privateKey) { + async updateIsVerified( + userId: number, + isVerified: boolean, + privateKey: string + ) { const contractAddress = await this.getAddress() const [nonce, sig] = await this.getUpdateNonceAndSig( signatureSchemas.generators.getUpdateUserVerifiedRequestData, @@ -251,7 +269,8 @@ class UserFactoryClient extends ContractClient { isVerified, privateKey ) - const method = await this.getMethod('updateIsVerified', + const method = await this.getMethod( + 'updateIsVerified', userId, isVerified, nonce, @@ -261,13 +280,14 @@ class UserFactoryClient extends ContractClient { return [method.encodeABI(), contractAddress] } - async updateCreatorNodeEndpoint (userId, creatorNodeEndpoint) { + async updateCreatorNodeEndpoint(userId: number, creatorNodeEndpoint: string) { const [nonce, sig] = await this.getUpdateNonceAndSig( signatureSchemas.generators.getUpdateUserCreatorNodeRequestData, userId, creatorNodeEndpoint ) - const method = await this.getMethod('updateCreatorNodeEndpoint', + const method = await this.getMethod( + 'updateCreatorNodeEndpoint', userId, creatorNodeEndpoint, nonce, @@ -282,7 +302,9 @@ class UserFactoryClient extends ContractClient { ) return { txReceipt: tx, - creatorNodeEndpoint: tx.events.UpdateCreatorNodeEndpoint.returnValues._creatorNodeEndpoint + creatorNodeEndpoint: + tx.events?.['UpdateCreatorNodeEndpoint']?.returnValues + ._creatorNodeEndpoint } } @@ -292,24 +314,38 @@ class UserFactoryClient extends ContractClient { * Gets a nonce and generates a signature for the given function. Private key is optional and * will use that private key to create the signature. Otherwise the web3Manager private key * will be used. 
- * @param {Object} generatorFn signature scheme object function - * @param {number} userId blockchain userId - * @param {Varies} newValue new value to set - * @param {string} privateKey 64 character hex string + * @param generatorFn signature scheme object function + * @param userId blockchain userId + * @param newValue new value to set + * @param privateKey 64 character hex string */ - async getUpdateNonceAndSig (generatorFn, userId, newValue, privateKey) { + async getUpdateNonceAndSig( + generatorFn: UserUpdateRequestFn, + userId: number, + newValue: unknown, + privateKey?: string + ) { const nonce = signatureSchemas.getNonce() const chainId = await this.getEthNetId() const contractAddress = await this.getAddress() - const signatureData = generatorFn(chainId, contractAddress, userId, newValue, nonce) + const signatureData = generatorFn( + chainId, + contractAddress, + userId, + newValue, + nonce + ) let sig if (privateKey) { - sig = sigUtil.signTypedData(BufferSafe.from(privateKey, 'hex'), { data: signatureData }) + sig = sigUtil.signTypedData( + SafeBuffer.from(privateKey, 'hex') as unknown as Buffer, + { + data: signatureData + } + ) } else { sig = await this.web3Manager.signTypedData(signatureData) } return [nonce, sig] } } - -module.exports = UserFactoryClient diff --git a/libs/src/services/dataContracts/UserLibraryFactoryClient.ts b/libs/src/services/dataContracts/UserLibraryFactoryClient.ts new file mode 100644 index 00000000000..38ccb98cc38 --- /dev/null +++ b/libs/src/services/dataContracts/UserLibraryFactoryClient.ts @@ -0,0 +1,115 @@ +import { ContractClient } from '../contracts/ContractClient' +import * as signatureSchemas from '../../../data-contracts/signatureSchemas' +import type { Web3Manager } from '../web3Manager' + +export class UserLibraryFactoryClient extends ContractClient { + override web3Manager!: Web3Manager + /* ------- SETTERS ------- */ + + async addTrackSave(userId: number, trackId: number) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = signatureSchemas.generators.getTrackSaveRequestData( + chainId, + contractAddress, + userId, + trackId, + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + const contractMethod = await this.getMethod( + 'addTrackSave', + userId, + trackId, + nonce, + sig + ) + return await this.web3Manager.sendTransaction( + contractMethod, + this.contractRegistryKey, + contractAddress + ) + } + + async deleteTrackSave(userId: number, trackId: number) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + signatureSchemas.generators.getDeleteTrackSaveRequestData( + chainId, + contractAddress, + userId, + trackId, + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + const contractMethod = await this.getMethod( + 'deleteTrackSave', + userId, + trackId, + nonce, + sig + ) + return await this.web3Manager.sendTransaction( + contractMethod, + this.contractRegistryKey, + contractAddress + ) + } + + async addPlaylistSave(userId: number, playlistId: number) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + signatureSchemas.generators.getPlaylistSaveRequestData( + chainId, + contractAddress, + userId, + playlistId, + nonce + ) + const sig = await 
this.web3Manager.signTypedData(signatureData) + const contractMethod = await this.getMethod( + 'addPlaylistSave', + userId, + playlistId, + nonce, + sig + ) + return await this.web3Manager.sendTransaction( + contractMethod, + this.contractRegistryKey, + contractAddress + ) + } + + async deletePlaylistSave(userId: number, playlistId: number) { + const nonce = signatureSchemas.getNonce() + const chainId = await this.getEthNetId() + const contractAddress = await this.getAddress() + const signatureData = + signatureSchemas.generators.getDeletePlaylistSaveRequestData( + chainId, + contractAddress, + userId, + playlistId, + nonce + ) + const sig = await this.web3Manager.signTypedData(signatureData) + const contractMethod = await this.getMethod( + 'deletePlaylistSave', + userId, + playlistId, + nonce, + sig + ) + return await this.web3Manager.sendTransaction( + contractMethod, + this.contractRegistryKey, + contractAddress + ) + } +} diff --git a/libs/src/services/dataContracts/userReplicaSetManagerClient.js b/libs/src/services/dataContracts/UserReplicaSetManagerClient.ts similarity index 53% rename from libs/src/services/dataContracts/userReplicaSetManagerClient.js rename to libs/src/services/dataContracts/UserReplicaSetManagerClient.ts index ec618707c47..d9645e2c5c0 100644 --- a/libs/src/services/dataContracts/userReplicaSetManagerClient.js +++ b/libs/src/services/dataContracts/UserReplicaSetManagerClient.ts @@ -1,17 +1,23 @@ -const { ContractClient } = require('../contracts/ContractClient') -const signatureSchemas = require('../../../data-contracts/signatureSchemas') +import { ContractClient } from '../contracts/ContractClient' +import * as signatureSchemas from '../../../data-contracts/signatureSchemas' +import type { Web3Manager } from '../web3Manager' -class UserReplicaSetManagerClient extends ContractClient { +export class UserReplicaSetManagerClient extends ContractClient { + override web3Manager!: Web3Manager /** * Update a user's replica set on the UserReplicaSetManager contract * Callable by user wallet, or any node within the user's replica set - * @param {number} userId - * @param {number} primary - * @param {Array} secondaries + * @param userId + * @param primary + * @param secondaries */ - async updateReplicaSet (userId, primary, secondaries) { + async updateReplicaSet( + userId: number, + primary: number, + secondaries: number[] + ) { const existingReplicaSetInfo = await this.getUserReplicaSet(userId) - return this._updateReplicaSet( + return await this._updateReplicaSet( userId, primary, secondaries, @@ -23,22 +29,22 @@ class UserReplicaSetManagerClient extends ContractClient { /** * Add a new content node to the L2 layer of the protocol * Requires signatures from 3 existing nodes on the UserReplicaSetManager contract - * @param {number} cnodeId - * @param {Array} cnodeOwnerWallets - [0] = incoming delegateOwnerWallet, [1] = incoming ownerWallet - * @param {Array} proposerSpIds - * @param {Array} proposerNonces - * @param {string} proposer1Sig - * @param {string} proposer2Sig - * @param {string} proposer3Sig + * @param cnodeId + * @param cnodeOwnerWallets - [0] = incoming delegateOwnerWallet, [1] = incoming ownerWallet + * @param proposerSpIds + * @param proposerNonces + * @param proposer1Sig + * @param proposer2Sig + * @param proposer3Sig */ - async addOrUpdateContentNode ( - cnodeId, - cnodeOwnerWallets, - proposerSpIds, - proposerNonces, - proposer1Sig, - proposer2Sig, - proposer3Sig + async addOrUpdateContentNode( + cnodeId: number, + cnodeOwnerWallets: string[], + 
proposerSpIds: number[], + proposerNonces: string[], + proposer1Sig: string, + proposer2Sig: string, + proposer3Sig: string ) { const contractAddress = await this.getAddress() const method = await this.getMethod( @@ -63,28 +69,26 @@ class UserReplicaSetManagerClient extends ContractClient { * Generate the relevant data required to propose a new content node * Each incoming node requires 3 distinct signatures in order to be added * This function will be used by content nodes - * @param {number} cnodeId - * @param {string} cnodeDelegateWallet - * @param {number} proposerSpId */ - async getProposeAddOrUpdateContentNodeRequestData ( - cnodeId, - cnodeDelegateWallet, - cnodeOwnerWallet, - proposerSpId + async getProposeAddOrUpdateContentNodeRequestData( + cnodeId: number, + cnodeDelegateWallet: string, + cnodeOwnerWallet: string, + proposerSpId: number ) { const chainId = await this.getEthNetId() const contractAddress = await this.getAddress() const nonce = signatureSchemas.getNonce() - const signatureData = signatureSchemas.generators.getProposeAddOrUpdateContentNodeRequestData( - chainId, - contractAddress, - cnodeId, - cnodeDelegateWallet, - cnodeOwnerWallet, - proposerSpId, - nonce - ) + const signatureData = + signatureSchemas.generators.getProposeAddOrUpdateContentNodeRequestData( + chainId, + contractAddress, + cnodeId, + cnodeDelegateWallet, + cnodeOwnerWallet, + proposerSpId, + nonce + ) const sig = await this.web3Manager.signTypedData(signatureData) return { nonce, @@ -95,39 +99,40 @@ class UserReplicaSetManagerClient extends ContractClient { /** * Returns replica set for requested user at requested blocknumber - * @param {number} userId - * @returns {Object} replica set info with schema { primaryId: int, secondaryIds: number[] } + * @param userId + * @returns replica set info with schema { primaryId: int, secondaryIds: number[] } */ - async getUserReplicaSet (userId) { + async getUserReplicaSet(userId: number) { const method = await this.getMethod('getUserReplicaSet', userId) const currentWallet = this.web3Manager.getWalletAddress() - const resp = await method.call({ from: currentWallet }) + const resp: { primaryId: string; secondaryIds: string[] } = + await method.call({ from: currentWallet }) return { primaryId: parseInt(resp.primaryId), - secondaryIds: resp.secondaryIds.map(x => parseInt(x)) + secondaryIds: resp.secondaryIds.map((x) => parseInt(x)) } } /** * Returns replica set for requested user at requested blocknumber * @notice will error if web3 cannot find data for requested blocknumber - * @returns {Object} replica set info with schema { primaryId: int, secondaryIds: int[] } + * @returns replica set info with schema { primaryId: int, secondaryIds: int[] } */ - async getUserReplicaSetAtBlockNumber (userId, blockNumber) { + async getUserReplicaSetAtBlockNumber(userId: number, blockNumber: number) { const method = await this.getMethod('getUserReplicaSet', userId) const currentWallet = this.web3Manager.getWalletAddress() - const resp = await method.call({ from: currentWallet }, blockNumber) + const resp: { primaryId: string; secondaryIds: string[] } = + await method.call({ from: currentWallet }, blockNumber) return { primaryId: parseInt(resp.primaryId), - secondaryIds: resp.secondaryIds.map(x => parseInt(x)) + secondaryIds: resp.secondaryIds.map((x) => parseInt(x)) } } /** * Return the current ownerWallet and delegateOwnerWallet for a given spID - * @param {number} userId */ - async getContentNodeWallets (spId) { + async getContentNodeWallets(spId: number) { const method = await 
this.getMethod('getContentNodeWallets', spId) const currentWallet = this.web3Manager.getWalletAddress() return method.call({ from: currentWallet }) @@ -137,7 +142,7 @@ class UserReplicaSetManagerClient extends ContractClient { * Return boolean indicating status of URSM seed operation * Prior to seed, no replica sets can be written */ - async getSeedComplete () { + async getSeedComplete() { const method = await this.getMethod('getSeedComplete') const currentWallet = this.web3Manager.getWalletAddress() return method.call({ from: currentWallet }) @@ -146,31 +151,43 @@ class UserReplicaSetManagerClient extends ContractClient { /** * Submit update transaction to UserReplicaSetManager to modify a user's replica set * Can be sent by user's wallet, or any content node in the replica set - * @param {number} userId - * @param {number} primary - * @param {Array} secondaries - * @param {number} oldPrimary - * @param {Array} oldSecondaries + * @param userId + * @param primary + * @param secondaries + * @param oldPrimary + * @param oldSecondaries */ - async _updateReplicaSet (userId, primary, secondaries, oldPrimary, oldSecondaries) { + async _updateReplicaSet( + userId: number, + primary: number, + secondaries: number[], + oldPrimary: number, + oldSecondaries: number[] + ) { const contractAddress = await this.getAddress() const nonce = signatureSchemas.getNonce() const chainId = await this.getEthNetId() const web3 = this.web3Manager.getWeb3() - const secondariesHash = web3.utils.soliditySha3(web3.eth.abi.encodeParameter('uint[]', secondaries)) - const oldSecondariesHash = web3.utils.soliditySha3(web3.eth.abi.encodeParameter('uint[]', oldSecondaries)) - const signatureData = signatureSchemas.generators.getUpdateReplicaSetRequestData( - chainId, - contractAddress, - userId, - primary, - secondariesHash, - oldPrimary, - oldSecondariesHash, - nonce + const secondariesHash = web3.utils.soliditySha3( + web3.eth.abi.encodeParameter('uint[]', secondaries) + ) + const oldSecondariesHash = web3.utils.soliditySha3( + web3.eth.abi.encodeParameter('uint[]', oldSecondaries) ) + const signatureData = + signatureSchemas.generators.getUpdateReplicaSetRequestData( + chainId, + contractAddress, + userId, + primary, + secondariesHash, + oldPrimary, + oldSecondariesHash, + nonce + ) const sig = await this.web3Manager.signTypedData(signatureData) - const method = await this.getMethod('updateReplicaSet', + const method = await this.getMethod( + 'updateReplicaSet', userId, primary, secondaries, @@ -187,5 +204,3 @@ class UserReplicaSetManagerClient extends ContractClient { return tx } } - -module.exports = UserReplicaSetManagerClient diff --git a/libs/src/services/dataContracts/index.ts b/libs/src/services/dataContracts/index.ts new file mode 100644 index 00000000000..1ebb1c7af0a --- /dev/null +++ b/libs/src/services/dataContracts/index.ts @@ -0,0 +1 @@ +export * from './AudiusContracts' diff --git a/libs/src/services/dataContracts/playlistFactoryClient.js b/libs/src/services/dataContracts/playlistFactoryClient.js deleted file mode 100644 index c4d837af8ac..00000000000 --- a/libs/src/services/dataContracts/playlistFactoryClient.js +++ /dev/null @@ -1,304 +0,0 @@ -const { ContractClient } = require('../contracts/ContractClient') -const signatureSchemas = require('../../../data-contracts/signatureSchemas') - -const MAX_PLAYLIST_LENGTH = 199 - -class PlaylistFactoryClient extends ContractClient { - /* ------- SETTERS ------- */ - - async createPlaylist (userId, playlistName, isPrivate, isAlbum, trackIds) { - if 
(!Array.isArray(trackIds) || trackIds.length > MAX_PLAYLIST_LENGTH) { - throw new Error(`Cannot create playlist - trackIds must be array with length <= ${MAX_PLAYLIST_LENGTH}`) - } - - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const trackIdsHash = this.web3Manager.getWeb3().utils.soliditySha3( - this.web3Manager.getWeb3().eth.abi.encodeParameter('uint[]', trackIds) - ) - const signatureData = signatureSchemas.generators.getCreatePlaylistRequestData( - chainId, - contractAddress, - userId, - playlistName, - isPrivate, - isAlbum, - trackIdsHash, - nonce - ) - const sig = await this.web3Manager.signTypedData(signatureData) - - const method = await this.getMethod('createPlaylist', - userId, - playlistName, - isPrivate, - isAlbum, - trackIds, - nonce, - sig - ) - - const tx = await this.web3Manager.sendTransaction( - method, - this.contractRegistryKey, - contractAddress - ) - return { - playlistId: parseInt(tx.events.PlaylistCreated.returnValues._playlistId, 10), - txReceipt: tx - } - } - - async deletePlaylist (playlistId) { - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getDeletePlaylistRequestData( - chainId, - contractAddress, - playlistId, - nonce - ) - - const sig = await this.web3Manager.signTypedData(signatureData) - const method = await this.getMethod('deletePlaylist', playlistId, nonce, sig) - - const tx = await this.web3Manager.sendTransaction( - method, - this.contractRegistryKey, - contractAddress - ) - return { - playlistId: parseInt(tx.events.PlaylistDeleted.returnValues._playlistId, 10), - txReceipt: tx - } - } - - async addPlaylistTrack (playlistId, addedTrackId) { - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getAddPlaylistTrackRequestData( - chainId, - contractAddress, - playlistId, - addedTrackId, - nonce - ) - const sig = await this.web3Manager.signTypedData(signatureData) - - const method = await this.getMethod('addPlaylistTrack', - playlistId, - addedTrackId, - nonce, - sig) - - return this.web3Manager.sendTransaction( - method, - this.contractRegistryKey, - contractAddress - ) - } - - async deletePlaylistTrack (playlistId, deletedTrackId, deletedPlaylistTimestamp, retries) { - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getDeletePlaylistTrackRequestData( - chainId, - contractAddress, - playlistId, - deletedTrackId, - deletedPlaylistTimestamp, - nonce - ) - - const sig = await this.web3Manager.signTypedData(signatureData) - const method = await this.getMethod('deletePlaylistTrack', - playlistId, - deletedTrackId, - deletedPlaylistTimestamp, - nonce, - sig) - - return this.web3Manager.sendTransaction( - method, - this.contractRegistryKey, - contractAddress, - retries - ) - } - - async orderPlaylistTracks (playlistId, trackIds, retries) { - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const trackIdsHash = this.web3Manager.getWeb3().utils.soliditySha3(this.web3Manager.getWeb3().eth.abi.encodeParameter('uint[]', trackIds)) - const signatureData = 
signatureSchemas.generators.getOrderPlaylistTracksRequestData( - chainId, - contractAddress, - playlistId, - trackIdsHash, - nonce - ) - const sig = await this.web3Manager.signTypedData(signatureData) - - const method = await this.getMethod('orderPlaylistTracks', - playlistId, - trackIds, - nonce, - sig) - - return this.web3Manager.sendTransaction( - method, // contractMethod - this.contractRegistryKey, - contractAddress, - retries - ) - } - - async updatePlaylistPrivacy (playlistId, updatedPlaylistPrivacy) { - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getUpdatePlaylistPrivacyRequestData( - chainId, - contractAddress, - playlistId, - updatedPlaylistPrivacy, - nonce - ) - const sig = await this.web3Manager.signTypedData(signatureData) - - const method = await this.getMethod('updatePlaylistPrivacy', - playlistId, - updatedPlaylistPrivacy, - nonce, - sig - ) - - return this.web3Manager.sendTransaction( - method, - this.contractRegistryKey, - contractAddress - ) - } - - async updatePlaylistName (playlistId, updatedPlaylistName) { - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getUpdatePlaylistNameRequestData( - chainId, - contractAddress, - playlistId, - updatedPlaylistName, - nonce - ) - const sig = await this.web3Manager.signTypedData(signatureData) - - const method = await this.getMethod('updatePlaylistName', - playlistId, - updatedPlaylistName, - nonce, - sig - ) - - return this.web3Manager.sendTransaction( - method, - this.contractRegistryKey, - contractAddress - ) - } - - async updatePlaylistCoverPhoto (playlistId, updatedPlaylistImageMultihashDigest) { - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getUpdatePlaylistCoverPhotoRequestData( - chainId, - contractAddress, - playlistId, - updatedPlaylistImageMultihashDigest, - nonce - ) - const sig = await this.web3Manager.signTypedData(signatureData) - - const method = await this.getMethod('updatePlaylistCoverPhoto', - playlistId, - updatedPlaylistImageMultihashDigest, - nonce, - sig - ) - - return this.web3Manager.sendTransaction( - method, - this.contractRegistryKey, - contractAddress - ) - } - - async updatePlaylistDescription (playlistId, updatedPlaylistDescription) { - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getUpdatePlaylistDescriptionRequestData( - chainId, - contractAddress, - playlistId, - updatedPlaylistDescription, - nonce - ) - const sig = await this.web3Manager.signTypedData(signatureData) - const method = await this.getMethod('updatePlaylistDescription', - playlistId, - updatedPlaylistDescription, - nonce, - sig - ) - - return this.web3Manager.sendTransaction( - method, - this.contractRegistryKey, - contractAddress - ) - } - - async updatePlaylistUPC (playlistId, updatedPlaylistUPC) { - const nonce = signatureSchemas.getNonce() - const chainId = await this.getEthNetId() - const contractAddress = await this.getAddress() - const signatureData = signatureSchemas.generators.getUpdatePlaylistUPCRequestData( - chainId, - contractAddress, - playlistId, - 
-      nonce
-    )
-    const sig = await this.web3Manager.signTypedData(signatureData)
-    const method = await this.getMethod('updatePlaylistUPC',
-      playlistId,
-      this.web3Manager.getWeb3().utils.utf8ToHex(updatedPlaylistUPC),
-      nonce,
-      sig
-    )
-
-    return this.web3Manager.sendTransaction(
-      method,
-      this.contractRegistryKey,
-      contractAddress
-    )
-  }
-
-  async isTrackInPlaylist (playlistId, trackId) {
-    const method = await this.getMethod('isTrackInPlaylist',
-      playlistId,
-      trackId
-    )
-    const result = await method.call()
-    return result
-  }
-}
-
-module.exports = PlaylistFactoryClient
diff --git a/libs/src/services/dataContracts/userLibraryFactoryClient.js b/libs/src/services/dataContracts/userLibraryFactoryClient.js
deleted file mode 100644
index ff0287fc990..00000000000
--- a/libs/src/services/dataContracts/userLibraryFactoryClient.js
+++ /dev/null
@@ -1,100 +0,0 @@
-const { ContractClient } = require('../contracts/ContractClient')
-const signatureSchemas = require('../../../data-contracts/signatureSchemas')
-
-class UserLibraryFactoryClient extends ContractClient {
-  /* ------- SETTERS ------- */
-
-  async addTrackSave (userId, trackId) {
-    const nonce = signatureSchemas.getNonce()
-    const chainId = await this.getEthNetId()
-    const contractAddress = await this.getAddress()
-    const signatureData = signatureSchemas.generators.getTrackSaveRequestData(
-      chainId,
-      contractAddress,
-      userId,
-      trackId,
-      nonce)
-    const sig = await this.web3Manager.signTypedData(signatureData)
-    const contractMethod = await this.getMethod('addTrackSave',
-      userId,
-      trackId,
-      nonce,
-      sig)
-    return this.web3Manager.sendTransaction(
-      contractMethod,
-      this.contractRegistryKey,
-      contractAddress
-    )
-  }
-
-  async deleteTrackSave (userId, trackId) {
-    const nonce = signatureSchemas.getNonce()
-    const chainId = await this.getEthNetId()
-    const contractAddress = await this.getAddress()
-    const signatureData = signatureSchemas.generators.getDeleteTrackSaveRequestData(
-      chainId,
-      contractAddress,
-      userId,
-      trackId,
-      nonce)
-    const sig = await this.web3Manager.signTypedData(signatureData)
-    const contractMethod = await this.getMethod('deleteTrackSave',
-      userId,
-      trackId,
-      nonce,
-      sig)
-    return this.web3Manager.sendTransaction(
-      contractMethod,
-      this.contractRegistryKey,
-      contractAddress
-    )
-  }
-
-  async addPlaylistSave (userId, playlistId) {
-    const nonce = signatureSchemas.getNonce()
-    const chainId = await this.getEthNetId()
-    const contractAddress = await this.getAddress()
-    const signatureData = signatureSchemas.generators.getPlaylistSaveRequestData(
-      chainId,
-      contractAddress,
-      userId,
-      playlistId,
-      nonce)
-    const sig = await this.web3Manager.signTypedData(signatureData)
-    const contractMethod = await this.getMethod('addPlaylistSave',
-      userId,
-      playlistId,
-      nonce,
-      sig)
-    return this.web3Manager.sendTransaction(
-      contractMethod,
-      this.contractRegistryKey,
-      contractAddress
-    )
-  }
-
-  async deletePlaylistSave (userId, playlistId) {
-    const nonce = signatureSchemas.getNonce()
-    const chainId = await this.getEthNetId()
-    const contractAddress = await this.getAddress()
-    const signatureData = signatureSchemas.generators.getDeletePlaylistSaveRequestData(
-      chainId,
-      contractAddress,
-      userId,
-      playlistId,
-      nonce)
-    const sig = await this.web3Manager.signTypedData(signatureData)
-    const contractMethod = await this.getMethod('deletePlaylistSave',
-      userId,
-      playlistId,
-      nonce,
-      sig)
-    return this.web3Manager.sendTransaction(
-      contractMethod,
-      this.contractRegistryKey,
-      contractAddress
-    )
-  }
-}
-
-module.exports = UserLibraryFactoryClient
diff --git a/libs/src/services/solanaWeb3Manager/transactionHandler.js b/libs/src/services/solanaWeb3Manager/transactionHandler.js
index 1bf5ac8ddca..c822d01bfaf 100644
--- a/libs/src/services/solanaWeb3Manager/transactionHandler.js
+++ b/libs/src/services/solanaWeb3Manager/transactionHandler.js
@@ -216,7 +216,7 @@ class TransactionHandler {
         errorCode: null
       }
     } catch (e) {
-      logger.warn(`transactionHandler: error in awaitTransactionSignature: ${e}, ${txid}`)
+      logger.warn(`transactionHandler: error in awaitTransactionSignature: ${JSON.stringify(e)}, ${txid}`)
       done = true
       const { message: error } = e
       const errorCode = this._parseSolanaErrorCode(error)
@@ -252,8 +252,9 @@ class TransactionHandler {
         if (done) return
         done = true
         if (result.err) {
-          logger.warn(`transactionHandler: Error in onSignature ${txid}, ${result.err}`)
-          reject(result.err)
+          const err = JSON.stringify(result.err)
+          logger.warn(`transactionHandler: Error in onSignature ${txid}, ${err}`)
+          reject(new Error(err))
         } else {
           resolve(txid)
         }
@@ -278,11 +279,12 @@ class TransactionHandler {

         // End loop if error
         if (result.err) {
+          const err = JSON.stringify(result.err)
           logger.error(
-            `transactionHandler: polling saw result error: ${result.err}, tx: ${txid}`
+            `transactionHandler: polling saw result error: ${err}, tx: ${txid}`
           )
           done = true
-          reject(result.err)
+          reject(new Error(err))
           return
         }

@@ -327,7 +329,7 @@ class TransactionHandler {
     // Match on custom solana program errors
     const matcher = /(?:custom program error: 0x)(.*)$/
     const res = errorMessage.match(matcher)
-    if (res && res.length !== 2) return parseInt(res[1], 16) || null
+    if (res && res.length === 2) return parseInt(res[1], 16) || null
     // Match on custom anchor errors
     const matcher2 = /(?:"Custom":)(\d+)/
     const res2 = errorMessage.match(matcher2)
diff --git a/logspout/.gitignore b/logspout/.gitignore
new file mode 100644
index 00000000000..30bd623c235
--- /dev/null
+++ b/logspout/.gitignore
@@ -0,0 +1,2 @@
+.env
+
diff --git a/logspout/Dockerfile b/logspout/Dockerfile
new file mode 100644
index 00000000000..a1907838b6e
--- /dev/null
+++ b/logspout/Dockerfile
@@ -0,0 +1,16 @@
+FROM gliderlabs/logspout:v3.2.14
+
+# ignore previous logs on startup;
+# this could be lossy if the container restarts,
+# but removing it could send duplicate logs on container restarts
+ENV BACKLOG false
+
+ARG git_sha
+ENV GIT_SHA=${git_sha}
+
+ARG audius_loggly_token
+ENV audius_loggly_token ${audius_loggly_token}
+
+ENTRYPOINT []
+COPY start.sh /start.sh
+CMD /start.sh
diff --git a/logspout/README.md b/logspout/README.md
new file mode 100644
index 00000000000..75497143853
--- /dev/null
+++ b/logspout/README.md
@@ -0,0 +1,19 @@
+# Build Custom Audius Logspout Sidecar Container
+
+If we need to build the Logspout container by hand
+(for testing purposes, when CircleCI is down, etc.), run the following:
+
+```bash
+# .env contains: audius_loggly_token=xxx
+. .env
+
+LOGSPOUT_VERSION=$(head -n1 Dockerfile | cut -f 2 -d ':')
+[ ${audius_loggly_token} ] \
+  && audius_loggly_token_64=$(echo ${audius_loggly_token} | base64) \
+  && docker build \
+    -t audius/logspout:${LOGSPOUT_VERSION} \
+    --build-arg git_sha=$(git rev-parse HEAD) \
+    --build-arg audius_loggly_token=${audius_loggly_token_64} \
+    . \
+  && docker push audius/logspout:${LOGSPOUT_VERSION}
+```
diff --git a/logspout/build.sh b/logspout/build.sh
new file mode 100644
index 00000000000..f3e0521b111
--- /dev/null
+++ b/logspout/build.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+# https://github.com/gliderlabs/logspout/blob/818dd8260e52d2c148280d86170bdf5267b5c637/build.sh
+# due to: https://github.com/gliderlabs/logspout/blob/818dd8260e52d2c148280d86170bdf5267b5c637/Dockerfile#L9-L11
+
+set -e
+apk add --update go build-base git mercurial ca-certificates
+cd /src
+go build -ldflags "-X main.Version=$1" -o /bin/logspout
+apk del go git mercurial build-base
+rm -rf /root/go /var/cache/apk/*
+
+# backwards compatibility
+ln -fs /tmp/docker.sock /var/run/docker.sock
+
diff --git a/logspout/modules.go b/logspout/modules.go
new file mode 100644
index 00000000000..7729d03bf43
--- /dev/null
+++ b/logspout/modules.go
@@ -0,0 +1,15 @@
+// https://github.com/gliderlabs/logspout/blob/818dd8260e52d2c148280d86170bdf5267b5c637/modules.go
+
+package main
+
+import (
+	_ "github.com/gliderlabs/logspout/adapters/multiline"
+	_ "github.com/gliderlabs/logspout/adapters/raw"
+	_ "github.com/gliderlabs/logspout/adapters/syslog"
+	_ "github.com/gliderlabs/logspout/healthcheck"
+	_ "github.com/gliderlabs/logspout/httpstream"
+	_ "github.com/gliderlabs/logspout/routesapi"
+	_ "github.com/gliderlabs/logspout/transports/tcp"
+	_ "github.com/gliderlabs/logspout/transports/tls"
+	_ "github.com/gliderlabs/logspout/transports/udp"
+)
diff --git a/logspout/start.sh b/logspout/start.sh
new file mode 100755
index 00000000000..e5693b503ed
--- /dev/null
+++ b/logspout/start.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env sh
+
+# start with the `logspout` Loggly tag, and add ${audius_loggly_tags} or ${logglyTags} if present
+tag_csv=logspout
+if [[ "${audius_loggly_tags}" ]]; then
+  tag_csv=${tag_csv},${audius_loggly_tags}
+elif [[ "${logglyTags}" ]]; then
+  tag_csv=${tag_csv},${logglyTags}
+fi
+
+# set hostname to ${audius_discprov_url}, else ${creatorNodeEndpoint}
+if [[ "${audius_discprov_url}" ]]; then
+  hostname=${audius_discprov_url}
+elif [[ "${creatorNodeEndpoint}" ]]; then
+  hostname=${creatorNodeEndpoint}
+fi
+
+# use a regex to extract the domain from the url (source: https://stackoverflow.com/a/2506635/8674706)
+# and add the extracted domain as a Loggly tag
+if [[ "${hostname}" ]]; then
+  hostname=$(echo ${hostname} | sed -e 's/[^/]*\/\/\([^@]*@\)\?\([^:/]*\).*/\2/')
+  tag_csv=${tag_csv},${hostname}
+fi
+
+# reformat our comma-delimited list
+IFS=","
+for tag in ${tag_csv}
+do
+  tags="${tags} tag=\"${tag}\""
+done
+
+# set and echo our Loggly token and tags for Logspout
+export SYSLOG_STRUCTURED_DATA="$(echo ${audius_loggly_token} | base64 -d)@41058 ${tags}"
+echo SYSLOG_STRUCTURED_DATA=${SYSLOG_STRUCTURED_DATA}
+
+# start logspout and point it to Loggly
+/bin/logspout multiline+syslog+tcp://logs-01.loggly.com:514
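The README above (like the CI `IMAGE_TAG` step) derives the image tag from the first line of `logspout/Dockerfile`. A minimal check of that extraction, using the `FROM` line added in this diff; the echoed string is just a stand-in for reading the file:

```bash
# stand-in for: head -n1 Dockerfile
echo 'FROM gliderlabs/logspout:v3.2.14' | cut -f 2 -d ':'
# prints: v3.2.14
```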
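The Loggly token is base64-encoded when the image is built (in the README's build command and in the CI job) and decoded again in `start.sh` before being placed into `SYSLOG_STRUCTURED_DATA`. A minimal sketch of that round trip, using a placeholder token (the real value comes from `.env` or CI secrets):

```bash
audius_loggly_token='00000000-0000-0000-0000-000000000000'  # placeholder, not a real token

encoded=$(echo ${audius_loggly_token} | base64)  # what the docker build-arg receives
decoded=$(echo ${encoded} | base64 -d)           # what start.sh recovers at runtime

[ "${decoded}" = "${audius_loggly_token}" ] && echo 'round trip ok'

# start.sh then embeds the decoded token into the syslog structured data, e.g.:
#   SYSLOG_STRUCTURED_DATA="<decoded-token>@41058 tag=\"logspout\" ..."
```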
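And a sketch of the tag assembly in `start.sh`: the sed expression strips the scheme, optional credentials, port, and path from the node URL, and the `IFS` loop turns the comma-separated list into syslog `tag=` pairs. The endpoint and tag values here are hypothetical, and the `\?` in the sed pattern assumes GNU/BusyBox sed, as in the Alpine-based image:

```bash
creatorNodeEndpoint='https://cn1.operator.example.com:443/health_check'  # hypothetical

# same expression as start.sh: keep only the host portion of the url
domain=$(echo ${creatorNodeEndpoint} | sed -e 's/[^/]*\/\/\([^@]*@\)\?\([^:/]*\).*/\2/')
echo ${domain}  # prints: cn1.operator.example.com

tag_csv=logspout,prod,${domain}  # 'prod' stands in for ${audius_loggly_tags}
IFS=","
for tag in ${tag_csv}; do
  tags="${tags} tag=\"${tag}\""
done
echo ${tags}  # prints: tag="logspout" tag="prod" tag="cn1.operator.example.com"
```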