diff --git a/Taskfile.helper.yml b/Taskfile.helper.yml index b358f6f84..8ad6dee20 100644 --- a/Taskfile.helper.yml +++ b/Taskfile.helper.yml @@ -13,14 +13,16 @@ vars: solo_logs_dir: "{{ .solo_user_dir }}/logs" solo_keys_dir: "{{ .solo_cache_dir }}/keys" solo_bin_dir: "{{ .solo_user_dir }}/bin" + temp_prefix: + sh: (echo "/tmp/solo-${USER}-$(date +%Y%m%d%H%M%S)") run_build_file: - sh: (echo "/tmp/${USER}-run-build-$(date +%Y%m%d%H%M%S)") + sh: (echo "/tmp/solo-${USER}-run-build-$(date +%Y%m%d%H%M%S)") var_check_file: - sh: (echo "/tmp/${USER}-var-check-$(date +%Y%m%d%H%M%S)") + sh: (echo "/tmp/solo-${USER}-var-check-$(date +%Y%m%d%H%M%S)") minio_flag_file: - sh: (echo "/tmp/${USER}-minio-flag-$(date +%Y%m%d%H%M%S)") + sh: (echo "/tmp/solo-${USER}-minio-flag-$(date +%Y%m%d%H%M%S)") solo_install_file: - sh: (echo "/tmp/${USER}-solo-install-$(date +%Y%m%d%H%M%S)") + sh: (echo "/tmp/solo-${USER}-solo-install-$(date +%Y%m%d%H%M%S)") env: SOLO_CLUSTER_SETUP_NAMESPACE: solo-setup @@ -75,6 +77,11 @@ tasks: - echo "LOCAL_BUILD_FLAG=${LOCAL_BUILD_FLAG}" - echo "DEBUG_NODE_ALIAS=${DEBUG_NODE_ALIAS}" - echo "SOLO_CHARTS_DIR_FLAG=${SOLO_CHARTS_DIR_FLAG}" + - echo "LOAD_BALANCER_FLAG=${LOAD_BALANCER_FLAG}" + - echo "ENABLE_EXPLORER_TLS_FLAG=${ENABLE_EXPLORER_TLS_FLAG}" + - echo "CLUSTER_TLS_FLAGS=${CLUSTER_TLS_FLAGS}" + - echo "NETWORK_DEPLOY_EXTRA_FLAGS=${NETWORK_DEPLOY_EXTRA_FLAGS}" + - echo "MIRROR_NODE_DEPLOY_EXTRA_FLAGS=${MIRROR_NODE_DEPLOY_EXTRA_FLAGS}" - touch {{ .var_check_file }} readme: @@ -98,7 +105,7 @@ tasks: - test -f {{ .solo_install_file }} cmds: - | - if [[ "$(ls -1 package.json)" == "" ]]; then + if [[ "$(ls -1 package.json > /dev/null 2>&1)" == "" ]]; then cd .. fi pwd @@ -106,6 +113,7 @@ tasks: - touch {{ .solo_install_file }} install:kubectl:darwin: + silent: true internal: true platforms: - darwin @@ -116,6 +124,7 @@ tasks: - brew install kubernetes-cli install:kubectl:linux: + silent: true internal: true platforms: - linux @@ -142,9 +151,10 @@ tasks: #- test "$(yq -r '.flags."node-ids"' < {{ .solo_user_dir }}/solo.yaml)" == "{{ .node_identifiers }}" - test "$(jq -r '.flags."node-ids"' < {{ .solo_user_dir }}/solo.config)" == "{{ .node_identifiers }}" cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- init + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- init --dev solo:keys: + silent: true internal: true status: - | @@ -157,9 +167,10 @@ tasks: deps: - task: "init" cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node keys --gossip-keys --tls-keys --node-aliases-unparsed {{.node_identifiers}} -q + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node keys --gossip-keys --tls-keys --node-aliases-unparsed {{.node_identifiers}} -q --dev solo:network:deploy: + silent: true internal: true deps: - task: "init" @@ -174,21 +185,31 @@ tasks: if [[ "${SOLO_CHART_VERSION}" != "" ]]; then export SOLO_CHART_FLAG='--solo-chart-version ${SOLO_CHART_VERSION}' fi - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- network deploy --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} ${CONSENSUS_NODE_FLAG} ${SOLO_CHART_FLAG} ${VALUES_FLAG} ${SETTINGS_FLAG} ${LOG4J2_FLAG} ${APPLICATION_PROPERTIES_FLAG} ${GENESIS_THROTTLES_FLAG} ${DEBUG_NODE_FLAG} ${SOLO_CHARTS_DIR_FLAG} -q + SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- network deploy --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} ${CONSENSUS_NODE_FLAG} ${SOLO_CHART_FLAG} ${VALUES_FLAG} ${SETTINGS_FLAG} ${LOG4J2_FLAG} ${APPLICATION_PROPERTIES_FLAG} ${GENESIS_THROTTLES_FLAG} 
${DEBUG_NODE_FLAG} ${SOLO_CHARTS_DIR_FLAG} ${LOAD_BALANCER_FLAG} ${NETWORK_DEPLOY_EXTRA_FLAGS} -q --dev + - task: "solo:node:setup" + + solo:node:setup: + silent: true + internal: true + deps: + - task: "init" + cmds: - | if [[ "${CONSENSUS_NODE_VERSION}" != "" ]]; then export CONSENSUS_NODE_FLAG='--release-tag ${CONSENSUS_NODE_VERSION}' fi - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node setup --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} ${CONSENSUS_NODE_FLAG} ${LOCAL_BUILD_FLAG} -q + SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node setup --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} ${CONSENSUS_NODE_FLAG} ${LOCAL_BUILD_FLAG} -q --dev solo:network:destroy: + silent: true internal: true deps: - task: "init" cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- network destroy --namespace "${SOLO_NAMESPACE}" --delete-pvcs --delete-secrets --force -q + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- network destroy --namespace "${SOLO_NAMESPACE}" --delete-pvcs --delete-secrets --force -q --dev solo:node:start: + silent: true internal: true deps: - task: "init" @@ -197,42 +218,46 @@ tasks: if [[ "${DEBUG_NODE_ALIAS}" != "" ]]; then export DEBUG_NODE_FLAG="--debug-node-alias {{ .DEBUG_NODE_ALIAS }}" fi - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node start --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} ${DEBUG_NODE_FLAG} -q {{ .CLI_ARGS }} + SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node start --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} ${DEBUG_NODE_FLAG} -q {{ .CLI_ARGS }} --dev - | if [[ "{{ .use_port_forwards }}" == "true" ]];then - echo "Enable port forwarding for Hedera Node" - kubectl port-forward -n "${SOLO_NAMESPACE}" svc/haproxy-node1-svc 50211:50211 & + echo "Enable port forwarding for Hedera Network Node" + echo "Port forwarding for Hedera Network Node: grpc:50211" + nohup kubectl port-forward -n "${SOLO_NAMESPACE}" pod/network-node1-0 50211:50211 < /dev/null > {{.temp_prefix}}-50211-nohup.out 2>&1 & sleep 4 fi solo:node:stop: + silent: true internal: true ignore_error: true deps: - task: "init" cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node stop --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} -q {{ .CLI_ARGS }} + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node stop --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} -q {{ .CLI_ARGS }} --dev solo:relay: + silent: true deps: - task: "init" cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- relay deploy -n "${SOLO_NAMESPACE}" -i node1 -q + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- relay deploy -n "${SOLO_NAMESPACE}" -i node1 ${RELAY_NODE_DEPLOY_EXTRA_FLAGS} -q --dev - | if [[ "{{ .use_port_forwards }}" == "true" ]];then echo "Enable port forwarding for Hedera JSON RPC Relay" - kubectl port-forward -n "${SOLO_NAMESPACE}" svc/relay-node1-hedera-json-rpc-relay 7546:7546 & + nohup kubectl port-forward -n "${SOLO_NAMESPACE}" svc/relay-node1-hedera-json-rpc-relay 7546:7546 & sleep 4 fi solo:destroy-relay: + silent: true status: - | {{.solo_bin_dir}}/helm list -n "${SOLO_NAMESPACE}" | grep -vqz relay-node1 deps: - task: "init" cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- relay destroy -n "${SOLO_NAMESPACE}" -i node1 -q + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- relay destroy -n "${SOLO_NAMESPACE}" -i node1 -q --dev solo:cache:remove: silent: true @@ -261,7 +286,15 @@ tasks: - echo 
"Removing solo config..." - rm -rf {{ .solo_user_dir }}/solo.yaml + solo:freeze:restart: + cmds: + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node prepare-upgrade --namespace "${SOLO_NAMESPACE}" -q --dev + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node freeze-upgrade --namespace "${SOLO_NAMESPACE}" -q --dev + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node stop --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} -q --dev + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node start --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} -q --dev + cluster:create: + silent: true status: - kind get clusters | grep -q "${SOLO_CLUSTER_NAME}" cmds: @@ -269,13 +302,8 @@ tasks: - sleep 10 # wait for control plane to come up - kubectl config set-context kind-${SOLO_CLUSTER_NAME} - cluster:setup: - deps: - - task: "init" - cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- cluster setup --cluster-setup-namespace "${SOLO_CLUSTER_SETUP_NAMESPACE}" ${SOLO_CHARTS_DIR_FLAG} -q - cluster:destroy: + silent: true cmds: - kind delete cluster --name "${SOLO_CLUSTER_NAME}" @@ -319,7 +347,7 @@ tasks: cmds: - | export MINIO_FLAG=$(cat {{ .minio_flag_file }}) - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- cluster setup --cluster-setup-namespace "${SOLO_CLUSTER_SETUP_NAMESPACE}" ${MINIO_FLAG} -q + SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- cluster setup --cluster-setup-namespace "${SOLO_CLUSTER_SETUP_NAMESPACE}" ${MINIO_FLAG} ${SOLO_CHARTS_DIR_FLAG} ${CLUSTER_TLS_FLAGS} -q --dev solo:node:addresses: internal: true @@ -330,6 +358,11 @@ tasks: export IP_LIST_TEMPLATE_FILE={{ .TASKFILE_DIR }}/list-external-ips.gotemplate kubectl get svc -n "${SOLO_NAMESPACE}" -l "solo.hedera.com/type=network-node-svc" --output=go-template-file=${IP_LIST_TEMPLATE_FILE} + solo:node:logs: + silent: true + cmds: + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node logs --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} -q --dev + start: desc: solo node start deps: @@ -383,20 +416,25 @@ tasks: - task: "solo:relay" solo:mirror-node: + silent: true desc: solo mirror-node deploy with port forward on explorer deps: - task: "init" cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- mirror-node deploy --namespace "${SOLO_NAMESPACE}" -q + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- mirror-node deploy --namespace "${SOLO_NAMESPACE}" -s ${SOLO_CLUSTER_SETUP_NAMESPACE} ${SOLO_CHARTS_DIR_FLAG} ${ENABLE_EXPLORER_TLS_FLAG} ${TLS_CLUSTER_ISSUER_TYPE_FLAG} ${MIRROR_NODE_DEPLOY_EXTRA_FLAGS} --pinger -q --dev - | if [[ "{{ .use_port_forwards }}" == "true" ]];then echo "Enable port forwarding for Hedera Explorer & Mirror Node Network" - kubectl port-forward -n "${SOLO_NAMESPACE}" svc/hedera-explorer 8080:80 & - kubectl port-forward svc/mirror-grpc -n "${SOLO_NAMESPACE}" 5600:5600 & + echo "Port forwarding for Hedera Explorer: http://localhost:6789" + nohup kubectl port-forward -n "${SOLO_NAMESPACE}" svc/hedera-explorer 6789:80 < /dev/null > {{.temp_prefix}}-6789-nohup.out 2>&1 & + echo "Port forwarding for Mirror Node Network: grpc:5600, rest:5551" + nohup kubectl port-forward svc/mirror-grpc -n "${SOLO_NAMESPACE}" 5600:5600 < /dev/null > {{.temp_prefix}}-5600-nohup.out 2>&1 & + nohup kubectl port-forward svc/mirror-rest -n "${SOLO_NAMESPACE}" 5551:80 < /dev/null > {{.temp_prefix}}-5551-nohup.out 2>&1 & sleep 4 fi solo:destroy-mirror-node: + silent: true desc: solo mirror-node destroy status: - | @@ -404,7 +442,7 @@ tasks: deps: - task: 
"init" cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- mirror-node destroy --namespace "${SOLO_NAMESPACE}" --force -q || true + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- mirror-node destroy --namespace "${SOLO_NAMESPACE}" --force -q --dev || true clean: desc: destroy, then remove cache directory, logs directory, config, and port forwards @@ -423,7 +461,4 @@ tasks: silent: true cmds: - echo "Cleaning up temporary files..." - - rm -f /tmp/${USER}-run-build-* || true - - rm -f /tmp/${USER}-var-check-* || true - - rm -f /tmp/${USER}-minio-flag-* || true - - rm -f /tmp/${USER}-solo-install-* || true + - rm -f /tmp/solo-${USER}-* || true diff --git a/Taskfile.yml b/Taskfile.yml index f6e15388f..0c9c0e23d 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -12,6 +12,10 @@ env: # LOCAL_BUILD_FLAG: "--local-build-path {{.HEDERA_SERVICES_ROOT}}/hedera-node/data" # DEBUG_NODE_ALIAS: "node2" # SOLO_CHARTS_DIR_FLAG: "-d /Users/user/source/solo-charts/charts" + # LOAD_BALANCER_FLAG: "--load-balancer" + # ENABLE_EXPLORER_TLS_FLAG: "--enable-hedera-explorer-tls" + # TLS_CLUSTER_ISSUER_TYPE_FLAG: "--tls-cluster-issuer-type acme-staging" + # NETWORK_DEPLOY_EXTRA_FLAGS: "--haproxy-ips node1=" vars: use_port_forwards: "true" @@ -35,7 +39,7 @@ tasks: cmds: - task: "cluster:create" - task: "solo:init" - - task: "cluster:setup" + - task: "solo:cluster:setup" - task: "solo:keys" - task: "solo:network:deploy" diff --git a/examples/address-book/.gitignore b/examples/address-book/.gitignore new file mode 100644 index 000000000..04f5250ea --- /dev/null +++ b/examples/address-book/.gitignore @@ -0,0 +1,3 @@ +output/ +syserr.log +/localhost/sysfiles/ diff --git a/examples/address-book/README.md b/examples/address-book/README.md new file mode 100644 index 000000000..96b35b073 --- /dev/null +++ b/examples/address-book/README.md @@ -0,0 +1,50 @@ +# Yahcli Address Book Example + +This is an example of how to use Yahcli to pull the ledger and mirror node address book. And to update the ledger address book. It updates File 101 (the ledger address book file) and File 102 (the ledger node details file). + +NOTE: Mirror Node refers to File 102 as its address book. + +## Usage + +To get the address book from the ledger, this requires a port forward to be setup on port 50211 to consensus node with node ID = 0. +```bash +# try and detect if the port forward is already setup +netstat -na | grep 50211 +ps -ef | grep 50211 | grep -v grep + +# setup a port forward if you need to +kubectl port-forward -n "${SOLO_NAMESPACE}" pod/network-node1-0 50211:50211 +``` + +To get the address book from the ledger, run the following command: +```bash +cd /examples/address-book +task get:ledger:addressbook +``` +It will output the address book in JSON format to: +* `examples/address-book/localhost/sysfiles/addressBook.json` +* `examples/address-book/localhost/sysfiles/nodeDetails.json` + +You can update the address book files with your favorite text editor. 
+ +Once the files are ready, you can upload them to the ledger by running the following command: +```bash +cd /examples/address-book +task update:ledger:addressbook +``` + +To get the address book from the mirror node, run the following command: +```bash +cd /examples/address-book +task get:mirror:addressbook +``` +NOTE: Mirror Node may not pick up the changes automatically, it might require running some transactions through, example: +```bash +cd +npm run solo -- account create +npm run solo -- account create +npm run solo -- account create +npm run solo -- account create +npm run solo -- account create +npm run solo -- account update -n solo-e2e --account-id 0.0.1004 --hbar-amount 78910 +``` diff --git a/examples/address-book/Taskfile.yml b/examples/address-book/Taskfile.yml new file mode 100644 index 000000000..3431eb1af --- /dev/null +++ b/examples/address-book/Taskfile.yml @@ -0,0 +1,34 @@ +version: 3 +tasks: + get:ledger:addressbook: + silent: true + desc: retrieve the address book (file 101; examples/address-book/localhost/sysfiles/addressBook.json) and node details (file 102; examples/address-book/localhost/sysfiles/nodeDetails.json) from the ledger + cmds: + - java -jar yahcli.jar --verbose=WARN -n localhost -p 2 sysfiles download 102 + - java -jar yahcli.jar --verbose=WARN -n localhost -p 2 sysfiles download 101 + - echo "cat file 102 = localhost/sysfiles/nodeDetails.json" + - echo "---------------------------------------" + - cat localhost/sysfiles/nodeDetails.json + - echo "---------------------------------------" + - echo "cat file 101 = localhost/sysfiles/addressBook.json" + - echo "---------------------------------------" + - cat localhost/sysfiles/addressBook.json + + update:ledger:addressbook: + silent: true + desc: update the address book (file 101; examples/address-book/localhost/sysfiles/addressBook.json) and node details (file 102; examples/address-book/localhost/sysfiles/nodeDetails.json) on the ledger + cmds: + - java -jar yahcli.jar --verbose=WARN -n localhost -p 2 sysfiles upload 102 + - java -jar yahcli.jar --verbose=WARN -n localhost -p 2 sysfiles upload 101 + + get:mirror:addressbook: + silent: true + desc: retrieve the address book from the mirror node (file 102) + cmds: + - | + jq --version > /dev/null 2>&1 + if [[ $? 
-eq 0 ]]; then + curl -s http://localhost:5551/api/v1/network/nodes | jq + else + curl -s http://localhost:5551/api/v1/network/nodes + fi diff --git a/examples/address-book/config.yml b/examples/address-book/config.yml new file mode 100644 index 000000000..178b9fb2f --- /dev/null +++ b/examples/address-book/config.yml @@ -0,0 +1,6 @@ +defaultNetwork: localhost +networks: + localhost: + allowedReceiverAccountIds: [] + nodes: + - { id: 0, account: 3, ipv4Addr: 127.0.0.1 } diff --git a/examples/address-book/localhost/keys/account2.pass b/examples/address-book/localhost/keys/account2.pass new file mode 100644 index 000000000..7c79971d1 --- /dev/null +++ b/examples/address-book/localhost/keys/account2.pass @@ -0,0 +1 @@ +swirlds diff --git a/examples/address-book/localhost/keys/account2.pem b/examples/address-book/localhost/keys/account2.pem new file mode 100644 index 000000000..dd125904d --- /dev/null +++ b/examples/address-book/localhost/keys/account2.pem @@ -0,0 +1,7 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIHDMH8GCSqGSIb3DQEFDTByMFEGCSqGSIb3DQEFDDBEBDAqNosoP7Vz+faodhk1 +2N4Lykgs3AjiaZws0sajodGNLax8pG0FIFarGWtJyRPSKvgCAicQMAwGCCqGSIb3 +DQIKBQAwHQYJYIZIAWUDBAEqBBC6ckCkLqfdzlIynrvwtracBECvr1K4KI1PWJ5z +YY7WKfjy57ffuuQ2GlNZUrp6yylRlRPGqZ015XT2Cbph6sZNE4xwn0NMxGp7Wf2f +j0A8/Il/ +-----END ENCRYPTED PRIVATE KEY----- diff --git a/examples/address-book/localhost/keys/account55.pass b/examples/address-book/localhost/keys/account55.pass new file mode 100644 index 000000000..ab22d1b2d --- /dev/null +++ b/examples/address-book/localhost/keys/account55.pass @@ -0,0 +1 @@ +myOHiYnunypq \ No newline at end of file diff --git a/examples/address-book/localhost/keys/account55.pem b/examples/address-book/localhost/keys/account55.pem new file mode 100644 index 000000000..36ea3c303 --- /dev/null +++ b/examples/address-book/localhost/keys/account55.pem @@ -0,0 +1,7 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIHDMH8GCSqGSIb3DQEFDTByMFEGCSqGSIb3DQEFDDBEBDAAjYrJqdRn+yhHliur +S1x8C89N3CTmjwLsB6C5n2fLs8oaB+7SpgDBkWibsrlc8+ACAicQMAwGCCqGSIb3 +DQIKBQAwHQYJYIZIAWUDBAEqBBCRaztcTLabmkKgzlaF4vP9BEBb9fqmaZXq86dr +SnlCMX1xu5+zz0sJIYTT9oV4tVvqeW+CLT8FdReh3Zu86T6IoGN4sCU9C/FmyhOZ +JZlpLoFO +-----END ENCRYPTED PRIVATE KEY----- diff --git a/examples/address-book/yahcli.jar b/examples/address-book/yahcli.jar new file mode 100644 index 000000000..9093dd45e Binary files /dev/null and b/examples/address-book/yahcli.jar differ diff --git a/examples/performance-tuning/latitude/Taskfile.yml b/examples/performance-tuning/latitude/Taskfile.yml index d664266a4..ba356d4e5 100644 --- a/examples/performance-tuning/latitude/Taskfile.yml +++ b/examples/performance-tuning/latitude/Taskfile.yml @@ -7,9 +7,9 @@ vars: solo_home_override_dir: "%HOME%/.solo" env: SOLO_NETWORK_SIZE: 10 - SOLO_NAMESPACE: "{{.SOLO_NAMESPACE}}" - # SOLO_CHART_VERSION: 0.39.0 - # CONSENSUS_NODE_VERSION: 0.0.0 + SOLO_NAMESPACE: %SOLO_NAMESPACE% + SOLO_CHART_VERSION: 0.42.3 + #CONSENSUS_NODE_VERSION: 0.0.0 VALUES_FLAG: "--values-file {{.USER_WORKING_DIR}}/init-containers-values.yaml" SETTINGS_FLAG: "--settings-txt {{.USER_WORKING_DIR}}/settings.txt" SOLO_HOME: "{{.solo_home_override_dir}}" diff --git a/examples/performance-tuning/latitude/application.properties b/examples/performance-tuning/latitude/application.properties index 0e7e43070..03e4cc076 100644 --- a/examples/performance-tuning/latitude/application.properties +++ b/examples/performance-tuning/latitude/application.properties @@ -1,2 +1,3 @@ contracts.chainId=298 entities.unlimitedAutoAssociationsEnabled=true 
+bootstrap.throttleJsonDef.resource=genesis/throttles-dev.json diff --git a/examples/performance-tuning/latitude/init-containers-values.yaml b/examples/performance-tuning/latitude/init-containers-values.yaml index 12888072d..400159e7a 100644 --- a/examples/performance-tuning/latitude/init-containers-values.yaml +++ b/examples/performance-tuning/latitude/init-containers-values.yaml @@ -17,7 +17,7 @@ hedera: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi - name: node2 nodeId: 1 @@ -28,7 +28,7 @@ hedera: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi - name: node3 nodeId: 2 @@ -39,7 +39,7 @@ hedera: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi - name: node4 nodeId: 3 @@ -50,7 +50,7 @@ hedera: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi - name: node5 nodeId: 4 @@ -61,7 +61,7 @@ hedera: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi - name: node6 nodeId: 5 @@ -72,7 +72,7 @@ hedera: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi - name: node7 nodeId: 6 @@ -83,7 +83,7 @@ hedera: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi - name: node8 nodeId: 7 @@ -94,7 +94,7 @@ hedera: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi - name: node9 nodeId: 8 @@ -105,7 +105,7 @@ hedera: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi - name: node10 nodeId: 9 @@ -116,11 +116,9 @@ hedera: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi defaults: - haproxy: - serviceType: NodePort sidecars: recordStreamUploader: resources: @@ -146,13 +144,21 @@ defaults: limits: cpu: 150m memory: 400Mi + blockstreamUploader: + resources: + requests: + cpu: 100m + memory: 200Mi + limits: + cpu: 150m + memory: 400Mi root: resources: requests: cpu: 18 memory: 256Gi limits: - cpu: 24 + cpu: 22 memory: 256Gi extraEnv: - name: JAVA_OPTS diff --git a/examples/performance-tuning/latitude/nlg-values.yaml b/examples/performance-tuning/latitude/nlg-values.yaml index d5973a74f..34898b2cd 100644 --- a/examples/performance-tuning/latitude/nlg-values.yaml +++ b/examples/performance-tuning/latitude/nlg-values.yaml @@ -2,15 +2,29 @@ replicas: 1 resources: limits: - cpu: 32 + cpu: 20 memory: 32Gi requests: - cpu: 8 - memory: 30Gi + cpu: 20 + memory: 32Gi -nodeSelector: {} -tolerations: [] -affinity: {} +nodeSelector: + solo.hashgraph.io/role: "consensus-node" + solo.hashgraph.io/owner: "alex.kuzmin" + solo.hashgraph.io/network-id: "%NETWORK_ID%" +tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "consensus-node" + effect: "NoSchedule" + - key: "solo.hashgraph.io/owner" + operator: "Equal" + value: "alex.kuzmin" + effect: "NoSchedule" + - key: "solo.hashgraph.io/network-id" + operator: "Equal" + value: "%NETWORK_ID%" + effect: "NoSchedule" loadGenerator: java: diff --git a/examples/performance-tuning/latitude/settings.txt b/examples/performance-tuning/latitude/settings.txt index a0b6b2c20..c9f146936 100644 --- a/examples/performance-tuning/latitude/settings.txt +++ b/examples/performance-tuning/latitude/settings.txt @@ -1,7 +1,6 @@ checkSignedStateFromDisk, 1 csvFileName, MainNetStats doUpnp, false -loadKeysFromPfxFiles, 0 maxOutgoingSyncs, 1 reconnect.active, 1 reconnect.reconnectWindowSeconds, -1 diff --git a/examples/solo-gke-test/Taskfile.yml b/examples/solo-gke-test/Taskfile.yml index daeaeebd2..2bd1340f0 100644 --- a/examples/solo-gke-test/Taskfile.yml +++ b/examples/solo-gke-test/Taskfile.yml @@ -5,17 +5,25 @@ includes: flatten: true vars: 
solo_home_override_dir: "/Users/user/.solo-gke-test" + use_port_forwards: "true" env: - SOLO_NETWORK_SIZE: 5 + SOLO_NETWORK_SIZE: 4 SOLO_NAMESPACE: solo-gke-test # SOLO_CHART_VERSION: 0.39.0 # CONSENSUS_NODE_VERSION: v0.58.0 VALUES_FLAG: "--values-file {{.USER_WORKING_DIR}}/init-containers-values.yaml" SETTINGS_FLAG: "--settings-txt {{.USER_WORKING_DIR}}/settings.txt" - SOLO_HOME: "{{.solo_home_override_dir}}" + # SOLO_HOME: "{{.solo_home_override_dir}}" LOG4J2_FLAG: "--log4j2-xml {{.USER_WORKING_DIR}}/log4j2.xml" APPLICATION_PROPERTIES_FLAG: "--application-properties {{.USER_WORKING_DIR}}/application.properties" HEDERA_SERVICES_ROOT: "/Users/user/source/hedera-services" - LOCAL_BUILD_FLAG: "--local-build-path {{.HEDERA_SERVICES_ROOT}}/hedera-node/data" + # LOCAL_BUILD_FLAG: "--local-build-path {{.HEDERA_SERVICES_ROOT}}/hedera-node/data" GENESIS_THROTTLES_FLAG: "--genesis-throttles-file {{.USER_WORKING_DIR}}/throttles.json" - # SOLO_CHARTS_DIR_FLAG: "-d /Users/user/source/solo-charts/charts" + SOLO_CHARTS_DIR_FLAG: "-d /Users/user/source/solo-charts/charts" + LOAD_BALANCER_FLAG: "--load-balancer true" + ENABLE_EXPLORER_TLS_FLAG: "--enable-hedera-explorer-tls" + TLS_CLUSTER_ISSUER_TYPE_FLAG: "--tls-cluster-issuer-type acme-staging" + # CLUSTER_TLS_FLAGS: "--cert-manager --cert-manager-crds" + NETWORK_DEPLOY_EXTRA_FLAGS: "--haproxy-ips node1=,node2=,node3=,node4= --pvcs" + MIRROR_NODE_DEPLOY_EXTRA_FLAGS: "--values-file {{.USER_WORKING_DIR}}/mirror-and-explorer-values.yaml" + RELAY_NODE_DEPLOY_EXTRA_FLAGS: "--values-file {{.USER_WORKING_DIR}}/relay-values.yaml" diff --git a/examples/solo-gke-test/init-containers-values.yaml b/examples/solo-gke-test/init-containers-values.yaml index f69c67714..c1f92f887 100644 --- a/examples/solo-gke-test/init-containers-values.yaml +++ b/examples/solo-gke-test/init-containers-values.yaml @@ -1,109 +1,108 @@ # hedera node configuration hedera: - initContainers: - - name: init-hedera-node - image: busybox:stable-musl - command: ["sh", "-c", "cp -r /etc /data-saved"] - volumeMounts: - - name: hgcapp-data-saved - mountPath: /data-saved +# initContainers: +# - name: init-hedera-node +# image: busybox:stable-musl +# command: ["sh", "-c", "cp -r /etc /data-saved"] +# volumeMounts: +# - name: hgcapp-data-saved +# mountPath: /data-saved nodes: - name: node1 nodeId: 0 accountId: 0.0.3 - root: - resources: - requests: - cpu: 2 - memory: 16Gi - limits: - cpu: 4 - memory: 31Gi +# root: +# resources: +# requests: +# cpu: 2 +# memory: 16Gi +# limits: +# cpu: 4 +# memory: 31Gi - name: node2 nodeId: 1 accountId: 0.0.4 - root: - resources: - requests: - cpu: 2 - memory: 16Gi - limits: - cpu: 4 - memory: 31Gi +# root: +# resources: +# requests: +# cpu: 2 +# memory: 16Gi +# limits: +# cpu: 4 +# memory: 31Gi - name: node3 nodeId: 2 accountId: 0.0.5 - root: - resources: - requests: - cpu: 2 - memory: 16Gi - limits: - cpu: 4 - memory: 31Gi +# root: +# resources: +# requests: +# cpu: 2 +# memory: 16Gi +# limits: +# cpu: 4 +# memory: 31Gi - name: node4 nodeId: 3 accountId: 0.0.6 - root: - resources: - requests: - cpu: 2 - memory: 16Gi - limits: - cpu: 4 - memory: 31Gi - - name: node5 - nodeId: 4 - accountId: 0.0.7 - root: - resources: - requests: - cpu: 2 - memory: 16Gi - limits: - cpu: 4 - memory: 31Gi +# root: +# resources: +# requests: +# cpu: 2 +# memory: 16Gi +# limits: +# cpu: 4 +# memory: 31Gi defaults: - sidecars: - recordStreamUploader: - resources: - requests: - cpu: 100m - memory: 100Mi - limits: - cpu: 150m - memory: 200Mi - eventStreamUploader: - resources: - requests: - 
cpu: 100m - memory: 100Mi - limits: - cpu: 150m - memory: 200Mi - recordStreamSidecarUploader: - resources: - requests: - cpu: 100m - memory: 100Mi - limits: - cpu: 150m - memory: 200Mi - root: - resources: - requests: - cpu: 2 - memory: 16Gi - limits: - cpu: 4 - memory: 31Gi - extraEnv: - - name: JAVA_OPTS - value: "-XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:ZAllocationSpikeTolerance=2 -XX:ConcGCThreads=4 -XX:MaxDirectMemorySize=4g -XX:MetaspaceSize=100M -XX:+ZGenerational -Xlog:gc*:gc.log --add-opens java.base/jdk.internal.misc=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED -Dio.netty.tryReflectionSetAccessible=true" - - name: JAVA_HEAP_MIN - value: "16g" - - name: JAVA_HEAP_MAX - value: "19g" + volumeClaims: + storageClassName: standard-rwo +# sidecars: +# recordStreamUploader: +# resources: +# requests: +# cpu: 100m +# memory: 100Mi +# limits: +# cpu: 150m +# memory: 200Mi +# eventStreamUploader: +# resources: +# requests: +# cpu: 100m +# memory: 100Mi +# limits: +# cpu: 150m +# memory: 200Mi +# recordStreamSidecarUploader: +# resources: +# requests: +# cpu: 100m +# memory: 100Mi +# limits: +# cpu: 150m +# memory: 200Mi +# blockstreamUploader: +# resources: +# requests: +# cpu: 100m +# memory: 100Mi +# limits: +# cpu: 150m +# memory: 200Mi +# root: +# resources: +# requests: +# cpu: 2 +# memory: 16Gi +# limits: +# cpu: 4 +# memory: 31Gi +# extraEnv: +# - name: JAVA_OPTS +# value: "-XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:ZAllocationSpikeTolerance=2 -XX:ConcGCThreads=4 -XX:MaxDirectMemorySize=4g -XX:MetaspaceSize=100M -XX:+ZGenerational -Xlog:gc*:gc.log --add-opens java.base/jdk.internal.misc=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED -Dio.netty.tryReflectionSetAccessible=true" +# - name: JAVA_HEAP_MIN +# value: "16g" +# - name: JAVA_HEAP_MAX +# value: "19g" deployment: podAnnotations: {} podLabels: {} diff --git a/examples/solo-gke-test/mirror-and-explorer-values.yaml b/examples/solo-gke-test/mirror-and-explorer-values.yaml new file mode 100644 index 000000000..5ac9dc88d --- /dev/null +++ b/examples/solo-gke-test/mirror-and-explorer-values.yaml @@ -0,0 +1,277 @@ +# hedera mirror node explorer +ingress: + enabled: true + hosts: + - host: "explorer.solo.local" + paths: + - path: / + pathType: Prefix + tls: + - secretName: ca-secret-hedera-explorer + hosts: + - '{{ index .Values.ingress.hosts 0 "host" }}' +labels: + solo.hedera.com/testSuiteName: "" + solo.hedera.com/testName: "" + solo.hedera.com/testRunUID: "" + solo.hedera.com/testCreationTimestamp: "" + solo.hedera.com/testExpirationTimestamp: "" + solo.hedera.com/testRequester: "" + +stackgres: + coordinator: + persistentVolume: + storageClass: "standard-rwo" + worker: + persistentVolume: + storageClass: "standard-rwo" + +tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "consensus-node" + effect: "NoSchedule" + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "test-clients" + effect: "NoSchedule" + - key: "solo-scheduling.io/os" + operator: "Equal" + value: "linux" + effect: "NoSchedule" + - key: "solo-scheduling.io/role" + operator: "Equal" + value: "network" + effect: "NoSchedule" +#global: +# namespaceOverride: "{{ .Values.global.namespaceOverride }}" +# The hedera explorer UI /api url will proxy all request to mirror node +# +# Without this we would need to expose the mirror node rest API publicly and specify its public url in the network config below +proxyPass: + /api: "http://{{ .Release.Name }}-rest" + +# In the json config below we are using the url as 
"/", instead of a regular http://mainnet.url +# This makes the explorer UI make a relative request to its own url +# This in combination with proxyPass above saves us the need to expose mirror node URL publicly +config: | + [ + { + "name": "localnet", + "displayName": "LOCALNET", + "url": "/", + "ledgerID": "03" + } + ] + +# mirror node +graphql: # not needed for default use case + enabled: false +rosetta: # not needed for default use case + enabled: false +redis: + enabled: true +#global: +# namespaceOverride: "{{ tpl (.Values.global.namespaceOverride | toString) }}" + +# importer is a component of the hedera mirror node +# config for subchart hedera-mirror/importer +importer: + tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "consensus-node" + effect: "NoSchedule" + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "test-clients" + effect: "NoSchedule" + - key: "solo-scheduling.io/os" + operator: "Equal" + value: "linux" + effect: "NoSchedule" + - key: "solo-scheduling.io/role" + operator: "Equal" + value: "network" + effect: "NoSchedule" + envFrom: + - secretRef: + name: mirror-passwords + - secretRef: + name: "{{ .Release.Name }}-redis" + - secretRef: + name: uploader-mirror-secrets + # The addressbook.bin file updates will be handled by infrastructure code or solo + addressBook: "" + config: + # importer is a springboot app, its application.yaml configuration starts here + # This config is mounted at [/usr/etc/hedera/application.yaml] in the importer pod + hedera: + mirror: + importer: + network: other + downloader: + allowAnonymousAccess: false + bucketName: "solo-streams" + # for s3 configuration of mirror node look at uploader-mirror-secrets.yaml + parser: + record: + entity: + notify: + enabled: true + redis: + enabled: false + sidecar: + enabled: true + management: + endpoint: + health: + group: + readiness: + exclude: redis +grpc: + tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "consensus-node" + effect: "NoSchedule" + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "test-clients" + effect: "NoSchedule" + - key: "solo-scheduling.io/os" + operator: "Equal" + value: "linux" + effect: "NoSchedule" + - key: "solo-scheduling.io/role" + operator: "Equal" + value: "network" + effect: "NoSchedule" + config: + hedera: + mirror: + grpc: + listener: + type: NOTIFY + management: + endpoint: + health: + group: + readiness: + exclude: redis +postgresql: + postgresql: + tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "consensus-node" + effect: "NoSchedule" + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "test-clients" + effect: "NoSchedule" + - key: "solo-scheduling.io/os" + operator: "Equal" + value: "linux" + effect: "NoSchedule" + - key: "solo-scheduling.io/role" + operator: "Equal" + value: "network" + effect: "NoSchedule" + pgpool: + replicaCount: 0 +rest: + tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "consensus-node" + effect: "NoSchedule" + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "test-clients" + effect: "NoSchedule" + - key: "solo-scheduling.io/os" + operator: "Equal" + value: "linux" + effect: "NoSchedule" + - key: "solo-scheduling.io/role" + operator: "Equal" + value: "network" + effect: "NoSchedule" + monitor: + enabled: false + redis: + enabled: true +web3: + tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "consensus-node" + effect: "NoSchedule" + - key: "solo.hashgraph.io/role" 
+ operator: "Equal" + value: "test-clients" + effect: "NoSchedule" + - key: "solo-scheduling.io/os" + operator: "Equal" + value: "linux" + effect: "NoSchedule" + - key: "solo-scheduling.io/role" + operator: "Equal" + value: "network" + effect: "NoSchedule" + +# config for subchart hedera-mirror/monitor +# Sets up a Pinger service that periodically submits CRYPTO_TRANSFER transactions +# Additional configuration for node addresses, operator id and key should be handled by infrastructure code or solo +monitor: + tolerations: + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "consensus-node" + effect: "NoSchedule" + - key: "solo.hashgraph.io/role" + operator: "Equal" + value: "test-clients" + effect: "NoSchedule" + - key: "solo-scheduling.io/os" + operator: "Equal" + value: "linux" + effect: "NoSchedule" + - key: "solo-scheduling.io/role" + operator: "Equal" + value: "network" + effect: "NoSchedule" + envFrom: + - secretRef: + name: mirror-passwords + - secretRef: + name: "{{ .Release.Name }}-redis" + - secretRef: + name: uploader-mirror-secrets + config: + hedera: + mirror: + monitor: + publish: + scenarios: + pinger: + properties: + amount: 1 + maxTransactionFee: 10000 + senderAccountId: 0.0.2 + recipientAccountId: 0.0.55 + transferTypes: + - CRYPTO + receiptPercent: 1 + tps: 10 + type: CRYPTO_TRANSFER + subscribe: + grpc: + hcs: + enabled: false + rest: + transactionId: + enabled: true + samplePercent: 1 + network: OTHER diff --git a/examples/solo-gke-test/relay-values.yaml b/examples/solo-gke-test/relay-values.yaml new file mode 100644 index 000000000..0364431f7 --- /dev/null +++ b/examples/solo-gke-test/relay-values.yaml @@ -0,0 +1,25 @@ +# https://github.com/hashgraph/hedera-json-rpc-relay/blob/main/charts/hedera-json-rpc-relay/values.yaml#L125 + +ingress: + enabled: true + hosts: + - host: relay.explorer.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + +replicaCount: 1 + +resources: + requests: + cpu: 0 + memory: 0 + limits: + cpu: 500m + memory: 1000Mi + +service: + type: ClusterIP + port: 7546 + annotations: {} diff --git a/src/commands/account.ts b/src/commands/account.ts index 2bb0bb0e4..f8e39fda2 100644 --- a/src/commands/account.ts +++ b/src/commands/account.ts @@ -559,13 +559,13 @@ export class AccountCommand extends BaseCommand { desc: 'Initialize system accounts with new keys', builder: (y: any) => flags.setCommandFlags(y, flags.namespace), handler: (argv: any) => { - self.logger.debug("==== Running 'account init' ==="); - self.logger.debug(argv); + self.logger.info("==== Running 'account init' ==="); + self.logger.info(argv); self .init(argv) .then(r => { - self.logger.debug("==== Finished running 'account init' ==="); + self.logger.info("==== Finished running 'account init' ==="); if (!r) process.exit(1); }) .catch(err => { @@ -589,13 +589,13 @@ export class AccountCommand extends BaseCommand { flags.setAlias, ), handler: (argv: any) => { - self.logger.debug("==== Running 'account create' ==="); - self.logger.debug(argv); + self.logger.info("==== Running 'account create' ==="); + self.logger.info(argv); self .create(argv) .then(r => { - self.logger.debug("==== Finished running 'account create' ==="); + self.logger.info("==== Finished running 'account create' ==="); if (!r) process.exit(1); }) .catch(err => { @@ -617,13 +617,13 @@ export class AccountCommand extends BaseCommand { flags.ed25519PrivateKey, ), handler: (argv: any) => { - self.logger.debug("==== Running 'account update' ==="); - self.logger.debug(argv); + 
self.logger.info("==== Running 'account update' ==="); + self.logger.info(argv); self .update(argv) .then(r => { - self.logger.debug("==== Finished running 'account update' ==="); + self.logger.info("==== Finished running 'account update' ==="); if (!r) process.exit(1); }) .catch(err => { @@ -637,13 +637,13 @@ export class AccountCommand extends BaseCommand { desc: 'Gets the account info including the current amount of HBAR', builder: (y: any) => flags.setCommandFlags(y, flags.accountId, flags.privateKey, flags.namespace), handler: (argv: any) => { - self.logger.debug("==== Running 'account get' ==="); - self.logger.debug(argv); + self.logger.info("==== Running 'account get' ==="); + self.logger.info(argv); self .get(argv) .then(r => { - self.logger.debug("==== Finished running 'account get' ==="); + self.logger.info("==== Finished running 'account get' ==="); if (!r) process.exit(1); }) .catch(err => { diff --git a/src/commands/deployment.ts b/src/commands/deployment.ts index 74deb0f06..6187aadc0 100644 --- a/src/commands/deployment.ts +++ b/src/commands/deployment.ts @@ -140,13 +140,13 @@ export class DeploymentCommand extends BaseCommand { desc: 'Creates solo deployment', builder: (y: any) => flags.setCommandFlags(y, ...DeploymentCommand.DEPLOY_FLAGS_LIST), handler: (argv: any) => { - self.logger.debug("==== Running 'deployment create' ==="); - self.logger.debug(argv); + self.logger.info("==== Running 'deployment create' ==="); + self.logger.info(argv); self .create(argv) .then(r => { - self.logger.debug('==== Finished running `deployment create`===='); + self.logger.info('==== Finished running `deployment create`===='); if (!r) process.exit(1); }) diff --git a/src/commands/mirror_node.ts b/src/commands/mirror_node.ts index 9d5b1381c..c27789c8a 100644 --- a/src/commands/mirror_node.ts +++ b/src/commands/mirror_node.ts @@ -155,6 +155,11 @@ export class MirrorNodeCommand extends BaseCommand { valuesArg += ` --set-json 'ingress.hosts[0]={"host":"${hederaExplorerTlsHostName}","paths":[{"path":"/","pathType":"Prefix"}]}'`; } + if (!(await this.k8.isCertManagerInstalled())) { + valuesArg += ' --set cloud.certManager.enabled=true'; + valuesArg += ' --set cert-manager.installCRDs=true'; + } + if (hederaExplorerTlsLoadBalancerIp !== '') { valuesArg += ` --set haproxy-ingress.controller.service.loadBalancerIP=${hederaExplorerTlsLoadBalancerIp}`; } @@ -286,6 +291,43 @@ export class MirrorNodeCommand extends BaseCommand { ctx.config.valuesArg += ` --set "importer.addressBook=${ctx.addressBook}"`; }, }, + { + title: 'Upgrade solo-setup chart', + task: async ctx => { + const config = ctx.config; + const {chartDirectory, clusterSetupNamespace, soloChartVersion} = config; + + const chartPath = await this.prepareChartPath( + chartDirectory, + constants.SOLO_TESTING_CHART_URL, + constants.SOLO_CLUSTER_SETUP_CHART, + ); + + const soloChartSetupValuesArg = await self.prepareSoloChartSetupValuesArg(config); + + // if cert-manager isn't already installed we want to install it separate from the certificate issuers + // as they will fail to be created due to the order of the installation being dependent on the cert-manager + // being installed first + if (soloChartSetupValuesArg.includes('cloud.certManager.enabled=true')) { + await self.chartManager.upgrade( + clusterSetupNamespace, + constants.SOLO_CLUSTER_SETUP_CHART, + chartPath, + soloChartVersion, + ' --set cloud.certManager.enabled=true --set cert-manager.installCRDs=true', + ); + } + + await self.chartManager.upgrade( + clusterSetupNamespace, + 
constants.SOLO_CLUSTER_SETUP_CHART, + chartPath, + soloChartVersion, + soloChartSetupValuesArg, + ); + }, + skip: ctx => !ctx.config.enableHederaExplorerTls, + }, { title: 'Deploy mirror-node', task: async ctx => { @@ -312,36 +354,13 @@ export class MirrorNodeCommand extends BaseCommand { ); }, }, - { - title: 'Upgrade solo-setup chart', - task: async ctx => { - const config = ctx.config; - const {chartDirectory, clusterSetupNamespace, soloChartVersion} = config; - - const chartPath = await this.prepareChartPath( - chartDirectory, - constants.SOLO_TESTING_CHART_URL, - constants.SOLO_CLUSTER_SETUP_CHART, - ); - - const soloChartSetupValuesArg = await self.prepareSoloChartSetupValuesArg(config); - await self.chartManager.upgrade( - clusterSetupNamespace, - constants.SOLO_CLUSTER_SETUP_CHART, - chartPath, - soloChartVersion, - soloChartSetupValuesArg, - ); - }, - skip: ctx => !ctx.config.enableHederaExplorerTls, - }, { title: 'Deploy hedera-explorer', task: async ctx => { const config = ctx.config; - let exploreValuesArg = await self.prepareHederaExplorerValuesArg(config); - exploreValuesArg += self.prepareValuesFiles(constants.EXPLORER_VALUES_FILE); + let exploreValuesArg = self.prepareValuesFiles(constants.EXPLORER_VALUES_FILE); + exploreValuesArg += await self.prepareHederaExplorerValuesArg(config); await self.chartManager.install( config.namespace, @@ -515,9 +534,11 @@ export class MirrorNodeCommand extends BaseCommand { try { await tasks.run(); - self.logger.debug('mirror node depolyment has completed'); + self.logger.debug('mirror node deployment has completed'); } catch (e) { - throw new SoloError(`Error deploying node: ${e.message}`, e); + const message = `Error deploying node: ${e.message}`; + self.logger.error(message, e); + throw new SoloError(message, e); } finally { await lease.release(); await self.accountManager.close(); @@ -636,13 +657,13 @@ export class MirrorNodeCommand extends BaseCommand { desc: 'Deploy mirror-node and its components', builder: y => flags.setCommandFlags(y, ...MirrorNodeCommand.DEPLOY_FLAGS_LIST), handler: argv => { - self.logger.debug("==== Running 'mirror-node deploy' ==="); - self.logger.debug(argv); + self.logger.info("==== Running 'mirror-node deploy' ==="); + self.logger.info(argv); self .deploy(argv) .then(r => { - self.logger.debug('==== Finished running `mirror-node deploy`===='); + self.logger.info('==== Finished running `mirror-node deploy`===='); if (!r) process.exit(1); }) .catch(err => { @@ -656,13 +677,13 @@ export class MirrorNodeCommand extends BaseCommand { desc: 'Destroy mirror-node components and database', builder: y => flags.setCommandFlags(y, flags.chartDirectory, flags.force, flags.namespace), handler: argv => { - self.logger.debug("==== Running 'mirror-node destroy' ==="); - self.logger.debug(argv); + self.logger.info("==== Running 'mirror-node destroy' ==="); + self.logger.info(argv); self .destroy(argv) .then(r => { - self.logger.debug('==== Finished running `mirror-node destroy`===='); + self.logger.info('==== Finished running `mirror-node destroy`===='); if (!r) process.exit(1); }) .catch(err => { diff --git a/src/commands/network.ts b/src/commands/network.ts index 351448ac6..7a9b53d31 100644 --- a/src/commands/network.ts +++ b/src/commands/network.ts @@ -874,13 +874,13 @@ export class NetworkCommand extends BaseCommand { desc: "Deploy solo network. Requires the chart `solo-cluster-setup` to have been installed in the cluster. 
If it hasn't the following command can be ran: `solo cluster setup`", builder: (y: any) => flags.setCommandFlags(y, ...NetworkCommand.DEPLOY_FLAGS_LIST), handler: (argv: any) => { - self.logger.debug("==== Running 'network deploy' ==="); - self.logger.debug(argv); + self.logger.info("==== Running 'network deploy' ==="); + self.logger.info(argv); self .deploy(argv) .then(r => { - self.logger.debug('==== Finished running `network deploy`===='); + self.logger.info('==== Finished running `network deploy`===='); if (!r) process.exit(1); }) @@ -904,13 +904,13 @@ export class NetworkCommand extends BaseCommand { flags.quiet, ), handler: (argv: any) => { - self.logger.debug("==== Running 'network destroy' ==="); - self.logger.debug(argv); + self.logger.info("==== Running 'network destroy' ==="); + self.logger.info(argv); self .destroy(argv) .then(r => { - self.logger.debug('==== Finished running `network destroy`===='); + self.logger.info('==== Finished running `network destroy`===='); if (!r) process.exit(1); }) @@ -925,13 +925,13 @@ export class NetworkCommand extends BaseCommand { desc: 'Refresh solo network deployment', builder: (y: any) => flags.setCommandFlags(y, ...NetworkCommand.DEPLOY_FLAGS_LIST), handler: (argv: any) => { - self.logger.debug("==== Running 'chart upgrade' ==="); - self.logger.debug(argv); + self.logger.info("==== Running 'chart upgrade' ==="); + self.logger.info(argv); self .refresh(argv) .then(r => { - self.logger.debug('==== Finished running `chart upgrade`===='); + self.logger.info('==== Finished running `chart upgrade`===='); if (!r) process.exit(1); }) diff --git a/src/commands/relay.ts b/src/commands/relay.ts index a5c91c5f8..ede47a104 100644 --- a/src/commands/relay.ts +++ b/src/commands/relay.ts @@ -398,12 +398,13 @@ export class RelayCommand extends BaseCommand { flags.setCommandFlags(y, ...RelayCommand.DEPLOY_FLAGS_LIST); }, handler: (argv: any) => { - self.logger.debug("==== Running 'relay install' ===", {argv}); + self.logger.info("==== Running 'relay deploy' ===", {argv}); + self.logger.info(argv); self .deploy(argv) .then(r => { - self.logger.debug('==== Finished running `relay install`===='); + self.logger.info('==== Finished running `relay deploy`===='); if (!r) process.exit(1); }) @@ -419,11 +420,11 @@ export class RelayCommand extends BaseCommand { builder: (y: any) => flags.setCommandFlags(y, flags.chartDirectory, flags.namespace, flags.nodeAliasesUnparsed), handler: (argv: any) => { - self.logger.debug("==== Running 'relay uninstall' ===", {argv}); + self.logger.info("==== Running 'relay destroy' ===", {argv}); self.logger.debug(argv); self.destroy(argv).then(r => { - self.logger.debug('==== Finished running `relay uninstall`===='); + self.logger.info('==== Finished running `relay destroy`===='); if (!r) process.exit(1); }); diff --git a/src/core/account_manager.ts b/src/core/account_manager.ts index b0435bd3b..dcb1d3188 100644 --- a/src/core/account_manager.ts +++ b/src/core/account_manager.ts @@ -150,6 +150,7 @@ export class AccountManager { this._nodeClient = null; this._portForwards = []; + this.logger.debug('node client and port forwards have been closed'); } /** @@ -157,44 +158,58 @@ export class AccountManager { * @param namespace - the namespace of the network */ async loadNodeClient(namespace: string) { - this.logger.debug( - `loading node client: [!this._nodeClient=${!this._nodeClient}, this._nodeClient.isClientShutDown=${this._nodeClient?.isClientShutDown}]`, - ); - if (!this._nodeClient || this._nodeClient?.isClientShutDown) { + try { 
this.logger.debug( - `refreshing node client: [!this._nodeClient=${!this._nodeClient}, this._nodeClient.isClientShutDown=${this._nodeClient?.isClientShutDown}]`, + `loading node client: [!this._nodeClient=${!this._nodeClient}, this._nodeClient.isClientShutDown=${this._nodeClient?.isClientShutDown}]`, ); - await this.refreshNodeClient(namespace); - } else { - try { - await this._nodeClient.ping(this._nodeClient.operatorAccountId); - } catch { - this.logger.debug('node client ping failed, refreshing node client'); + if (!this._nodeClient || this._nodeClient?.isClientShutDown) { + this.logger.debug( + `refreshing node client: [!this._nodeClient=${!this._nodeClient}, this._nodeClient.isClientShutDown=${this._nodeClient?.isClientShutDown}]`, + ); await this.refreshNodeClient(namespace); + } else { + try { + await this._nodeClient.ping(this._nodeClient.operatorAccountId); + } catch { + this.logger.debug('node client ping failed, refreshing node client'); + await this.refreshNodeClient(namespace); + } } - } - return this._nodeClient!; + return this._nodeClient!; + } catch (e) { + const message = `failed to load node client: ${e.message}`; + this.logger.error(message, e); + throw new SoloError(message, e); + } } /** - * loads and initializes the Node Client + * loads and initializes the Node Client, throws a SoloError if anything fails * @param namespace - the namespace of the network * @param skipNodeAlias - the node alias to skip */ async refreshNodeClient(namespace: string, skipNodeAlias?: NodeAlias) { - await this.close(); - const treasuryAccountInfo = await this.getTreasuryAccountKeys(namespace); - const networkNodeServicesMap = await this.getNodeServiceMap(namespace); - - this._nodeClient = await this._getNodeClient( - namespace, - networkNodeServicesMap, - treasuryAccountInfo.accountId, - treasuryAccountInfo.privateKey, - skipNodeAlias, - ); - return this._nodeClient; + try { + await this.close(); + const treasuryAccountInfo = await this.getTreasuryAccountKeys(namespace); + const networkNodeServicesMap = await this.getNodeServiceMap(namespace); + + this._nodeClient = await this._getNodeClient( + namespace, + networkNodeServicesMap, + treasuryAccountInfo.accountId, + treasuryAccountInfo.privateKey, + skipNodeAlias, + ); + + this.logger.debug('node client has been refreshed'); + return this._nodeClient; + } catch (e) { + const message = `failed to refresh node client: ${e.message}`; + this.logger.error(message, e); + throw new SoloError(message, e); + } } /** @@ -239,6 +254,7 @@ export class AccountManager { localPort++; } } + this.logger.debug(`configuring node access for ${configureNodeAccessPromiseArray.length} nodes`); await Promise.allSettled(configureNodeAccessPromiseArray).then(results => { for (const result of results) { @@ -251,10 +267,11 @@ export class AccountManager { } } }); + this.logger.debug(`configured node access for ${Object.keys(nodes).length} nodes`); let formattedNetworkConnection = ''; Object.keys(nodes).forEach(key => (formattedNetworkConnection += `${key}:${nodes[key]}, `)); - this.logger.debug(`creating client from network configuration: [${formattedNetworkConnection}]`); + this.logger.info(`creating client from network configuration: [${formattedNetworkConnection}]`); // scheduleNetworkUpdate is set to false, because the ports 50212/50211 are hardcoded in JS SDK that will not work // when running locally or in a pipeline @@ -304,38 +321,45 @@ export class AccountManager { } private async configureNodeAccess(networkNodeService: NetworkNodeServices, localPort: number, 
totalNodes: number) { + this.logger.debug(`configuring node access for node: ${networkNodeService.nodeAlias}`); const obj = {} as Record; const port = +networkNodeService.haProxyGrpcPort; const accountId = AccountId.fromString(networkNodeService.accountId as string); - // if the load balancer IP is set, then we should use that and avoid the local host port forward - if (!this.shouldUseLocalHostPortForward(networkNodeService)) { - const host = networkNodeService.haProxyLoadBalancerIp as string; - const targetPort = port; - try { - obj[`${host}:${targetPort}`] = accountId; - await this.pingNetworkNode(obj, accountId); + try { + // if the load balancer IP is set, then we should use that and avoid the local host port forward + if (!this.shouldUseLocalHostPortForward(networkNodeService)) { + const host = networkNodeService.haProxyLoadBalancerIp as string; + const targetPort = port; this.logger.debug(`using load balancer IP: ${host}:${targetPort}`); - return obj; - } catch { - // if the connection fails, then we should use the local host port forward + try { + obj[`${host}:${targetPort}`] = accountId; + await this.pingNetworkNode(obj, accountId); + this.logger.debug(`successfully pinged network node: ${host}:${targetPort}`); + + return obj; + } catch { + // if the connection fails, then we should use the local host port forward + } } - } - // if the load balancer IP is not set or the test connection fails, then we should use the local host port forward - const host = '127.0.0.1'; - const targetPort = localPort; + // if the load balancer IP is not set or the test connection fails, then we should use the local host port forward + const host = '127.0.0.1'; + const targetPort = localPort; - if (this._portForwards.length < totalNodes) { - this._portForwards.push(await this.k8.portForward(networkNodeService.haProxyPodName, localPort, port)); - } + if (this._portForwards.length < totalNodes) { + this._portForwards.push(await this.k8.portForward(networkNodeService.haProxyPodName, localPort, port)); + } - this.logger.debug(`using local host port forward: ${host}:${targetPort}`); - obj[`${host}:${targetPort}`] = accountId; + this.logger.debug(`using local host port forward: ${host}:${targetPort}`); + obj[`${host}:${targetPort}`] = accountId; - await this.testNodeClientConnection(obj, accountId); + await this.testNodeClientConnection(obj, accountId); - return obj; + return obj; + } catch (e) { + throw new SoloError(`failed to configure node access: ${e.message}`, e); + } } /** @@ -352,26 +376,37 @@ export class AccountManager { let currentRetry = 0; let success = false; - while (!success && currentRetry < maxRetries) { - try { - this.logger.debug( - `attempting to ping network node: ${Object.keys(obj)[0]}, attempt: ${currentRetry}, of ${maxRetries}`, - ); - await this.pingNetworkNode(obj, accountId); - success = true; - } catch (e: Error | any) { - this.logger.error(`failed to ping network node: ${Object.keys(obj)[0]}, ${e.message}`); - currentRetry++; - await sleep(Duration.ofMillis(sleepInterval)); + try { + while (!success && currentRetry < maxRetries) { + try { + this.logger.debug( + `attempting to ping network node: ${Object.keys(obj)[0]}, attempt: ${currentRetry}, of ${maxRetries}`, + ); + await this.pingNetworkNode(obj, accountId); + success = true; + + return; + } catch (e: Error | any) { + this.logger.error(`failed to ping network node: ${Object.keys(obj)[0]}, ${e.message}`); + currentRetry++; + await sleep(Duration.ofMillis(sleepInterval)); + } } + } catch (e) { + const message = `failed testing 
node client connection for network node: ${Object.keys(obj)[0]}, after ${maxRetries} retries: ${e.message}`; + this.logger.error(message, e); + throw new SoloError(message, e); } + if (currentRetry >= maxRetries) { throw new SoloError(`failed to ping network node: ${Object.keys(obj)[0]}, after ${maxRetries} retries`); } + + return; } /** - * Gets a Map of the Hedera node services and the attributes needed + * Gets a Map of the Hedera node services and the attributes needed, throws a SoloError if anything fails * @param namespace - the namespace of the solo network deployment * @returns a map of the network node services */ @@ -380,120 +415,127 @@ export class AccountManager { const serviceBuilderMap = new Map(); - const serviceList = await this.k8.kubeClient.listNamespacedService( - namespace, - undefined, - undefined, - undefined, - undefined, - labelSelector, - ); - - let nodeId = '0'; - // retrieve the list of services and build custom objects for the attributes we need - for (const service of serviceList.body.items) { - let serviceBuilder = new NetworkNodeServicesBuilder( - service.metadata.labels['solo.hedera.com/node-name'] as NodeAlias, + try { + const serviceList = await this.k8.kubeClient.listNamespacedService( + namespace, + undefined, + undefined, + undefined, + undefined, + labelSelector, ); - if (serviceBuilderMap.has(serviceBuilder.key())) { - serviceBuilder = serviceBuilderMap.get(serviceBuilder.key()) as NetworkNodeServicesBuilder; - } else { - serviceBuilder = new NetworkNodeServicesBuilder( + let nodeId = '0'; + // retrieve the list of services and build custom objects for the attributes we need + for (const service of serviceList.body.items) { + let serviceBuilder = new NetworkNodeServicesBuilder( service.metadata.labels['solo.hedera.com/node-name'] as NodeAlias, ); - serviceBuilder.withNamespace(namespace); - } - const serviceType = service.metadata.labels['solo.hedera.com/type']; - switch (serviceType) { - // solo.hedera.com/type: envoy-proxy-svc - case 'envoy-proxy-svc': - serviceBuilder - .withEnvoyProxyName(service.metadata!.name as string) - .withEnvoyProxyClusterIp(service.spec!.clusterIP as string) - .withEnvoyProxyLoadBalancerIp( - service.status.loadBalancer.ingress ? service.status.loadBalancer.ingress[0].ip : undefined, - ) - .withEnvoyProxyGrpcWebPort(service.spec!.ports!.filter(port => port.name === 'hedera-grpc-web')[0].port); - break; - // solo.hedera.com/type: haproxy-svc - case 'haproxy-svc': - serviceBuilder - .withHaProxyAppSelector(service.spec!.selector!.app) - .withHaProxyName(service.metadata!.name as string) - .withHaProxyClusterIp(service.spec!.clusterIP as string) - // @ts-ignore - .withHaProxyLoadBalancerIp( - service.status.loadBalancer.ingress ? 
service.status.loadBalancer.ingress[0].ip : undefined, - ) - .withHaProxyGrpcPort(service.spec!.ports!.filter(port => port.name === 'non-tls-grpc-client-port')[0].port) - .withHaProxyGrpcsPort(service.spec!.ports!.filter(port => port.name === 'tls-grpc-client-port')[0].port); - break; - // solo.hedera.com/type: network-node-svc - case 'network-node-svc': - if ( - service.metadata!.labels!['solo.hedera.com/node-id'] !== '' && - isNumeric(service.metadata!.labels!['solo.hedera.com/node-id']) - ) { - nodeId = service.metadata!.labels!['solo.hedera.com/node-id']; - } else { - nodeId = `${Templates.nodeIdFromNodeAlias(service.metadata.labels['solo.hedera.com/node-name'] as NodeAlias)}`; - this.logger.warn( - `received an incorrect node id of ${service.metadata!.labels!['solo.hedera.com/node-id']} for ` + - `${service.metadata.labels['solo.hedera.com/node-name']}`, - ); - } + if (serviceBuilderMap.has(serviceBuilder.key())) { + serviceBuilder = serviceBuilderMap.get(serviceBuilder.key()) as NetworkNodeServicesBuilder; + } else { + serviceBuilder = new NetworkNodeServicesBuilder( + service.metadata.labels['solo.hedera.com/node-name'] as NodeAlias, + ); + serviceBuilder.withNamespace(namespace); + } - serviceBuilder - .withNodeId(nodeId) - .withAccountId(service.metadata!.labels!['solo.hedera.com/account-id']) - .withNodeServiceName(service.metadata!.name as string) - .withNodeServiceClusterIp(service.spec!.clusterIP as string) - .withNodeServiceLoadBalancerIp( - service.status.loadBalancer.ingress ? service.status.loadBalancer.ingress[0].ip : undefined, - ) - .withNodeServiceGossipPort(service.spec!.ports!.filter(port => port.name === 'gossip')[0].port) - .withNodeServiceGrpcPort(service.spec!.ports!.filter(port => port.name === 'grpc-non-tls')[0].port) - .withNodeServiceGrpcsPort(service.spec!.ports!.filter(port => port.name === 'grpc-tls')[0].port); - break; + const serviceType = service.metadata.labels['solo.hedera.com/type']; + switch (serviceType) { + // solo.hedera.com/type: envoy-proxy-svc + case 'envoy-proxy-svc': + serviceBuilder + .withEnvoyProxyName(service.metadata!.name as string) + .withEnvoyProxyClusterIp(service.spec!.clusterIP as string) + .withEnvoyProxyLoadBalancerIp( + service.status.loadBalancer.ingress ? service.status.loadBalancer.ingress[0].ip : undefined, + ) + .withEnvoyProxyGrpcWebPort(service.spec!.ports!.filter(port => port.name === 'hedera-grpc-web')[0].port); + break; + // solo.hedera.com/type: haproxy-svc + case 'haproxy-svc': + serviceBuilder + .withHaProxyAppSelector(service.spec!.selector!.app) + .withHaProxyName(service.metadata!.name as string) + .withHaProxyClusterIp(service.spec!.clusterIP as string) + // @ts-ignore + .withHaProxyLoadBalancerIp( + service.status.loadBalancer.ingress ? 
service.status.loadBalancer.ingress[0].ip : undefined, + ) + .withHaProxyGrpcPort( + service.spec!.ports!.filter(port => port.name === 'non-tls-grpc-client-port')[0].port, + ) + .withHaProxyGrpcsPort(service.spec!.ports!.filter(port => port.name === 'tls-grpc-client-port')[0].port); + break; + // solo.hedera.com/type: network-node-svc + case 'network-node-svc': + if ( + service.metadata!.labels!['solo.hedera.com/node-id'] !== '' && + isNumeric(service.metadata!.labels!['solo.hedera.com/node-id']) + ) { + nodeId = service.metadata!.labels!['solo.hedera.com/node-id']; + } else { + nodeId = `${Templates.nodeIdFromNodeAlias(service.metadata.labels['solo.hedera.com/node-name'] as NodeAlias)}`; + this.logger.warn( + `received an incorrect node id of ${service.metadata!.labels!['solo.hedera.com/node-id']} for ` + + `${service.metadata.labels['solo.hedera.com/node-name']}`, + ); + } + + serviceBuilder + .withNodeId(nodeId) + .withAccountId(service.metadata!.labels!['solo.hedera.com/account-id']) + .withNodeServiceName(service.metadata!.name as string) + .withNodeServiceClusterIp(service.spec!.clusterIP as string) + .withNodeServiceLoadBalancerIp( + service.status.loadBalancer.ingress ? service.status.loadBalancer.ingress[0].ip : undefined, + ) + .withNodeServiceGossipPort(service.spec!.ports!.filter(port => port.name === 'gossip')[0].port) + .withNodeServiceGrpcPort(service.spec!.ports!.filter(port => port.name === 'grpc-non-tls')[0].port) + .withNodeServiceGrpcsPort(service.spec!.ports!.filter(port => port.name === 'grpc-tls')[0].port); + break; + } + serviceBuilderMap.set(serviceBuilder.key(), serviceBuilder); } - serviceBuilderMap.set(serviceBuilder.key(), serviceBuilder); - } - // get the pod name for the service to use with portForward if needed - for (const serviceBuilder of serviceBuilderMap.values()) { - const podList = await this.k8.kubeClient.listNamespacedPod( - namespace, - undefined, - undefined, - undefined, - undefined, - `app=${serviceBuilder.haProxyAppSelector}`, - ); - serviceBuilder.withHaProxyPodName(podList.body!.items[0].metadata.name as PodName); - } + // get the pod name for the service to use with portForward if needed + for (const serviceBuilder of serviceBuilderMap.values()) { + const podList = await this.k8.kubeClient.listNamespacedPod( + namespace, + undefined, + undefined, + undefined, + undefined, + `app=${serviceBuilder.haProxyAppSelector}`, + ); + serviceBuilder.withHaProxyPodName(podList.body!.items[0].metadata.name as PodName); + } - // get the pod name of the network node - const pods = await this.k8.getPodsByLabel(['solo.hedera.com/type=network-node']); - for (const pod of pods) { - // eslint-disable-next-line no-prototype-builtins - if (!pod.metadata?.labels?.hasOwnProperty('solo.hedera.com/node-name')) { - // TODO Review why this fixes issue - continue; + // get the pod name of the network node + const pods = await this.k8.getPodsByLabel(['solo.hedera.com/type=network-node']); + for (const pod of pods) { + // eslint-disable-next-line no-prototype-builtins + if (!pod.metadata?.labels?.hasOwnProperty('solo.hedera.com/node-name')) { + // TODO Review why this fixes issue + continue; + } + const podName = pod.metadata!.name; + const nodeAlias = pod.metadata!.labels!['solo.hedera.com/node-name'] as NodeAlias; + const serviceBuilder = serviceBuilderMap.get(nodeAlias) as NetworkNodeServicesBuilder; + serviceBuilder.withNodePodName(podName as PodName); } - const podName = pod.metadata!.name; - const nodeAlias = pod.metadata!.labels!['solo.hedera.com/node-name'] as 
NodeAlias; - const serviceBuilder = serviceBuilderMap.get(nodeAlias) as NetworkNodeServicesBuilder; - serviceBuilder.withNodePodName(podName as PodName); - } - const serviceMap = new Map(); - for (const networkNodeServicesBuilder of serviceBuilderMap.values()) { - serviceMap.set(networkNodeServicesBuilder.key(), networkNodeServicesBuilder.build()); - } + const serviceMap = new Map(); + for (const networkNodeServicesBuilder of serviceBuilderMap.values()) { + serviceMap.set(networkNodeServicesBuilder.key(), networkNodeServicesBuilder.build()); + } - return serviceMap; + this.logger.debug('node services have been loaded'); + return serviceMap; + } catch (e) { + throw new SoloError(`failed to get node services: ${e.message}`, e); + } } /** @@ -872,20 +914,37 @@ export class AccountManager { } /** - * Pings the network node with a grpc call to ensure it is working + * Pings the network node with a grpc call to ensure it is working, throws a SoloError if the ping fails * @param obj - the network node object where the key is the network endpoint and the value is the account id * @param accountId - the account id to ping * @throws {@link SoloError} if the ping fails * @private */ private async pingNetworkNode(obj: Record, accountId: AccountId) { - const nodeClient = Client.fromConfig({network: obj, scheduleNetworkUpdate: false}); + let nodeClient: Client; try { - await nodeClient.ping(accountId); - } catch (e: Error | any) { + nodeClient = Client.fromConfig({network: obj, scheduleNetworkUpdate: false}); + this.logger.debug(`pinging network node: ${Object.keys(obj)[0]}`); + try { + await nodeClient.ping(accountId); + this.logger.debug(`ping successful for network node: ${Object.keys(obj)[0]}`); + } catch (e) { + const message = `failed to ping network node: ${Object.keys(obj)[0]} ${e.message}`; + this.logger.error(message, e); + throw new SoloError(message, e); + } + + return; + } catch (e) { throw new SoloError(`failed to ping network node: ${Object.keys(obj)[0]} ${e.message}`, e); } finally { - nodeClient.close(); + if (nodeClient) { + try { + nodeClient.close(); + } catch { + // continue if nodeClient.close() fails + } + } } } } diff --git a/src/core/helm.ts b/src/core/helm.ts index 25ed89962..889b966e7 100644 --- a/src/core/helm.ts +++ b/src/core/helm.ts @@ -49,7 +49,7 @@ export class Helm extends ShellRunner { * @returns console output as an array of strings */ install(...args: string[]) { - return this.run(this.prepareCommand('install', ...args)); + return this.run(this.prepareCommand('install', ...args), true); } /** @@ -58,7 +58,7 @@ export class Helm extends ShellRunner { * @returns console output as an array of strings */ uninstall(...args: string[]) { - return this.run(this.prepareCommand('uninstall', ...args)); + return this.run(this.prepareCommand('uninstall', ...args), true); } /** @@ -67,7 +67,7 @@ export class Helm extends ShellRunner { * @returns console output as an array of strings */ upgrade(...args: string[]) { - return this.run(this.prepareCommand('upgrade', ...args)); + return this.run(this.prepareCommand('upgrade', ...args), true); } /** diff --git a/src/core/k8.ts b/src/core/k8.ts index d2ee3694a..ec58f62ba 100644 --- a/src/core/k8.ts +++ b/src/core/k8.ts @@ -481,7 +481,7 @@ export class K8 { localContext.errorMessage = localContext.errorMessage ? 
`${localContext.errorMessage}:${errorMessage}` : errorMessage; - this.logger.error(errorMessage); + this.logger.warn(errorMessage); return localContext.reject(new SoloError(localContext.errorMessage)); } @@ -618,7 +618,6 @@ export class K8 { ({status}) => self.handleCallback(status, localContext, messagePrefix), ) .then(conn => { - self.logger.info(`${messagePrefix} connection established`); localContext.connection = conn; self.registerConnectionOnError(localContext, messagePrefix, conn); @@ -724,7 +723,6 @@ export class K8 { self.registerOutputFileStreamOnDrain(localContext, messagePrefix, outputPassthroughStream, outputFileStream); - self.logger.debug(`${messagePrefix} running...`); execInstance .exec( namespace, @@ -744,7 +742,6 @@ export class K8 { }, ) .then(conn => { - self.logger.debug(`${messagePrefix} connection established`); localContext.connection = conn; conn.on('error', e => { @@ -839,7 +836,6 @@ export class K8 { self.registerOutputFileStreamOnDrain(localContext, messagePrefix, outputPassthroughStream, outputFileStream); - self.logger.debug(`${messagePrefix} running...`); execInstance .exec( namespace, @@ -853,7 +849,6 @@ export class K8 { ({status}) => self.handleCallback(status, localContext, messagePrefix), ) .then(conn => { - self.logger.debug(`${messagePrefix} connection established`); localContext.connection = conn; self.registerConnectionOnError(localContext, messagePrefix, conn); @@ -891,17 +886,24 @@ export class K8 { * -> localhost:localPort -> port-forward-tunnel -> kubernetes-pod:targetPort */ async portForward(podName: PodName, localPort: number, podPort: number) { - const ns = this._getNamespace(); - const forwarder = new k8s.PortForward(this.kubeConfig, false); - const server = (await net.createServer(socket => { - forwarder.portForward(ns, podName, [podPort], socket, null, socket, 3); - })) as ExtendedNetServer; - - // add info for logging - server.info = `${podName}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}`; - server.localPort = localPort; - this.logger.debug(`Starting port-forwarder [${server.info}]`); - return server.listen(localPort, constants.LOCAL_HOST); + try { + this.logger.debug(`Creating port-forwarder for ${podName}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}`); + const ns = this._getNamespace(); + const forwarder = new k8s.PortForward(this.kubeConfig, false); + const server = (await net.createServer(socket => { + forwarder.portForward(ns, podName, [podPort], socket, null, socket, 3); + })) as ExtendedNetServer; + + // add info for logging + server.info = `${podName}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}`; + server.localPort = localPort; + this.logger.debug(`Starting port-forwarder [${server.info}]`); + return server.listen(localPort, constants.LOCAL_HOST); + } catch (e) { + const message = `failed to start port-forwarder [${podName}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}]: ${e.message}`; + this.logger.error(message, e); + throw new SoloError(message, e); + } } /** @@ -1012,7 +1014,7 @@ export class K8 { const ns = this._getNamespace(); const labelSelector = labels.join(','); - this.logger.debug(`WaitForPod [labelSelector: ${labelSelector}, namespace:${ns}, maxAttempts: ${maxAttempts}]`); + this.logger.info(`WaitForPod [labelSelector: ${labelSelector}, namespace:${ns}, maxAttempts: ${maxAttempts}]`); return new Promise((resolve, reject) => { let attempts = 0; @@ -1113,7 +1115,7 @@ export class K8 { const condType = entry[0]; const condStatus = entry[1]; if (cond.type === condType && cond.status === 
condStatus) { - this.logger.debug( + this.logger.info( `Pod condition met for ${pod.metadata?.name} [type: ${cond.type} status: ${cond.status}]`, ); return true; diff --git a/src/core/logging.ts b/src/core/logging.ts index eb4358278..86ed86a66 100644 --- a/src/core/logging.ts +++ b/src/core/logging.ts @@ -95,6 +95,7 @@ export class SoloLogger { showUser(msg: any, ...args: any) { console.log(util.format(msg, ...args)); + this.info(util.format(msg, ...args)); } showUserError(err: Error | any) { diff --git a/src/core/platform_installer.ts b/src/core/platform_installer.ts index 6d20cc2b6..410011bae 100644 --- a/src/core/platform_installer.ts +++ b/src/core/platform_installer.ts @@ -32,6 +32,7 @@ import {Duration} from './time/duration.js'; import {sleep} from './helpers.js'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from './container_helper.js'; +import {HEDERA_HGCAPP_DIR} from './constants.js'; /** PlatformInstaller install platform code in the root-container of a network pod */ @injectable() @@ -265,7 +266,7 @@ export class PlatformInstaller { if (!podName) throw new MissingArgumentError('podName is required'); try { - const destPaths = [constants.HEDERA_HAPI_PATH]; + const destPaths = [constants.HEDERA_HAPI_PATH, constants.HEDERA_HGCAPP_DIR]; for (const destPath of destPaths) { await self.setPathPermission(podName, destPath); diff --git a/src/core/shell_runner.ts b/src/core/shell_runner.ts index 49c9c12d4..a8187a7c8 100644 --- a/src/core/shell_runner.ts +++ b/src/core/shell_runner.ts @@ -30,7 +30,7 @@ export class ShellRunner { run(cmd: string, verbose = false) { const self = this; const callStack = new Error().stack; // capture the callstack to be included in error - self.logger.debug(`Executing command: '${cmd}'`); + self.logger.info(`Executing command: '${cmd}'`); return new Promise((resolve, reject) => { const child = spawn(cmd, { diff --git a/src/core/yargs_command.ts b/src/core/yargs_command.ts index 9fbfca01e..aee8d083e 100644 --- a/src/core/yargs_command.ts +++ b/src/core/yargs_command.ts @@ -63,11 +63,11 @@ export class YargsCommand { desc: description, builder: (y: any) => commandFlags.setCommandFlags(y, ...allFlags), handler: (argv: any) => { - commandDef.logger.debug(`==== Running '${commandNamespace} ${command}' ===`); - commandDef.logger.debug(argv); + commandDef.logger.info(`==== Running '${commandNamespace} ${command}' ===`); + commandDef.logger.info(argv); commandDef.handlers[handler](argv) .then((r: any) => { - commandDef.logger.debug(`==== Finished running '${commandNamespace} ${command}' ====`); + commandDef.logger.info(`==== Finished running '${commandNamespace} ${command}' ====`); if (!r) process.exit(1); }) .catch((err: Error | any) => { diff --git a/test/unit/core/shell_runner.test.ts b/test/unit/core/shell_runner.test.ts index 98cc4a784..2f1554036 100644 --- a/test/unit/core/shell_runner.test.ts +++ b/test/unit/core/shell_runner.test.ts @@ -27,13 +27,18 @@ import {Readable} from 'stream'; import {Duration} from '../../../src/core/time/duration.js'; describe('ShellRunner', () => { - let shellRunner: ShellRunner, loggerStub: SinonStub, childProcessSpy: SinonSpy, readableSpy: SinonSpy; + let shellRunner: ShellRunner, + loggerDebugStub: SinonStub, + loggerInfoStub: SinonStub, + childProcessSpy: SinonSpy, + readableSpy: SinonSpy; beforeEach(() => { shellRunner = new ShellRunner(); // Spy on methods - loggerStub = sinon.stub(SoloLogger.prototype, 'debug'); + loggerDebugStub = sinon.stub(SoloLogger.prototype, 'debug'); + loggerInfoStub = 
sinon.stub(SoloLogger.prototype, 'info'); childProcessSpy = sinon.spy(ChildProcess.prototype, 'on'); readableSpy = sinon.spy(Readable.prototype, 'on'); }); @@ -43,10 +48,11 @@ describe('ShellRunner', () => { it('should run command', async () => { await shellRunner.run('ls -l'); - loggerStub.withArgs("Executing command: 'ls -l'").onFirstCall(); - loggerStub.withArgs("Finished executing: 'ls -l'", sinon.match.any).onSecondCall(); + loggerInfoStub.withArgs("Executing command: 'ls -l'").onFirstCall(); + loggerDebugStub.withArgs("Finished executing: 'ls -l'", sinon.match.any).onFirstCall(); - expect(loggerStub).to.have.been.calledTwice; + expect(loggerDebugStub).to.have.been.calledOnce; + expect(loggerInfoStub).to.have.been.calledOnce; expect(readableSpy).to.have.been.calledWith('data', sinon.match.any); expect(childProcessSpy).to.have.been.calledWith('exit', sinon.match.any); diff --git a/version.ts b/version.ts index 345f7cdb4..8a266459b 100644 --- a/version.ts +++ b/version.ts @@ -20,7 +20,7 @@ */ export const HELM_VERSION = 'v3.14.2'; -export const SOLO_CHART_VERSION = '0.42.2'; +export const SOLO_CHART_VERSION = '0.42.4'; export const HEDERA_PLATFORM_VERSION = 'v0.58.1'; export const MIRROR_NODE_VERSION = '0.118.1'; export const HEDERA_EXPLORER_VERSION = '0.2.1';
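
Editorial note, not part of the patch: the AccountManager, K8, and Helm hunks above all apply the same error-handling pattern — wrap the async operation in try/catch, log the failure, and rethrow it as a SoloError that carries the original error as its cause, so the context survives the rethrow. Below is a minimal, illustrative sketch of that pattern. It assumes a SoloError class with a (message, cause) constructor and a logger exposing error(message, err), shaped like the ones used throughout the diff; the helper name wrapWithSoloError is hypothetical and does not exist in the codebase.

// Minimal sketch of the wrap/log/rethrow pattern used in pingNetworkNode,
// getNodeServices, and K8.portForward in the hunks above. Illustrative only.
class SoloError extends Error {
  readonly cause?: Error;
  constructor(message: string, cause?: Error) {
    super(message);
    this.name = 'SoloError';
    this.cause = cause; // keep the original error for downstream diagnostics
  }
}

interface MinimalLogger {
  error(message: string, err?: Error): void;
}

// Hypothetical helper: run an async operation, log any failure, and rethrow
// it as a SoloError with the original error preserved as the cause.
async function wrapWithSoloError<T>(
  description: string,
  logger: MinimalLogger,
  fn: () => Promise<T>,
): Promise<T> {
  try {
    return await fn();
  } catch (e: Error | any) {
    const message = `${description}: ${e.message}`;
    logger.error(message, e);
    throw new SoloError(message, e);
  }
}

A call site following this sketch would look like: await wrapWithSoloError('failed to ping network node', this.logger, () => nodeClient.ping(accountId));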