diff --git a/.github/workflows/agent-docker-image-pr.yml b/.github/workflows/agent-docker-image-pr.yml index 604395ef..2f3c6a87 100644 --- a/.github/workflows/agent-docker-image-pr.yml +++ b/.github/workflows/agent-docker-image-pr.yml @@ -21,11 +21,11 @@ jobs: uses: actions/checkout@v3 with: fetch-depth: 0 - + - name: Set up QEMU uses: docker/setup-qemu-action@v2 - + - uses: docker/setup-buildx-action@v1 name: Set up Docker Buildx @@ -36,7 +36,7 @@ jobs: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - + - name: Build and push on PR uses: docker/build-push-action@v4 @@ -49,4 +49,4 @@ jobs: labels: ${{ steps.meta.outputs.labels }} build-args: | "GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}" - + diff --git a/.github/workflows/agent-docker-image.yml b/.github/workflows/agent-docker-image.yml index 8641180e..6173b07e 100644 --- a/.github/workflows/agent-docker-image.yml +++ b/.github/workflows/agent-docker-image.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout GitHub Action uses: actions/checkout@v3 - + - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v2 @@ -60,7 +60,7 @@ jobs: ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }}, ${{ env.REGISTRY }}/${{ env.IMAGE }}:latest labels: ${{ steps.metadata.outputs.labels }} - + push: true - name: Install cosign @@ -71,12 +71,12 @@ jobs: cosign sign -y ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }} env: COSIGN_EXPERIMENTAL: 1 - + - name: Verify the pushed tags run: cosign verify ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/agent-docker-image.yml@refs/heads/main --certificate-oidc-issuer https://token.actions.githubusercontent.com env: COSIGN_EXPERIMENTAL: 1 - + - name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph uses: aquasecurity/trivy-action@master with: diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 08927933..acbc3392 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -48,11 +48,11 @@ jobs: # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. - + # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs # queries: security-extended,security-and-quality - + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild @@ -61,7 +61,7 @@ jobs: # ℹī¸ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - # If the Autobuild fails above, remove it and uncomment the following three lines. + # If the Autobuild fails above, remove it and uncomment the following three lines. # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
# - run: | diff --git a/.github/workflows/config-worker-docker-image-pr.yml b/.github/workflows/config-worker-docker-image-pr.yml index 3cc855e6..54513755 100644 --- a/.github/workflows/config-worker-docker-image-pr.yml +++ b/.github/workflows/config-worker-docker-image-pr.yml @@ -51,4 +51,4 @@ jobs: "GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}" - + diff --git a/.github/workflows/config-worker-docker-image.yml b/.github/workflows/config-worker-docker-image.yml index eabe2d7c..d1ce0dd6 100644 --- a/.github/workflows/config-worker-docker-image.yml +++ b/.github/workflows/config-worker-docker-image.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout GitHub Action uses: actions/checkout@v3 - + - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v2 @@ -59,7 +59,7 @@ jobs: ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }}, ${{ env.REGISTRY }}/${{ env.IMAGE }}:latest labels: ${{ steps.metadata.outputs.labels }} - + push: true - name: Install cosign @@ -70,12 +70,12 @@ jobs: cosign sign -y ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }} env: COSIGN_EXPERIMENTAL: 1 - + - name: Verify the pushed tags run: cosign verify ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/config-worker-docker-image.yml@refs/heads/main --certificate-oidc-issuer https://token.actions.githubusercontent.com env: COSIGN_EXPERIMENTAL: 1 - + - name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph uses: aquasecurity/trivy-action@master with: diff --git a/.github/workflows/deployment-worker-docker-image-pr.yml b/.github/workflows/deployment-worker-docker-image-pr.yml index ad5086f2..9af227b0 100644 --- a/.github/workflows/deployment-worker-docker-image-pr.yml +++ b/.github/workflows/deployment-worker-docker-image-pr.yml @@ -36,7 +36,7 @@ jobs: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - + - name: Build and push on PR uses: docker/build-push-action@v4 @@ -51,4 +51,4 @@ jobs: "GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}" - + diff --git a/.github/workflows/deployment-worker-docker-image.yml b/.github/workflows/deployment-worker-docker-image.yml index b322ad0a..64b37fc1 100644 --- a/.github/workflows/deployment-worker-docker-image.yml +++ b/.github/workflows/deployment-worker-docker-image.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout GitHub Action uses: actions/checkout@v3 - + - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v2 @@ -59,7 +59,7 @@ jobs: ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }}, ${{ env.REGISTRY }}/${{ env.IMAGE }}:latest labels: ${{ steps.metadata.outputs.labels }} - + push: true - name: Install cosign @@ -70,12 +70,12 @@ jobs: cosign sign -y ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }} env: COSIGN_EXPERIMENTAL: 1 - + - name: Verify the pushed tags run: cosign verify ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/deployment-worker-docker-image.yml@refs/heads/main --certificate-oidc-issuer https://token.actions.githubusercontent.com env: COSIGN_EXPERIMENTAL: 1 - + - name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph uses: aquasecurity/trivy-action@master with: diff --git a/.github/workflows/server-docker-image-pr.yml b/.github/workflows/server-docker-image-pr.yml index 905d4e82..89c7481d 100644 --- a/.github/workflows/server-docker-image-pr.yml 
+++ b/.github/workflows/server-docker-image-pr.yml @@ -4,7 +4,7 @@ on: pull_request: branches: - 'main' - + env: # Use docker.io for Docker Hub if empty REGISTRY: ghcr.io @@ -21,11 +21,11 @@ jobs: uses: actions/checkout@v3 with: fetch-depth: 0 - + - name: Set up QEMU uses: docker/setup-qemu-action@v2 - + - uses: docker/setup-buildx-action@v1 name: Set up Docker Buildx @@ -36,7 +36,7 @@ jobs: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - + - name: Build and push on PR uses: docker/build-push-action@v4 @@ -51,4 +51,4 @@ jobs: "GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}" - + diff --git a/.github/workflows/server-docker-image.yml b/.github/workflows/server-docker-image.yml index 60cf0e79..b910b965 100644 --- a/.github/workflows/server-docker-image.yml +++ b/.github/workflows/server-docker-image.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Checkout GitHub Action uses: actions/checkout@v3 - + - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v2 @@ -59,7 +59,7 @@ jobs: ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }}, ${{ env.REGISTRY }}/${{ env.IMAGE }}:latest labels: ${{ steps.metadata.outputs.labels }} - + push: true - name: Install cosign @@ -70,12 +70,12 @@ jobs: cosign sign -y ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }} env: COSIGN_EXPERIMENTAL: 1 - + - name: Verify the pushed tags run: cosign verify ${{ env.REGISTRY }}/${{ env.IMAGE }}:${{ github.run_id }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/server-docker-image.yml@refs/heads/main --certificate-oidc-issuer https://token.actions.githubusercontent.com env: COSIGN_EXPERIMENTAL: 1 - + - name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph uses: aquasecurity/trivy-action@master with: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..95aa9da9 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,123 @@ +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + # Checks for files that contain merge conflict strings. + - id: check-merge-conflict + # Detects AWS credentials from the AWS CLI credentials file. + - id: detect-aws-credentials + args: [--allow-missing-credentials] + # Detects the presence of private keys. + - id: detect-private-key + # Trims trailing whitespace in codebase. + - id: trailing-whitespace + # Prevents direct commits to the main branch + - id: no-commit-to-branch + args: [--branch,main] + + +# Checks if the commit is signed off using `--signoff/-s` +- repo: https://github.com/KAUTH/pre-commit-git-checks + rev: v0.0.1 # Use the SHA or tag you want to point to + hooks: + - id: git-signoff + stages: [commit-msg] + +# Checks your git commit messages for style.
+- repo: https://github.com/jorisroovers/gitlint + rev: v0.19.1 + hooks: + - id: gitlint + name: Scan Commit messages + +# Detects hardcoded secrets using Gitleaks +- repo: https://github.com/zricethezav/gitleaks + rev: v8.18.1 + hooks: + - id: gitleaks + name: Detect hardcoded secrets + description: Detect hardcoded secrets using Gitleaks + entry: gitleaks protect --verbose --redact --staged + language: golang + pass_filenames: false + +- repo: https://github.com/Bahjat/pre-commit-golang + rev: v1.0.3 + hooks: + # Formats Go code + # - id: gofumpt # requires gofumpt to be installed from github.com/mvdan/gofumpt + # name: Go formatter + # description: Runs a strict Go formatter + - id: go-fmt-import + name: Go formatter + description: Formats Go code and imports + # Runs unit tests + - id: go-unit-tests + name: Run Unit tests + description: Runs all the unit tests in the repo + # Runs static analysis of the Go code + - id: go-static-check + name: Go Static Check + description: Finds bugs and performance issues + +# Local hooks + +- repo: https://github.com/intelops/gitrepos-templates-policies + rev: v0.0.1 + hooks: + - id: check-devcontainer + name: Check devcontainer + description: Checks for the existence of .devcontainer.json in the project + - id: check-gitsign + name: Check gitsign + description: Check if the last commit is signed with Sigstore gitsign + # - id: check-multistage-dockerfile + # name: Check multi-stage Dockerfile + # description: Checks for the existence of a Dockerfile in the project and verifies that it is a multi-stage Dockerfile + +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: check-yaml + name: Verify YAML syntax + args: + - --allow-multiple-documents +- repo: https://github.com/hadolint/hadolint + rev: v2.12.0 + hooks: + - id: hadolint + # Rules you want to ignore may be found here: https://github.com/hadolint/hadolint?tab=readme-ov-file#rules + name: Dockerfile linter + description: Dockerfile linter following best practices + args: [--ignore, DL3051] + +- repo: local + hooks: + - name: Check Dockerfile + id: check-dockerfile-sh + entry: bash + args: + - -c + - | + check_dockerfile() { + if [[ $1 == *"Dockerfile"* ]]; then + base_image=$(grep '^FROM' "$1" | awk '{print $2}') + if [[ $base_image != golang:* ]]; then + echo "Error: Base image in $1 is not from cgr.dev/chainguard" + return 1 + fi + fi + return 0 + } + + export -f check_dockerfile + + if find . -type f -exec bash -c 'check_dockerfile "$0"' {} \; | grep -q 'Error'; then + echo "Commit failed due to non-compliant Dockerfile(s)." + exit 1 + fi + + echo "All Dockerfiles are compliant."
+ exit 0 + language: system + pass_filenames: false \ No newline at end of file diff --git a/Makefile b/Makefile index d5d25ca5..69a79de2 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,7 @@ gen-protoc: cd proto && protoc --go_out=../server/pkg/pb/agentpb --go_opt=paths=source_relative \ --go-grpc_out=../server/pkg/pb/agentpb --go-grpc_opt=paths=source_relative \ ./agent.proto - + cd proto && protoc --go_out=../capten/common-pkg/agentpb --go_opt=paths=source_relative \ --go-grpc_out=../capten/common-pkg/agentpb --go-grpc_opt=paths=source_relative \ ./agent.proto diff --git a/Notes.md b/Notes.md index 8d2fd749..86ea6ab5 100644 --- a/Notes.md +++ b/Notes.md @@ -97,7 +97,7 @@ git clone https://github.com/temporalio/helm-charts.git helm dependencies update # Install temporal -helm install --set server.replicaCount=1 --set cassandra.config.cluster_size=1 --set prometheus.enabled=false --set grafana.enabled=false --set elasticsearch.enabled=false temporal . --timeout 8m +helm install --set server.replicaCount=1 --set cassandra.config.cluster_size=1 --set prometheus.enabled=false --set grafana.enabled=false --set elasticsearch.enabled=false temporal . --timeout 8m ``` ### Delete temporal @@ -150,7 +150,7 @@ kubectl -n default get secret argocd-initial-admin-secret -o jsonpath="{.data.pa ### Add below env parameters to kad-deployment-worker deployment according to argocd deployment ``` -kubectl edit deployments.apps kad-deployment-worker +kubectl edit deployments.apps kad-deployment-worker - name: ARGOCD_SERVICE_URL value: {{ .Values.argocd.serviceURL }} @@ -161,13 +161,13 @@ kubectl edit deployments.apps kad-deployment-worker ### Create helloworld app ``` -curl -X POST http://127.0.0.1:9091/deploy -H "content-type: application/json" -d @tests/hello-world-argocd-plugin.json +curl -X POST http://127.0.0.1:9091/deploy -H "content-type: application/json" -d @tests/hello-world-argocd-plugin.json ``` ### Delete helloworld app ``` -curl -X POST http://127.0.0.1:9091/deploy -H "content-type: application/json" -d @tests/hello-world-argocd-plugin-delete.json +curl -X POST http://127.0.0.1:9091/deploy -H "content-type: application/json" -d @tests/hello-world-argocd-plugin-delete.json ``` diff --git a/README.md b/README.md index f57b810b..2b123b25 100644 --- a/README.md +++ b/README.md @@ -6,3 +6,6 @@ Universal **Integrator** - Framework to easily integrate with other tools/platforms to use their APIs, gRPC, DB, Workflows, etc. and also to develop workflows around them. This framework development is based on Temporal and NATS. > name -Kad is Haitian Creole word, translates to framework. + + +############################ diff --git a/capten/common-pkg/cassandra/db-migrate/tests/config/cassandra.yaml b/capten/common-pkg/cassandra/db-migrate/tests/config/cassandra.yaml index d6801a2a..21a20018 100644 --- a/capten/common-pkg/cassandra/db-migrate/tests/config/cassandra.yaml +++ b/capten/common-pkg/cassandra/db-migrate/tests/config/cassandra.yaml @@ -20,7 +20,7 @@ cluster_name: 'Test Cluster' # Specifying initial_token will override this setting on the node's initial start, # on subsequent starts, this setting will apply even if initial token is set. 
# -# If you already have a cluster with 1 token per node, and wish to migrate to +# If you already have a cluster with 1 token per node, and wish to migrate to # multiple tokens per node, see http://wiki.apache.org/cassandra/Operations num_tokens: 256 @@ -36,8 +36,8 @@ num_tokens: 256 # allocate_tokens_for_keyspace: KEYSPACE # initial_token allows you to specify tokens manually. While you can use it with -# vnodes (num_tokens > 1, above) -- in which case you should provide a -# comma-separated list -- it's primarily used when adding nodes to legacy clusters +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes to legacy clusters # that do not have vnodes enabled. # initial_token: @@ -367,8 +367,8 @@ counter_cache_save_period: 7200 # If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. saved_caches_directory: /var/lib/cassandra/saved_caches -# commitlog_sync may be either "periodic" or "batch." -# +# commitlog_sync may be either "periodic" or "batch." +# # When in batch mode, Cassandra won't ack writes until the commit log # has been fsynced to disk. It will wait # commitlog_sync_batch_window_in_ms milliseconds between fsyncs. @@ -414,7 +414,7 @@ commitlog_segment_size_in_mb: 32 # any class that implements the SeedProvider interface and has a # constructor that takes a Map of parameters will do. seed_provider: - # Addresses of hosts that are deemed contact points. + # Addresses of hosts that are deemed contact points. # Cassandra nodes use this list of hosts to find each other and learn # the topology of the ring. You must change this if you are running # multiple nodes! @@ -768,7 +768,7 @@ incremental_backups: false snapshot_before_compaction: false # Whether or not a snapshot is taken of the data before keyspace truncation -# or dropping of column families. The STRONGLY advised default of true +# or dropping of column families. The STRONGLY advised default of true # should be used to provide data safety. If you set this flag to false, you will # lose data on truncation or drop. auto_snapshot: true @@ -805,7 +805,7 @@ column_index_cache_size_in_kb: 2 # # concurrent_compactors defaults to the smaller of (number of disks, # number of cores), with a minimum of 2 and a maximum of 8. -# +# # If your data directories are backed by SSD, you should increase this # to the number of cores. #concurrent_compactors: 1 @@ -820,7 +820,7 @@ compaction_throughput_mb_per_sec: 16 # When compacting, the replacement sstable(s) can be opened before they # are completely written, and used in place of the prior sstables for -# any range that has been written. This helps to smoothly transfer reads +# any range that has been written. This helps to smoothly transfer reads # between the sstables, reducing page cache churn and keeping hot rows hot sstable_preemptive_open_interval_in_mb: 50 @@ -864,7 +864,7 @@ slow_query_log_timeout_in_ms: 500 # Enable operation timeout information exchange between nodes to accurately # measure request timeouts. If disabled, replicas will assume that requests # were forwarded to them instantly by the coordinator, which means that -# under overload conditions we will waste that much extra time processing +# under overload conditions we will waste that much extra time processing # already-timed-out requests. 
# # Warning: before enabling this property make sure to ntp is installed @@ -950,7 +950,7 @@ endpoint_snitch: SimpleSnitch # controls how often to perform the more expensive part of host score # calculation -dynamic_snitch_update_interval_in_ms: 100 +dynamic_snitch_update_interval_in_ms: 100 # controls how often to reset all host scores, allowing a bad host to # possibly recover dynamic_snitch_reset_interval_in_ms: 600000 @@ -984,7 +984,7 @@ request_scheduler: org.apache.cassandra.scheduler.NoScheduler # RoundRobin # throttle_limit # The throttle_limit is the number of in-flight -# requests per client. Requests beyond +# requests per client. Requests beyond # that limit are queued up until # running requests can complete. # The value of 80 here is twice the number of @@ -1130,9 +1130,9 @@ transparent_data_encryption_options: key_alias: testing:1 # CBC IV length for AES needs to be 16 bytes (which is also the default size) # iv_length: 16 - key_provider: + key_provider: - class_name: org.apache.cassandra.security.JKSKeyProvider - parameters: + parameters: - keystore: conf/.keystore keystore_password: cassandra store_type: JCEKS diff --git a/capten/common-pkg/plugins/argocd/README.md b/capten/common-pkg/plugins/argocd/README.md index c2d3a691..6a3d3a81 100644 --- a/capten/common-pkg/plugins/argocd/README.md +++ b/capten/common-pkg/plugins/argocd/README.md @@ -43,7 +43,7 @@ Sec-Fetch-Site: same-origin {"apiVersion":"argoproj.io/v1alpha1","kind":"Application","metadata":{"name":"demo"},"spec":{"destination":{"name":"","namespace":"default","server":"https://kubernetes.default.svc"},"source":{"path":"./simple-app","repoURL":"https://github.com/Jasmine-Harit/gitops-certification-examples.git","targetRevision":"HEAD"},"project":"default"}} ``` -$ curl $ARGOCD_SERVER/api/v1/applications -H "Authorization: Bearer $ARGOCD_TOKEN" +$ curl $ARGOCD_SERVER/api/v1/applications -H "Authorization: Bearer $ARGOCD_TOKEN" {"metadata":{"selfLink":"/apis/argoproj.io/v1alpha1/namespaces/argocd/applications","resourceVersion":"37755"},"items":...} diff --git a/capten/common-pkg/plugins/helm/go-helm-client/README.md b/capten/common-pkg/plugins/helm/go-helm-client/README.md index ec4384f4..58af0fd7 100644 --- a/capten/common-pkg/plugins/helm/go-helm-client/README.md +++ b/capten/common-pkg/plugins/helm/go-helm-client/README.md @@ -2,7 +2,7 @@ Go client library for accessing [Helm](https://github.com/helm/helm), enabling the user to programmatically change helm charts and releases. This library is build upon [`helm`](https://github.com/helm/helm) and available under the MIT License. 
- + ![Compile & Test](https://github.com/mittwald/go-helm-client/workflows/Compile%20&%20Test/badge.svg) [![GitHub license](https://img.shields.io/github/license/mittwald/go-helm-client.svg)](https://github.com/mittwald/go-helm-client/blob/master/LICENSE) [![Go Report Card](https://goreportcard.com/badge/github.com/mittwald/go-helm-client)](https://goreportcard.com/report/github.com/mittwald/go-helm-client) diff --git a/capten/common-pkg/postgres/db-migrate/postgres/docker-compose.yaml b/capten/common-pkg/postgres/db-migrate/postgres/docker-compose.yaml index 4824d0f9..09d44f13 100644 --- a/capten/common-pkg/postgres/db-migrate/postgres/docker-compose.yaml +++ b/capten/common-pkg/postgres/db-migrate/postgres/docker-compose.yaml @@ -18,7 +18,7 @@ services: POSTGRES_PASSWORD: example volumes: - pgdata:/var/lib/postgresql/data - + ports: - 5432:5432 diff --git a/capten/database/cassandra/config/cassandra.yaml b/capten/database/cassandra/config/cassandra.yaml index d6801a2a..21a20018 100644 --- a/capten/database/cassandra/config/cassandra.yaml +++ b/capten/database/cassandra/config/cassandra.yaml @@ -20,7 +20,7 @@ cluster_name: 'Test Cluster' # Specifying initial_token will override this setting on the node's initial start, # on subsequent starts, this setting will apply even if initial token is set. # -# If you already have a cluster with 1 token per node, and wish to migrate to +# If you already have a cluster with 1 token per node, and wish to migrate to # multiple tokens per node, see http://wiki.apache.org/cassandra/Operations num_tokens: 256 @@ -36,8 +36,8 @@ num_tokens: 256 # allocate_tokens_for_keyspace: KEYSPACE # initial_token allows you to specify tokens manually. While you can use it with -# vnodes (num_tokens > 1, above) -- in which case you should provide a -# comma-separated list -- it's primarily used when adding nodes to legacy clusters +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes to legacy clusters # that do not have vnodes enabled. # initial_token: @@ -367,8 +367,8 @@ counter_cache_save_period: 7200 # If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. saved_caches_directory: /var/lib/cassandra/saved_caches -# commitlog_sync may be either "periodic" or "batch." -# +# commitlog_sync may be either "periodic" or "batch." +# # When in batch mode, Cassandra won't ack writes until the commit log # has been fsynced to disk. It will wait # commitlog_sync_batch_window_in_ms milliseconds between fsyncs. @@ -414,7 +414,7 @@ commitlog_segment_size_in_mb: 32 # any class that implements the SeedProvider interface and has a # constructor that takes a Map of parameters will do. seed_provider: - # Addresses of hosts that are deemed contact points. + # Addresses of hosts that are deemed contact points. # Cassandra nodes use this list of hosts to find each other and learn # the topology of the ring. You must change this if you are running # multiple nodes! @@ -768,7 +768,7 @@ incremental_backups: false snapshot_before_compaction: false # Whether or not a snapshot is taken of the data before keyspace truncation -# or dropping of column families. The STRONGLY advised default of true +# or dropping of column families. The STRONGLY advised default of true # should be used to provide data safety. If you set this flag to false, you will # lose data on truncation or drop. 
auto_snapshot: true @@ -805,7 +805,7 @@ column_index_cache_size_in_kb: 2 # # concurrent_compactors defaults to the smaller of (number of disks, # number of cores), with a minimum of 2 and a maximum of 8. -# +# # If your data directories are backed by SSD, you should increase this # to the number of cores. #concurrent_compactors: 1 @@ -820,7 +820,7 @@ compaction_throughput_mb_per_sec: 16 # When compacting, the replacement sstable(s) can be opened before they # are completely written, and used in place of the prior sstables for -# any range that has been written. This helps to smoothly transfer reads +# any range that has been written. This helps to smoothly transfer reads # between the sstables, reducing page cache churn and keeping hot rows hot sstable_preemptive_open_interval_in_mb: 50 @@ -864,7 +864,7 @@ slow_query_log_timeout_in_ms: 500 # Enable operation timeout information exchange between nodes to accurately # measure request timeouts. If disabled, replicas will assume that requests # were forwarded to them instantly by the coordinator, which means that -# under overload conditions we will waste that much extra time processing +# under overload conditions we will waste that much extra time processing # already-timed-out requests. # # Warning: before enabling this property make sure to ntp is installed @@ -950,7 +950,7 @@ endpoint_snitch: SimpleSnitch # controls how often to perform the more expensive part of host score # calculation -dynamic_snitch_update_interval_in_ms: 100 +dynamic_snitch_update_interval_in_ms: 100 # controls how often to reset all host scores, allowing a bad host to # possibly recover dynamic_snitch_reset_interval_in_ms: 600000 @@ -984,7 +984,7 @@ request_scheduler: org.apache.cassandra.scheduler.NoScheduler # RoundRobin # throttle_limit # The throttle_limit is the number of in-flight -# requests per client. Requests beyond +# requests per client. Requests beyond # that limit are queued up until # running requests can complete. 
# The value of 80 here is twice the number of @@ -1130,9 +1130,9 @@ transparent_data_encryption_options: key_alias: testing:1 # CBC IV length for AES needs to be 16 bytes (which is also the default size) # iv_length: 16 - key_provider: + key_provider: - class_name: org.apache.cassandra.security.JKSKeyProvider - parameters: + parameters: - keystore: conf/.keystore keystore_password: cassandra store_type: JCEKS diff --git a/charts/kad/crossplane_plugin_config.json b/charts/kad/crossplane_plugin_config.json index 279e5878..87992e37 100644 --- a/charts/kad/crossplane_plugin_config.json +++ b/charts/kad/crossplane_plugin_config.json @@ -38,7 +38,7 @@ "secretPath": "generic/cosign/signer" } ] - }, + }, { "namespace": "ml-server", "secretName": "regcred-ghcr", diff --git a/charts/kad/tekton_plugin_config.json b/charts/kad/tekton_plugin_config.json index 5e94dd69..241b4e2e 100644 --- a/charts/kad/tekton_plugin_config.json +++ b/charts/kad/tekton_plugin_config.json @@ -12,5 +12,5 @@ "mainAppGitPath": "cicd/tekton/tekton-main-app.yaml", "synchApp": true } - ] + ] } diff --git a/proto/agent.proto b/proto/agent.proto index 8450f053..33ba4d80 100644 --- a/proto/agent.proto +++ b/proto/agent.proto @@ -35,7 +35,7 @@ enum StatusCode { NOT_FOUND = 3; } -message PingRequest { +message PingRequest { } message PingResponse { @@ -46,7 +46,7 @@ message StoreCredentialRequest { string credentialType = 1; string credEntityName = 2; string credIdentifier = 3; - map credential = 4; + map credential = 4; } message StoreCredentialResponse { @@ -112,7 +112,7 @@ message GetClusterAppLaunchesResponse { } message ConfigureAppSSORequest { - string releaseName = 1; + string releaseName = 1; string clientId = 2; string clientSecret = 3; string oAuthBaseURL = 4; @@ -120,7 +120,7 @@ message ConfigureAppSSORequest { message ConfigureAppSSOResponse { StatusCode status = 1; - string statusMessage = 2; + string statusMessage = 2; } message GetClusterAppConfigRequest { @@ -143,10 +143,10 @@ message GetClusterAppValuesResponse { AppValues values = 3; } -message GetClusterGlobalValuesRequest { +message GetClusterGlobalValuesRequest { } -message GetClusterGlobalValuesResponse { +message GetClusterGlobalValuesResponse { StatusCode status = 1; string statusMessage = 2; bytes globalValues = 3; @@ -215,7 +215,7 @@ message AppLaunchConfig { enum SecretAccess { READ = 0; - WRITE = 1; + WRITE = 1; } message SecretPathRef { @@ -223,13 +223,13 @@ message SecretPathRef { string SecretKey = 2; } -message ConfigureVaultSecretRequest { +message ConfigureVaultSecretRequest { string secretName = 1; string namespace = 2; repeated SecretPathRef SecretPathData = 3; } -message SecretPolicy { +message SecretPolicy { string secretPath = 1; SecretAccess access = 2; } @@ -239,20 +239,20 @@ message ConfigureVaultSecretResponse { string statusMessage = 2; } -message CreateVaultRoleRequest { +message CreateVaultRoleRequest { string managedClusterName = 1; string roleName = 2; repeated SecretPolicy secretPolicy = 3; repeated string namespaces = 4; - repeated string serviceAccounts = 5; + repeated string serviceAccounts = 5; } -message CreateVaultRoleResponse { +message CreateVaultRoleResponse { StatusCode status = 1; string statusMessage = 2; } -message UpdateVaultRoleRequest { +message UpdateVaultRoleRequest { string managedClusterName = 1; string roleName = 2; repeated SecretPolicy secretPolicy = 3; @@ -260,16 +260,16 @@ message UpdateVaultRoleRequest { repeated string serviceAccounts = 5; } -message UpdateVaultRoleResponse { +message UpdateVaultRoleResponse 
{ StatusCode status = 1; string statusMessage = 2; } -message DeleteVaultRoleRequest { +message DeleteVaultRoleRequest { string roleName = 1; } -message DeleteVaultRoleResponse { +message DeleteVaultRoleResponse { StatusCode status = 1; string statusMessage = 2; } @@ -283,7 +283,7 @@ message DeployDefaultAppsResponse { string statusMessage = 2; } -message ApplicationStatus { +message ApplicationStatus { string appName = 1; string version = 2; string category = 3; @@ -291,7 +291,7 @@ message ApplicationStatus { string runtimeStatus = 5; } -message GetDefaultAppsStatusRequest { +message GetDefaultAppsStatusRequest { } message GetDefaultAppsStatusResponse { diff --git a/proto/capten_sdk.proto b/proto/capten_sdk.proto index a05838bd..df3add97 100644 --- a/proto/capten_sdk.proto +++ b/proto/capten_sdk.proto @@ -63,10 +63,9 @@ message DBSetupRequest { string dbName = 3; string serviceUserName = 4; } - + message DBSetupResponse { StatusCode status = 1; string statusMessage = 2; string vaultPath = 3; } - \ No newline at end of file diff --git a/proto/cluster_plugins.proto b/proto/cluster_plugins.proto index 4181c53e..8c19957b 100644 --- a/proto/cluster_plugins.proto +++ b/proto/cluster_plugins.proto @@ -21,7 +21,7 @@ enum StatusCode { enum StoreType { CENTRAL_CAPTEN_STORE = 0; - LOCAL_CAPTEN_STORE = 1; + LOCAL_CAPTEN_STORE = 1; } message Plugin { @@ -48,7 +48,7 @@ message DeployClusterPluginRequest { Plugin plugin = 1; } -message DeployClusterPluginResponse { +message DeployClusterPluginResponse { StatusCode status = 1; string statusMessage = 2; } @@ -58,7 +58,7 @@ message UnDeployClusterPluginRequest { string pluginName = 2; } -message UnDeployClusterPluginResponse { +message UnDeployClusterPluginResponse { StatusCode status = 1; string statusMessage = 2; } @@ -76,7 +76,7 @@ message ClusterPlugin { message GetClusterPluginsRequest { } -message GetClusterPluginsResponse { +message GetClusterPluginsResponse { StatusCode status = 1; string statusMessage = 2; repeated ClusterPlugin plugins = 3; diff --git a/proto/plugin_store.proto b/proto/plugin_store.proto index b5cbdc06..d6860864 100644 --- a/proto/plugin_store.proto +++ b/proto/plugin_store.proto @@ -55,7 +55,7 @@ message PluginData { } message ConfigurePluginStoreRequest { - PluginStoreConfig config = 1; + PluginStoreConfig config = 1; } message ConfigurePluginStoreResponse { diff --git a/proto/server.proto b/proto/server.proto index 6b664fdf..8d13ef11 100644 --- a/proto/server.proto +++ b/proto/server.proto @@ -6,16 +6,16 @@ option go_package = "/serverpb"; package serverpb; service Server { - rpc NewClusterRegistration (NewClusterRegistrationRequest) returns (NewClusterRegistrationResponse) {} - rpc UpdateClusterRegistration (UpdateClusterRegistrationRequest) returns (UpdateClusterRegistrationResponse) {} + rpc NewClusterRegistration (NewClusterRegistrationRequest) returns (NewClusterRegistrationResponse) {} + rpc UpdateClusterRegistration (UpdateClusterRegistrationRequest) returns (UpdateClusterRegistrationResponse) {} rpc DeleteClusterRegistration (DeleteClusterRegistrationRequest) returns (DeleteClusterRegistrationResponse) {} - + rpc GetClusters (GetClustersRequest) returns (GetClustersResponse) {} - rpc GetCluster (GetClusterRequest) returns (GetClusterResponse) {} + rpc GetCluster (GetClusterRequest) returns (GetClusterResponse) {} rpc GetClusterApps (GetClusterAppsRequest) returns (GetClusterAppsResponse) {} rpc GetClusterAppLaunchConfigs (GetClusterAppLaunchConfigsRequest) returns (GetClusterAppLaunchConfigsResponse) {} rpc 
GetClusterApp (GetClusterAppRequest) returns (GetClusterAppResponse) {} - rpc GetClusterDetails (GetClusterDetailsRequest) returns (GetClusterDetailsResponse) {} + rpc GetClusterDetails (GetClusterDetailsRequest) returns (GetClusterDetailsResponse) {} rpc AddStoreApp (AddStoreAppRequest) returns (AddStoreAppResponse) {} rpc UpdateStoreApp (UpdateStoreAppRequest) returns (UpdateStoreAppRsponse){} @@ -63,7 +63,7 @@ message UpdateClusterRegistrationResponse { } message DeleteClusterRegistrationRequest { - string clusterID = 1; + string clusterID = 1; } message DeleteClusterRegistrationResponse { @@ -77,7 +77,7 @@ message GetClustersRequest { message GetClustersResponse { StatusCode status = 1; string statusMessage = 2; - repeated ClusterInfo data = 3; + repeated ClusterInfo data = 3; } message GetClusterDetailsRequest { @@ -86,12 +86,12 @@ message GetClusterDetailsRequest { message GetClusterDetailsResponse { StatusCode status = 1; string statusMessage = 2; - ClusterInfo data = 3; + ClusterInfo data = 3; } message GetClusterAppsRequest { - string clusterID = 1; + string clusterID = 1; } message GetClusterAppsResponse { @@ -108,7 +108,7 @@ message GetClusterAppRequest { message GetClusterAppResponse { StatusCode status = 1; string statusMessage = 2; - ClusterAppConfig appConfig = 3; + ClusterAppConfig appConfig = 3; } message GetClusterAppLaunchConfigsRequest { @@ -150,7 +150,7 @@ message AppLaunchConfig { message ClusterAttribute { string key = 1; - string value = 2; + string value = 2; } enum PluginStoreType { @@ -280,7 +280,7 @@ message StoreCredentialRequest { string clusterID = 1; string credentialIdentifier = 2; string credentialEntiryName=3; - map credential = 4; + map credential = 4; } message StoreCredentialResponse { diff --git a/proto/vault_cred.proto b/proto/vault_cred.proto index b7459a0e..f7315030 100644 --- a/proto/vault_cred.proto +++ b/proto/vault_cred.proto @@ -16,7 +16,7 @@ service VaultCred { rpc DeleteAppRole(DeleteAppRoleRequest) returns (DeleteAppRoleResponse) {}; rpc GetCredentialWithAppRoleToken(GetCredentialWithAppRoleTokenRequest) returns (GetCredentialWithAppRoleTokenResponse) {}; rpc AddClusterK8SAuth(AddClusterK8SAuthRequest) returns (AddClusterK8SAuthResponse) {}; - rpc DeleteClusterK8SAuth(DeleteClusterK8SAuthRequest) returns (DeleteClusterK8SAuthResponse) {}; + rpc DeleteClusterK8SAuth(DeleteClusterK8SAuthRequest) returns (DeleteClusterK8SAuthResponse) {}; rpc CreateK8SAuthRole(CreateK8SAuthRoleRequest) returns (CreateK8SAuthRoleResponse) {}; rpc UpdateK8SAuthRole(UpdateK8SAuthRoleRequest) returns (UpdateK8SAuthRoleResponse) {}; rpc DeleteK8SAuthRole(DeleteK8SAuthRoleRequest) returns (DeleteK8SAuthRoleResponse) {}; @@ -92,44 +92,44 @@ message GetCredentialWithAppRoleTokenResponse { enum SecretAccess { READ = 0; - WRITE = 1; + WRITE = 1; } -message secretPolicy { +message secretPolicy { string secretPath = 1; SecretAccess access = 2; } -message CreateK8SAuthRoleRequest { +message CreateK8SAuthRoleRequest { string roleName = 1; repeated secretPolicy secretPolicy = 2; string clusterName = 3; repeated string namespaces = 4; repeated string serviceAccounts = 5; } - -message CreateK8SAuthRoleResponse { + +message CreateK8SAuthRoleResponse { StatusCode status = 1; string statusMessage = 2; } - -message UpdateK8SAuthRoleRequest { + +message UpdateK8SAuthRoleRequest { string roleName = 1; repeated secretPolicy secretPolicy = 2; string clusterName = 3; } - -message UpdateK8SAuthRoleResponse { + +message UpdateK8SAuthRoleResponse { StatusCode status = 1; string 
statusMessage = 2; } - -message DeleteK8SAuthRoleRequest { + +message DeleteK8SAuthRoleRequest { string roleName = 1; string clusterName = 3; } - -message DeleteK8SAuthRoleResponse { + +message DeleteK8SAuthRoleResponse { StatusCode status = 1; string statusMessage = 2; }