Skip to content
This repository has been archived by the owner on Oct 8, 2024. It is now read-only.

Commit

Permalink
config-refactor (#7)
Browse files Browse the repository at this point in the history
Co-authored-by: awendt <[email protected]>
  • Loading branch information
Michael-Kruggel and anthonywendt authored Aug 23, 2023
1 parent 91c0dae commit 6602ddc
Show file tree
Hide file tree
Showing 14 changed files with 5,265 additions and 26 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ test/tf/public-ec2-instance/.tool-versions
zarf-sbom
tmp/
values-*.yaml
overlay-values-*

# Terraform
test/tf/public-ec2-instance/.test-data
Expand Down
25 changes: 20 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ BUILD_HARNESS_REPO := ghcr.io/defenseunicorns/build-harness/build-harness
# renovate: datasource=docker depName=ghcr.io/defenseunicorns/build-harness/build-harness
BUILD_HARNESS_VERSION := 1.10.2
# renovate: datasource=docker depName=ghcr.io/defenseunicorns/packages/dubbd-k3d extractVersion=^(?<version>\d+\.\d+\.\d+)
DUBBD_K3D_VERSION := 0.6.1
DUBBD_K3D_VERSION := 0.6.2

# Figure out which Zarf binary we should use based on the operating system we are on
ZARF_BIN := zarf
Expand Down Expand Up @@ -119,8 +119,15 @@ test-ssh: ## Run this if you set SKIP_TEARDOWN=1 and want to SSH into the still-
cluster/full: cluster/destroy cluster/create build/all deploy/all ## This will destroy any existing cluster, create a new one, then build and deploy all

cluster/create: ## Create a k3d cluster with metallb installed
k3d cluster create k3d-test-cluster --config utils/k3d/k3d-config.yaml -v /etc/machine-id:/etc/machine-id@server:*
K3D_FIX_MOUNTS=1 k3d cluster create k3d-test-cluster --config utils/k3d/k3d-config.yaml
k3d kubeconfig merge k3d-test-cluster -o /home/${USER}/cluster-kubeconfig.yaml
echo "Installing Calico..."
kubectl apply --wait=true -f utils/calico/calico.yaml 2>&1 >/dev/null
echo "Waiting for Calico to be ready..."
kubectl rollout status deployment/calico-kube-controllers -n kube-system --watch --timeout=90s 2>&1 >/dev/null
kubectl rollout status daemonset/calico-node -n kube-system --watch --timeout=90s 2>&1 >/dev/null
kubectl wait --for=condition=Ready pods --all --all-namespaces 2>&1 >/dev/null
echo
utils/metallb/install.sh
echo "Cluster is ready!"

Expand All @@ -131,7 +138,7 @@ cluster/destroy: ## Destroy the k3d cluster
# Build Section
########################################################################

build/all: build build/zarf build/zarf-init.sha256 build/dubbd-pull-k3d.sha256 build/uds-capability-sonarqube ##
build/all: build build/zarf build/zarf-init.sha256 build/dubbd-pull-k3d.sha256 build/test-pkg-deps build/uds-capability-sonarqube ##

build: ## Create build directory
mkdir -p build
Expand Down Expand Up @@ -161,20 +168,28 @@ build/dubbd-pull-k3d.sha256: | build ## Download dubbd k3d oci package
echo "Creating shasum of the dubbd-k3d package"
shasum -a 256 build/zarf-package-dubbd-k3d-amd64-$(DUBBD_K3D_VERSION).tar.zst | awk '{print $$1}' > build/dubbd-pull-k3d.sha256

build/test-pkg-deps: | build ## Build package dependencies for testing
build/zarf package create utils/pkg-deps/namespaces/ --skip-sbom --confirm --output-directory build
build/zarf package create utils/pkg-deps/sonarqube/postgres --skip-sbom --confirm --output-directory build

build/uds-capability-sonarqube: | build ## Build the sonarqube capability
build/zarf package create . --skip-sbom --confirm --output-directory build

########################################################################
# Deploy Section
########################################################################

deploy/all: deploy/init deploy/dubbd-k3d deploy/uds-capability-sonarqube ##
deploy/all: deploy/init deploy/dubbd-k3d deploy/test-pkg-deps deploy/uds-capability-sonarqube ##

deploy/init: ## Deploy the zarf init package
./build/zarf init --confirm --components=git-server

deploy/dubbd-k3d: ## Deploy the k3d flavor of DUBBD
cd ./build && ./zarf package deploy zarf-package-dubbd-k3d-amd64-$(DUBBD_K3D_VERSION).tar.zst --confirm

deploy/test-pkg-deps: ## Deploy the package dependencies needed for testing the sonarqube capability
cd ./build && ./zarf package deploy zarf-package-sonarqube-namespaces-* --confirm
cd ./build && ./zarf package deploy zarf-package-sonarqube-postgres* --confirm

deploy/uds-capability-sonarqube: ## Deploy the sonarqube capability
cd ./build && ./zarf package deploy zarf-package-sonarqube-*.tar.zst --confirm
cd ./build && ./zarf package deploy zarf-package-sonarqube-amd*.tar.zst --confirm
27 changes: 25 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
# uds-capability-sonarqube

Platform One Sonarqube deployed via flux

## Pre-req

- Minimum compute requirements for single node deployment are at least 64 GB RAM and 32 virtual CPU threads (aws `m6i.8xlarge` instance type should do)
- k3d installed on machine

Expand Down Expand Up @@ -37,6 +39,7 @@ make cluster/full
```

## Import Zarf Skeleton

Below is an example of how to import this project's zarf skeleton into your zarf.yaml. The [uds-package-software-factory](https://github.com/defenseunicorns/uds-package-software-factory.git) does this with a subset of the uds-capability projects.

```yaml
Expand All @@ -50,5 +53,25 @@ components:
required: true
import:
name: sonarqube
url: oci://ghcr.io/defenseunicorns/uds-capability/nexus:0.0.3-skeleton
```
url: oci://ghcr.io/defenseunicorns/uds-capability/sonarqube:0.0.4-skeleton
```

## Prerequisites

### SonarQube Capability

The SonarQube Capability expects the database listed below to exist in the cluster before being deployed.

#### General

- Create `sonarqube` namespace
- Label `sonarqube` namespace with `istio-injection: enabled`

#### Database

- A Postgres database is running on port `5432` and accessible to the cluster
- This database can be logged into via the username `sonarqube`
- This database instance has a psql database created matching what is defined in the deploy time variable `SONARQUBE_DB`. Default is `sonarqubedb`
- The `sonarqube` user has read/write access to above mentioned database
- Create `sonarqube-postgres` service in `sonarqube` namespace that points to the psql database
- Create `sonarqube-postgres` secret in `sonarqube` namespace with the key `password` that contains the password to the `sonarqube` user for the psql database
86 changes: 84 additions & 2 deletions sonarqube-flux-values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,89 @@ application:
ref:
# renovate: datasource=gitlab-tags depName=big-bang/product/packages/sonarqube versioning=loose registryUrl=https://repo1.dso.mil
tag: 8.0.1-bb.2
values: |
###ZARF_VAR_SONARQUBE_VALUES###
dependsOn:
###ZARF_VAR_SONARQUBE_DEPENDS_ON###
baseValues:
# hostname is deprecated and replaced with domain. But if hostname exists then use it.
domain: ###ZARF_VAR_DOMAIN###

# Define variables to help with conditionals later

OpenShift:
enabled: false

istio:
enabled: true
sonarqube:
gateways:
- istio-system/tenant
injection: enabled

monitoring:
enabled: true

networkPolicies:
enabled: true
ingressLabels:
app: null
istio: null

networkPolicy:
enabled: true
additionalNetworkPolicys:
ingress:
- from:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: gitlab-runner-sandbox
podSelector: {}
ports:
- port: 9000
protocol: TCP
egress:
- to:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: gitlab
podSelector:
matchLabels:
app: webservice
- to:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: gitlab-runner-sandbox
podSelector: {}
podSelector:
matchLabels: {}
policyTypes:
- Egress
- Ingress

image:
pullPolicy: IfNotPresent
annotations:
bigbang.dev/istioVersion: ###ZARF_VAR_ISTIO_VERSION###

jdbcOverwrite:
enable: true
jdbcUrl: jdbc:postgresql://postgresql.sonarqube-db.svc.cluster.local:5432/###ZARF_VAR_SONARQUBE_DB###
jdbcUsername: sonarqube
jdbcSecretName: sonarqube-postgres
jdbcSecretPasswordKey: password

postgresql:
# Use internal database, defaults are fine
enabled: false
master:
podAnnotations:
bigbang.dev/istioVersion: ###ZARF_VAR_ISTIO_VERSION###
slave:
podAnnotations:
bigbang.dev/istioVersion: ###ZARF_VAR_ISTIO_VERSION###
postgresqlServer: sonarqube-postgres
existingSecret: "sonarqube-postgres"
existingSecretPasswordKey: "password"
postgresqlUsername: "sonarqube"
postgresqlDatabase: "###ZARF_VAR_SONARQUBE_DB###"
service:
port: 5432
Loading

0 comments on commit 6602ddc

Please sign in to comment.