diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..937ea64c5a --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +# https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners +* @cbc02009 diff --git a/.github/renovate.json5 b/.github/renovate.json5 new file mode 100644 index 0000000000..0e06a3246a --- /dev/null +++ b/.github/renovate.json5 @@ -0,0 +1,159 @@ +{ + // General RenovateBot configuration + extends: [ + ":enableRenovate", + "config:base", + ":disableRateLimiting", + ":dependencyDashboard", + ":semanticCommits", + ":separatePatchReleases", + "docker:enableMajor", + ":enablePreCommit", + "github>cbc02009/k8s-home-ops//.github/renovate/commitMessage", + "github>cbc02009/k8s-home-ops//.github/renovate/labels", + "github>cbc02009/k8s-home-ops//.github/renovate/semanticCommits", + // "github>cbc02009/k8s-home-ops//.github/renovate/allowedVersions", + // "github>cbc02009/k8s-home-ops//.github/renovate/autoMerge", + // "github>cbc02009/k8s-home-ops//.github/renovate/updateSchedule", + ], + repositories: ["cbc02009/k8s-home-ops"], + onboarding: false, + requireConfig: false, + timezone: "America/New_York", + dependencyDashboardTitle: "Renovate Dashboard 🤖", + suppressNotifications: ["prIgnoreNotification"], + rebaseWhen: "conflicted", + // Set up Renovate Managers + "helm-values": { + fileMatch: [ + "cluster/manifests/.+/helmrelease\\.ya?ml$", + ], + }, + kubernetes: { + fileMatch: ["cluster/manifests/.+\\.ya?ml$"], + }, + flux: { + fileMatch: [ + "cluster/manifests/.+\\.ya?ml$", + ], + }, + regexManagers: [ + // regexManager to read and process GitHub release artifacts + { + fileMatch: ["k8s/clusters/.+\\.ya?ml$", "k8s/global/.+\\.ya?ml$"], + matchStrings: [ + "datasource=(?.*?)\n *url: https://github\\.com/(?.*?)\\.git\n *ref:\n *tag: (?.*)\n", + 'datasource=(?.*?) depName=(?.*?)\n.*?_version: "(?.*)"\n', + 'datasource=(?.*?) 
depName=(?.*?)\n.*?_VERSION="(?.*)"\n', + ], + datasourceTemplate: "github-releases", + depNameTemplate: "{{org}}/{{repo}}", + }, + // Generic Regex Manager + { + fileMatch: ["cluster/.+\\.ya?ml$", "k8s/global/.+\\.ya?ml$"], + matchStrings: [ + "# renovate: datasource=(?.*?) depName=(?.*?)( registryUrl=(?.*?))?( versioning=(?.*=?))?\n .*: (?.*=?)", + ], + versioningTemplate: "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}", + registryUrlTemplate: "{{#if registryUrl}}{{{registryUrl}}}{{else}}{{/if}}", + }, + // Generic Docker image Regex manager + { + fileMatch: [ + "cluster/.+\\.ya?ml$", + ], + matchStrings: [ + "# renovate: docker-image( registryUrl=(?.*?))?( versioning=(?.*=?))?\n .*: (?.*?):(?.*=?)", + ], + datasourceTemplate: "docker", + versioningTemplate: "{{#if versioning}}{{{versioning}}}{{else}}docker{{/if}}", + registryUrlTemplate: "{{#if registryUrl}}{{{registryUrl}}}{{else}}{{/if}}", + }, + ], + packageRules: [ + // Setup datasources + { + matchDatasources: ["helm"], + ignoreDeprecated: true, + }, + // { + // matchDatasources: ["docker"], + // commitMessageExtra: "to {{newVersion}}", + // }, + // Custom version schemes + { + matchDatasources: ["docker"], + matchPackageNames: ["blakeblackshear/frigate"], + versioning: "regex:^(?\\d+)\\.(?\\d+)\\.(?\\d+)-(?.*)$", + }, + { + matchDatasources: ["docker"], + matchPackageNames: ["xirixiz/dsmr-reader-docker"], + versioning: "regex:^(?.*)-(?\\d+)\\.(?\\d+)\\.(?\\d+)$", + }, + { + matchDatasources: ["docker"], + matchPackageNames: ["quay.io/minio/minio"], + versioning: "regex:^RELEASE\\.(?\\d{4})-(?\\d{2})-(?\\d{2})", + }, + { + matchDatasources: ["docker"], + matchPackageNames: ["ghcr.io/airsonic-advanced/airsonic-advanced"], + versioning: "regex:^edge-(?\\d+)\\.(?\\d+)\\.(?\\d+)-SNAPSHOT\\.(?\\d+)$", + }, + // Version strategies + { + matchDatasources: ["docker"], + matchPackageNames: [ + "ghcr.io/k8s-at-home/plex", + "ghcr.io/k8s-at-home/qbittorrent", + ], + versioning: "loose", + }, + { + 
matchDatasources: ["docker"], + matchPackageNames: ["tomsquest/docker-radicale"], + versioning: "pep440", + }, + // Group packages + { + matchDatasources: ["helm", "docker"], + matchPackagePatterns: ["^rook.ceph"], + groupName: "rook-ceph-suite", + additionalBranchPrefix: "", + separateMinorPatch: true, + }, + { + matchDatasources: ["docker"], + matchPackageNames: [ + "k8s.gcr.io/kube-apiserver", + "k8s.gcr.io/kube-controller-manager", + "k8s.gcr.io/kube-scheduler", + "ghcr.io/siderolabs/kubelet", + ], + groupName: "kubernetes", + additionalBranchPrefix: "", + separateMinorPatch: true, + }, + { + matchDatasources: ["helm", "docker"], + matchPackageNames: [ + "quay.io/cilium/cilium", + "quay.io/cilium/operator-generic", + "cilium", + ], + groupName: "cilium", + additionalBranchPrefix: "", + separateMinorPatch: true, + }, + { + matchDatasources: ["docker"], + matchPackageNames: [ + "ghcr.io/linuxserver/calibre", + "ghcr.io/linuxserver/calibre-web", + ], + versioning: "regex:^version-v?(?\\d+)\\.(?\\d+)\\.(?\\d+)$", + }, + ], +} diff --git a/.github/renovate/allowedVersions.json b/.github/renovate/allowedVersions.json new file mode 100644 index 0000000000..a3abe0754b --- /dev/null +++ b/.github/renovate/allowedVersions.json @@ -0,0 +1,19 @@ +// { +// "packageRules": [ +// { +// "matchDatasources": ["docker"], +// "matchPackageNames": ["ghcr.io/linuxserver/calibre-web"], +// "allowedVersions": "<1" +// }, +// { +// "matchDatasources": ["docker"], +// "matchPackageNames": ["influxdb"], +// "allowedVersions": "<2" +// }, +// { +// "matchDatasources": ["docker"], +// "matchPackageNames": ["tomsquest/docker-radicale"], +// "allowedVersions": "/^[0-9]+\\.[0-9]+\\.[0-9]+(\\.[0-9]+)?$/" +// } +// ] +// } diff --git a/.github/renovate/autoMerge.json b/.github/renovate/autoMerge.json new file mode 100644 index 0000000000..b9b672d7f8 --- /dev/null +++ b/.github/renovate/autoMerge.json @@ -0,0 +1,32 @@ +// { +// "packageRules": [ +// { +// "matchDatasources": ["docker"], +// 
"automerge": true, +// "automergeType": "branch", +// "requiredStatusChecks": null, +// "stabilityDays": 2, +// "matchUpdateTypes": ["minor", "patch"], +// "matchPackageNames": [ +// // "ghcr.io/k8s-at-home/plex", +// // "ghcr.io/k8s-at-home/radarr", +// // "ghcr.io/k8s-at-home/sonarr", +// // "ghcr.io/k8s-at-home/lidarr", +// // "ghcr.io/k8s-at-home/nzbget", +// // "ghcr.io/k8s-at-home/prowlarr", +// // "ghcr.io/k8s-at-home/readarr-nightly", +// // "ghcr.io/k8s-at-home/qbittorrent", +// // "ghcr.io/k8s-at-home/sabnzbd", +// // "ghcr.io/k8s-at-home/tautulli", +// // "linuxserver/pyload", +// // "sctx/overseerr", +// // "b4bz/homer", +// // "vexorian/dizquetv", +// // "grafana/grafana", +// // "ghcr.io/tarampampam/error-pages", +// // "k8s-at-home/tautulli" +// ], +// "schedule": ["after 2am, before 5am"] +// } +// ] +// } diff --git a/.github/renovate/commitMessage.json b/.github/renovate/commitMessage.json new file mode 100644 index 0000000000..d1d9fd9f53 --- /dev/null +++ b/.github/renovate/commitMessage.json @@ -0,0 +1,15 @@ +{ + "commitMessageTopic": "{{depName}}", + "commitMessageExtra": "to {{newVersion}}", + "commitMessageSuffix": "", + "packageRules": [ + { + "matchDatasources": ["helm"], + "commitMessageTopic": "Helm chart {{depName}}" + }, + { + "matchDatasources": ["docker"], + "commitMessageTopic": "Docker image {{depName}}" + } + ] +} diff --git a/.github/renovate/labels.json b/.github/renovate/labels.json new file mode 100644 index 0000000000..48a59a5576 --- /dev/null +++ b/.github/renovate/labels.json @@ -0,0 +1,40 @@ +{ + "packageRules": [ + { + "matchUpdateTypes": ["major"], + "labels": ["type/major"] + }, + { + "matchUpdateTypes": ["minor"], + "labels": ["type/minor"] + }, + { + "matchUpdateTypes": ["patch"], + "labels": ["type/patch"] + }, + { + "matchDatasources": ["docker"], + "addLabels": ["renovate/container"] + }, + { + "matchDatasources": ["helm"], + "addLabels": ["renovate/helm"] + }, + { + "matchDatasources": ["galaxy", 
"galaxy-collection"], + "addLabels": ["renovate/ansible"] + }, + { + "matchDatasources": ["terraform-provider"], + "addLabels": ["renovate/terraform"] + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "addLabels": ["renovate/github-release"] + }, + { + "matchManagers": ["github-actions"], + "addLabels": ["renovate/github-action"] + } + ] +} diff --git a/.github/renovate/semanticCommits.json b/.github/renovate/semanticCommits.json new file mode 100644 index 0000000000..557881d728 --- /dev/null +++ b/.github/renovate/semanticCommits.json @@ -0,0 +1,132 @@ +{ + "semanticCommitType": ":arrow_up:", + "semanticCommitScope": null, + "packageRules": [ + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": ":arrow_up: feat(container)!: " + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": ":arrow_up: feat", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["docker"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": ":arrow_up: fix", + "semanticCommitScope": "container" + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": ":arrow_up: feat(helm)!: " + }, + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": ":arrow_up: feat", + "semanticCommitScope": "helm" + }, + + { + "matchDatasources": ["helm"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": ":arrow_up: fix", + "semanticCommitScope": "helm" + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": ":arrow_up: feat(ansible)!: " + }, + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": ":arrow_up: feat", + "semanticCommitScope": "ansible" + }, + + { + "matchDatasources": ["galaxy", "galaxy-collection"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": ":arrow_up: 
fix", + "semanticCommitScope": "ansible" + }, + { + "matchDatasources": ["terraform-provider"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": ":arrow_up: feat(terraform)!: " + }, + { + "matchDatasources": ["terraform-provider"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": ":arrow_up: feat", + "semanticCommitScope": "terraform" + }, + { + "matchDatasources": ["terraform-provider"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": ":arrow_up: fix", + "semanticCommitScope": "terraform" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": ":arrow_up: feat(github-release)!: " + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": ":arrow_up: feat", + "semanticCommitScope": "github-release" + }, + { + "matchDatasources": ["github-releases", "github-tags"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": ":arrow_up: fix", + "semanticCommitScope": "github-release" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["major"], + "commitMessagePrefix": ":arrow_up: feat(github-action)!: " + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["minor"], + "semanticCommitType": ":arrow_up: feat", + "semanticCommitScope": "github-action" + }, + { + "matchManagers": ["github-actions"], + "matchUpdateTypes": ["patch"], + "semanticCommitType": ":arrow_up: fix", + "semanticCommitScope": "github-action" + }, + + + { + "updateTypes": [ + "lockFileMaintenance", + "pin" + ], + "semanticCommitType": ":pushpin:" + }, + { + "updateTypes": [ + "rollback" + ], + "semanticCommitType": ":arrow_down:" + }, + { + "updateTypes": [ + "digest", + "bump" + ], + "semanticCommitType": ":bookmark:" + } + ] +} diff --git a/.github/renovate/updateSchedule.json b/.github/renovate/updateSchedule.json new file mode 100644 index 0000000000..3f3193f947 --- /dev/null +++ 
b/.github/renovate/updateSchedule.json @@ -0,0 +1,13 @@ +// { +// "packageRules": [ +// { +// "matchPackageNames": [ +// "ghcr.io/airsonic-advanced/airsonic-advanced", +// "ghcr.io/k8s-at-home/radarr-develop", +// "ghcr.io/k8s-at-home/sonarr-develop", +// "quay.io/minio/minio" +// ], +// "schedule": ["after 8am and before 3pm on saturday"] +// } +// ] +// } diff --git a/.github/scripts/container-parser.sh b/.github/scripts/container-parser.sh new file mode 100644 index 0000000000..473a06c44b --- /dev/null +++ b/.github/scripts/container-parser.sh @@ -0,0 +1,124 @@ +#!/usr/bin/env bash + +# shellcheck source=/dev/null +source "$(dirname "${0}")/lib/functions.sh" + +set -o errexit +set -o nounset +set -o pipefail +shopt -s lastpipe + +show_help() { +cat << EOF +Usage: $(basename "$0") + -h, --help Display help + -f, --file File to scan for container images + --nothing Enable nothing mode +EOF +} + +main() { + local file= + local nothing= + parse_command_line "$@" + check "jo" + check "jq" + check "yq" + entry +} + +parse_command_line() { + while :; do + case "${1:-}" in + -h|--help) + show_help + exit + ;; + -f|--file) + if [[ -n "${2:-}" ]]; then + file="$2" + shift + else + echo "ERROR: '-f|--file' cannot be empty." >&2 + show_help + exit 1 + fi + ;; + --nothing) + nothing=1 + ;; + *) + break + ;; + esac + shift + done + + if [[ -z "$file" ]]; then + echo "ERROR: '-f|--file' is required." >&2 + show_help + exit 1 + fi + + if [[ -z "$nothing" ]]; then + nothing=0 + fi +} + +entry() { + # create new array to hold the images + images=() + + # look in hydrated flux helm releases + chart_registry_url=$(chart_registry_url "${file}") + chart_name=$(yq eval-all .spec.chart.spec.chart "${file}" 2>/dev/null) + if [[ -n ${chart_registry_url} && -n "${chart_name}" && ! 
"${chart_name}" =~ "null" ]]; then + chart_version=$(yq eval .spec.chart.spec.version "${file}" 2>/dev/null) + chart_values=$(yq eval .spec.values "${file}" 2>/dev/null) + pushd "$(mktemp -d)" > /dev/null 2>&1 + helm repo add main "${chart_registry_url}" > /dev/null 2>&1 + helm pull "main/${chart_name}" --untar --version "${chart_version}" + resources=$(echo "${chart_values}" | helm template "${chart_name}" "${chart_name}" --version "${chart_version}" -f -) + popd > /dev/null 2>&1 + images+=("$(echo "${resources}" | yq eval-all '.spec.template.spec.containers.[].image' -)") + helm repo remove main > /dev/null 2>&1 + fi + + # look in helm values + images+=("$(yq eval-all '[.. | select(has("repository")) | select(has("tag"))] | .[] | .repository + ":" + .tag' "${file}" 2>/dev/null)") + + # look in kubernetes deployments, statefulsets and daemonsets + images+=("$(yq eval-all '.spec.template.spec.containers.[].image' "${file}" 2>/dev/null)") + + # look in kubernetes pods + images+=("$(yq eval-all '.spec.containers.[].image' "${file}" 2>/dev/null)") + + # look in kubernetes cronjobs + images+=("$(yq eval-all '.spec.jobTemplate.spec.template.spec.containers.[].image' "${file}" 2>/dev/null)") + + # look in docker compose + images+=("$(yq eval-all '.services.*.image' "${file}" 2>/dev/null)") + + # remove duplicate values xD + IFS=" " read -r -a images <<< "$(tr ' ' '\n' <<< "${images[@]}" | sort -u | tr '\n' ' ')" + + # create new array to hold the parsed images + parsed_images=() + # loop thru the images removing any invalid items + for i in "${images[@]}"; do + # loop thru each image and split on new lines (for when yq finds multiple containers in the same file) + for b in ${i//\\n/ }; do + if [[ -z "${b}" || "${b}" == "null" || "${b}" == "---" ]]; then + continue + fi + parsed_images+=("${b}") + done + done + # check if parsed_images array has items + if (( ${#parsed_images[@]} )); then + # convert the bash array to json and wrap array in an containers object + jo -a 
"${parsed_images[@]}" | jq -c '{containers: [(.[])]}' + fi +} + +main "$@" diff --git a/.github/scripts/create-helmrelease-annotations.sh b/.github/scripts/create-helmrelease-annotations.sh new file mode 100644 index 0000000000..c9c118d137 --- /dev/null +++ b/.github/scripts/create-helmrelease-annotations.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# Wire up the env and cli validations +__dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +# shellcheck source=/dev/null +# source "${__dir}/environment.sh" + +export CLUSTER_ROOT=$(git rev-parse --show-toplevel) + +# CHANGEME - path to your helm repositories +export helm_repositories="${CLUSTER_ROOT}/k8s/global/" + +for helm_release in $(find ${CLUSTER_ROOT} -name "*.yaml"); do + # ignore flux-system namespace + # ignore wrong apiVersion + # ignore non HelmReleases + if [[ "${helm_release}" =~ "flux-system" + || $(yq r "${helm_release}" apiVersion) != "helm.toolkit.fluxcd.io/v2beta1" + || $(yq r "${helm_release}" kind) != "HelmRelease" ]]; then + continue + fi + + for helm_repository in "${helm_repositories}"/*.yaml; do + chart_name=$(yq r "${helm_repository}" metadata.name) + chart_url=$(yq r "${helm_repository}" spec.url) + + # only helmreleases where helm_release is related to chart_url + if [[ $(yq r "${helm_release}" spec.chart.spec.sourceRef.name) == "${chart_name}" ]]; then + # delete "renovate: registryUrl=" line + sed -i "/renovate: registryUrl=/d" "${helm_release}" + # insert "renovate: registryUrl=" line + sed -i "/.*chart: .*/i \ \ \ \ \ \ # renovate: registryUrl=${chart_url}" "${helm_release}" + echo "Annotated $(basename "${helm_release%.*}") with ${chart_name} for renovatebot..." 
+ break + fi + done +done diff --git a/.github/scripts/helm-release-differ.sh b/.github/scripts/helm-release-differ.sh new file mode 100644 index 0000000000..8d7543ef9b --- /dev/null +++ b/.github/scripts/helm-release-differ.sh @@ -0,0 +1,175 @@ +#!/usr/bin/env bash + +# shellcheck source=/dev/null +source "$(dirname "${0}")/lib/functions.sh" + +set -o errexit +set -o nounset +set -o pipefail +shopt -s lastpipe + +show_help() { +cat << EOF +Usage: $(basename "$0") + -h, --help Display help + --source-file Original helm release + --target-file New helm release + --remove-common-labels Remove common labels from manifests +EOF +} + +main() { + local source_file= + local target_file= + local remove_common_labels= + parse_command_line "$@" + check "helm" + check "yq" + entry +} + +parse_command_line() { + while :; do + case "${1:-}" in + -h|--help) + show_help + exit + ;; + --source-file) + if [[ -n "${2:-}" ]]; then + source_file="$2" + shift + else + echo "ERROR: '--source-file' cannot be empty." >&2 + show_help + exit 1 + fi + ;; + --target-file) + if [[ -n "${2:-}" ]]; then + target_file="$2" + shift + else + echo "ERROR: '--target-file' cannot be empty." >&2 + show_help + exit 1 + fi + ;; + --remove-common-labels) + remove_common_labels=true + ;; + *) + break + ;; + esac + shift + done + + if [[ -z "${source_file}" ]]; then + echo "ERROR: '--source-file' is required." >&2 + show_help + exit 1 + fi + + if [[ $(yq eval .kind "${source_file}" 2>/dev/null) != "HelmRelease" ]]; then + echo "ERROR: '--source-file' is not a HelmRelease" + show_help + exit 1 + fi + + if [[ -z "${target_file}" ]]; then + echo "ERROR: '--target-file' is required." 
>&2 + show_help + exit 1 + fi + + if [[ $(yq eval .kind "${target_file}" 2>/dev/null) != "HelmRelease" ]]; then + echo "ERROR: '--target-file' is not a HelmRelease" + show_help + exit 1 + fi + + if [[ -z "$remove_common_labels" ]]; then + remove_common_labels=false + fi +} + +_resources() { + local chart_name=${1} + local chart_version=${2} + local chart_registry_url=${3} + local chart_values=${4} + local resources= + + helm repo add main "${chart_registry_url}" > /dev/null 2>&1 + pushd "$(mktemp -d)" > /dev/null 2>&1 + helm pull "main/${chart_name}" --untar --version "${chart_version}" + resources=$(echo "${chart_values}" | helm template "${chart_name}" "${chart_name}" --version "${chart_version}" -f -) + if [[ "${remove_common_labels}" == "true" ]]; then + labels='.metadata.labels."helm.sh/chart"' + labels+=',.metadata.labels.chart' + labels+=',.metadata.labels."app.kubernetes.io/version"' + labels+=',.spec.template.metadata.labels."helm.sh/chart"' + labels+=',.spec.template.metadata.labels.chart' + labels+=',.spec.template.metadata.labels."app.kubernetes.io/version"' + echo "${resources}" | yq eval "del($labels)" - + else + echo "${resources}" + fi + popd > /dev/null 2>&1 + helm repo remove main > /dev/null 2>&1 +} + +entry() { + local comments= + + source_chart_name=$(chart_name "${source_file}") + source_chart_version=$(chart_version "${source_file}") + source_chart_registry_url=$(chart_registry_url "${source_file}") + source_chart_values=$(chart_values "${source_file}") + source_resources=$(_resources "${source_chart_name}" "${source_chart_version}" "${source_chart_registry_url}" "${source_chart_values}") + echo "${source_resources}" > /tmp/source_resources + + target_chart_version=$(chart_version "${target_file}") + target_chart_name=$(chart_name "${target_file}") + target_chart_registry_url=$(chart_registry_url "${target_file}") + target_chart_values=$(chart_values "${target_file}") + target_resources=$(_resources "${target_chart_name}" 
"${target_chart_version}" "${target_chart_registry_url}" "${target_chart_values}") + echo "${target_resources}" > /tmp/target_resources + + # Diff the files and always return true + diff -u /tmp/source_resources /tmp/target_resources > /tmp/diff || true + # Remove the filenames + sed -i -e '1,2d' /tmp/diff + + # Store the comment in an array + comments=() + + # shellcheck disable=SC2016 + comments+=( "$(printf 'Path: `%s`' "${target_file}")" ) + if [[ "${source_chart_name}" != "${target_chart_name}" ]]; then + # shellcheck disable=SC2016 + comments+=( "$(printf 'Chart: `%s` -> `%s`' "${source_chart_name}" "${target_chart_name}")" ) + fi + if [[ "${source_chart_version}" != "${target_chart_version}" ]]; then + # shellcheck disable=SC2016 + comments+=( "$(printf 'Version: `%s` -> `%s`' "${source_chart_version}" "${target_chart_version}")" ) + fi + if [[ "${source_chart_registry_url}" != "${target_chart_registry_url}" ]]; then + # shellcheck disable=SC2016 + comments+=( "$(printf 'Registry URL: `%s` -> `%s`' "${source_chart_registry_url}" "${target_chart_registry_url}")" ) + fi + comments+=( "$(printf '\n\n')" ) + if [[ -f /tmp/diff && -s /tmp/diff ]]; then + # shellcheck disable=SC2016 + comments+=( "$(printf '```diff\n%s\n```' "$(cat /tmp/diff)")" ) + else + # shellcheck disable=SC2016 + comments+=( "$(printf '```\nNo changes in detected in resources\n```')" ) + fi + + # Join the array with a new line and print it + printf "%s\n" "${comments[@]}" +} + +main "$@" diff --git a/.github/scripts/lib/functions.sh b/.github/scripts/lib/functions.sh new file mode 100644 index 0000000000..d0f6895397 --- /dev/null +++ b/.github/scripts/lib/functions.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail +shopt -s lastpipe + +check() { + command -v "${1}" >/dev/null 2>&1 || { + echo >&2 "ERROR: ${1} is not installed or not found in \$PATH" >&2 + exit 1 + } +} + +chart_registry_url() { + local helm_release= + local chart_id= + 
helm_release="${1}" + chart_id=$(yq eval .spec.chart.spec.sourceRef.name "${helm_release}" 2>/dev/null) + # Discover all HelmRepository + find . -iname '*-charts.yaml' -type f -print0 | while IFS= read -r -d '' file; do + # Skip non HelmRepository + [[ $(yq eval .kind "${file}" 2>/dev/null) != "HelmRepository" ]] && continue + # Skip unrelated HelmRepository + [[ "${chart_id}" != $(yq eval .metadata.name "${file}" 2>/dev/null) ]] && continue + yq eval .spec.url "${file}" + break + done +} + +chart_name() { + local helm_release= + helm_release="${1}" + yq eval .spec.chart.spec.chart "${helm_release}" 2>/dev/null +} + +chart_version() { + local helm_release= + helm_release="${1}" + yq eval .spec.chart.spec.version "${helm_release}" 2>/dev/null +} + +chart_values() { + local helm_release= + helm_release="${1}" + yq eval .spec.values "${helm_release}" 2>/dev/null +} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..02e333293f --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +# Editors +.vscode/ +.idea/ +# Trash +.DS_Store +Thumbs.db + +tools/cilium-quick-install/charts diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..ed3e5e74c5 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,61 @@ +--- +exclude: | + (?x)^( + provision/ansible/roles/.* + )$ + +repos: + # - repo: https://github.com/adrienverge/yamllint + # rev: v1.26.3 + # hooks: + # - args: + # - --config-file + # - .github/linters/.yamllint.yaml + # id: yamllint + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: trailing-whitespace + - id: check-merge-conflict + - id: detect-private-key + - id: end-of-file-fixer + - id: check-added-large-files + args: ["--maxkb=2000"] + + - repo: https://github.com/Lucas-C/pre-commit-hooks + rev: v1.3.1 + hooks: + - id: remove-crlf + - id: remove-tabs + + - repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 3.0.0 + hooks: 
+ - id: shellcheck + language: script + args: [--severity=error] + additional_dependencies: [] + + - repo: https://github.com/k8s-at-home/sops-pre-commit + rev: v2.1.1 + hooks: + - id: forbid-secrets + + - repo: https://github.com/sirosen/texthooks + rev: 0.3.1 + hooks: + - id: fix-smartquotes + - id: fix-ligatures + + - repo: https://github.com/zricethezav/gitleaks + rev: v8.11.0 + hooks: + - id: gitleaks + + # - repo: https://github.com/Truxnell/pre-commit + # rev: v0.0.9 + # hooks: + # - id: kustomize_build + # files: ^k8s/ + # args: [--dry-run=server] diff --git a/.sops.yaml b/.sops.yaml new file mode 100644 index 0000000000..299e61d79f --- /dev/null +++ b/.sops.yaml @@ -0,0 +1,6 @@ +--- +creation_rules: + - path_regex: cluster/.*\.sops\.ya?ml + encrypted_regex: "((?i)(pass|secret($|[^N])|key|token|^data$|^stringData))" + age: >- + age1ayu5h4m5gqfchewyk6vqm2ts95n3q947lldcwf4cjclqx0kec5fq5dha7q diff --git a/.taskfiles/bootstrap.yml b/.taskfiles/bootstrap.yml new file mode 100644 index 0000000000..ea4f2cc28c --- /dev/null +++ b/.taskfiles/bootstrap.yml @@ -0,0 +1,44 @@ +--- +version: "3" + +tasks: + update-cilium: + desc: Update Cilium quick-install + cmds: + - helm repo add cilium https://helm.cilium.io/ + - kustomize build --enable-helm ./tools/cilium-quick-install > ./tools/cilium-quick-install/quick-install.yaml + silent: false + + cilium: + desc: 2. Bootstrap cilium + cmds: + - kubectl apply -f ./tools/cilium-quick-install/quick-install.yaml + + sops: + desc: 3. Bootstrap sops + cmds: + - cat ~/.config/sops/age/keys.txt | kubectl -n flux-system create secret generic sops-age --from-file=age.agekey=/dev/stdin + + github: + desc: 4. decrypt and load github ssh key + cmds: + - sops --decrypt cluster/config/github-deploy-key.sops.yaml | kubectl apply -f - + + flux: + desc: 1. 
Bootstrap flux (as per version in manifest) + cmds: + - yq '.spec.ref.tag' cluster/repositories/git/flux.yaml | xargs -I{} flux install --version={} --export | kubectl apply -f - + + cluster: + desc: 5. Bootstrap Cluster + cmds: + - kubectl apply -k cluster + + all: + desc: run all bootstrap commands in order + cmds: + - go-task bootstrap:flux + - go-task bootstrap:cilium + - go-task bootstrap:sops + - go-task bootstrap:github + - go-task bootstrap:cluster diff --git a/.taskfiles/cluster.yml b/.taskfiles/cluster.yml new file mode 100644 index 0000000000..282588e7ae --- /dev/null +++ b/.taskfiles/cluster.yml @@ -0,0 +1,218 @@ +--- +version: "3" + +tasks: + + git-check-yaml: + desc: Check yaml for kustomize/kube errors + cmds: + - git diff --name-only origin/main.. | grep --color kustomization.yaml | sed 's/kustomization.yaml//' | xargs -I{} kustomize build {} + - git diff --name-only origin/main.. | grep --color .yaml | xargs cat | kubectl apply -f - --dry-run=server + + # nfs-deps-pause: + # desc: Pause all Helm Releases that rely on NFS storage + # cmds: + # - flux suspend hr -n media plex + # - kubectl scale -n media deploy/plex --replicas 0 + # - flux suspend hr -n downloads qbittorrent + # - kubectl scale -n downloads deploy/qbittorrent --replicas 0 + # - flux suspend hr -n downloads sabnzbd + # - kubectl scale -n downloads deploy/sabnzbd --replicas 0 + # - flux suspend hr -n downloads nzbget + # - kubectl scale -n downloads deploy/nzbget --replicas 0 + # - flux suspend hr -n organizarrs radarr + # - kubectl scale -n organizarrs deploy/radarr --replicas 0 + # - flux suspend hr -n organizarrs readarr + # - kubectl scale -n organizarrs deploy/readarr --replicas 0 + # - flux suspend hr -n organizarrs sonarr + # - kubectl scale -n organizarrs deploy/sonarr --replicas 0 + # - flux suspend hr -n organizarrs lidarr + # - kubectl scale -n organizarrs deploy/lidarr --replicas 0 + # - flux suspend hr -n organizarrs calibre + # - kubectl scale -n organizarrs 
deploy/calibre --replicas 0 + # - flux suspend hr -n organizarrs calibre-web + # - kubectl scale -n organizarrs deploy/calibre-web --replicas 0 + # - flux suspend hr -n organizarrs calibre-web + # - kubectl scale -n organizarrs deploy/calibre-web --replicas 0 + # - flux suspend hr -n system-monitoring thanos-compactor + # - kubectl scale -n system-monitoring deploy/thanos-compactor --replicas 0 + # - flux suspend hr -n system-monitoring loki + # - kubectl scale -n system-monitoring deploy/loki --replicas 0 + + # nfs-deps-resume: + # desc: Resume all Helm Releases that rely on NFS storage + # cmds: + # - flux resume hr -n media plex + # - kubectl scale -n media deploy/plex --replicas 1 + # - flux resume hr -n downloads qbittorrent + # - kubectl scale -n downloads deploy/qbittorrent --replicas 1 + # - flux resume hr -n downloads sabnzbd + # - kubectl scale -n downloads deploy/sabnzbd --replicas 1 + # - flux resume hr -n downloads nzbget + # - kubectl scale -n downloads deploy/nzbget --replicas 1 + # - flux resume hr -n organizarrs radarr + # - kubectl scale -n organizarrs deploy/radarr --replicas 1 + # - flux resume hr -n organizarrs readarr + # - kubectl scale -n organizarrs deploy/readarr --replicas 1 + # - flux resume hr -n organizarrs sonarr + # - kubectl scale -n organizarrs deploy/sonarr --replicas 1 + # - flux resume hr -n organizarrs lidarr + # - kubectl scale -n organizarrs deploy/lidarr --replicas 1 + # - flux resume hr -n organizarrs calibre + # - kubectl scale -n organizarrs deploy/calibre --replicas 1 + # - flux resume hr -n organizarrs calibre-web + # - kubectl scale -n organizarrs deploy/calibre-web --replicas 1 + # - flux resume hr -n system-monitoring thanos-compactor + # - kubectl scale -n system-monitoring deploy/thanos-compactor --replicas 1 + # - flux resume hr -n system-monitoring loki + # - kubectl scale -n system-monitoring deploy/loki --replicas 1 + + delete-failed-pods: + desc: Deletes failed pods + cmds: + - kubectl delete pods 
--field-selector status.phase=Failed -A --ignore-not-found=true + + debug-volume: + desc: Attach a volume to a container for debugging, ex. VOLUME=zigbee2mqtt-config-v1 NAMESPACE=home task debug-volume + interactive: true + silent: true + cmds: + - | + kubectl run debug-{{.VOLUME}} -n {{.NAMESPACE}} -i --tty --rm --image=null --overrides=' + { + "spec": { + "containers": [ + { + "name": "debug", + "image": "docker.io/library/alpine:3.15", + "command": [ + "/bin/sh" + ], + "stdin": true, + "stdinOnce": true, + "tty": true, + "lifecycle": { + "postStart": { + "exec": { + "command": [ + "/bin/sh", + "-c", + "apk add --no-cache curl vim" + ] + } + } + }, + "volumeMounts": [ + { + "name": "backups", + "mountPath": "/mnt/backups/" + }, + { + "name": "debug-volume", + "mountPath": "/mnt/volume/" + } + ] + } + ], + "volumes": [ + { + "name": "backups", + "nfs": { + "server": '{{.NAS_ADDRESS | default "hyperion"}}', + "path": '{{.NAS_PATH | default "/volume1/backups/"}}' + } + }, + { + "name": "debug-volume", + "persistentVolumeClaim": { + "claimName": "{{.VOLUME}}" + } + } + ], + "restartPolicy": "Never" + } + }' + + debug-node: + desc: Create a privileged container on a node for debugging, ex. 
NODE=anvil task debug-node + interactive: true + cmds: + - | + kubectl run debug-{{.NODE}} -i --tty --rm --image="docker.io/library/alpine:3.15" --privileged --overrides=' + { + "spec": { + "nodeSelector": { + "kubernetes.io/hostname": "{{.NODE}}" + }, + "restartPolicy": "Never" + } + }' + + list-dockerhub: + desc: What dockerhub images are running in my cluster + cmds: + - kubectl get pods --all-namespaces -o=jsonpath="{range .items[*]}{'\n'}{range .spec.containers[*]}{.image}{'\n'}{end}{end}" | sort | uniq | grep -Ev 'quay|gcr|ghcr|ecr|us-docker' | grep -Ev 'bitnami|rook|intel|grafana' | sed -e 's/docker\.io\///g' | sort | uniq + + netshoot: + desc: Create a netshoot container for debugging + cmds: + - kubectl run tmp-shell --rm -i --tty --image ghcr.io/nicolaka/netshoot:latest {{.CLI_ARGS}} + + reconcile: + desc: Force update Flux to pull in changes from your Git repository + cmds: + - flux reconcile -n flux-system source git k8s-home-ops + - flux reconcile -n flux-system kustomization apply-cluster-deployment + + nodes: + desc: List all the nodes in your cluster + cmds: + - kubectl get nodes {{.CLI_ARGS | default "-o wide"}} + + pods: + desc: List all the pods in your cluster + cmds: + - kubectl get pods {{.CLI_ARGS | default "-A"}} + + kustomizations: + desc: List all the kustomizations in your cluster + cmds: + - kubectl get kustomizations {{.CLI_ARGS | default "-A"}} + + helmreleases: + desc: List all the helmreleases in your cluster + cmds: + - kubectl get helmreleases {{.CLI_ARGS | default "-A"}} + + helmrepositories: + desc: List all the helmrepositories in your cluster + cmds: + - kubectl get helmrepositories {{.CLI_ARGS | default "-A"}} + + gitrepositories: + desc: List all the gitrepositories in your cluster + cmds: + - kubectl get gitrepositories {{.CLI_ARGS | default "-A"}} + + certificates: + desc: List all the certificates in your cluster + cmds: + - kubectl get certificates {{.CLI_ARGS | default "-A"}} + - kubectl get certificaterequests 
{{.CLI_ARGS | default "-A"}} + + ingresses: + desc: List all the ingresses in your cluster + cmds: + - kubectl get ingress {{.CLI_ARGS | default "-A"}} + + resources: + desc: Gather common resources in your cluster, useful when asking for support + cmds: + - task: nodes + - task: kustomizations + - task: helmreleases + - task: helmrepositories + - task: gitrepositories + # - task: certificates + # - task: ingresses + - task: pods diff --git a/.taskfiles/flux.yml b/.taskfiles/flux.yml new file mode 100644 index 0000000000..a5a2aac59e --- /dev/null +++ b/.taskfiles/flux.yml @@ -0,0 +1,58 @@ +--- +version: "3" + +tasks: + sync: + desc: Sync flux-system with the Git Repository + cmds: + - git push + - flux reconcile source git flux-system + silent: false + + sapps: + desc: Sync apps kustomization with the Git Repository, and display kustomization list + cmds: + - task flux:sync + - flux reconcile kustomization apps && task flux:k + silent: false + + sall: + desc: Sync apps kustomization with the Git Repository, and display kustomization list + cmds: + - task flux:sync + - flux reconcile kustomization flux-system + - flux reconcile kustomization operators + - flux reconcile kustomization infrastructure + - flux reconcile kustomization apps + - task flux:k + silent: false + + hr: + desc: List all Helm Releases + cmds: + - flux get hr -A | grep --colour -e "^" -e False + silent: true + + hs: + desc: List all Helm sources + cmds: + - flux get sources helm -A + silent: true + + hc: + desc: List all Helm charts + cmds: + - flux get sources chart -A + silent: true + + k: + desc: List all Kustomizations + cmds: + - flux get kustomizations -A + silent: true + + hr-restart: + desc: Restart all failed Helm Releases + cmds: + - kubectl get hr --all-namespaces | grep False | awk '{print $2, $1}' | xargs -l bash -c 'flux suspend hr $0 -n $1' + - kubectl get hr --all-namespaces | grep False | awk '{print $2, $1}' | xargs -l bash -c 'flux resume hr $0 -n $1' diff --git 
a/.taskfiles/format.yml b/.taskfiles/format.yml new file mode 100644 index 0000000000..0689f818bc --- /dev/null +++ b/.taskfiles/format.yml @@ -0,0 +1,33 @@ +--- +version: "3" + +tasks: + all: + - task: markdown + - task: yaml + markdown: + desc: Format Markdown + cmds: + - >- + prettier + --ignore-path '.github/lint/.prettierignore' + --config '.github/lint/.prettierrc.yaml' + --list-different + --ignore-unknown + --parser=markdown + --write '*.md' '**/*.md' + ignore_error: true + yaml: + desc: Format YAML + cmds: + - >- + prettier + --ignore-path '.github/lint/.prettierignore' + --config + '.github/lint/.prettierrc.yaml' + --list-different + --ignore-unknown + --parser=yaml + --write '*.y*ml' + '**/*.y*ml' + ignore_error: true diff --git a/.taskfiles/kubeadm.yml b/.taskfiles/kubeadm.yml new file mode 100644 index 0000000000..81299031a0 --- /dev/null +++ b/.taskfiles/kubeadm.yml @@ -0,0 +1,34 @@ +--- +version: "3" + +tasks: + init: + desc: Bring up cluster + cmds: + - sudo kubeadm init --config kubeadm-config.yaml + - mkdir -p $HOME/.kube + - sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + - sudo chown $(id -u):$(id -g) $HOME/.kube/config + # - kubectl taint nodes --all node-role.kubernetes.io/control-plane- node-role.kubernetes.io/master- + silent: false + + drain: + desc: drain all nodes + cmds: + - kubectl get nodes | awk '{if (NR!=1) {print $1}}' | xargs -l bash -c 'kubectl drain $0 --ignore-daemonsets --delete-emptydir-data' + + uncordon: + desc: uncordon all nodes + cmds: + - kubectl get nodes | awk '{if (NR!=1) {print $1}}' | xargs -l bash -c 'kubectl uncordon $0' + + reset: + desc: Teardown cluster + cmds: + - go-task kubeadm:drain + - sudo kubeadm reset + + join-token: + desc: create join token + cmds: + - sudo kubeadm token create --print-join-command diff --git a/.taskfiles/pre-commit.yml b/.taskfiles/pre-commit.yml new file mode 100644 index 0000000000..ecf062763c --- /dev/null +++ b/.taskfiles/pre-commit.yml @@ -0,0 +1,12 @@ +--- +version: 
"3" + +tasks: + init: + desc: Initialize pre-commit hooks + cmds: + - pre-commit install-hooks + run: + desc: Run pre-commit + cmds: + - pre-commit run --all-files diff --git a/.taskfiles/sops.yml b/.taskfiles/sops.yml new file mode 100644 index 0000000000..28ca8abbc5 --- /dev/null +++ b/.taskfiles/sops.yml @@ -0,0 +1,14 @@ +--- +version: "3" + +tasks: + + encrypt: + desc: encrypt sops file 'to use must include -- before path to file.' eg "task sops:encrypt -- file.yml" + cmds: + - sops --encrypt --in-place {{.CLI_ARGS}} + + decrypt: + desc: decrypt sops file 'to use must include -- before path to file.' eg "task sops:decrypt -- file.yml" + cmds: + - sops --decrypt --in-place {{.CLI_ARGS}} diff --git a/Taskfile.yml b/Taskfile.yml new file mode 100644 index 0000000000..557da75065 --- /dev/null +++ b/Taskfile.yml @@ -0,0 +1,15 @@ +--- +version: "3" + +vars: + PROJECT_ROOT: + sh: git rev-parse --show-toplevel + +includes: + flux: .taskfiles/flux.yml + cluster: .taskfiles/cluster.yml + format: .taskfiles/format.yml + bootstrap: .taskfiles/bootstrap.yml + precommit: .taskfiles/pre-commit.yml + kubeadm: .taskfiles/kubeadm.yml + sops: .taskfiles/sops.yml diff --git a/cluster/config/cluster-config.yaml b/cluster/config/cluster-config.yaml new file mode 100644 index 0000000000..ba0ed17bcf --- /dev/null +++ b/cluster/config/cluster-config.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: flux-system + name: cluster-config +data: + CLUSTER_NAME: kokoro + CLUSTER_ID: "0" + + CLUSTER_CERT: letsencrypt + + NETWORK_K8S_POD_CIDR: 10.11.0.0/16 + NETWORK_K8S_SERVICE_CIDR: 10.10.0.0/16 + NETWORK_SERVERS_CIDR: 10.0.0.0/16 + + TZ: America/New_York + + LB_RANGE: 10.0.10.0/24 + + ANYA_IP: 10.0.0.15 + UIHARU_IP: 10.0.0.10 + RENA_IP: 10.0.0.16 + + #Pre-defined IP addresses + LB_NGINX_INGRESS: 10.0.10.1 + LB_MARIADB: 10.0.10.5 + LB_BABYBUDDY: 10.0.10.6 + LB_FLEXO: 10.0.10.7 + LB_SONARR: 10.0.10.8 + LB_RADARR: 10.0.10.9 + LB_JACKETT: 10.0.10.10 + 
LB_QBITTORRENT: 10.0.10.11 + LB_PLEX: 10.0.10.12 + LB_HAJIMARI: 10.0.10.13 + LB_ANIMARR: 10.0.10.14 diff --git a/cluster/config/cluster-secrets.sops.yaml b/cluster/config/cluster-secrets.sops.yaml new file mode 100644 index 0000000000..0aca24c054 --- /dev/null +++ b/cluster/config/cluster-secrets.sops.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cluster-secrets + namespace: flux-system +stringData: + EXTERNAL_DOMAIN: ENC[AES256_GCM,data:2yOXz90beUQrNQ==,iv:CPfAlRhLKI9PJ3PyCiX+uESdmohbDNux2tIMSKgM71I=,tag:CRFIsNh7/+QMHgVZsV0IVw==,type:str] + SECRET_MARIADB_ROOT_PASSWORD: ENC[AES256_GCM,data:+IVlEUASP6fb,iv:3h1qpozIsMtr8oKC1h1mY2vl1Dg1kZyN46imHm33wH0=,tag:QPjLHbBJe3rlM0SLuodU5w==,type:str] + ROUTER_IP: ENC[AES256_GCM,data:c4QI0eBQUVs=,iv:iGs8JjyVY89QGFu+UgUWEil/KWbPnmpFtCu0PWihNrQ=,tag:h/SsAeZUBpKMR9XbCG/rhQ==,type:str] + SECRET_PLEX_TOKEN: ENC[AES256_GCM,data:b7YTo7zxeRR51W1I74wTX8heJnRgxiHjGXc=,iv:Fyy9dlZcuGYYITq4XHAjg2eRKbS8seqHjjim0RWCD7w=,tag:SX2cgx3XusyC0cZtZrqg6Q==,type:str] + ACME_EMAIL: ENC[AES256_GCM,data:/x7ztQdbA85acd+omLafuZgo,iv:vkhTsTNAJbcdOjLPEowFJ2EAstLt3lNjPBxAKKgQqXI=,tag:qFMkOxUXAzEyfacSMykBQw==,type:str] + SECRET_GRAFANA_PASSWORD: ENC[AES256_GCM,data:6r6AvPgI,iv:g6DP0JrRbWgm5dxcPxdknD8I8iWebX9V88SF6Ap543Q=,tag:j/VwvYvPCCnwTkZoGMwjGQ==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1ayu5h4m5gqfchewyk6vqm2ts95n3q947lldcwf4cjclqx0kec5fq5dha7q + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBOc2xCekMyN3B1NURCUHgz + UXR1RkNoeERuUEFkcTN1bG5lVTJJa3RJaWtNClY4WkYraVFZcUF3UFA5MmFGanNq + a1NmY1Z5Rzhxcjd4TEpsMXFFNDBCTjgKLS0tIFRvYTZ4MkJoeHlwZjRiVXhXcndr + MUp2YlNnUzE3Y1NzWU93N0diTmYrVTAKKla2c2g4nCgGCJmh6Uff85I1/lFI+c5G + VD7K2aR+W9qnauAwZlKdS1IKKFAJfXvEKHk9ZNOPPNB7zsYxPP9QKQ== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2022-08-23T17:32:46Z" + mac: 
ENC[AES256_GCM,data:y8AwFjW3FUpoQh5xHNc93Hib0zBmZEdpI7mt9pUKJT6y9+Q/PUBgH6GmnYUdlcueKs2/mi5IKUwc6Z90iTcMqrVbdSCrwVaMsum8EWBzxPK+LMxcd1A/+3jIcYC6AeKqOH19QlSQycQPKlmoaCqdQ5lk4Eudxr62eXfUSpemldY=,iv:MOAmVRO4YLE6Pj/dL8Txb50rDMsS2pgOydEiRyTAztE=,tag:ZhN5Qmr101HFWxAIeK+UqA==,type:str] + pgp: [] + encrypted_regex: ((?i)(pass|secret($|[^N])|key|token|^data$|^stringData)) + version: 3.7.3 diff --git a/cluster/config/github-deploy-key.sops.yaml b/cluster/config/github-deploy-key.sops.yaml new file mode 100644 index 0000000000..2771409f70 --- /dev/null +++ b/cluster/config/github-deploy-key.sops.yaml @@ -0,0 +1,31 @@ +# yamllint disable +apiVersion: v1 +kind: Secret +metadata: + name: github-deploy-key + namespace: flux-system +stringData: + #ENC[AES256_GCM,data:Ka249beIvyjMB7rnGMgRTSJu5iHwK/mlEJftyN9poB/beQ==,iv:MfrS2vG8Z6c/hUGnKe0v6/2Nv4fkYzzPXCVpNOy1JvQ=,tag:74/qoZrTjjjjD0Z5c24RjA==,type:comment] + identity: ENC[AES256_GCM,data:mfgOJZ8pGmOKZNeKhmcvo+ZlbCHoIdWTuTcbUnkc1YI50OOrj0+SiBryTWMm9EOn2rGF5HBgOP61RZxsx0VjAWG+kxKTDZRqryKzLPI1w7xKIQFaErR4S5J/vJQmbNQxOL9TwAlaDhNBFtD4tPfMlLm+CDBpnlJdr1E7YjNrTrFDitRNWT+cEO44+rFCZx2WptTX49PaN1sjsps6eJIL6cKZYIb0SLx9AO7jJwcsjMVgujWL7RbFFPv0vkBW4zbBegNive0OyTBJg8y3T8HAkEdacF85vpJ2LikK/ME3Sd5IU/ql/TPooVatnifRf+kr6I5b72isM7ygz2W5EcrDYcGmdLSD91nszuioV9+y1NQ3gDPyB9vqRUj1baM5UaGxp4K0dTN+41S/BqbeEwvnqqrDkUdsaSTe61VAtpMlLtpGJhfeZvmfSEjf/acr9JSA3TrMrqMGVFV0nNY8vWOQZtRv1epLgtxYMnAzKzsWe1SwexTqTUxCABnrwrQfDsULcQTFtaMQaKIvKcPcakPAQbic6KkVYFtSAQEk,iv:OsNjcGUDFopqVHP61M833mc6JWBhN7SFy20uEbDeFho=,tag:l5iMSGej3GnOlhdF0Y1uFg==,type:str] + #ENC[AES256_GCM,data:4DA6t+Jj1gRmIdHpNZM27nZUwThrvi480pW4csgl/JPgkQ5sws9jYtSA9hhUFMvpvthf1ONl+MyNUClw7rTK2bc/tGVSYT/sORhoH9ZPrdfgT0jbX1cJidDh0wIVOxXNloF3pbUX,iv:SKwRYVGeie0NCkHn9Qq1JFqShFGuUTK8yzBOV7SLdZ8=,tag:av6qpH0LxhS5c6/agerjgw==,type:comment] + known_hosts: 
ENC[AES256_GCM,data:KmFK59BUiPplPw4bOWRYcrXTA3nvm+P0yhr79vvlNqnMVix+Imqi818EG5rES0HH3X2x7l/vfRhli9p0GZz+JR4b0f2XJ1lzJ5XozNZQg2vMqS4lz+wt7ftTLrZiFbLgJNmhI7pRTY03gTP+o6oGKOgMUDl/7w27XxKbnlYbPLUCjCrOZW3Ypxn/aCVpB8tqt/I/mm/lr+xsf11fqdLQhkNnkV7tGou1TU2FDm1OUTSAD91zXNg0lZXzyR6nFb38j485HhMcflbz3LYs2Tcl6e88TmA36vhAgED2ZB5DebTjogSWvt84j7G2SwI1YWHDX61GfKgRXqvO+Js8y3nSEelb1ZY1RlwQPPf+IvNATF+9ILBEM616xLNHuJth8GynkRiVoF9fEa73UC0x2bprEAEYRYHn1anAmm6pTxOHSe2UYkA0bGxVbU/jQvOZb4TfWVj0kMEPMFtuOb0PnLpEnE1K/csS7uLbDXnkbwdo7ErdYl62D/C4RpU+SV9Asn1dtG+/G8PIVv6UeAZYznEtwMtm47I4jINq7w1NIbYYwqv+t93W7bxvNkPGK9T1oeyT+LAsuAISQ0yIOj2NZfxMfy+KLviYq4k08PwWwftyBZttUgZv22zRj+bNL4w17B8tOmoT2NxlLX01J3OcKPneT5hDdcJGExsRlGVvlpQBczMil/tf8QQU6oahYQayh7X/weQMqykZNIRW09/uqyJ45QTyh3rlFaTy+TebQ8QKlyCuvf/OpLq6rF1velKZKZqaOBOs2eHkDDnZJ3sN1eoXZ8uTR7Dfz0SsXBico46YgWpfM5HGlS08gyRlxY0yI2dTX/KfL3l7HALO9G9asBKkimUkw9ZnJb2h92kBQvjPfv4=,iv:UyrUWt7D6Bm0BuVX2ghv1CAwM1zgP/a988Oq7BvA7SM=,tag:FlNC0dB0t3i1NMAVYF1wuw==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1ayu5h4m5gqfchewyk6vqm2ts95n3q947lldcwf4cjclqx0kec5fq5dha7q + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6dS9iclFvclM1dTZDUmUz + Q3hNTTQvdnJ2dDVORTlld2RnQWh1Y3VMbUJ3CkNvMHBneXFDdkExUElCSVFCZnkw + dXp3dXNnNmRMWDJwVGNXSWJDMmczVzAKLS0tIFNJQ1NvSlR4U1NqaXBjVTRBZVpv + RVlWMTlYeWcvK045bmh0RkFqVE9GVUkKkF/DzF1lgWjCAEL4MYpZDwa5Mpb5DJTA + B6chV9kch8ORJ9bEe6Npv9KrCnHgWdmdw3t5oMNnhqUs6QlLIIAbVA== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2022-08-20T14:58:02Z" + mac: ENC[AES256_GCM,data:SWIbbL7ZS8BekjsxGWr+EVS+yG7IwFjEhiPF4R5lGqH/oRBeXs1KV1H8VC7wnqRq+OZj0H4tD4OaDqbsKEhLGMcUx/GWAM+44cTjaGQ80xIeHxN6ysSzr5Fvv6viinKCehwQigxH51ThdeiEqOyqSSCeOoI7rUr+Q/QDN8KoEqM=,iv:uV6rGOInuJ8Xh0d4ZLhZXLGwfaVm4nUT1HGFgUUTQWk=,tag:YdMfMFA90akxNU73LI4nbA==,type:str] + pgp: [] + encrypted_regex: ((?i)(pass|secret($|[^N])|key|token|^data$|^stringData)) + version: 3.7.3 diff --git 
a/cluster/config/kustomization.yaml b/cluster/config/kustomization.yaml new file mode 100644 index 0000000000..d30a8f4a56 --- /dev/null +++ b/cluster/config/kustomization.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./github-deploy-key.sops.yaml + - ./cluster-secrets.sops.yaml + - ./cluster-config.yaml + - ./porkbun-key.sops.yaml diff --git a/cluster/config/porkbun-key.sops.yaml b/cluster/config/porkbun-key.sops.yaml new file mode 100644 index 0000000000..7875ff6ad2 --- /dev/null +++ b/cluster/config/porkbun-key.sops.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Secret +metadata: + name: porkbun-key + namespace: cert-manager +type: Opaque +stringData: + api-key: ENC[AES256_GCM,data:qAxwsloNAhbgFeTauUTeHOmTkLssO4vYWCztyLYiZP1Ja47bNCgiH0aviRExVkq8+oAKKK2RmvPp7x5VmF2/qPIKbLo=,iv:P/Vvp5AawnVGez0oW6Ksre0Jjk3qBS2IYAm+uJz11SY=,tag:Dh+L1taM6drgwnSN0cw0gQ==,type:str] + secret-key: ENC[AES256_GCM,data:v2lLnUvlkls2CDgtykP2G5xMS36McRcDTRVHUZ2HYUEDsrCXhNnKOy+IkUkotFSPLAYU5Ud5CXgksS+A7wQPvTa5838=,iv:OxPnZgHXPbSniFie5+0eXW5LPYHozCev4r7Ahcl82s8=,tag:lBD5HzhYoQiQLYtKpi4Hkg==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1ayu5h4m5gqfchewyk6vqm2ts95n3q947lldcwf4cjclqx0kec5fq5dha7q + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBRZnVYMVRUN2NiS3NxLzRs + VnBPd25yZ2x1MkNyTHE5blNINGVMakpRWmswCjZrK01Za0JWYlo2VHpxa0ZWYThT + d3Q3dTB0RjY5cjd4dDFwTFRsK0lsN2cKLS0tIHF0MG5JWkVoUUE2MGdnQjZaWERP + eCtKWW5HWTlGQjhvd3RJVk8zMU5vS3MKQL2JxRyazhpaox/zQ3LlLsuA/QZhcpFD + M2jAFvjuCl3FLtBxcPu1P3qq7gdgyAaEQe6LWN6oFYARz+Hu43EKtw== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2022-08-22T10:10:22Z" + mac: 
ENC[AES256_GCM,data:OyjdSHlTuyrIDZ++DHToLxM1GQ6ozVNnyiW2ZeZr/PDjBkUZ2/HNR1tMy/XDcZQnJjD7M+Qpi9BMXe1zpU4jnTbJL6mAHqTFZ0W2KfShnV04tP5WPd1ZaGQ4NUk9dW3oc0Sl7Qbp7KvE2+lLVhS1e5skXf1aiacrcwrJPkpi+ZY=,iv:HdyldkLAoa6K/ayAd30Dy9jaC3acCOIRYjlQmHiq7j0=,tag:5c3jgYu2SjpISDYbzHDWqw==,type:str] + pgp: [] + encrypted_regex: ((?i)(pass|secret($|[^N])|key|token|^data$|^stringData)) + version: 3.7.3 diff --git a/cluster/crds/democratic-csi/kustomization.yaml b/cluster/crds/democratic-csi/kustomization.yaml new file mode 100644 index 0000000000..d5b4a6de99 --- /dev/null +++ b/cluster/crds/democratic-csi/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - zfs-generic-nfs-config.sops.yaml + - zfs-generic-iscsi-config.sops.yaml diff --git a/cluster/crds/democratic-csi/zfs-generic-iscsi-config.sops.yaml b/cluster/crds/democratic-csi/zfs-generic-iscsi-config.sops.yaml new file mode 100644 index 0000000000..964557db78 --- /dev/null +++ b/cluster/crds/democratic-csi/zfs-generic-iscsi-config.sops.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Secret +metadata: + name: zfs-generic-iscsi-config + namespace: storage +stringData: + driver-config-file.yaml: 
ENC[AES256_GCM,data:MCA0tyMw6begxkpWfvdfMjesv+NjmHEbhUh9TgFckOGubrt9z2O0VCjL9Z6oCDJxVAXLSFQO0o+S/zOUxNDb8Sw9uA1xzCj4bXszTx8jSgH8CcRg4QFFeER1Sq6hpS11fuIunKOUcXhIHZpa3oe2qBCLgt1WLWjc0hnAmSKM71IGdzGtzIgOIZJqhiKNUZSoy1otJWnty7c8QeUccgeVSk1gTSl1YRfsDEmnRMnaAXtyo/QZ5Ok8ytB41qCOdSiY16PXiQUJi1FOsb/UzALuqL9j2zBFeNBMzTcAPLhL/byPtCyW2PDu9ZIElWK0oOuvAGgZvFQ9XKFPsfrcFM1KG6XD3r3BwNeRcdFN/6BHrzH6goDC9maTYD49Hdp/xkAqHMI4XF0uLBb4+nLYCRDttqtbmyNJTTHx6C9goWMFLuKkZLHaByb38kqC8+6Xnkctb+IAu2baA539JH83plcpfMrJ5dlcldYDyw8Z63fik+xPLnRd65hVhvy3QRDdL5EIalDXqe5QOs4WssZ3NCYyg7G5FGqVLB+jFJb766SYTiB+/ys+tJ6OLPeTdA54krMDctrVAaTj/gN0XlD08f1RLmpfbTOmhSyC16hPuA4dAVQxQcwUrA57cOHyTae8bL7SKbQfL+QtKPxiBXE47lLI73e57SIYV73vsF2HZ83TXXBsw3Wtu/fQJNpf9fJdu7P34EHJ0A+w2BfQWNrnYOWdaRpZgGyB9V2k/prFrzJBzQ7+62oQRBjox9WOSoQeE177Y3tfXgvL6jjdz1l7yuHqGLOkfB0oBJ2f+3D9qMLfL5XCI1nJTCQSiNwsY7uv/UKu6FRDasI0vLyibvOpD5qy3sK2QOFhhNWZkcBklwpmbUnfSrI8Zim1k0DFmBsRfVd/x6fqaywyeRLPKNqbAgOTunVRewy2NM341ZlnrWjXD144etGHJZDAElHBdtoCt6kaCt5GZtYYLneztEhpyZ7hI2F1p+cNqHIVWx9cpLU+Or/CkcvBvy3XgYVhGgCxPmc0F2rcQIfdMMqXWAxBkU05QDlUL93nMQtV+MjvwJZd9xnfTdVfGXnXcX04sPgvHmKDS6zkhwL5ydy730Aa6EX4KOzRPc8+0KW8zqHtpVxjlXhhaaO0Ui6rd3YB3ehj3M7UdW9jYOBAA+esvNAbbNYhGfqWxZo9drN6iqIulc9B/8aDjVqsVKOPC3bhNtj3OhEXx2coUQsz2H2NmeUKWLCWTrwjK0GsfQjAi/PmGE+vlrBMa4VsMqNHX2pTDtXvWcJAVyFL5RPYkCQVCt547tOnUM3mV3WcLHv1wkptBfktJY/crEBEszxrqV89SPwQi+PjG2V1fO3rV3/jloLt,iv:TRFqbkrl7oXvH1j6TWVISfHyARM+G+pwgjdgb3jBpDU=,tag:36/zEL6dsLWumNrky3uPBA==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1ayu5h4m5gqfchewyk6vqm2ts95n3q947lldcwf4cjclqx0kec5fq5dha7q + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBVY1pGSW5NSk5OVkNUUUtt + RExFZ3FSZUJMaEVzT0VsN0pkK0J0Yjk1ZkNFClAwZUVLR3F3VmNHWkZUUnBONFNO + L2VPbUpJZ29KUjBHVXBXV1JQa25RNkUKLS0tIGNGc25WcE4rdk9pRkdoTTR6L3Rq + NWh0N0xRME9lUUVpd3N3bTJRWTFEbE0KUjnwOR3gsolD/ktHja4Vvoz/WmSa+kQY + VGiBLnjzFfWiwOIym4nrKcvOk3sIDwEitafB8XwrEAvNlz1TMCMrTw== + -----END 
AGE ENCRYPTED FILE----- + lastmodified: "2022-08-21T10:57:14Z" + mac: ENC[AES256_GCM,data:ezx8pxf7vhOuyDcPhkv1gZiot7CZxNvPpbusQenOd+PeF6dKOjj6GQsNUd32ctf+h7YPT0mt4VZthWSMGYWjeNU5lDeHCJrr+0ZpB14dgok1rlTAL2Ehsz00u9jEV/qGKy8rWOGyOWmedCV+DCOg3pQwNtbvplROonaemeVuxnk=,iv:2mD9+4dNJ1WyykY72V2jjb/AnTuL+rjtNi6RY94kgRA=,tag:paV80i/yEjpQuCC9eCjrpg==,type:str] + pgp: [] + encrypted_regex: ((?i)(pass|secret($|[^N])|key|token|^data$|^stringData)) + version: 3.7.3 diff --git a/cluster/crds/democratic-csi/zfs-generic-nfs-config.sops.yaml b/cluster/crds/democratic-csi/zfs-generic-nfs-config.sops.yaml new file mode 100644 index 0000000000..8b8ecbbc24 --- /dev/null +++ b/cluster/crds/democratic-csi/zfs-generic-nfs-config.sops.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Secret +metadata: + name: zfs-generic-nfs-config + namespace: storage +stringData: + driver-config-file.yaml: ENC[AES256_GCM,data:5Ghb6WVFZXK+j/FBxvcF8ty1ap6sWxWOZldhFhvMS989heT/mYjH9edGoOZfQO9BF8NPQPmkfnP7mpzx6XEPyxU73U0MpxYpVWEcTpI9HLC1SwRRCKvr/XbsGjeB/6NLMHRFa9iND/E1q/+u/QtQm9iO2jwSKwMebCfsyYe0k80RMwikHwsPom29jOJ3xH6MxzvGpnLLLPg2x7OAowJQL0gpT5mnSmxL8Ls5jD7BxjuwXnijZ0fawxm5IBBhzxuiNkSwhcKJU7PFYuQgbzz0r8qJ0z7boJrDLuLO+31EbKzZC1y3BTbgU4OXdo0b7XykL0S2zhAMr66jMSOWFVeUTyJypnx1kO0ioXj1SkejjbU+IBZ2iuzvUGoaX3r/jh8gW4e5FK8HCZwBBaWwxH5BZOGXXIcf+/VFL4c3wsZUmOyHd9jBjU2iYODPZXN+wlySrysbJNGAWAcnDDofAKZ2kIVXo5CanF4cSIGTpesvaKtEOm5QkXPanZi3HhD3EDRM+P35V1P1lv2n1qh4ZBKXNGY2sKjST9KoHzDxdlw6WiFfoj3KqQbnG1TB131ChmyoMc17sYecYX2dkE7HzFxOJlJAfxTkc7yg6KTSWoWzAf8+OfK5FeZqjOorPI0CCuyTxHNK+kAb6ugBAheSw6IWYFIzWMfR0FsbfzC3qRAoz1NDf1c/WNqhxNk/oaq3yYo8A16SSiYmkpxoztl4QZ5h4yHbWYmd3V8cssEAwVlZgxm9uHN1phq2/qHKDFw5BkfncLhhOhJD/tR8iD29cDbC2Q7Kn3od1qGQHJKMkFlKdcYQWvkYYj7atLcP0SG1EZV9GMkfHRN3kX2gCQPPOu3Uz5CDHUE4jHVYva7K1Kvl4Uy8WbpEG75sdqXUfG8tQML3CA4pqb6lUKf3W+07FfAW3ar6+3+ij1gN6HzfYj53kcvNVEcZGtMwHUa47YHmB16zdpBjLHoOh9I9Qd9GCkg836wE1ZGuBa34T53ewWFPwx7ZHYFmDk1TTZPFt/TBOXzI3VgHcXEqfclyOR5kWjWPHaZIPz0+3xoDhm2LvBxr3bx+b95niLr1qqPlNLaUu066Gif7VP+YDF85NLHT81TIF
wF/GWbS/2DR3AGx0TG8w8eDfHu3OuNQINq1/MzDxAmsUMEykWbX728r4Mnw5bStX9t2OaArGMwwPPOtihp0EqVqGBkxT+/Qg+aqN9WFQ9oUtCIFP113aRWXM22DuX4RDZv8e0Hqd0mwaeefZt42Zc2IrqWEptb0WOTcTF3RB4Jih9/VfEoQbQnRuWMdVsCrWqE9httY56/a5+MJte9KHx8yCzP3RfiD,iv:ZpxvWn1PI/xumnzFNSykEtdBGDSaNmWsFjgflklwRH0=,tag:W2vu+8YRIH68N/JmIb4ULg==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + hc_vault: [] + age: + - recipient: age1ayu5h4m5gqfchewyk6vqm2ts95n3q947lldcwf4cjclqx0kec5fq5dha7q + enc: | + -----BEGIN AGE ENCRYPTED FILE----- + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBDY2szaFhzeFk2ZUZzZURT + SHp0d3VCSGpFTHRsNU96cUMvOG1OczQ2dVNvCkd1RlhuMWZWenBSR3ZyRG1YQ2Ri + bW1KQ1h6VFZiR291NjdnbWdISDRVZjQKLS0tIFJFN0NjK0s2Z2tFZUhRdlVXMHU5 + OGlkN1U1N2JxQlF3MW5mYk40S2Q4OXcK3F1G4YbhtfhJg9hIMBJU2udI8oGEUXmN + Mkf9Ua+d1EdC8wU569xTP4WWtmO6/8r/n9iZgwNrbytFYVjC78oWIg== + -----END AGE ENCRYPTED FILE----- + lastmodified: "2022-08-21T10:57:32Z" + mac: ENC[AES256_GCM,data:B+ty52zW44ytwyRcR7fsMi5L0wcDm9B17/i/HwB6NjViNQrwuwS/KST0dzOtkKs4t6DH1QV/R4vDgb0GcBnR07aqt6fpbVwH5it9VwEwc9O4J5uUM9mH4HptHl+UFVGAGbG8kLNYi5zgxtosoF1+32IUaanuMWu1IOZvpvDTIAM=,iv:LRgjoBiwQoBPdfWRdeF4Gnade7RCW+SppdVzuEkAEhE=,tag:5bJMB7/JeW/fvi1tLSuY5A==,type:str] + pgp: [] + encrypted_regex: ((?i)(pass|secret($|[^N])|key|token|^data$|^stringData)) + version: 3.7.3 diff --git a/cluster/crds/kustomization.yaml b/cluster/crds/kustomization.yaml new file mode 100644 index 0000000000..b906b25aac --- /dev/null +++ b/cluster/crds/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./metallb + - ./democratic-csi diff --git a/cluster/crds/metallb/ip-address-pool.yaml b/cluster/crds/metallb/ip-address-pool.yaml new file mode 100644 index 0000000000..bce441a2cb --- /dev/null +++ b/cluster/crds/metallb/ip-address-pool.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: default + namespace: network +spec: + addresses: + - ${LB_RANGE} diff --git 
a/cluster/crds/metallb/kustomization.yaml b/cluster/crds/metallb/kustomization.yaml new file mode 100644 index 0000000000..c58c69f5e8 --- /dev/null +++ b/cluster/crds/metallb/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ip-address-pool.yaml + - l2-advertisement.yaml diff --git a/cluster/crds/metallb/l2-advertisement.yaml b/cluster/crds/metallb/l2-advertisement.yaml new file mode 100644 index 0000000000..4c88cc5117 --- /dev/null +++ b/cluster/crds/metallb/l2-advertisement.yaml @@ -0,0 +1,8 @@ +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: default + namespace: network +spec: + ipAddressPools: + - default diff --git a/cluster/deploy-cluster.yaml b/cluster/deploy-cluster.yaml new file mode 100644 index 0000000000..6edb0cd805 --- /dev/null +++ b/cluster/deploy-cluster.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: apply-cluster-deployment + namespace: flux-system +spec: + interval: 10m0s + path: ./cluster/flux/deploy + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops diff --git a/cluster/flux/deploy/apply-cluster-config.yaml b/cluster/flux/deploy/apply-cluster-config.yaml new file mode 100644 index 0000000000..3510e3f79e --- /dev/null +++ b/cluster/flux/deploy/apply-cluster-config.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: apply-cluster-config + namespace: flux-system +spec: + dependsOn: + - name: apply-flux + - name: apply-cluster-namespaces + interval: 10m0s + path: ./cluster/config + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops + decryption: + provider: sops + secretRef: + name: sops-age diff --git a/cluster/flux/deploy/apply-cluster-crds.yaml b/cluster/flux/deploy/apply-cluster-crds.yaml new file mode 100644 index 0000000000..45dd6ffa33 --- /dev/null +++ 
b/cluster/flux/deploy/apply-cluster-crds.yaml @@ -0,0 +1,53 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: apply-cluster-crds + namespace: flux-system +spec: + interval: 10m0s + dependsOn: + - name: apply-cluster-config + - name: apply-cluster-repositories + - name: apply-cluster-namespaces + - name: apply-flux + path: ./cluster/crds + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substitute: {} + substituteFrom: + - kind: ConfigMap + name: cluster-config + - kind: Secret + name: cluster-secrets + patches: + - patch: |- + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + kind: Kustomization + metadata: + name: not-used + namespace: not-used + spec: + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substitute: {} + substituteFrom: + - kind: ConfigMap + name: cluster-config + - kind: Secret + name: cluster-secrets + target: + kind: Kustomization + group: kustomize.toolkit.fluxcd.io + version: v1beta2 diff --git a/cluster/flux/deploy/apply-cluster-namespaces.yaml b/cluster/flux/deploy/apply-cluster-namespaces.yaml new file mode 100644 index 0000000000..04612cd4a2 --- /dev/null +++ b/cluster/flux/deploy/apply-cluster-namespaces.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: apply-cluster-namespaces + namespace: flux-system +spec: + dependsOn: + - name: apply-flux + interval: 10m0s + path: ./cluster/namespaces + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops + decryption: + provider: sops + secretRef: + name: sops-age diff --git a/cluster/flux/deploy/apply-cluster-orchestration-core.yaml b/cluster/flux/deploy/apply-cluster-orchestration-core.yaml new file mode 100644 index 0000000000..5b790eda42 --- /dev/null +++ b/cluster/flux/deploy/apply-cluster-orchestration-core.yaml @@ -0,0 +1,53 
@@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: apply-cluster-orchestration-core + namespace: flux-system +spec: + interval: 10m0s + dependsOn: + - name: apply-cluster-config + - name: apply-cluster-repositories + - name: apply-cluster-namespaces + - name: apply-flux + path: ./cluster/flux/orchestration/core + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substitute: {} + substituteFrom: + - kind: ConfigMap + name: cluster-config + - kind: Secret + name: cluster-secrets + patches: + - patch: |- + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + kind: Kustomization + metadata: + name: not-used + namespace: not-used + spec: + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substitute: {} + substituteFrom: + - kind: ConfigMap + name: cluster-config + - kind: Secret + name: cluster-secrets + target: + kind: Kustomization + group: kustomize.toolkit.fluxcd.io + version: v1beta2 diff --git a/cluster/flux/deploy/apply-cluster-orchestration-user.yaml b/cluster/flux/deploy/apply-cluster-orchestration-user.yaml new file mode 100644 index 0000000000..f9bfd42c0e --- /dev/null +++ b/cluster/flux/deploy/apply-cluster-orchestration-user.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: apply-cluster-orchestration-user + namespace: flux-system +spec: + interval: 10m0s + dependsOn: + - name: apply-cluster-orchestration-core + path: ./cluster/flux/orchestration/user + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substitute: {} + substituteFrom: + - kind: ConfigMap + name: cluster-config + - kind: Secret + name: cluster-secrets + patches: + - patch: |- + apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 + kind: Kustomization + 
metadata: + name: not-used + namespace: not-used + spec: + decryption: + provider: sops + secretRef: + name: sops-age + postBuild: + substitute: {} + substituteFrom: + - kind: ConfigMap + name: cluster-config + - kind: Secret + name: cluster-secrets + target: + kind: Kustomization + group: kustomize.toolkit.fluxcd.io + version: v1beta2 diff --git a/cluster/flux/deploy/apply-cluster-repositories.yaml b/cluster/flux/deploy/apply-cluster-repositories.yaml new file mode 100644 index 0000000000..64485e66a6 --- /dev/null +++ b/cluster/flux/deploy/apply-cluster-repositories.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: apply-cluster-repositories + namespace: flux-system +spec: + interval: 10m0s + path: ./cluster/repositories + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops + decryption: + provider: sops + secretRef: + name: sops-age diff --git a/cluster/flux/deploy/apply-flux.yaml b/cluster/flux/deploy/apply-flux.yaml new file mode 100644 index 0000000000..39f07cab97 --- /dev/null +++ b/cluster/flux/deploy/apply-flux.yaml @@ -0,0 +1,55 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: apply-flux + namespace: flux-system +spec: + dependsOn: + - name: apply-cluster-repositories + interval: 10m0s + path: ./manifests/install + prune: true + wait: true + sourceRef: + kind: GitRepository + name: flux + patches: + - target: + kind: Deployment + labelSelector: control-plane=controller + patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: not-used + spec: + template: + spec: + containers: + - name: manager + resources: + requests: + memory: 512Mi + limits: + memory: "2Gi" + - target: + kind: Namespace + labelSelector: app.kubernetes.io/instance=flux-system + patch: |- + apiVersion: apps/v1 + kind: Namespace + metadata: + name: not-used + labels: + goldilocks.fairwinds.com~1enabled: "true" + - target: + group: 
networking.k8s.io + version: v1 + kind: NetworkPolicy + patch: |- + $patch: delete + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: not-used diff --git a/cluster/flux/deploy/kustomization.yaml b/cluster/flux/deploy/kustomization.yaml new file mode 100644 index 0000000000..1d2b39a602 --- /dev/null +++ b/cluster/flux/deploy/kustomization.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./apply-cluster-config.yaml + - ./apply-cluster-crds.yaml + - ./apply-cluster-namespaces.yaml + - ./apply-cluster-orchestration-core.yaml + - ./apply-cluster-orchestration-user.yaml + - ./apply-cluster-repositories.yaml + - ./apply-flux.yaml diff --git a/cluster/flux/orchestration/core/cert-manager.yaml b/cluster/flux/orchestration/core/cert-manager.yaml new file mode 100644 index 0000000000..ecbc3f9e4b --- /dev/null +++ b/cluster/flux/orchestration/core/cert-manager.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cert-manager-certmanager + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/cert-manager/cert-manager/base" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cert-manager-certmanager-config + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/cert-manager/cert-manager/config" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: cert-manager-porkbun-webhook + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/cert-manager/porkbun-webhook" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops diff --git a/cluster/flux/orchestration/core/kube-system.yaml 
b/cluster/flux/orchestration/core/kube-system.yaml new file mode 100644 index 0000000000..02b12a11f2 --- /dev/null +++ b/cluster/flux/orchestration/core/kube-system.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: kube-system-cilium + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/kube-system/cilium" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops diff --git a/cluster/flux/orchestration/core/kustomization.yaml b/cluster/flux/orchestration/core/kustomization.yaml new file mode 100644 index 0000000000..1f8ea19605 --- /dev/null +++ b/cluster/flux/orchestration/core/kustomization.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./cert-manager.yaml + - ./kube-system.yaml + - ./network.yaml + - ./system-monitoring.yaml + - ./storage.yaml diff --git a/cluster/flux/orchestration/core/network.yaml b/cluster/flux/orchestration/core/network.yaml new file mode 100644 index 0000000000..36b4dd320f --- /dev/null +++ b/cluster/flux/orchestration/core/network.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: network-metallb + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/network/metallb" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: network-multus + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/network/multus" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: network-ingress-nginx + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/network/ingress-nginx" + prune: true + wait: true + sourceRef: + 
kind: GitRepository + name: k8s-home-ops diff --git a/cluster/flux/orchestration/core/storage.yaml b/cluster/flux/orchestration/core/storage.yaml new file mode 100644 index 0000000000..2944c93038 --- /dev/null +++ b/cluster/flux/orchestration/core/storage.yaml @@ -0,0 +1,28 @@ +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: storage-csi-nfs +# namespace: flux-system +# spec: +# interval: 15m +# path: "./cluster/manifests/storage/democratic-csi-nfs" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: storage-csi-iscsi + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/storage/democratic-csi-iscsi" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops diff --git a/cluster/flux/orchestration/core/system-monitoring.yaml b/cluster/flux/orchestration/core/system-monitoring.yaml new file mode 100644 index 0000000000..a1e56d4cc6 --- /dev/null +++ b/cluster/flux/orchestration/core/system-monitoring.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: system-monitoring-kube-prom-stack + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/system-monitoring/kube-prometheus-stack" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +# # --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: system-monitoring-thanos +# namespace: flux-system +# spec: +# interval: 15m +# path: "./k8s/manifests/core/system-monitoring/thanos" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: system-monitoring-grafana +# namespace: flux-system +# spec: +# interval: 
15m +# path: "./cluster/manifests/system-monitoring/grafana" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: system-monitoring-vector-aggregator +# namespace: flux-system +# spec: +# interval: 15m +# path: "./k8s/manifests/core/system-monitoring/vector/aggregator" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: system-monitoring-vector-agent +# namespace: flux-system +# spec: +# interval: 15m +# path: "./k8s/manifests/core/system-monitoring/vector/agent" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: system-monitoring-loki +# namespace: flux-system +# spec: +# interval: 15m +# path: "./cluster/manifests/system-monitoring/loki" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: system-monitoring-prometheus-rules +# namespace: flux-system +# spec: +# dependsOn: +# - name: system-monitoring-kube-prom-stack +# interval: 15m +# path: "./k8s/manifests/core/system-monitoring/prometheus-rules" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops diff --git a/cluster/flux/orchestration/user/databases.yaml b/cluster/flux/orchestration/user/databases.yaml new file mode 100644 index 0000000000..ec1d607bb3 --- /dev/null +++ b/cluster/flux/orchestration/user/databases.yaml @@ -0,0 +1,30 @@ +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: databases-mariadb +# namespace: flux-system +# spec: +# interval: 15m +# path: 
"./cluster/manifests/databases/mariadb" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: databases-babybuddy +# namespace: flux-system +# spec: +# dependsOn: +# - name: databases-mariadb +# interval: 10m +# path: "./cluster/manifests/databases/babybuddy" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops diff --git a/cluster/flux/orchestration/user/kube-system.yaml b/cluster/flux/orchestration/user/kube-system.yaml new file mode 100644 index 0000000000..64489b4672 --- /dev/null +++ b/cluster/flux/orchestration/user/kube-system.yaml @@ -0,0 +1,70 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: kube-system-reloader + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/kube-system/reloader" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: kube-system-metrics-server + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/kube-system/metrics-server" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: kube-system-descheduler + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/kube-system/descheduler" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: kube-system-node-feature-discovery + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/kube-system/node-feature-discovery" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +# --- +# apiVersion: 
kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: kube-system-intel-gpu-plugin +# namespace: flux-system +# spec: +# interval: 15m +# path: "./k8s/manifests/user/kube-system/intel-gpu-plugin" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops diff --git a/cluster/flux/orchestration/user/kustomization.yaml b/cluster/flux/orchestration/user/kustomization.yaml new file mode 100644 index 0000000000..f912c56f61 --- /dev/null +++ b/cluster/flux/orchestration/user/kustomization.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./databases.yaml + - ./kube-system.yaml + - ./organizarrs.yaml + # - ./media.yaml + - ./services.yaml diff --git a/cluster/flux/orchestration/user/media.yaml b/cluster/flux/orchestration/user/media.yaml new file mode 100644 index 0000000000..0b9d434fb6 --- /dev/null +++ b/cluster/flux/orchestration/user/media.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: media-plex + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/media/plex" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: media-tautulli + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/media/tautulli" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops diff --git a/cluster/flux/orchestration/user/organizarrs.yaml b/cluster/flux/orchestration/user/organizarrs.yaml new file mode 100644 index 0000000000..6c4d8dd33f --- /dev/null +++ b/cluster/flux/orchestration/user/organizarrs.yaml @@ -0,0 +1,168 @@ +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: organizarrs-prowlarr +# namespace: flux-system +# spec: +# interval: 15m +# path: 
"./cluster/manifests/organizarrs/prowlarr" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: organizarrs-animarr + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/organizarrs/animarr" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: organizarrs-sonarr + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/organizarrs/sonarr" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: organizarrs-radarr + namespace: flux-system +spec: + interval: 15m + path: "./cluster/manifests/organizarrs/radarr" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: organizarrs-lidarr +# namespace: flux-system +# spec: +# interval: 15m +# path: "./cluster/manifests/organizarrs/lidarr" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: organizarrs-readarr +# namespace: flux-system +# spec: +# interval: 15m +# path: "./cluster/manifests/organizarrs/readarr" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: organizarrs-gaps +# namespace: flux-system +# spec: +# interval: 15m +# path: "./cluster/manifests/organizarrs/gaps" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: 
kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: organizarrs-overseerr +# namespace: flux-system +# spec: +# interval: 15m +# path: "./cluster/manifests/organizarrs/overseerr" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: organizarrs-calibre +# namespace: flux-system +# spec: +# interval: 15m +# path: "./cluster/manifests/organizarrs/calibre" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: organizarrs-calibre-web +# namespace: flux-system +# spec: +# interval: 15m +# path: "./cluster/manifests/organizarrs/calibre-web" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: organizarrs-recyclarr +# namespace: flux-system +# spec: +# interval: 15m +# path: "./cluster/manifests/organizarrs/recyclarr" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops +# --- +# apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +# kind: Kustomization +# metadata: +# name: organizarrs-openbooks +# namespace: flux-system +# spec: +# interval: 15m +# path: "./cluster/manifests/organizarrs/openbooks" +# prune: true +# wait: true +# sourceRef: +# kind: GitRepository +# name: k8s-home-ops diff --git a/cluster/flux/orchestration/user/services.yaml b/cluster/flux/orchestration/user/services.yaml new file mode 100644 index 0000000000..3efa972a27 --- /dev/null +++ b/cluster/flux/orchestration/user/services.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: services-hajimari + namespace: flux-system +spec: + interval: 15m + path: 
"./cluster/manifests/services/hajimari" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops +--- +apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 +kind: Kustomization +metadata: + name: services-theme-park + namespace: flux-system +spec: + interval: 5m + path: "./cluster/manifests/services/theme-park" + prune: true + wait: true + sourceRef: + kind: GitRepository + name: k8s-home-ops diff --git a/cluster/kustomization.yaml b/cluster/kustomization.yaml new file mode 100644 index 0000000000..33a4dbbff1 --- /dev/null +++ b/cluster/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./repositories/git/k8s-home-ops.yaml + - ./deploy-cluster.yaml diff --git a/cluster/manifests/cert-manager/cert-manager/base/helmrelease.yaml b/cluster/manifests/cert-manager/cert-manager/base/helmrelease.yaml new file mode 100644 index 0000000000..e8276bdba3 --- /dev/null +++ b/cluster/manifests/cert-manager/cert-manager/base/helmrelease.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: cert-manager + namespace: cert-manager +spec: + interval: 15m + chart: + spec: + chart: cert-manager + version: v1.9.1 + sourceRef: + kind: HelmRepository + name: jetstack-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 5 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 5 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + valuesFrom: + - kind: ConfigMap + name: cert-manager-values diff --git a/cluster/manifests/cert-manager/cert-manager/base/kustomization.yaml b/cluster/manifests/cert-manager/cert-manager/base/kustomization.yaml new file mode 100644 index 0000000000..a09e99e178 --- /dev/null +++ 
b/cluster/manifests/cert-manager/cert-manager/base/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml + - ./values.yaml diff --git a/cluster/manifests/cert-manager/cert-manager/base/values.yaml b/cluster/manifests/cert-manager/cert-manager/base/values.yaml new file mode 100644 index 0000000000..147a2b9e6f --- /dev/null +++ b/cluster/manifests/cert-manager/cert-manager/base/values.yaml @@ -0,0 +1,557 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cert-manager-values + namespace: cert-manager +data: + values.yaml: |2- + + # Default values for cert-manager. + # This is a YAML-formatted file. + # Declare variables to be passed into your templates. + global: + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + + # Optional priority class to be used for the cert-manager pods + priorityClassName: "" + rbac: + create: true + # Aggregate ClusterRoles to Kubernetes default facing roles. Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#facing-roles + aggregateClusterRoles: true + + podSecurityPolicy: + enabled: false + useAppArmor: true + + # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose. + logLevel: 2 + + leaderElection: + # Override the namespace used to store the ConfigMap for leader election + namespace: "kube-system" + + # The duration that non-leader candidates will wait after observing a + # leadership renewal until attempting to acquire leadership of a led but + # unrenewed leader slot. This is effectively the maximum duration that a + # leader can be stopped before it is replaced by another candidate. + # leaseDuration: 60s + + # The interval between attempts by the acting master to renew a leadership + # slot before it stops leading. 
This must be less than or equal to the + # lease duration. + # renewDeadline: 40s + + # The duration the clients should wait between attempting acquisition and + # renewal of a leadership. + # retryPeriod: 15s + + installCRDs: true + + replicaCount: 1 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + # Comma separated list of feature gates that should be enabled on the + # controller pod. + featureGates: "" + + image: + repository: quay.io/jetstack/cert-manager-controller + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-controller + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + pullPolicy: IfNotPresent + + # Override the namespace used to store DNS provider credentials etc. for ClusterIssuer + # resources. By default, the same namespace as cert-manager is deployed within is + # used. This namespace will not be automatically created by the Helm chart. + clusterResourceNamespace: "" + + # This namespace allows you to define where the services will be installed into + # if not set then they will use the namespace of the release + # This is helpful when installing cert manager as a chart dependency (sub chart) + namespace: "" + + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Automount API credentials for a Service Account. 
+ # Optional additional labels to add to the controller's ServiceAccount + # labels: {} + automountServiceAccountToken: true + + # Automounting API credentials for a particular pod + # automountServiceAccountToken: true + + # Additional command line flags to pass to cert-manager controller binary. + # To see all available flags run docker run quay.io/jetstack/cert-manager-controller: --help + extraArgs: [] + # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted + # - --enable-certificate-owner-ref=true + # Use this flag to enabled or disable arbitrary controllers, for example, disable the CertificiateRequests approver + # - --controllers=*,-certificaterequests-approver + + extraEnv: [] + # - name: SOME_VAR + # value: 'some value' + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + # Pod Security Context + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + + # Container Security Context to be set on the controller component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: + allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + + volumes: [] + + volumeMounts: [] + + # Optional additional annotations to add to the controller Deployment + # deploymentAnnotations: {} + + # Optional additional annotations to add to the controller Pods + # podAnnotations: {} + + podLabels: {} + + # Optional annotations to add to the controller Service + # serviceAnnotations: {} + + # Optional additional labels to add to the controller Service + # serviceLabels: {} + + # Optional DNS settings, useful if you have a public and private DNS zone for + # the same domain on Route 53. What follows is an example of ensuring + # cert-manager can access an ingress or DNS TXT records at all times. 
+ # NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for + # the cluster to work. + podDnsPolicy: "None" + podDnsConfig: + nameservers: + - "1.1.1.1" + - "8.8.8.8" + + nodeSelector: + kubernetes.io/os: linux + + ingressShim: {} + # defaultIssuerName: "" + # defaultIssuerKind: "" + # defaultIssuerGroup: "" + + prometheus: + enabled: false + servicemonitor: + enabled: false + prometheusInstance: default + targetPort: 9402 + path: /metrics + interval: 60s + scrapeTimeout: 30s + labels: {} + honorLabels: false + + # Use these variables to configure the HTTP_PROXY environment variables + # http_proxy: "http://proxy:8080" + # https_proxy: "https://proxy:8080" + # no_proxy: 127.0.0.1,localhost + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core + # for example: + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: foo.bar.com/role + # operator: In + # values: + # - master + affinity: {} + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core + # for example: + # tolerations: + # - key: foo.bar.com/role + # operator: Equal + # value: master + # effect: NoSchedule + tolerations: [] + + webhook: + replicaCount: 1 + timeoutSeconds: 10 + + # Used to configure options for the webhook pod. + # This allows setting options that'd usually be provided via flags. + # An APIVersion and Kind must be specified in your values.yaml file. + # Flags will override options that are set here. + config: + # apiVersion: webhook.config.cert-manager.io/v1alpha1 + # kind: WebhookConfiguration + + # The port that the webhook should listen on for requests. + # In GKE private clusters, by default kubernetes apiservers are allowed to + # talk to the cluster nodes only on 443 and 10250. 
so configuring + # securePort: 10250, will work out of the box without needing to add firewall + # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000. + # This should be uncommented and set as a default by the chart once we graduate + # the apiVersion of WebhookConfiguration past v1alpha1. + # securePort: 10250 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + # Pod Security Context to be set on the webhook component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + + # Container Security Context to be set on the webhook component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: + allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + # Optional additional annotations to add to the webhook Deployment + # deploymentAnnotations: {} + + # Optional additional annotations to add to the webhook Pods + # podAnnotations: {} + + # Optional additional annotations to add to the webhook Service + # serviceAnnotations: {} + + # Optional additional annotations to add to the webhook MutatingWebhookConfiguration + # mutatingWebhookConfigurationAnnotations: {} + + # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration + # validatingWebhookConfigurationAnnotations: {} + + # Additional command line flags to pass to cert-manager webhook binary. 
+ # To see all available flags run docker run quay.io/jetstack/cert-manager-webhook: --help + extraArgs: [] + # Path to a file containing a WebhookConfiguration object used to configure the webhook + # - --config= + + resources: + requests: + cpu: 15m + memory: 381M + limits: + memory: 395M + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + + nodeSelector: + kubernetes.io/os: linux + + affinity: {} + + tolerations: [] + + # Optional additional labels to add to the Webhook Pods + podLabels: {} + + # Optional additional labels to add to the Webhook Service + serviceLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-webhook + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-webhook + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Optional additional labels to add to the webhook's ServiceAccount + # labels: {} + # Automount API credentials for a Service Account. 
+ automountServiceAccountToken: true + + # Automounting API credentials for a particular pod + # automountServiceAccountToken: true + + # The port that the webhook should listen on for requests. + # In GKE private clusters, by default kubernetes apiservers are allowed to + # talk to the cluster nodes only on 443 and 10250. so configuring + # securePort: 10250, will work out of the box without needing to add firewall + # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000 + securePort: 10250 + + # Specifies if the webhook should be started in hostNetwork mode. + # + # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom + # CNI (such as calico), because control-plane managed by AWS cannot communicate + # with pods' IP CIDR and admission webhooks are not working + # + # Since the default port for the webhook conflicts with kubelet on the host + # network, `webhook.securePort` should be changed to an available port if + # running in hostNetwork mode. + hostNetwork: false + + # Specifies how the service should be handled. Useful if you want to expose the + # webhook to outside of the cluster. In some cases, the control plane cannot + # reach internal services. + serviceType: ClusterIP + # loadBalancerIP: + + # Overrides the mutating webhook and validating webhook so they reach the webhook + # service using the `url` field instead of a service. 
+ url: {} + # host: + + cainjector: + enabled: true + replicaCount: 1 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + # Pod Security Context to be set on the cainjector component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + + # Container Security Context to be set on the cainjector component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: + allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + + # Optional additional annotations to add to the cainjector Deployment + # deploymentAnnotations: {} + + # Optional additional annotations to add to the cainjector Pods + # podAnnotations: {} + + # Additional command line flags to pass to cert-manager cainjector binary. + # To see all available flags run docker run quay.io/jetstack/cert-manager-cainjector: --help + extraArgs: [] + # Enable profiling for cainjector + # - --enable-profiling=true + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + nodeSelector: + kubernetes.io/os: linux + + affinity: {} + + tolerations: [] + + # Optional additional labels to add to the CA Injector Pods + podLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-cainjector + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-cainjector + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Automount API credentials for a Service Account. + # Optional additional labels to add to the cainjector's ServiceAccount + # labels: {} + automountServiceAccountToken: true + + # Automounting API credentials for a particular pod + # automountServiceAccountToken: true + + # This startupapicheck is a Helm post-install hook that waits for the webhook + # endpoints to become available. + # The check is implemented using a Kubernetes Job- if you are injecting mesh + # sidecar proxies into cert-manager pods, you probably want to ensure that they + # are not injected into this Job's pod. Otherwise the installation may time out + # due to the Job never being completed because the sidecar proxy does not exit. + # See https://github.com/cert-manager/cert-manager/pull/4414 for context. + startupapicheck: + enabled: true + + # Pod Security Context to be set on the startupapicheck component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + + # Container Security Context to be set on the controller component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: + allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + # Timeout for 'kubectl check api' command + timeout: 1m + + # Job backoffLimit + backoffLimit: 4 + + # Optional additional annotations to add to the startupapicheck Job + jobAnnotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "1" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Optional additional annotations to add to the startupapicheck Pods + # podAnnotations: {} + + # Additional command line flags to pass 
to startupapicheck binary. + # To see all available flags run docker run quay.io/jetstack/cert-manager-ctl: --help + extraArgs: [] + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + nodeSelector: + kubernetes.io/os: linux + + affinity: {} + + tolerations: [] + + # Optional additional labels to add to the startupapicheck Pods + podLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-ctl + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-ctl + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + rbac: + # annotations for the startup API Check job RBAC and PSP resources + annotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + serviceAccount: + # Specifies whether a service account should be created + create: true + + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + + # Optional additional annotations to add to the Job's ServiceAccount + annotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Automount API credentials for a Service Account. 
+ automountServiceAccountToken: true + + # Optional additional labels to add to the startupapicheck's ServiceAccount + # labels: {} diff --git a/cluster/manifests/cert-manager/cert-manager/config/clusterissuer.yaml b/cluster/manifests/cert-manager/cert-manager/config/clusterissuer.yaml new file mode 100644 index 0000000000..e067eb4e21 --- /dev/null +++ b/cluster/manifests/cert-manager/cert-manager/config/clusterissuer.yaml @@ -0,0 +1,45 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt +spec: + acme: + server: https://acme-v02.api.letsencrypt.org/directory + email: ${ACME_EMAIL} + privateKeySecretRef: + name: letsencrypt-key + solvers: + - selector: + dnsZones: + - ${EXTERNAL_DOMAIN} + dns01: + webhook: + groupName: acme.${EXTERNAL_DOMAIN} + solverName: porkbun + config: + apiKeySecretRef: + name: porkbun-key + key: api-key + secretKeySecretRef: + name: porkbun-key + key: secret-key +# --- +# apiVersion: cert-manager.io/v1 +# kind: ClusterIssuer +# metadata: +# name: letsencrypt-production +# spec: +# acme: +# email: acme@${EXTERNAL_DOMAIN} +# preferredChain: "" +# privateKeySecretRef: +# name: letsencrypt-production +# server: https://acme-v02.api.letsencrypt.org/directory +# solvers: +# - selector: {} +# dns01: +# cloudflare: +# apiTokenSecretRef: +# key: api-token +# name: cloudflare-api-token-secret +# email: acme@${EXTERNAL_DOMAIN} diff --git a/cluster/manifests/cert-manager/cert-manager/config/kustomization.yaml b/cluster/manifests/cert-manager/cert-manager/config/kustomization.yaml new file mode 100644 index 0000000000..a5bdb7ad07 --- /dev/null +++ b/cluster/manifests/cert-manager/cert-manager/config/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./clusterissuer.yaml diff --git a/cluster/manifests/cert-manager/porkbun-webhook/helmrelease.yaml b/cluster/manifests/cert-manager/porkbun-webhook/helmrelease.yaml new file mode 100644 index 
0000000000..0df8dc10d8 --- /dev/null +++ b/cluster/manifests/cert-manager/porkbun-webhook/helmrelease.yaml @@ -0,0 +1,70 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: porkbun-webhook + namespace: cert-manager +spec: + interval: 15m + chart: + spec: + chart: ./deploy/porkbun-webhook + version: 0.1.2 + sourceRef: + kind: GitRepository + name: porkbun-webhook + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 5 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 5 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + values: + # The GroupName here is used to identify your company or business unit that + # created this webhook. + # For example, this may be "acme.mycompany.com". + # This name will need to be referenced in each Issuer's `webhook` stanza to + # inform cert-manager of where to send ChallengePayload resources in order to + # solve the DNS01 challenge. + # This group name should be **unique**, hence using your own company's domain + # here is recommended. + groupName: acme.${EXTERNAL_DOMAIN} + + certManager: + namespace: cert-manager + serviceAccountName: cert-manager + + image: + repository: ghcr.io/mdonoughe/porkbun-webhook + tag: v0.1.2 + pullPolicy: IfNotPresent + + nameOverride: "" + fullnameOverride: "" + + service: + type: ClusterIP + port: 443 + + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + + nodeSelector: {} + + tolerations: [] + + affinity: {} diff --git a/cluster/manifests/cert-manager/porkbun-webhook/kustomization.yaml b/cluster/manifests/cert-manager/porkbun-webhook/kustomization.yaml new file mode 100644 index 0000000000..b5b97586cf --- /dev/null +++ b/cluster/manifests/cert-manager/porkbun-webhook/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./rbac.yaml + - ./helmrelease.yaml diff --git a/cluster/manifests/cert-manager/porkbun-webhook/rbac.yaml b/cluster/manifests/cert-manager/porkbun-webhook/rbac.yaml new file mode 100644 index 0000000000..0ddca4b04e --- /dev/null +++ b/cluster/manifests/cert-manager/porkbun-webhook/rbac.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: porkbun-key:reader + namespace: cert-manager +rules: + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["porkbun-key"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: porkbun-webhook:key-reader + namespace: cert-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: porkbun-key:reader +subjects: + - apiGroup: "" + kind: ServiceAccount + name: porkbun-webhook diff --git a/cluster/manifests/databases/babybuddy/babybuddy.yaml b/cluster/manifests/databases/babybuddy/babybuddy.yaml new file mode 100644 index 0000000000..1629592484 --- /dev/null +++ b/cluster/manifests/databases/babybuddy/babybuddy.yaml @@ -0,0 +1,73 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: babybuddy + namespace: databases + labels: + app: babybuddy +spec: + type: LoadBalancer + loadBalancerIP: ${LB_BABYBUDDY} + externalTrafficPolicy: Local + ports: + - port: 80 + targetPort: 8000 + protocol: TCP + name: babybuddy + selector: + app: babybuddy +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: 
babybuddy + namespace: databases + labels: + app: babybuddy +spec: + replicas: 1 + revisionHistoryLimit: 1 + serviceName: babybuddy + selector: + matchLabels: + app: babybuddy + template: + metadata: + labels: + app: babybuddy + spec: + containers: + - name: babybuddy + image: "lscr.io/linuxserver/babybuddy:1.12.2" + imagePullPolicy: IfNotPresent + env: + - name: PUID + value: "1000" + - name: GUID + value: "1000" + - name: TZ + value: 'America/New_York' + # - name: DB_ENGINE + # value: "django.db.backends.mysql" + # - name: DB_HOST + # value: "${LB_MARIADB}" + # - name: DB_NAME + # value: babybuddy + # - name: DB_PASSWORD + # value: "${SECRET_MARIADB_ROOT_PASSWORD}" + # - name: DB_USER + # value: root + # - name: DB_PORT + # value: "3306" + ports: + - containerPort: 8000 + name: web + volumeMounts: + - mountPath: /config + name: config + resources: {} + volumes: + - name: config + persistentVolumeClaim: + claimName: babybuddy-config-v1 diff --git a/cluster/manifests/databases/babybuddy/config-pvc.yaml b/cluster/manifests/databases/babybuddy/config-pvc.yaml new file mode 100644 index 0000000000..f0e0199657 --- /dev/null +++ b/cluster/manifests/databases/babybuddy/config-pvc.yaml @@ -0,0 +1,15 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: babybuddy-config-v1 + namespace: databases + labels: + kasten.io/backup-volume: "enabled" +spec: + accessModes: + - ReadWriteOnce + + resources: + requests: + storage: 1Gi diff --git a/cluster/manifests/databases/babybuddy/kustomization.yaml b/cluster/manifests/databases/babybuddy/kustomization.yaml new file mode 100644 index 0000000000..6c604e23c1 --- /dev/null +++ b/cluster/manifests/databases/babybuddy/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - babybuddy.yaml + - config-pvc.yaml diff --git a/cluster/manifests/databases/mariadb/config-pvc.yaml b/cluster/manifests/databases/mariadb/config-pvc.yaml new file mode 100644 index 
0000000000..dbd9d098a3 --- /dev/null +++ b/cluster/manifests/databases/mariadb/config-pvc.yaml @@ -0,0 +1,15 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: mariadb-config-v1 + namespace: databases + labels: + kasten.io/backup-volume: "enabled" +spec: + accessModes: + - ReadWriteOnce + storageClassName: tank-iscsi-csi + resources: + requests: + storage: 10Gi diff --git a/cluster/manifests/databases/mariadb/helmrelease.yaml b/cluster/manifests/databases/mariadb/helmrelease.yaml new file mode 100644 index 0000000000..96341c2332 --- /dev/null +++ b/cluster/manifests/databases/mariadb/helmrelease.yaml @@ -0,0 +1,58 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: mariadb + namespace: databases +spec: + interval: 15m + chart: + spec: + chart: mariadb + version: 11.1.8 + sourceRef: + kind: HelmRepository + name: bitnami-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + # Values link: https://github.com/bitnami/charts/blob/master/bitnami/mariadb/values.yaml + values: + auth: + rootPassword: ${SECRET_MARIADB_ROOT_PASSWORD} + primary: + # annotations: + # kanister.kasten.io/blueprint: bitnami-maraidb # FIXME + persistence: + enabled: true + existingClaim: mariadb-config-v1 + service: + enabled: true + type: LoadBalancer + loadBalancerIP: ${LB_MARIADB} + # metrics: + # enabled: true + # serviceMonitor: + # enabled: true + + # resources: + # requests: + # cpu: 10m + # memory: 32M + # limits: + # memory: 32M + + resources: + requests: + cpu: 10m + memory: 183M + limits: + memory: 237M diff --git a/cluster/manifests/databases/mariadb/kustomization.yaml b/cluster/manifests/databases/mariadb/kustomization.yaml new 
file mode 100644 index 0000000000..f930cca0cb --- /dev/null +++ b/cluster/manifests/databases/mariadb/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml + - config-pvc.yaml diff --git a/cluster/manifests/kube-system/cilium/helmrelease.yaml b/cluster/manifests/kube-system/cilium/helmrelease.yaml new file mode 100644 index 0000000000..88e4f96091 --- /dev/null +++ b/cluster/manifests/kube-system/cilium/helmrelease.yaml @@ -0,0 +1,94 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: cilium + namespace: kube-system + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm +spec: + interval: 15m + chart: + spec: + chart: cilium + version: 1.12.1 + sourceRef: + kind: HelmRepository + name: cilium-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + values: + cluster: + name: "${CLUSTER_NAME}" + id: "${CLUSTER_ID}" + + rollOutCiliumPods: true + + localRedirectPolicy: true + + kubeProxyReplacement: "strict" + kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 + + ipv4NativeRoutingCIDR: ${NETWORK_K8S_POD_CIDR} + + k8sServiceHost: 10.0.0.15 + k8sServicePort: 6443 + + loadBalancer: + algorithm: "maglev" + mode: "dsr" + + tunnel: "disabled" + + autoDirectNodeRoutes: true + + endpointRoutes: + enabled: true + + ipam: + mode: "kubernetes" + + operator: + rollOutPods: true + + containerRuntime: + integration: crio + socketPath: 'unix:///run/crio/crio.sock' + + hubble: + enabled: true + serviceMonitor: + enabled: true + metrics: + enabled: + - dns:query;ignoreAAAA + - drop + - tcp + 
- flow + - port-distribution + - icmp + - http + relay: + enabled: true + rollOutPods: true + ui: + enabled: true + rollOutPods: true + + bgp: + enabled: false + + externalIPs: + enabled: false diff --git a/cluster/manifests/kube-system/cilium/kustomization.yaml b/cluster/manifests/kube-system/cilium/kustomization.yaml new file mode 100644 index 0000000000..dbc604ed71 --- /dev/null +++ b/cluster/manifests/kube-system/cilium/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml diff --git a/cluster/manifests/kube-system/descheduler/helmrelease.yaml b/cluster/manifests/kube-system/descheduler/helmrelease.yaml new file mode 100644 index 0000000000..6e297d40e8 --- /dev/null +++ b/cluster/manifests/kube-system/descheduler/helmrelease.yaml @@ -0,0 +1,95 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: descheduler + namespace: kube-system +spec: + interval: 5m + chart: + spec: + chart: descheduler + version: 0.24.1 + sourceRef: + kind: HelmRepository + name: descheduler-charts + namespace: flux-system + interval: 5m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + values: + kind: Deployment + replicas: 3 + + leaderElection: + enabled: true + # leaseDuration: 15 + # renewDeadline: 10 + # retryPeriod: 2 + # resourceLock: "leases" + # resourceName: "descheduler" + # resourceNamescape: "kube-system" + + podAnnotations: + botkube.io/disable: "true" + configmap.reloader.stakater.com/reload: "descheduler" + + deschedulerPolicy: + strategies: + RemoveDuplicates: + enabled: true + + RemovePodsViolatingNodeTaints: + enabled: true + + RemovePodsViolatingNodeAffinity: + enabled: true + params: + nodeAffinityType: 
+ - requiredDuringSchedulingIgnoredDuringExecution + + RemovePodsViolatingTopologySpreadConstraint: + enabled: true + params: + includeSoftConstraints: true + + RemovePodsViolatingInterPodAntiAffinity: + enabled: true + params: + nodeFit: true + + LowNodeUtilization: + enabled: false + + RemoveFailedPods: + enabled: true + params: + failedPods: + # reasons: + # - OutOfcpu + # - CreateContainerConfigError + includingInitContainers: true + excludeOwnerKinds: + - Job + minPodLifetimeSeconds: 3600 + + RemovePodsHavingTooManyRestarts: + enabled: true + params: + podsHavingTooManyRestarts: + podRestartThreshold: 10 + includingInitContainers: true + + resources: + requests: + cpu: 15m + memory: 64M + limits: + memory: 64M diff --git a/cluster/manifests/kube-system/descheduler/kustomization.yaml b/cluster/manifests/kube-system/descheduler/kustomization.yaml new file mode 100644 index 0000000000..dbc604ed71 --- /dev/null +++ b/cluster/manifests/kube-system/descheduler/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml diff --git a/cluster/manifests/kube-system/metrics-server/helmrelease.yaml b/cluster/manifests/kube-system/metrics-server/helmrelease.yaml new file mode 100644 index 0000000000..5344d058e3 --- /dev/null +++ b/cluster/manifests/kube-system/metrics-server/helmrelease.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: metrics-server + namespace: kube-system +spec: + interval: 5m + chart: + spec: + chart: metrics-server + version: 3.8.2 + sourceRef: + kind: HelmRepository + name: kubernetes-sigs-metrics-server-charts + namespace: flux-system + interval: 5m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries 
remain + cleanupOnFail: true + + values: + image: + repository: k8s.gcr.io/metrics-server/metrics-server + tag: v0.6.1 + resources: + requests: + cpu: 15m + memory: 64M + limits: + memory: 64M diff --git a/cluster/manifests/kube-system/metrics-server/kustomization.yaml b/cluster/manifests/kube-system/metrics-server/kustomization.yaml new file mode 100644 index 0000000000..dbc604ed71 --- /dev/null +++ b/cluster/manifests/kube-system/metrics-server/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml diff --git a/cluster/manifests/kube-system/node-feature-discovery/helmrelease.yaml b/cluster/manifests/kube-system/node-feature-discovery/helmrelease.yaml new file mode 100644 index 0000000000..255819370f --- /dev/null +++ b/cluster/manifests/kube-system/node-feature-discovery/helmrelease.yaml @@ -0,0 +1,109 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: node-feature-discovery + namespace: kube-system +spec: + interval: 5m + chart: + spec: + chart: node-feature-discovery + version: 0.11.1 + sourceRef: + kind: HelmRepository + name: kubernetes-sigs-nfd-charts + namespace: flux-system + interval: 5m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + values: + master: + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + + resources: + requests: + cpu: 23m + memory: 64M + limits: + memory: 64M + + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: "node-role.kubernetes.io/master" + operator: In + values: [""] + + worker: + resources: + requests: + cpu: 15m + memory: 
64M + limits: + memory: 64M + + config: + core: + sources: + - custom + - pci + - usb + sources: + usb: + deviceClassWhitelist: + - "02" + - "0e" + - "ef" + - "fe" + - "ff" + deviceLabelFields: + - "class" + - "vendor" + - "device" + custom: + - name: "alarmdecoder" + matchOn: + - usbId: + class: ["ff"] + vendor: ["0403"] + device: ["6015"] + - name: "zwave" + matchOn: + - usbId: + class: ["02"] + vendor: ["0658"] + device: ["0200"] + - name: "coral-tpu" + matchOn: + - usbId: + vendor: ["1a6e", "18d1"] + - name: "intel-gpu" + matchOn: + - pciId: + class: ["0300"] + vendor: ["8086"] + + annotations: + configmap.reloader.stakater.com/reload: "nfd-worker-conf" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" diff --git a/cluster/manifests/kube-system/node-feature-discovery/kustomization.yaml b/cluster/manifests/kube-system/node-feature-discovery/kustomization.yaml new file mode 100644 index 0000000000..dbc604ed71 --- /dev/null +++ b/cluster/manifests/kube-system/node-feature-discovery/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml diff --git a/cluster/manifests/kube-system/reloader/helmrelease.yaml b/cluster/manifests/kube-system/reloader/helmrelease.yaml new file mode 100644 index 0000000000..849f2fbcb1 --- /dev/null +++ b/cluster/manifests/kube-system/reloader/helmrelease.yaml @@ -0,0 +1,45 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: reloader + namespace: kube-system +spec: + interval: 15m + chart: + spec: + chart: reloader + version: v0.0.118 + sourceRef: + kind: HelmRepository + name: stakater-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm 
upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + values: + nameOverride: reloader + + fullnameOverride: reloader + + reloader: + deployment: + image: + name: ghcr.io/k8s-at-home/reloader + podMonitor: + enabled: true + namespace: kube-system + + resources: + requests: + cpu: 15m + memory: 64M + limits: + memory: 64M diff --git a/cluster/manifests/kube-system/reloader/kustomization.yaml b/cluster/manifests/kube-system/reloader/kustomization.yaml new file mode 100644 index 0000000000..dbc604ed71 --- /dev/null +++ b/cluster/manifests/kube-system/reloader/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml diff --git a/cluster/manifests/media/plex/config-pvc.yaml b/cluster/manifests/media/plex/config-pvc.yaml new file mode 100644 index 0000000000..10d4f561bd --- /dev/null +++ b/cluster/manifests/media/plex/config-pvc.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: plex-config-v1 + namespace: media + labels: + app.kubernetes.io/name: &name plex + app.kubernetes.io/instance: *name + snapshot.home.arpa/enabled: "true" +spec: + accessModes: + - ReadWriteOnce + + resources: + requests: + storage: 80Gi + + storageClassName: tank-iscsi-csi diff --git a/cluster/manifests/media/plex/helmrelease.yaml b/cluster/manifests/media/plex/helmrelease.yaml new file mode 100644 index 0000000000..bd2b105140 --- /dev/null +++ b/cluster/manifests/media/plex/helmrelease.yaml @@ -0,0 +1,78 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: plex + namespace: media +spec: + releaseName: plex + interval: 15m + chart: + spec: + chart: plex + version: 6.4.3 + sourceRef: + kind: HelmRepository + name: k8s-at-home-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install 
fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + values: + # Values link: https://github.com/k8s-at-home/charts/blob/master/charts/stable/plex/values.yaml + image: + repository: ghcr.io/k8s-at-home/plex + tag: v1.28.1.6104-788f82488 + env: + TZ: "${TZ}" + ADVERTISE_IP: "https://plex.${EXTERNAL_DOMAIN}:443/,http://${LB_PLEX}:32400/" + PLEX_CLAIM: "${SECRET_PLEX_TOKEN}" + + podSecurityContext: + supplementalGroups: + - 44 + - 109 + - 100 + - 65539 + service: + main: + enabled: true + type: LoadBalancer + loadBalancerIP: ${LB_PLEX} + externalTrafficPolicy: Local + annotations: + metallb.universe.tf/allow-shared-ip: plex + + persistence: + config: + enabled: true + existingClaim: plex-config-v1 + + transcode: + enabled: true + type: emptyDir + + nfs-media: + enabled: true + type: custom + volumeSpec: + nfs: + server: ${UIHARU_IP} + path: /tank/media + mountPath: /data + readOnly: false + resources: + requests: + cpu: 2 + memory: 2000M + # gpu.intel.com/i915: 1 + limits: + memory: 10000M + # gpu.intel.com/i915: 1 diff --git a/cluster/manifests/media/plex/kustomization.yaml b/cluster/manifests/media/plex/kustomization.yaml new file mode 100644 index 0000000000..4ed091d81e --- /dev/null +++ b/cluster/manifests/media/plex/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./config-pvc.yaml + - ./helmrelease.yaml diff --git a/cluster/manifests/media/tautulli/config-pvc.yaml b/cluster/manifests/media/tautulli/config-pvc.yaml new file mode 100644 index 0000000000..a61c55a565 --- /dev/null +++ b/cluster/manifests/media/tautulli/config-pvc.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: tautulli-config-v1 + namespace: media + labels: + app.kubernetes.io/name: &name tautulli + 
app.kubernetes.io/instance: *name + snapshot.home.arpa/enabled: "true" +spec: + accessModes: + - ReadWriteOnce + + resources: + requests: + storage: 10Gi + + storageClassName: tank-iscsi-csi diff --git a/cluster/manifests/media/tautulli/helmrelease.yaml b/cluster/manifests/media/tautulli/helmrelease.yaml new file mode 100644 index 0000000000..ec7778ee29 --- /dev/null +++ b/cluster/manifests/media/tautulli/helmrelease.yaml @@ -0,0 +1,45 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: tautulli + namespace: media +spec: + interval: 15m + chart: + spec: + chart: tautulli + version: 11.4.2 + sourceRef: + kind: HelmRepository + name: k8s-at-home-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + values: + image: + repository: ghcr.io/k8s-at-home/tautulli + tag: v2.10.3 + + env: + TZ: "${TZ}" + + persistence: + config: + enabled: true + existingClaim: tautulli-config-v1 + + resources: + requests: + cpu: 92m + memory: 249M + limits: + memory: 300M diff --git a/cluster/manifests/media/tautulli/kustomization.yaml b/cluster/manifests/media/tautulli/kustomization.yaml new file mode 100644 index 0000000000..1499689e84 --- /dev/null +++ b/cluster/manifests/media/tautulli/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - config-pvc.yaml + - helmrelease.yaml diff --git a/cluster/manifests/network/ingress-nginx/dashboard/kustomization.yaml b/cluster/manifests/network/ingress-nginx/dashboard/kustomization.yaml new file mode 100644 index 0000000000..b1b2dabdef --- /dev/null +++ b/cluster/manifests/network/ingress-nginx/dashboard/kustomization.yaml @@ -0,0 +1,15 @@ +--- 
+apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: network +configMapGenerator: + - name: nginx-dashboard + files: + - nginx-dashboard.json=https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json +generatorOptions: + disableNameSuffixHash: true + annotations: + kustomize.toolkit.fluxcd.io/substitute: disabled + grafana_folder: Ingress NGINX + labels: + grafana_dashboard: "true" diff --git a/cluster/manifests/network/ingress-nginx/helmrelease.yaml b/cluster/manifests/network/ingress-nginx/helmrelease.yaml new file mode 100644 index 0000000000..626ef7fc1f --- /dev/null +++ b/cluster/manifests/network/ingress-nginx/helmrelease.yaml @@ -0,0 +1,112 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: ingress-nginx + namespace: network +spec: + interval: 15m + chart: + spec: + chart: ingress-nginx + version: 4.2.1 + sourceRef: + kind: HelmRepository + name: ingress-nginx-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + # dependsOn: + # - name: kube-prometheus-stack + # namespace: system-monitoring + values: + controller: + replicaCount: 3 + extraEnvs: + - name: TZ + value: "${TZ}" + + service: + loadBalancerIP: "${LB_NGINX_INGRESS}" + externalTrafficPolicy: Local + + publishService: + enabled: true + + ingressClassResource: + default: true + + config: + client-header-timeout: 120 + client-body-buffer-size: "100M" + client-body-timeout: 120 + # custom-http-errors: >- + # 401,403,404,500,501,502,503 + enable-brotli: "true" + forwarded-for-header: "CF-Connecting-IP" + hsts-max-age: "31449600" + keep-alive: 120 + keep-alive-requests: 10000 + proxy-body-size: "100M" + 
ssl-protocols: "TLSv1.3 TLSv1.2" + use-forwarded-headers: "true" + # The below X-Clacks-Overehead is a do-nothing header + # That is simply a memoriam for Sir Terry Pratchett + # Ref: https://xclacksoverhead.org/home/about + location-snippet: | + add_header X-Clacks-Overhead "GNU Terry Pratchett" always; + # metrics: + # enabled: true + # serviceMonitor: + # enabled: true + # namespace: network + # namespaceSelector: + # any: true + # prometheusRule: + # enabled: false + + # extraArgs: + # default-ssl-certificate: "network/${CLUSTER_DOMAIN/./-}-tls" + + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + app.kubernetes.io/instance: ingress-nginx + + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + + # podAnnotations: + # configmap.reloader.stakater.com/reload: "cloudflare-proxied-networks" + + resources: + requests: + memory: 250Mi + cpu: 20m + limits: + memory: 1000Mi + + defaultBackend: + enabled: true + image: + repository: ghcr.io/tarampampam/error-pages + tag: 2.18.0 + extraEnvs: + - name: TEMPLATE_NAME + value: cats + - name: SHOW_DETAILS + value: "false" diff --git a/cluster/manifests/network/ingress-nginx/kustomization.yaml b/cluster/manifests/network/ingress-nginx/kustomization.yaml new file mode 100644 index 0000000000..8cc8dfe9e9 --- /dev/null +++ b/cluster/manifests/network/ingress-nginx/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: network +resources: + # - dashboard + - helmrelease.yaml diff --git a/cluster/manifests/network/metallb/helmrelease.yaml b/cluster/manifests/network/metallb/helmrelease.yaml new file mode 100644 index 0000000000..cef072bdf7 --- /dev/null +++ b/cluster/manifests/network/metallb/helmrelease.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: metallb + 
namespace: network +spec: + interval: 15m + chart: + spec: + chart: metallb + version: 0.13.4 + sourceRef: + kind: HelmRepository + name: metallb-charts + namespace: flux-system + interval: 15m + values: + prometheus: + podMonitor: + enabled: false + prometheusRule: + enabled: false + + speaker: + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + # frr: + # enabled: true + # image: + # repository: docker.io/frrouting/frr + # tag: v8.3.0 + + crds: + enabled: true + + postRenderers: + - kustomize: + patchesStrategicMerge: + - $patch: delete + apiVersion: admissionregistration.k8s.io/v1 + kind: ValidatingWebhookConfiguration + metadata: + name: metallb-webhook-configuration diff --git a/cluster/manifests/network/metallb/kustomization.yaml b/cluster/manifests/network/metallb/kustomization.yaml new file mode 100644 index 0000000000..dbc604ed71 --- /dev/null +++ b/cluster/manifests/network/metallb/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml diff --git a/cluster/manifests/network/multus/helmrelease.yaml b/cluster/manifests/network/multus/helmrelease.yaml new file mode 100644 index 0000000000..fe940696b5 --- /dev/null +++ b/cluster/manifests/network/multus/helmrelease.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: multus + namespace: network +spec: + interval: 15m + chart: + spec: + chart: multus + version: 3.5.2 + sourceRef: + kind: HelmRepository + name: k8s-at-home-charts + namespace: flux-system + interval: 15m + values: + image: + repository: ghcr.io/k8snetworkplumbingwg/multus-cni + tag: v3.9.1 + + cni: + image: + repository: ghcr.io/k8s-at-home/cni-plugins + tag: v1.1.1 + + paths: + config: /etc/cni/net.d + bin: /opt/cni/bin + + version: "0.3.1" diff --git a/cluster/manifests/network/multus/kustomization.yaml 
b/cluster/manifests/network/multus/kustomization.yaml new file mode 100644 index 0000000000..dbc604ed71 --- /dev/null +++ b/cluster/manifests/network/multus/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml diff --git a/cluster/manifests/organizarrs/animarr/config-pvc.yaml b/cluster/manifests/organizarrs/animarr/config-pvc.yaml new file mode 100644 index 0000000000..346ec62942 --- /dev/null +++ b/cluster/manifests/organizarrs/animarr/config-pvc.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: animarr-config-v1 + namespace: organizarrs + labels: + app.kubernetes.io/name: &name animarr + app.kubernetes.io/instance: *name + snapshot.home.arpa/enabled: "true" +spec: + accessModes: + - ReadWriteOnce + + resources: + requests: + storage: 10Gi + + storageClassName: tank-iscsi-csi diff --git a/cluster/manifests/organizarrs/animarr/helmrelease.yaml b/cluster/manifests/organizarrs/animarr/helmrelease.yaml new file mode 100644 index 0000000000..6f3d8970af --- /dev/null +++ b/cluster/manifests/organizarrs/animarr/helmrelease.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: animarr + namespace: organizarrs +spec: + interval: 15m + chart: + spec: + chart: sonarr + version: 16.3.2 + sourceRef: + kind: HelmRepository + name: k8s-at-home-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + + valuesFrom: + - kind: ConfigMap + name: animarr-values diff --git a/cluster/manifests/organizarrs/animarr/kustomization.yaml b/cluster/manifests/organizarrs/animarr/kustomization.yaml new file mode 100644 index 
0000000000..f08fdbb571 --- /dev/null +++ b/cluster/manifests/organizarrs/animarr/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./config-pvc.yaml + - ./helmrelease.yaml + - ./values.yaml diff --git a/cluster/manifests/organizarrs/animarr/values.yaml b/cluster/manifests/organizarrs/animarr/values.yaml new file mode 100644 index 0000000000..b497078192 --- /dev/null +++ b/cluster/manifests/organizarrs/animarr/values.yaml @@ -0,0 +1,124 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: animarr-values + namespace: organizarrs +data: + values.yaml: |- + + controllerType: deployment + image: + repository: ghcr.io/k8s-at-home/sonarr + tag: v3.0.9.1549 + + env: + TZ: "${TZ}" + + service: + main: + type: LoadBalancer + loadBalancerIP: ${LB_ANIMARR} + + ingress: + main: + enabled: true + ingressClassName: "nginx" + annotations: + cert-manager.io/cluster-issuer: ${CLUSTER_CERT} + # nginx.ingress.kubernetes.io/configuration-snippet: | + # proxy_set_header Accept-Encoding ""; + # sub_filter '' ''; + # sub_filter_once on; + hajimari.io/enable: "true" + hajimari.io/icon: "television-box" + hajimari.io/appName: animarr + hosts: + - host: &host "animarr.${EXTERNAL_DOMAIN}" + paths: + - path: / + pathType: Prefix + tls: + - hosts: + - *host + secretName: &tls tls.animarr + + api: + enabled: true + ingressClassName: "nginx" + annotations: + external-dns.home.arpa/enabled: "true" + external-dns.alpha.kubernetes.io/target: ipv4.${EXTERNAL_DOMAIN} + external-dns.alpha.kubernetes.io/cloudflare-proxied: "true" + hosts: + - host: *host + paths: + - path: /api + pathType: Prefix + tls: + - hosts: + - *host + secretName: *tls + + persistence: + config: + enabled: true + existingClaim: animarr-config-v1 + + nfs-nas-media: + enabled: true + type: custom + volumeSpec: + nfs: + server: ${UIHARU_IP} + path: /tank/media + mountPath: /media + readOnly: false + + nfs-nas-backup: + enabled: true + type: custom + 
volumeSpec: + nfs: + server: ${UIHARU_IP} + path: /tank/backup/kubernetes/animarr + mountPath: /config/Backups + readOnly: false + + nfs-nas-bittorrent: + enabled: true + type: custom + volumeSpec: + nfs: + server: ${UIHARU_IP} + path: /smoltank/bittorrent + mountPath: /bittorrent + readOnly: false + + podSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + + resources: + requests: + cpu: 10m + memory: 477M + limits: + memory: 1300M + + # metrics: + # enabled: true + # serviceMonitor: + # interval: 3m + # scrapeTimeout: 1m + # prometheusRule: + # enabled: true + # exporter: + # image: + # repository: ghcr.io/onedr0p/exportarr + # tag: v1.1.0 + # env: + # port: 9794 + # additionalMetrics: true + # unknownQueueItems: false diff --git a/cluster/manifests/organizarrs/radarr/config-pvc.yaml b/cluster/manifests/organizarrs/radarr/config-pvc.yaml new file mode 100644 index 0000000000..3d062d2358 --- /dev/null +++ b/cluster/manifests/organizarrs/radarr/config-pvc.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: radarr-config-v1 + namespace: organizarrs + labels: + app.kubernetes.io/name: &name radarr + app.kubernetes.io/instance: *name + snapshot.home.arpa/enabled: "true" +spec: + accessModes: + - ReadWriteOnce + + resources: + requests: + storage: 15Gi + + storageClassName: tank-iscsi-csi diff --git a/cluster/manifests/organizarrs/radarr/helmrelease.yaml b/cluster/manifests/organizarrs/radarr/helmrelease.yaml new file mode 100644 index 0000000000..8b1989ee5f --- /dev/null +++ b/cluster/manifests/organizarrs/radarr/helmrelease.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: radarr + namespace: organizarrs +spec: + interval: 15m + chart: + spec: + chart: radarr + version: 16.3.2 + sourceRef: + kind: HelmRepository + name: k8s-at-home-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + 
remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + + valuesFrom: + - kind: ConfigMap + name: radarr-values diff --git a/cluster/manifests/organizarrs/radarr/kustomization.yaml b/cluster/manifests/organizarrs/radarr/kustomization.yaml new file mode 100644 index 0000000000..f08fdbb571 --- /dev/null +++ b/cluster/manifests/organizarrs/radarr/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./config-pvc.yaml + - ./helmrelease.yaml + - ./values.yaml diff --git a/cluster/manifests/organizarrs/radarr/values.yaml b/cluster/manifests/organizarrs/radarr/values.yaml new file mode 100644 index 0000000000..9f8be43858 --- /dev/null +++ b/cluster/manifests/organizarrs/radarr/values.yaml @@ -0,0 +1,130 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: radarr-values + namespace: organizarrs +data: + values.yaml: |- + + image: + repository: ghcr.io/k8s-at-home/radarr + tag: v4.1.0.6175 + + env: + TZ: ${TZ} + + service: + main: + type: LoadBalancer + loadBalancerIP: ${LB_RADARR} + + ingress: + main: + enabled: true + ingressClassName: "nginx" + annotations: + cert-manager.io/cluster-issuer: ${CLUSTER_CERT} + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header Accept-Encoding ""; + sub_filter '' ''; + sub_filter_once on; + hajimari.io/enable: "true" + hajimari.io/icon: "filmstrip" + hosts: + - host: &host "radarr.${EXTERNAL_DOMAIN}" + paths: + - path: / + pathType: Prefix + tls: + - hosts: + - *host + secretName: &tls tls.radarr + + api: + enabled: true + ingressClassName: "nginx" + annotations: + external-dns.home.arpa/enabled: "true" + external-dns.alpha.kubernetes.io/target: ipv4.${EXTERNAL_DOMAIN} + external-dns.alpha.kubernetes.io/cloudflare-proxied: "true" + hosts: + - host: 
*host + paths: + - path: /api + pathType: Prefix + tls: + - hosts: + - *host + secretName: *tls + + persistence: + config: + enabled: true + existingClaim: radarr-config-v1 + + nfs-nas-media: + enabled: true + type: custom + volumeSpec: + nfs: + server: ${UIHARU_IP} + path: /tank/media + mountPath: /media + readOnly: false + + nfs-nas-backup: + enabled: true + type: custom + volumeSpec: + nfs: + server: ${UIHARU_IP} + path: /tank/backup/kubernetes/radarr + mountPath: /config/Backups + readOnly: false + + nfs-nas-bittorrent: + enabled: true + type: custom + volumeSpec: + nfs: + server: ${UIHARU_IP} + path: /smoltank/bittorrent + mountPath: /bittorrent + readOnly: false + + podSecurityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + supplementalGroups: + - 5600 + + resources: + requests: + cpu: 10m + memory: 512M + limits: + memory: 974M + + # prometheus: + # podMonitor: + # enabled: true + # interval: 3m + # scrapeTimeout: 1m30s + + # metrics: + # enabled: true + # serviceMonitor: + # interval: 3m + # scrapeTimeout: 1m + # prometheusRule: + # enabled: true + # exporter: + # image: + # repository: ghcr.io/onedr0p/exportarr + # tag: v1.1.0 + # env: + # port: 9794 + # additionalMetrics: true + # unknownQueueItems: false diff --git a/cluster/manifests/organizarrs/sonarr/config-pvc.yaml b/cluster/manifests/organizarrs/sonarr/config-pvc.yaml new file mode 100644 index 0000000000..ec9cc208d8 --- /dev/null +++ b/cluster/manifests/organizarrs/sonarr/config-pvc.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: sonarr-config-v1 + namespace: organizarrs + labels: + app.kubernetes.io/name: &name sonarr + app.kubernetes.io/instance: *name + snapshot.home.arpa/enabled: "true" +spec: + accessModes: + - ReadWriteOnce + + resources: + requests: + storage: 10Gi + + storageClassName: tank-iscsi-csi diff --git a/cluster/manifests/organizarrs/sonarr/helmrelease.yaml 
b/cluster/manifests/organizarrs/sonarr/helmrelease.yaml new file mode 100644 index 0000000000..349cdd3611 --- /dev/null +++ b/cluster/manifests/organizarrs/sonarr/helmrelease.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: sonarr + namespace: organizarrs +spec: + interval: 15m + chart: + spec: + chart: sonarr + version: 16.3.2 + sourceRef: + kind: HelmRepository + name: k8s-at-home-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + + valuesFrom: + - kind: ConfigMap + name: sonarr-values diff --git a/cluster/manifests/organizarrs/sonarr/kustomization.yaml b/cluster/manifests/organizarrs/sonarr/kustomization.yaml new file mode 100644 index 0000000000..f08fdbb571 --- /dev/null +++ b/cluster/manifests/organizarrs/sonarr/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./config-pvc.yaml + - ./helmrelease.yaml + - ./values.yaml diff --git a/cluster/manifests/organizarrs/sonarr/values.yaml b/cluster/manifests/organizarrs/sonarr/values.yaml new file mode 100644 index 0000000000..3d8ed6e8f3 --- /dev/null +++ b/cluster/manifests/organizarrs/sonarr/values.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: sonarr-values + namespace: organizarrs +data: + values.yaml: |- + + controllerType: deployment + image: + repository: ghcr.io/k8s-at-home/sonarr + tag: v3.0.9.1549 + + env: + TZ: "${TZ}" + + service: + main: + type: LoadBalancer + loadBalancerIP: ${LB_SONARR} + + ingress: + main: + enabled: true + ingressClassName: "nginx" + annotations: + cert-manager.io/cluster-issuer: ${CLUSTER_CERT} + # 
nginx.ingress.kubernetes.io/configuration-snippet: | + # proxy_set_header Accept-Encoding ""; + # sub_filter '' ''; + # sub_filter_once on; + hajimari.io/enable: "true" + hajimari.io/icon: "television-box" + hajimari.io/appName: sonarr + hosts: + - host: &host "sonarr.${EXTERNAL_DOMAIN}" + paths: + - path: / + pathType: Prefix + tls: + - hosts: + - *host + secretName: &tls tls.sonarr + + api: + enabled: true + ingressClassName: "nginx" + annotations: + external-dns.home.arpa/enabled: "true" + external-dns.alpha.kubernetes.io/target: ipv4.${EXTERNAL_DOMAIN} + external-dns.alpha.kubernetes.io/cloudflare-proxied: "true" + hosts: + - host: *host + paths: + - path: /api + pathType: Prefix + tls: + - hosts: + - *host + secretName: *tls + + persistence: + config: + enabled: true + existingClaim: sonarr-config-v1 + + nfs-nas-media: + enabled: true + type: custom + volumeSpec: + nfs: + server: ${UIHARU_IP} + path: /tank/media + mountPath: /media + readOnly: false + + nfs-nas-bittorrent: + enabled: true + type: custom + volumeSpec: + nfs: + server: ${UIHARU_IP} + path: /smoltank/bittorrent + mountPath: /bittorrent + readOnly: false + + nfs-nas-backup: + enabled: true + type: custom + volumeSpec: + nfs: + server: ${UIHARU_IP} + path: /tank/backup/kubernetes/sonarr + mountPath: /config/Backups + readOnly: false diff --git a/cluster/manifests/services/hajimari/helmrelease.yaml b/cluster/manifests/services/hajimari/helmrelease.yaml new file mode 100644 index 0000000000..96c2d0ec8a --- /dev/null +++ b/cluster/manifests/services/hajimari/helmrelease.yaml @@ -0,0 +1,86 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: hajimari + namespace: services +spec: + interval: 15m + chart: + spec: + chart: hajimari + version: 1.2.0 + sourceRef: + kind: HelmRepository + name: hajimari-charts + namespace: flux-system + interval: 15m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + 
remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + values: + + service: + main: + type: LoadBalancer + loadBalancerIP: ${LB_HAJIMARI} + + + image: + repository: ghcr.io/toboshii/hajimari + tag: v0.2.0 + + env: + TZ: "${TZ}" + + hajimari: + defaultEnable: false + namespaceSelector: + any: true + + name: "Chris" + groups: + - name: Git + links: + - name: k8s-home-ops + url: https://github.com/cbc02009/k8s-home-ops + # - name: Trux Home Assistant + # url: https://github.com/Truxnell/hass-config + - name: k@h charts + url: https://github.com/k8s-at-home/charts/tree/master/charts + + podAnnotations: + configmap.reloader.stakater.com/reload: "hajimari-settings" + + ingress: + main: + enabled: true + ingressClassName: "nginx" + annotations: + cert-manager.io/cluster-issuer: ${CLUSTER_CERT} + prometheus.io/probe: "true" + prometheus.io/protocol: http + + hosts: + - host: "hajimari.${EXTERNAL_DOMAIN}" + paths: + - path: / + pathType: Prefix + + tls: + - secretName: tls.hajimari + hosts: + - "hajimari.${EXTERNAL_DOMAIN}" + + resources: + requests: + cpu: 15m + memory: 64M + limits: + memory: 64M diff --git a/cluster/manifests/services/hajimari/kustomization.yaml b/cluster/manifests/services/hajimari/kustomization.yaml new file mode 100644 index 0000000000..842719e954 --- /dev/null +++ b/cluster/manifests/services/hajimari/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml + # - probe.yaml diff --git a/cluster/manifests/services/theme-park/helmrelease.yaml b/cluster/manifests/services/theme-park/helmrelease.yaml new file mode 100644 index 0000000000..5ff000c630 --- /dev/null +++ b/cluster/manifests/services/theme-park/helmrelease.yaml @@ -0,0 +1,66 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: 
theme-park + namespace: services +spec: + interval: 5m + chart: + spec: + chart: theme-park + version: 1.2.2 + sourceRef: + kind: HelmRepository + name: k8s-at-home-charts + namespace: flux-system + interval: 5m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 3 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 3 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + values: + image: + repository: ghcr.io/k8s-at-home/theme-park + tag: v1.10.1 + + nameOverride: "theme-park" + + env: + TZ: "${TZ}" + + service: + main: + ports: + http: + enabled: true + port: 8080 + + ingress: + main: + enabled: true + ingressClassName: "nginx" + annotations: + cert-manager.io/cluster-issuer: ${CLUSTER_CERT} + + hosts: + - host: &host "theme-park.${EXTERNAL_DOMAIN}" + paths: + - path: / + + tls: + - secretName: tls.theme-park + hosts: + - *host + + resources: + requests: + cpu: 15m + memory: 64M + limits: + memory: 64M diff --git a/cluster/manifests/services/theme-park/kustomization.yaml b/cluster/manifests/services/theme-park/kustomization.yaml new file mode 100644 index 0000000000..5dd7baca73 --- /dev/null +++ b/cluster/manifests/services/theme-park/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./helmrelease.yaml diff --git a/cluster/manifests/storage/democratic-csi-iscsi/helmrelease.yaml b/cluster/manifests/storage/democratic-csi-iscsi/helmrelease.yaml new file mode 100644 index 0000000000..76061d0a42 --- /dev/null +++ b/cluster/manifests/storage/democratic-csi-iscsi/helmrelease.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: csi-iscsi + namespace: storage +spec: + interval: 15m + chart: + spec: + chart: democratic-csi + version: 0.13.4 + sourceRef: + kind: HelmRepository + name: democratic-csi-charts + 
namespace: flux-system + interval: 15m + values: + csiDriver: + name: "org.democratic-csi.iscsi" + + storageClasses: + - name: tank-iscsi-csi + defaultClass: true + reclaimPolicy: Retain + ## For testing + # reclaimPolicy: Delete + volumeBindingMode: Immediate + allowVolumeExpansion: true + parameters: + fsType: ext4 + + driver: + config: + driver: zfs-generic-iscsi + existingConfigSecret: zfs-generic-iscsi-config diff --git a/cluster/manifests/storage/democratic-csi-iscsi/kustomization.yaml b/cluster/manifests/storage/democratic-csi-iscsi/kustomization.yaml new file mode 100644 index 0000000000..dbc604ed71 --- /dev/null +++ b/cluster/manifests/storage/democratic-csi-iscsi/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml diff --git a/cluster/manifests/storage/democratic-csi-nfs/helmrelease.yaml b/cluster/manifests/storage/democratic-csi-nfs/helmrelease.yaml new file mode 100644 index 0000000000..2d20b8d178 --- /dev/null +++ b/cluster/manifests/storage/democratic-csi-nfs/helmrelease.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: csi-nfs + namespace: storage +spec: + interval: 15m + chart: + spec: + chart: democratic-csi + version: 0.13.4 + sourceRef: + kind: HelmRepository + name: democratic-csi-charts + namespace: flux-system + interval: 15m + values: + csiDriver: + name: "org.democratic-csi.nfs" + + storageClasses: + - name: tank-nfs-csi + defaultClass: false + # reclaimPolicy: Retain + ## For testing + reclaimPolicy: Delete + volumeBindingMode: Immediate + allowVolumeExpansion: true + parameters: + fsType: nfs + mountOptions: + - noatime + - nfsvers=4 + + driver: + config: + driver: zfs-generic-nfs + existingConfigSecret: zfs-generic-nfs-config diff --git a/cluster/manifests/storage/democratic-csi-nfs/kustomization.yaml b/cluster/manifests/storage/democratic-csi-nfs/kustomization.yaml new file mode 100644 index 
0000000000..dbc604ed71 --- /dev/null +++ b/cluster/manifests/storage/democratic-csi-nfs/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml diff --git a/cluster/manifests/system-monitoring/grafana/config-pvc.yaml b/cluster/manifests/system-monitoring/grafana/config-pvc.yaml new file mode 100644 index 0000000000..2c994babca --- /dev/null +++ b/cluster/manifests/system-monitoring/grafana/config-pvc.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: grafana-config-v1 + namespace: system-monitoring + labels: + app.kubernetes.io/name: &name grafana + app.kubernetes.io/instance: *name + snapshot.home.arpa/enabled: "true" +spec: + accessModes: + - ReadWriteOnce + + resources: + requests: + storage: 5Gi + + storageClassName: tank-iscsi-csi diff --git a/cluster/manifests/system-monitoring/grafana/helmrelease.yaml b/cluster/manifests/system-monitoring/grafana/helmrelease.yaml new file mode 100644 index 0000000000..155fd2961e --- /dev/null +++ b/cluster/manifests/system-monitoring/grafana/helmrelease.yaml @@ -0,0 +1,315 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: grafana + namespace: system-monitoring +spec: + interval: 5m + chart: + spec: + chart: grafana + version: 6.32.14 + sourceRef: + kind: HelmRepository + name: grafana-charts + namespace: flux-system + interval: 5m + install: + createNamespace: true + remediation: # perform remediation when helm install fails + retries: 5 + upgrade: + remediation: # perform remediation when helm upgrade fails + retries: 5 + remediateLastFailure: true # remediate the last failure, when no retries remain + cleanupOnFail: true + dependsOn: + - name: kube-prometheus-stack + namespace: system-monitoring + # - name: thanos + # namespace: system-monitoring + values: + replicas: 1 + env: + GF_EXPLORE_ENABLED: true + GF_PANELS_DISABLE_SANITIZE_HTML: true + GF_LOG_FILTERS: 
rendering:debug + GF_DATE_FORMATS_USE_BROWSER_LOCALE: true + GF_DATE_FORMATS_FULL_DATE: "MMM Do, YYYY hh:mm:ss a" + GF_DATE_FORMATS_INTERVAL_SECOND: "hh:mm:ss a" + GF_DATE_FORMATS_INTERVAL_MINUTE: "hh:mm a" + GF_DATE_FORMATS_INTERVAL_HOUR: "DD/MM hh:mm a" + GF_DATE_FORMATS_INTERVAL_DAY: "DD/MM" + GF_DATE_FORMATS_INTERVAL_MONTH: "MM-YYYY" + GF_DATE_FORMATS_INTERVAL_YEAR: "YYYY" + GF_DEFAULT_FORCE_MIGRATION: true + adminPassword: "${SECRET_GRAFANA_PASSWORD}" + grafana.ini: + server: + root_url: "https://grafana.${EXTERNAL_DOMAIN}" + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: false + log: + mode: console + grafana_net: + url: https://grafana.net + auth.basic: + disable_login_form: false + + datasources: + datasources.yaml: + apiVersion: 1 + # deleteDatasources: + # - name: Loki + # orgId: 1 + datasources: + - name: Prometheus + type: prometheus + url: http://thanos-query.system-monitoring:9090/ + access: proxy + isDefault: true + # - name: Loki + # type: loki + # access: proxy + # url: http://loki.system-monitoring:3100 + # - name: AlertManager + # type: alertmanager + # access: proxy + # url: http://alertmanager-operated.system-monitoring:9093 + # jsonData: + # implementation: "prometheus" + + dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: "default" + orgId: 1 + folder: "default" + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/default + + - name: "cilium" + orgId: 1 + folder: "cilium" + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/cilium + + - name: "power" + orgId: 1 + folder: "power" + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/power + + # - name: "unifi" + # orgId: 1 + # folder: "unifi" + # type: file + # disableDeletion: false + # 
editable: true + # options: + # path: /var/lib/grafana/dashboards/unifi + + # - name: "ceph" + # orgId: 1 + # folder: "ceph" + # type: file + # disableDeletion: false + # editable: true + # options: + # path: /var/lib/grafana/dashboards/ceph + + - name: "flux" + orgId: 1 + folder: "flux" + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/flux + + - name: "organizarrs" + orgId: 1 + folder: "organizarrs" + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/organizarrs + + dashboards: + default: + # Ref: https://grafana.com/grafana/dashboards/13502 + # minio: + # gnetId: 13502 + # revision: 10 + # datasource: Prometheus + # k8s-pvc-overview: + # gnetId: 13646 + # revision: 2 + # datasource: Prometheus + cert-manager: + url: https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/cert-manager.json + datasource: Prometheus + # loki: + # gnetId: 13407 + # revision: 1 + # datasource: Prometheus + # node-problem-detector: + # gnetId: 15549 + # revision: 1 + # datasource: Prometeus + # node-exporter-full: + # url: https://grafana.com/api/dashboards/1860/revisions/22/download + # datasource: Prometheus + + cilium: + cilium-agent: + gnetId: 15513 + revision: 1 + datasource: Prometheus + # Ref: https://grafana.com/grafana/dashboards/15514 + cilium-operator: + gnetId: 15514 + revision: 1 + datasource: Prometheus + # Ref: https://grafana.com/grafana/dashboards/15515 + cilium-hubble: + gnetId: 15515 + revision: 1 + datasource: Prometheus + + power: + # Ref: https://grafana.com/grafana/dashboards/14371 + nut: + gnetId: 14371 + revision: 1 + datasource: Prometheus + + # unifi: + # # Ref: https://grafana.com/grafana/dashboards/11315 + # unifi-client-insights: + # gnetId: 11315 + # revision: 8 + # datasource: Prometheus + # # Ref: https://grafana.com/grafana/dashboards/11311 + # unifi-network-sites: + # gnetId: 11311 + # revision: 4 + # 
datasource: Prometheus + # # Ref: https://grafana.com/grafana/dashboards/11314 + # unifi-uap-insights: + # gnetId: 11314 + # revision: 9 + # datasource: Prometheus + # # Ref: https://grafana.com/grafana/dashboards/11312 + # unifi-usw-insights: + # gnetId: 11312 + # revision: 8 + # datasource: Prometheus + # # Ref: https://grafana.com/grafana/dashboards/13646 + # ceph: + # # Ref: https://grafana.com/grafana/dashboards/5336 + # ceph-cluster: + # gnetId: 2842 + # revision: 14 + # datasource: Prometheus + # # Ref: https://grafana.com/grafana/dashboards/5342 + # ceph-osd: + # gnetId: 5336 + # revision: 5 + # datasource: Prometheus + # # Ref: https://grafana.com/grafana/dashboards/7845 + # ceph-pools: + # gnetId: 5342 + # revision: 5 + # datasource: Prometheus + flux: + flux-cluster: + url: https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/monitoring-config/dashboards/cluster.json + datasource: Prometheus + flux-control-plane: + url: https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/monitoring-config/dashboards/control-plane.json + datasource: Prometheus + + organizarrs: + # lidarr: + # url: https://raw.githubusercontent.com/k8s-at-home/grafana-dashboards/main/lidarr.json + # datasource: Prometheus + radarr: + url: https://raw.githubusercontent.com/k8s-at-home/grafana-dashboards/main/radarr.json + datasource: Prometheus + sonarr: + url: https://raw.githubusercontent.com/k8s-at-home/grafana-dashboards/main/sonarr.json + datasource: Prometheus + # qbittorrent: + # url: https://raw.githubusercontent.com/esanchezm/prometheus-qbittorrent-exporter/master/grafana/dashboard.json + # datasource: Prometheus + + sidecar: + dashboards: + enabled: true + searchNamespace: ALL + labelValue: "" + datasources: + enabled: true + searchNamespace: ALL + labelValue: "" + + plugins: + - natel-discrete-panel + - pr0ps-trackmap-panel + - grafana-piechart-panel + - vonage-status-panel + - grafana-worldmap-panel + - grafana-clock-panel + + 
serviceMonitor: + enabled: true + + ingress: + enabled: true + ingressClassName: "nginx" + annotations: + cert-manager.io/cluster-issuer: ${CLUSTER_CERT} + hajimari.io/enable: "true" + hajimari.io/icon: "graph-line" + hosts: + - &host "grafana.${EXTERNAL_DOMAIN}" + tls: + - hosts: + - *host + secretName: tls.grafana + + serviceAccount: + create: true + autoMount: true + + persistence: + enabled: true + existingClaim: grafana-config-v1 + + resources: + requests: + cpu: 34m + memory: 94M + limits: + memory: 213M + + podAnnotations: + configmap.reloader.stakater.com/reload: "grafana" diff --git a/cluster/manifests/system-monitoring/grafana/kustomization.yaml b/cluster/manifests/system-monitoring/grafana/kustomization.yaml new file mode 100644 index 0000000000..f930cca0cb --- /dev/null +++ b/cluster/manifests/system-monitoring/grafana/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - helmrelease.yaml + - config-pvc.yaml diff --git a/cluster/manifests/system-monitoring/kube-prometheus-stack/helmrelease.yaml b/cluster/manifests/system-monitoring/kube-prometheus-stack/helmrelease.yaml new file mode 100644 index 0000000000..2fcfc48fd3 --- /dev/null +++ b/cluster/manifests/system-monitoring/kube-prometheus-stack/helmrelease.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: helm.toolkit.fluxcd.io/v2beta1 +kind: HelmRelease +metadata: + name: kube-prometheus-stack + namespace: system-monitoring +spec: + interval: 6h + maxHistory: 2 + timeout: 20m + chart: + spec: + chart: kube-prometheus-stack + version: 35.6.2 + sourceRef: + kind: HelmRepository + name: prometheus-community-charts + namespace: flux-system + install: + createNamespace: true + crds: CreateReplace + remediation: # perform remediation when helm install fails + retries: 5 + upgrade: + crds: CreateReplace + remediation: # perform remediation when helm upgrade fails + retries: 5 + remediateLastFailure: true # remediate the last failure, when no retries 
remain + cleanupOnFail: true + + # valuesFrom: + # # - kind: Secret + # # name: kube-prometheus-stack + # # valuesKey: discord-webhook + # # targetPath: alertmanager.config.global.slack_api_url + # # optional: false + # - kind: ConfigMap + # name: kps-generic-values + # - kind: ConfigMap + # name: kps-prometheus-values + # - kind: ConfigMap + # name: kps-alertmanager-values diff --git a/cluster/manifests/system-monitoring/kube-prometheus-stack/kustomization.yaml b/cluster/manifests/system-monitoring/kube-prometheus-stack/kustomization.yaml new file mode 100644 index 0000000000..3d24181c09 --- /dev/null +++ b/cluster/manifests/system-monitoring/kube-prometheus-stack/kustomization.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + # - ./secret.sops.yaml + - ./helmrelease.yaml +# namespace: system-monitoring +# configurations: +# - kustomize-config.yaml +# configMapGenerator: +# # - name: kps-alertmanager-values +# # files: +# # - values.yaml=alertmanager-values.yaml +# - name: kps-prometheus-values +# files: +# - values.yaml=prometheus-values.yaml +# - name: kps-generic-values +# files: +# - values.yaml=generic-values.yaml diff --git a/cluster/namespaces/cert-manager.yaml b/cluster/namespaces/cert-manager.yaml new file mode 100644 index 0000000000..f56d668a90 --- /dev/null +++ b/cluster/namespaces/cert-manager.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager + labels: + goldilocks.fairwinds.com/enabled: "true" diff --git a/cluster/namespaces/databases.yaml b/cluster/namespaces/databases.yaml new file mode 100644 index 0000000000..bce6c6cdf7 --- /dev/null +++ b/cluster/namespaces/databases.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: databases + labels: + goldilocks.fairwinds.com/enabled: "true" diff --git a/cluster/namespaces/flux-system.yaml b/cluster/namespaces/flux-system.yaml new file mode 100644 index 0000000000..17dbc10e12 --- 
/dev/null +++ b/cluster/namespaces/flux-system.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: flux-system + labels: + goldilocks.fairwinds.com/enabled: "true" diff --git a/cluster/namespaces/kube-system.yaml b/cluster/namespaces/kube-system.yaml new file mode 100644 index 0000000000..2c4b6b8d49 --- /dev/null +++ b/cluster/namespaces/kube-system.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system + labels: + goldilocks.fairwinds.com/enabled: "true" diff --git a/cluster/namespaces/kustomization.yaml b/cluster/namespaces/kustomization.yaml new file mode 100644 index 0000000000..0158f814f6 --- /dev/null +++ b/cluster/namespaces/kustomization.yaml @@ -0,0 +1,23 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - cert-manager.yaml + - databases.yaml + # - downloads.yaml + # - dev.yaml + - flux-system.yaml + # - games.yaml + # - home-automation.yaml + # - kasten-io.yaml + - kube-system.yaml + - media.yaml + - network.yaml + - storage.yaml + - organizarrs.yaml + # - rook-ceph.yaml + # - security.yaml + - services.yaml + - system-monitoring.yaml + # - testing.yaml + # - vpn.yaml diff --git a/cluster/namespaces/media.yaml b/cluster/namespaces/media.yaml new file mode 100644 index 0000000000..2bb899e841 --- /dev/null +++ b/cluster/namespaces/media.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: media + labels: + goldilocks.fairwinds.com/enabled: "true" diff --git a/cluster/namespaces/network.yaml b/cluster/namespaces/network.yaml new file mode 100644 index 0000000000..733e9c1914 --- /dev/null +++ b/cluster/namespaces/network.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: network + labels: + goldilocks.fairwinds.com/enabled: "true" diff --git a/cluster/namespaces/organizarrs.yaml b/cluster/namespaces/organizarrs.yaml new file mode 100644 index 0000000000..00406d3446 --- /dev/null +++ 
b/cluster/namespaces/organizarrs.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: organizarrs + labels: + vpn-routed-gateway: "true" + goldilocks.fairwinds.com/enabled: "true" diff --git a/cluster/namespaces/services.yaml b/cluster/namespaces/services.yaml new file mode 100644 index 0000000000..e62530e64a --- /dev/null +++ b/cluster/namespaces/services.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: services + labels: + goldilocks.fairwinds.com/enabled: "true" diff --git a/cluster/namespaces/storage.yaml b/cluster/namespaces/storage.yaml new file mode 100644 index 0000000000..0a2b94dcce --- /dev/null +++ b/cluster/namespaces/storage.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: storage + labels: + goldilocks.fairwinds.com/enabled: "true" diff --git a/cluster/namespaces/system-monitoring.yaml b/cluster/namespaces/system-monitoring.yaml new file mode 100644 index 0000000000..240602bdf6 --- /dev/null +++ b/cluster/namespaces/system-monitoring.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: system-monitoring + labels: + goldilocks.fairwinds.com/enabled: "true" diff --git a/cluster/repositories/git/flux.yaml b/cluster/repositories/git/flux.yaml new file mode 100644 index 0000000000..6267aca30e --- /dev/null +++ b/cluster/repositories/git/flux.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: flux + namespace: flux-system +spec: + interval: 10m0s + ref: + # renovate: datasource=github-releases depName=fluxcd/flux2 + tag: v0.32.0 + url: https://github.com/fluxcd/flux2 + ignore: | + # exclude all + /* + # include manifest dir + !/manifests diff --git a/cluster/repositories/git/k8s-home-ops.yaml b/cluster/repositories/git/k8s-home-ops.yaml new file mode 100644 index 0000000000..4849c221da --- /dev/null +++ b/cluster/repositories/git/k8s-home-ops.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: 
source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: k8s-home-ops + namespace: flux-system +spec: + interval: 1m0s + ref: + branch: main + url: ssh://git@github.com/cbc02009/k8s-home-ops + secretRef: + name: github-deploy-key + ignore: | + # exclude all + /* + # include gitops dirs + !/cluster + # exclude files from gitops dirs + /cluster/**/*.md + /cluster/**/*.tmpl diff --git a/cluster/repositories/git/kustomization.yaml b/cluster/repositories/git/kustomization.yaml new file mode 100644 index 0000000000..e321997458 --- /dev/null +++ b/cluster/repositories/git/kustomization.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./flux.yaml + - ./k8s-home-ops.yaml + - ./porkbun-webhook.yaml diff --git a/cluster/repositories/git/porkbun-webhook.yaml b/cluster/repositories/git/porkbun-webhook.yaml new file mode 100644 index 0000000000..308fe840c5 --- /dev/null +++ b/cluster/repositories/git/porkbun-webhook.yaml @@ -0,0 +1,15 @@ +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: porkbun-webhook + namespace: flux-system +spec: + interval: 10m + url: https://github.com/mdonoughe/porkbun-webhook + ref: + branch: main + ignore: | + # exclude all + /* + # include charts directory + !/deploy/porkbun-webhook diff --git a/cluster/repositories/helm/bitnami-charts.yaml b/cluster/repositories/helm/bitnami-charts.yaml new file mode 100644 index 0000000000..7b6801d55f --- /dev/null +++ b/cluster/repositories/helm/bitnami-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: bitnami-charts + namespace: flux-system +spec: + interval: 30m + url: https://charts.bitnami.com/bitnami + timeout: 3m diff --git a/cluster/repositories/helm/cilium-charts.yaml b/cluster/repositories/helm/cilium-charts.yaml new file mode 100644 index 0000000000..948bc81a64 --- /dev/null +++ 
b/cluster/repositories/helm/cilium-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: cilium-charts + namespace: flux-system +spec: + interval: 30m + url: https://helm.cilium.io + timeout: 3m diff --git a/cluster/repositories/helm/democratic-csi-charts.yaml b/cluster/repositories/helm/democratic-csi-charts.yaml new file mode 100644 index 0000000000..b4004ca456 --- /dev/null +++ b/cluster/repositories/helm/democratic-csi-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: democratic-csi-charts + namespace: flux-system +spec: + interval: 30m + url: https://democratic-csi.github.io/charts/ + timeout: 3m diff --git a/cluster/repositories/helm/external-dns-charts.yaml b/cluster/repositories/helm/external-dns-charts.yaml new file mode 100644 index 0000000000..bc7aee0131 --- /dev/null +++ b/cluster/repositories/helm/external-dns-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: external-dns-charts + namespace: flux-system +spec: + interval: 30m + url: https://kubernetes-sigs.github.io/external-dns + timeout: 3m diff --git a/cluster/repositories/helm/grafana-charts.yaml b/cluster/repositories/helm/grafana-charts.yaml new file mode 100644 index 0000000000..fe5b152b4f --- /dev/null +++ b/cluster/repositories/helm/grafana-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: grafana-charts + namespace: flux-system +spec: + interval: 30m + url: https://grafana.github.io/helm-charts + timeout: 3m diff --git a/cluster/repositories/helm/hajimari-charts.yaml b/cluster/repositories/helm/hajimari-charts.yaml new file mode 100644 index 0000000000..48836790ce --- /dev/null +++ b/cluster/repositories/helm/hajimari-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository 
+metadata: + name: hajimari-charts + namespace: flux-system +spec: + interval: 30m + url: https://hajimari.io + timeout: 3m diff --git a/cluster/repositories/helm/ingress-nginx-charts.yaml b/cluster/repositories/helm/ingress-nginx-charts.yaml new file mode 100644 index 0000000000..f753e79450 --- /dev/null +++ b/cluster/repositories/helm/ingress-nginx-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: ingress-nginx-charts + namespace: flux-system +spec: + interval: 15m + url: https://kubernetes.github.io/ingress-nginx + timeout: 3m diff --git a/cluster/repositories/helm/jetstack-charts.yaml b/cluster/repositories/helm/jetstack-charts.yaml new file mode 100644 index 0000000000..47049a5b02 --- /dev/null +++ b/cluster/repositories/helm/jetstack-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: jetstack-charts + namespace: flux-system +spec: + interval: 30m + url: https://charts.jetstack.io + timeout: 3m diff --git a/cluster/repositories/helm/k8s-at-home-charts.yaml b/cluster/repositories/helm/k8s-at-home-charts.yaml new file mode 100644 index 0000000000..34b96123cf --- /dev/null +++ b/cluster/repositories/helm/k8s-at-home-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: k8s-at-home-charts + namespace: flux-system +spec: + interval: 30m + url: https://k8s-at-home.com/charts/ + timeout: 3m diff --git a/cluster/repositories/helm/kubernetes-sigs-descheduler-charts.yaml b/cluster/repositories/helm/kubernetes-sigs-descheduler-charts.yaml new file mode 100644 index 0000000000..d3ea071cb5 --- /dev/null +++ b/cluster/repositories/helm/kubernetes-sigs-descheduler-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: descheduler-charts + namespace: flux-system +spec: + interval: 30m + url: 
https://kubernetes-sigs.github.io/descheduler + timeout: 3m diff --git a/cluster/repositories/helm/kubernetes-sigs-metrics-server-charts.yaml b/cluster/repositories/helm/kubernetes-sigs-metrics-server-charts.yaml new file mode 100644 index 0000000000..af17cba575 --- /dev/null +++ b/cluster/repositories/helm/kubernetes-sigs-metrics-server-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: kubernetes-sigs-metrics-server-charts + namespace: flux-system +spec: + interval: 30m + url: https://kubernetes-sigs.github.io/metrics-server/ + timeout: 3m diff --git a/cluster/repositories/helm/kubernetes-sigs-nfd-charts.yaml b/cluster/repositories/helm/kubernetes-sigs-nfd-charts.yaml new file mode 100644 index 0000000000..73daac9d2b --- /dev/null +++ b/cluster/repositories/helm/kubernetes-sigs-nfd-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: kubernetes-sigs-nfd-charts + namespace: flux-system +spec: + interval: 30m + url: https://kubernetes-sigs.github.io/node-feature-discovery/charts + timeout: 3m diff --git a/cluster/repositories/helm/kustomization.yaml b/cluster/repositories/helm/kustomization.yaml new file mode 100644 index 0000000000..cd88498a19 --- /dev/null +++ b/cluster/repositories/helm/kustomization.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: flux-system +resources: + - ./cilium-charts.yaml + - ./external-dns-charts.yaml + - ./grafana-charts.yaml + - ./jetstack-charts.yaml + - ./kubernetes-sigs-descheduler-charts.yaml + - ./kubernetes-sigs-metrics-server-charts.yaml + - ./kubernetes-sigs-nfd-charts.yaml + - ./k8s-at-home-charts.yaml + - ./metallb-charts.yaml + - ./prometheus-community-charts.yaml + - ./democratic-csi-charts.yaml + - ./bitnami-charts.yaml + - ./hajimari-charts.yaml + - ./traefik-charts.yaml + - ./ingress-nginx-charts.yaml + - ./stakater-charts.yaml 
diff --git a/cluster/repositories/helm/metallb-charts.yaml b/cluster/repositories/helm/metallb-charts.yaml new file mode 100644 index 0000000000..3c3941344f --- /dev/null +++ b/cluster/repositories/helm/metallb-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: metallb-charts + namespace: flux-system +spec: + interval: 10m + url: https://metallb.github.io/metallb + timeout: 3m diff --git a/cluster/repositories/helm/prometheus-community-charts.yaml b/cluster/repositories/helm/prometheus-community-charts.yaml new file mode 100644 index 0000000000..9f79fee835 --- /dev/null +++ b/cluster/repositories/helm/prometheus-community-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: prometheus-community-charts + namespace: flux-system +spec: + interval: 30m + url: https://prometheus-community.github.io/helm-charts + timeout: 3m diff --git a/cluster/repositories/helm/stakater-charts.yaml b/cluster/repositories/helm/stakater-charts.yaml new file mode 100644 index 0000000000..07f2124d2d --- /dev/null +++ b/cluster/repositories/helm/stakater-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: stakater-charts + namespace: flux-system +spec: + interval: 30m + url: https://stakater.github.io/stakater-charts + timeout: 3m diff --git a/cluster/repositories/helm/traefik-charts.yaml b/cluster/repositories/helm/traefik-charts.yaml new file mode 100644 index 0000000000..4262cea2cd --- /dev/null +++ b/cluster/repositories/helm/traefik-charts.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: traefik-charts + namespace: flux-system +spec: + interval: 10m + url: https://helm.traefik.io/traefik + timeout: 3m diff --git a/cluster/repositories/kustomization.yaml b/cluster/repositories/kustomization.yaml new file mode 100644 
index 0000000000..4c7df4767f --- /dev/null +++ b/cluster/repositories/kustomization.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ./git + - ./helm diff --git a/kubeadm-config.yaml b/kubeadm-config.yaml new file mode 100644 index 0000000000..f0d7a4da41 --- /dev/null +++ b/kubeadm-config.yaml @@ -0,0 +1,31 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 10.0.0.15 + bindPort: 6443 +nodeRegistration: + criSocket: 'unix:///run/crio/crio.sock' + imagePullPolicy: IfNotPresent + name: anya +skipPhases: + - addon/kube-proxy +--- +apiServer: + timeoutForControlPlane: 4m0s +apiVersion: kubeadm.k8s.io/v1beta3 +certificatesDir: /etc/kubernetes/pki +clusterName: kubernetes +controllerManager: {} +dns: {} +etcd: + local: + dataDir: /var/lib/etcd +imageRepository: k8s.gcr.io +kind: ClusterConfiguration +networking: + dnsDomain: cluster.local + serviceSubnet: 10.10.0.0/16 + podSubnet: 10.11.0.0/16 +scheduler: {} +featureGates: + PublicKeysECDSA: true diff --git a/tools/cilium-quick-install/kustomization.yaml b/tools/cilium-quick-install/kustomization.yaml new file mode 100644 index 0000000000..cafaf969f1 --- /dev/null +++ b/tools/cilium-quick-install/kustomization.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +helmCharts: + - name: cilium + repo: https://helm.cilium.io/ + version: 1.12.1 + releaseName: cilium + namespace: kube-system + valuesFile: values.yaml +commonAnnotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system +commonLabels: + app.kubernetes.io/managed-by: Helm diff --git a/tools/cilium-quick-install/quick-install.yaml b/tools/cilium-quick-install/quick-install.yaml new file mode 100644 index 0000000000..06289c30c3 --- /dev/null +++ b/tools/cilium-quick-install/quick-install.yaml @@ -0,0 +1,861 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + 
meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-operator + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + - get +- apiGroups: + - cilium.io + resources: + - ciliumbgploadbalancerippools + - ciliumbgppeeringpolicies + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumegressnatpolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + verbs: + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create +- apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get +- apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update +- apiGroups: + - cilium.io + resources: + - 
ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints/status + - ciliumendpoints + verbs: + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-operator +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services/status + verbs: + - update +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumclusterwidenetworkpolicies + verbs: + - create + - update + - deletecollection + - patch + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + verbs: + - patch + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + - ciliumidentities + verbs: + - delete + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumnodes + verbs: + - create + - update + - get + - list + - watch + - delete +- apiGroups: + - cilium.io + resources: + - ciliumnodes/status + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpointslices + - ciliumenvoyconfigs + verbs: + - create + - update + - get + - list + - watch + - delete +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - 
create + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resourceNames: + - ciliumbgploadbalancerippools.cilium.io + - ciliumbgppeeringpolicies.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumegressnatpolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumexternalworkloads.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io + resources: + - customresourcedefinitions + verbs: + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: cilium-operator + namespace: kube-system +--- +apiVersion: v1 +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + arping-refresh-period: 30s + auto-direct-node-routes: "false" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-sock: "false" + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + 
cilium-endpoint-gc-interval: 15m0s + cluster-id: "0" + cluster-name: default + custom-cni-conf: "false" + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + enable-auto-protect-node-port-range: "true" + enable-bgp-control-plane: "false" + enable-bpf-clock-probe: "true" + enable-endpoint-health-checking: "true" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-ipv4: "true" + enable-ipv4-masquerade: "true" + enable-ipv6: "false" + enable-ipv6-masquerade: "true" + enable-k8s-terminating-endpoint: "true" + enable-l2-neigh-discovery: "true" + enable-l7-proxy: "true" + enable-local-redirect-policy: "false" + enable-policy: default + enable-remote-node-identity: "true" + enable-svc-source-range-check: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + identity-allocation-mode: crd + install-iptables-rules: "true" + install-no-conntrack-iptables-rules: "false" + ipam: kubernetes + kube-proxy-replacement: strict + kube-proxy-replacement-healthz-bind-address: "" + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + node-port-bind-protection: "true" + nodes-gc-interval: 15m0s + operator-api-serve-addr: 127.0.0.1:9234 + preallocate-bpf-maps: "false" + procfs: /host/proc + remove-cilium-node-taints: "true" + set-cilium-is-up-condition: "true" + sidecar-istio-proxy-image: cilium/istio_proxy + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "50" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-min-ttl: "3600" + tofqdns-proxy-response-max-delay: 100ms + tunnel: vxlan + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: cilium + 
meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + name: cilium-config + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + io.cilium/app: operator + name: cilium-operator + name: cilium-operator + namespace: kube-system +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/managed-by: Helm + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + io.cilium/app: operator + name: cilium-operator + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/managed-by: Helm + io.cilium/app: operator + topologyKey: kubernetes.io/hostname + containers: + - args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + command: + - cilium-operator-generic + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: 10.0.0.15 + - name: KUBERNETES_SERVICE_PORT + value: "6443" + image: quay.io/cilium/operator-generic:v1.12.1@sha256:93d5aaeda37d59e6c4325ff05030d7b48fabde6576478e3fdbfb9bb4a68ec4a1 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + name: cilium-operator + volumeMounts: + - 
mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + restartPolicy: Always + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + tolerations: + - operator: Exists + volumes: + - configMap: + name: cilium-config + name: cilium-config-path +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + annotations: + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + template: + metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined + container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined + container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined + container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined + meta.helm.sh/release-name: cilium + meta.helm.sh/release-namespace: kube-system + labels: + app.kubernetes.io/managed-by: Helm + k8s-app: cilium + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CNI_CHAINING_MODE + valueFrom: + configMapKeyRef: + key: cni-chaining-mode + name: cilium-config + optional: true + - name: CILIUM_CUSTOM_CNI_CONF + valueFrom: + configMapKeyRef: + key: 
custom-cni-conf + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: 10.0.0.15 + - name: KUBERNETES_SERVICE_PORT + value: "6443" + image: quay.io/cilium/cilium:v1.12.1@sha256:ea2db1ee21b88127b5c18a96ad155c25485d0815a667ef77c2b7c7f31cab601b + imagePullPolicy: IfNotPresent + lifecycle: + postStart: + exec: + command: + - /cni-install.sh + - --enable-debug=false + - --cni-exclusive=true + - --log-file=/var/run/cilium/cilium-cni.log + preStop: + exec: + command: + - /cni-uninstall.sh + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: 1 + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/opt/cni/bin + name: cni-path + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - 
mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + hostNetwork: true + initContainers: + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.12.1@sha256:ea2db1ee21b88127b5c18a96ad155c25485d0815a667ef77c2b7c7f31cab601b + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.12.1@sha256:ea2db1ee21b88127b5c18a96ad155c25485d0815a667ef77c2b7c7f31cab601b + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: quay.io/cilium/cilium:v1.12.1@sha256:ea2db1ee21b88127b5c18a96ad155c25485d0815a667ef77c2b7c7f31cab601b + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + securityContext: + privileged: true + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + 
env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: 10.0.0.15 + - name: KUBERNETES_SERVICE_PORT + value: "6443" + image: quay.io/cilium/cilium:v1.12.1@sha256:ea2db1ee21b88127b5c18a96ad155c25485d0815a667ef77c2b7c7f31cab601b + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 256 + optional: true + secretName: cilium-clustermesh + - configMap: + name: cilium-config + name: cilium-config-path + - hostPath: + 
path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + updateStrategy: + rollingUpdate: + maxUnavailable: 2 + type: RollingUpdate diff --git a/tools/cilium-quick-install/values.yaml b/tools/cilium-quick-install/values.yaml new file mode 100644 index 0000000000..8681fcb6ee --- /dev/null +++ b/tools/cilium-quick-install/values.yaml @@ -0,0 +1,31 @@ +--- +# kubeProxyReplacement enables kube-proxy replacement in Cilium BPF datapath +kubeProxyReplacement: "strict" + +k8sServiceHost: 10.0.0.15 +k8sServicePort: 6443 + +ipam: + # -- Configure IP Address Management mode. + # ref: https://docs.cilium.io/en/stable/concepts/networking/ipam/ + mode: "kubernetes" + +# autoDirectNodeRoutes enables installation of PodCIDR routes between worker +# nodes if worker nodes share a common L2 network segment. +# autoDirectNodeRoutes: true + +# nativeRoutingCIDR allows to explicitly specify the CIDR for native routing. This +# value corresponds to the configured cluster-cidr. +# nativeRoutingCIDR: 10.1.1.0/24 + +# Cilium leverages MetalLB's simplified BGP announcement system for service type: LoadBalancer +bgp: + enabled: false + +hubble: + # -- Enable Hubble (true by default). + enabled: false + +ipv6: + # -- Enable IPv6 support. + enabled: false