diff --git a/kubernetes/core/cilium.nix b/kubernetes/core/cilium.nix
index c908651..41c515f 100644
--- a/kubernetes/core/cilium.nix
+++ b/kubernetes/core/cilium.nix
@@ -27,13 +27,6 @@
     loadBalancer.mode = "hybrid";
     endpointRoutes.enabled = true;
 
-    # Fix for Tailscale
-    # https://tailscale.com/kb/1236/kubernetes-operator#cilium-in-kube-proxy-replacement-mode
-    socketLB = {
-      enabled = true;
-      hostNamespaceOnly = true;
-    };
-
     # We use globally routable addresses for IPv6, so no NAT is needed.
     # We're running IPv6-only, but Discord still needs IPv4...
     # For now, that is accomplished with NAT64 on the host. :)
diff --git a/kubernetes/core/tailscale.nix b/kubernetes/core/tailscale.nix
deleted file mode 100644
index 352f1ac..0000000
--- a/kubernetes/core/tailscale.nix
+++ /dev/null
@@ -1,38 +0,0 @@
-{ transpire, ... }:
-
-{
-  namespaces.tailscale = {
-    helmReleases.tailscale-operator = {
-      chart = transpire.fetchFromHelm {
-        repo = "https://pkgs.tailscale.com/helmcharts";
-        name = "tailscale-operator";
-        version = "1.68.1";
-        sha256 = "3j+DRDFF/iPvgGlyXFw2riniHwEb1diFKeMLb3Kp+HA=";
-      };
-
-      values = {
-        operatorConfig = {
-          image.repository = "ghcr.io/tailscale/k8s-operator";
-          defaultTags = [ "tag:hfym-ds-operator" ];
-          hostname = "hfym-ds-operator";
-        };
-        proxyConfig = {
-          image.repository = "ghcr.io/tailscale/tailscale";
-          defaultTags = "tag:hfym-ds";
-        };
-      };
-    };
-
-    resources.v1.Secret.operator-oauth = {
-      type = "Opaque";
-      stringData = {
-        client_id = "";
-        client_secret = "";
-      };
-    };
-
-    resources."tailscale.com/v1alpha1".ProxyClass.prod.spec = {
-      statefulSet.pod.tailscaleContainer.securityContext.privileged = true;
-    };
-  };
-}