diff --git a/README.md b/README.md
index efdfafa2a2..0a3b73bcef 100644
--- a/README.md
+++ b/README.md
@@ -46,10 +46,6 @@ binary releases.
 | [v0.15.23](https://github.com/cilium/cilium-cli/releases/tag/v0.15.23) | Yes | Cilium 1.14 and newer |
 | [v0.14.8](https://github.com/cilium/cilium-cli/releases/tag/v0.14.8) | Yes | Cilium 1.13 |
 
-Please see [`helm` installation mode](#helm-installation-mode) section
-regarding our plan to migrate to the new `helm` installation mode and deprecate
-the current implementation.
-
 ## Capabilities
 
 ### Install Cilium
@@ -346,27 +342,6 @@ Install a Cilium in a cluster and enable encryption with IPsec
     🚀 Creating Operator Deployment...
     ⌛ Waiting for Cilium to be installed...
 
-## `helm` installation mode
-
-`cilium-cli` v0.14 introduces a new `helm` installation mode. In the current installation mode
-(we now call it `classic` mode), `cilium-cli` directly calls Kubernetes APIs to manage resources
-related to Cilium. In the new `helm` mode, `cilium-cli` delegates all the installation state
-management to Helm. This enables you to use `cilium-cli` and `helm` interchangeably to manage your
-Cilium installation, while taking advantage of `cilium-cli`'s advanced features such as Cilium
-configuration auto-detection.
-
-In `cilium-cli` v0.15, the `helm` mode is the default installation mode, and the `classic` mode is
-deprecated. To use the `classic` mode, set `CILIUM_CLI_MODE` environment variable to `classic`:
-
-    export CILIUM_CLI_MODE=classic
-
-> **Warnings**
-> - The `classic` installation mode will be removed after v0.15 release.
-> - Cilium CLI does not support converting `classic` mode installations to
->   `helm` mode installations and vice versa.
-> - Cilium CLI does not support running commands in `helm` mode against classic
->   mode installations.
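The README text removed above explained that in `helm` mode `cilium-cli` delegates installation state to Helm, so both tools can manage the same installation. A minimal sketch of that interchangeability, assuming a default installation into the `kube-system` namespace with a Helm release named `cilium` and an example version; none of these values are taken from this diff:

    # Install (or upgrade) Cilium with cilium-cli; the state is stored as a Helm release.
    cilium install --version 1.14.2

    # The same release can then be inspected or managed with helm directly.
    helm list --namespace kube-system
    helm get values cilium --namespace kube-system

Because both tools operate on the same release state, the interoperability caveats listed for the removed `classic` mode no longer apply.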
- ### Examples #### `install` examples diff --git a/install/autodetect.go b/install/autodetect.go index 982d3f8b55..8186a25853 100644 --- a/install/autodetect.go +++ b/install/autodetect.go @@ -6,11 +6,9 @@ package install import ( "context" "fmt" - "regexp" "strconv" "strings" - "github.com/cilium/cilium-cli/internal/utils" "github.com/cilium/cilium-cli/k8s" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,8 +28,6 @@ var ( &kindVersionValidation{}, }, } - - clusterNameValidation = regexp.MustCompile(`^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$`) ) func (p Parameters) checkDisabled(name string) bool { @@ -43,14 +39,6 @@ func (p Parameters) checkDisabled(name string) bool { return false } -func (k *K8sUninstaller) autodetect(ctx context.Context) { - k.flavor = k.client.AutodetectFlavor(ctx) - - if k.flavor.Kind != k8s.KindUnknown { - k.Log("🔮 Auto-detected Kubernetes kind: %s", k.flavor.Kind) - } -} - func (k *K8sInstaller) detectDatapathMode() error { if k.params.DatapathMode != "" { k.Log("ℹī¸ Custom datapath mode: %s", k.params.DatapathMode) @@ -169,43 +157,12 @@ func (k *K8sInstaller) autodetectAndValidate(ctx context.Context, helmValues map return err } - // TODO: remove when removing "ipam" flag (marked as deprecated), kept for - // backwards compatibility - if k.params.IPAM != "" { - k.Log("ℹī¸ Custom IPAM mode: %s", k.params.IPAM) - } - - if !utils.IsInHelmMode() { - if strings.Contains(k.params.ClusterName, ".") { - k.Log("❌ Cluster name %q cannot contain dots", k.params.ClusterName) - return fmt.Errorf("invalid cluster name, dots are not allowed") - } - - if !clusterNameValidation.MatchString(k.params.ClusterName) { - k.Log("❌ Cluster name %q is not valid, must match regular expression: %s", k.params.ClusterName, clusterNameValidation) - return fmt.Errorf("invalid cluster name") - } - } - - switch k.params.Encryption { - case encryptionDisabled, - encryptionIPsec, - encryptionWireguard, - encryptionUnspecified: - // nothing to do for valid values - default: - k.Log("❌ Invalid encryption mode: %q", k.params.Encryption) - return fmt.Errorf("invalid encryption mode") - } - k.autodetectKubeProxy(ctx) return k.autoEnableBPFMasq() } func (k *K8sInstaller) autodetectKubeProxy(ctx context.Context) error { - if k.params.UserSetKubeProxyReplacement { - return nil - } else if k.flavor.Kind == k8s.KindK3s { + if k.flavor.Kind == k8s.KindK3s { return nil } diff --git a/install/azure.go b/install/azure.go index 52020e0e49..37f07ff033 100644 --- a/install/azure.go +++ b/install/azure.go @@ -4,19 +4,11 @@ package install import ( - "context" "encoding/json" "fmt" - "github.com/cilium/cilium/pkg/versioncheck" "helm.sh/helm/v3/pkg/cli" "helm.sh/helm/v3/pkg/getter" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/cilium/cilium-cli/defaults" - "github.com/cilium/cilium-cli/internal/utils" - yamlUtils "github.com/cilium/cilium-cli/utils/yaml" ) type accountInfo struct { @@ -138,12 +130,7 @@ func (k *K8sInstaller) azureRetrieveAKSClusterInfo() error { return err } if k.params.Azure.ResourceGroupName == "" { - var requiredFlagsNote string - if utils.IsInHelmMode() { - requiredFlagsNote = "azure.resourceGroup Helm value" - } else { - requiredFlagsNote = "--azure-resource-group or azure.resourceGroup Helm value" - } + requiredFlagsNote := "azure.resourceGroup Helm value" k.Log("❌ Azure resource group is required, please specify %s", requiredFlagsNote) return fmt.Errorf("missing Azure resource group name") } @@ 
-233,41 +220,3 @@ func (k *K8sInstaller) azExec(args ...string) ([]byte, error) { args = append(args, "--output", "json", "--only-show-errors") return k.Exec("az", args...) } - -func (k *K8sInstaller) createAKSSecrets(ctx context.Context) error { - // Check if secret already exists and reuse it - _, err := k.client.GetSecret(ctx, k.params.Namespace, defaults.AKSSecretName, metav1.GetOptions{}) - if err == nil { - k.Log("🔑 Found existing AKS secret %s", defaults.AKSSecretName) - return nil - } - - var ( - secretFileName string - ) - - switch { - case versioncheck.MustCompile(">=1.12.0")(k.chartVersion): - secretFileName = "templates/cilium-operator/secret.yaml" - default: - return fmt.Errorf("cilium version unsupported %s", k.chartVersion) - } - - secretFile := k.manifests[secretFileName] - - var secret corev1.Secret - yamlUtils.MustUnmarshal([]byte(secretFile), &secret) - - k.Log("🔑 Generated AKS secret %s", defaults.AKSSecretName) - _, err = k.client.CreateSecret(ctx, k.params.Namespace, &secret, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("unable to create AKS secret %s/%s: %w", k.params.Namespace, defaults.AKSSecretName, err) - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteSecret(ctx, k.params.Namespace, defaults.AKSSecretName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s Secret: %s", defaults.AKSSecretName, err) - } - }) - - return nil -} diff --git a/install/bgp.go b/install/bgp.go deleted file mode 100644 index 4a45380e74..0000000000 --- a/install/bgp.go +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package install - -func (k *K8sInstaller) bgpEnabled() bool { - return k.params.configOverwrites["bgp-announce-lb-ip"] == "true" -} diff --git a/install/certs.go b/install/certs.go deleted file mode 100644 index 8a2109247b..0000000000 --- a/install/certs.go +++ /dev/null @@ -1,131 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package install - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/cloudflare/cfssl/config" - "github.com/cloudflare/cfssl/csr" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/cilium/cilium-cli/defaults" - "github.com/cilium/cilium-cli/k8s" -) - -func (k *K8sInstaller) createHubbleServerCertificate(ctx context.Context) error { - commonName := fmt.Sprintf("*.%s.hubble-grpc.cilium.io", strings.ReplaceAll(k.params.ClusterName, ".", "-")) - certReq := &csr.CertificateRequest{ - Names: []csr.Name{{C: "US", ST: "San Francisco", L: "CA"}}, - KeyRequest: csr.NewKeyRequest(), - Hosts: []string{commonName}, - CN: commonName, - } - - signConf := &config.Signing{ - Default: &config.SigningProfile{Expiry: 5 * 365 * 24 * time.Hour}, - Profiles: map[string]*config.SigningProfile{ - defaults.HubbleServerSecretName: { - Expiry: 3 * 365 * 24 * time.Hour, - Usage: []string{"signing", "key encipherment", "server auth", "client auth"}, - }, - }, - } - - cert, key, err := k.certManager.GenerateCertificate(defaults.HubbleServerSecretName, certReq, signConf) - if err != nil { - return fmt.Errorf("unable to generate certificate %s: %w", defaults.HubbleServerSecretName, err) - } - - data := map[string][]byte{ - corev1.TLSCertKey: cert, - corev1.TLSPrivateKeyKey: key, - defaults.CASecretCertName: k.certManager.CACertBytes(), - } - - _, err = k.client.CreateSecret(ctx, k.params.Namespace, k8s.NewTLSSecret(defaults.HubbleServerSecretName, 
k.params.Namespace, data), metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("unable to create secret %s/%s: %w", k.params.Namespace, defaults.HubbleServerSecretName, err) - } - - return nil -} - -func (k *K8sUninstaller) uninstallCerts(ctx context.Context) (err error) { - if err = k.client.DeleteSecret(ctx, k.params.Namespace, defaults.HubbleServerSecretName, metav1.DeleteOptions{}); err != nil { - err = fmt.Errorf("unable to delete secret %s/%s: %w", k.params.Namespace, defaults.HubbleServerSecretName, err) - } - if err2 := k.client.DeleteSecret(ctx, k.params.Namespace, defaults.CASecretName, metav1.DeleteOptions{}); err2 != nil { - err2 = fmt.Errorf("unable to delete CA secret %s/%s: %w", k.params.Namespace, defaults.CASecretName, err2) - if err == nil { - err = err2 - } - } - - return err -} - -func (k *K8sInstaller) installCerts(ctx context.Context) error { - if k.params.InheritCA != "" { - caCluster, err := k8s.NewClient(k.params.InheritCA, "", k.params.Namespace) - if err != nil { - return fmt.Errorf("unable to create Kubernetes client to derive CA from: %w", err) - } - - s, err := caCluster.GetSecret(ctx, k.params.Namespace, defaults.CASecretName, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("secret %s not found to derive CA from: %w", defaults.CASecretName, err) - } - - newSecret := k8s.NewSecret(defaults.CASecretName, k.params.Namespace, s.Data) - _, err = k.client.CreateSecret(ctx, k.params.Namespace, newSecret, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("unable to create secret to store CA: %w", err) - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteSecret(ctx, k.params.Namespace, defaults.CASecretName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s Secret: %s", defaults.CASecretName, err) - } - }) - } - - caSecret, created, err := k.certManager.GetOrCreateCASecret(ctx, defaults.CASecretName, true) - if err != nil { - k.Log("❌ Unable to get or create the Cilium CA Secret: %s", err) - return err - } - - if caSecret != nil { - err = k.certManager.LoadCAFromK8s(caSecret) - if err != nil { - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteSecret(ctx, k.params.Namespace, caSecret.Name, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s Secret: %s", caSecret.Name, err) - } - }) - return err - } - if created { - k.Log("🔑 Created CA in secret %s", caSecret.Name) - } else { - k.Log("🔑 Found CA in secret %s", caSecret.Name) - } - } - - k.Log("🔑 Generating certificates for Hubble...") - if err := k.createHubbleServerCertificate(ctx); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteSecret(ctx, k.params.Namespace, defaults.HubbleServerSecretName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s Secret: %s", defaults.HubbleServerSecretName, err) - } - }) - - return nil -} diff --git a/install/encryption.go b/install/encryption.go deleted file mode 100644 index 882d147eb1..0000000000 --- a/install/encryption.go +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package install - -import ( - "context" - "crypto/rand" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/cilium/cilium-cli/defaults" - "github.com/cilium/cilium-cli/k8s" -) - -func generateRandomKey() (string, error) { - random := make([]byte, 20) - _, err := rand.Read(random) - if err != nil { - return "", fmt.Errorf("unable to generate 
random sequence for key: %w", err) - } - - key := "3 rfc4106(gcm(aes)) " - for _, c := range random { - key += fmt.Sprintf("%02x", c) - } - key += " 128" - - return key, nil -} - -func (k *K8sInstaller) createEncryptionSecret(ctx context.Context) error { - // Check if secret already exists and reuse it - _, err := k.client.GetSecret(ctx, k.params.Namespace, defaults.EncryptionSecretName, metav1.GetOptions{}) - if err == nil { - k.Log("🔑 Found existing encryption secret %s", defaults.EncryptionSecretName) - return nil - } - - key, err := generateRandomKey() - if err != nil { - return err - } - - data := map[string][]byte{"keys": []byte(key)} - - k.Log("🔑 Generated encryption secret %s", defaults.EncryptionSecretName) - _, err = k.client.CreateSecret(ctx, k.params.Namespace, k8s.NewSecret(defaults.EncryptionSecretName, k.params.Namespace, data), metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("unable to create encryption secret %s/%s: %w", k.params.Namespace, defaults.HubbleServerSecretName, err) - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteSecret(ctx, k.params.Namespace, defaults.EncryptionSecretName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s Secret: %s", defaults.EncryptionSecretName, err) - } - }) - - return nil -} diff --git a/install/encryption_test.go b/install/encryption_test.go deleted file mode 100644 index 1748765934..0000000000 --- a/install/encryption_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package install - -import ( - "strings" - "testing" - - "gopkg.in/check.v1" -) - -func Test(t *testing.T) { - check.TestingT(t) -} - -type InstallSuite struct{} - -var _ = check.Suite(&InstallSuite{}) - -func (b *InstallSuite) TestGenerateKey(c *check.C) { - k, err := generateRandomKey() - c.Assert(err, check.IsNil) - tokens := strings.Split(k, " ") - // 3 rfc4106(gcm(aes)) 5118217c0a040d9a4cc22da05bcd677db33cb30d 128 - c.Assert(len(tokens), check.Equals, 4) -} diff --git a/install/helm.go b/install/helm.go index 2b3d8bb992..0418b05208 100644 --- a/install/helm.go +++ b/install/helm.go @@ -6,55 +6,15 @@ package install import ( - "context" "fmt" - "os" - "strconv" - "strings" "github.com/cilium/cilium-cli/defaults" "github.com/cilium/cilium-cli/internal/helm" "github.com/cilium/cilium-cli/k8s" "github.com/cilium/cilium/pkg/versioncheck" - "github.com/spf13/pflag" - "helm.sh/helm/v3/pkg/chartutil" ) -func (k *K8sInstaller) generateManifests(ctx context.Context) error { - vals, err := k.getHelmValues() - if err != nil { - return err - } - - apiVersions := k.getAPIVersions(ctx) - - helm.PrintHelmTemplateCommand(k, vals, k.params.HelmChartDirectory, k.params.Namespace, k.chartVersion, apiVersions) - - yamlValue, err := chartutil.Values(vals).YAML() - if err != nil { - return err - } - - if k.params.HelmGenValuesFile != "" { - return os.WriteFile(k.params.HelmGenValuesFile, []byte(yamlValue), 0o600) - } - - k8sVersionStr, err := k.getKubernetesVersion() - if err != nil { - return err - } - - manifests, err := helm.GenManifests(ctx, k.params.HelmChartDirectory, k8sVersionStr, k.chartVersion, k.params.Namespace, vals, apiVersions) - if err != nil { - return err - } - - k.manifests = manifests - k.helmYAMLValues = yamlValue - return nil -} - func (k *K8sInstaller) getHelmValues() (map[string]interface{}, error) { helmMapOpts := map[string]string{} deprecatedCfgOpts := map[string]string{} @@ -64,112 +24,10 @@ func (k 
*K8sInstaller) getHelmValues() (map[string]interface{}, error) { // These were tested for the >=1.11.0. In case something breaks for versions // older than 1.11.0 we will fix it afterwards. case versioncheck.MustCompile(">=1.9.0")(k.chartVersion): - // case versioncheck.MustCompile(">=1.11.0")(ciliumVer): - // If the user has specified a version with `--image-tag` then - // set all image tags with that version. The user will have the - // option to overwrite any of these options with the specific - // helm option. - imageTag := k.params.ImageTag - if imageTag == "" { - // If the user has specified a version with `--version` then - // set all image tags with that version. The user will have the - // option to overwrite any of these options with the specific - // helm option. - imageTag = k.getImagesSHA() - } - - imageSuffix := k.params.ImageSuffix - if imageSuffix != "" { - // When using suffix tag must be defaulted to "latest" to prevent deduced - // version from being used as the operator tag by Cilium Helm charts. - // This also makes k8s pod list to actually contain the image tag. - if imageTag == "" { - imageTag = "latest" - k.Log("ℹī¸ Defaulting image tag to %q due to --image-suffix option", imageTag) - } - colonTag := ":" + imageTag - helmMapOpts["image.override"] = fmt.Sprintf("quay.io/cilium/cilium%s%s", imageSuffix, colonTag) - helmMapOpts["image.useDigest"] = "false" - // operator has different cloud variants and supports image.suffix - helmMapOpts["operator.image.suffix"] = imageSuffix - helmMapOpts["operator.image.tag"] = imageTag - helmMapOpts["operator.image.useDigest"] = "false" - helmMapOpts["hubble.relay.image.override"] = fmt.Sprintf("quay.io/cilium/hubble-relay%s%s", imageSuffix, colonTag) - helmMapOpts["hubble.relay.image.useDigest"] = "false" - helmMapOpts["preflight.image.override"] = fmt.Sprintf("quay.io/cilium/cilium%s%s", imageSuffix, colonTag) - helmMapOpts["preflight.image.useDigest"] = "false" - helmMapOpts["clustermesh.apiserver.image.override"] = fmt.Sprintf("quay.io/cilium/clustermesh-apiserver%s%s", imageSuffix, colonTag) - helmMapOpts["clustermesh.apiserver.image.useDigest"] = "false" - } else if imageTag != "" { - // Helm ignores image.tag if image.override is set - helmMapOpts["image.tag"] = imageTag - helmMapOpts["image.useDigest"] = "false" - helmMapOpts["operator.image.tag"] = imageTag - helmMapOpts["operator.image.useDigest"] = "false" - helmMapOpts["hubble.relay.image.tag"] = imageTag - helmMapOpts["hubble.relay.image.useDigest"] = "false" - helmMapOpts["preflight.image.tag"] = imageTag - helmMapOpts["preflight.image.useDigest"] = "false" - helmMapOpts["clustermesh.apiserver.image.tag"] = imageTag - helmMapOpts["clustermesh.apiserver.image.useDigest"] = "false" - } - - // Pre-define all deprecated flags as helm options - for flagName, helmOpt := range FlagsToHelmOpts { - if v, ok := FlagValues[flagName]; ok { - if val := v.String(); val != "" { - helmMapOpts[helmOpt] = val - } - } - } - // Handle the "config" values in a special way since they are a - // stringSlice - if v, ok := FlagValues["config"]; ok { - switch sv := v.(type) { - case pflag.SliceValue: - for _, cfgOpt := range sv.GetSlice() { - cfgOptSplit := strings.Split(cfgOpt, "=") - if len(cfgOptSplit) != 2 { - return nil, fmt.Errorf("--config should be in the format of , got %s", cfgOpt) - } - deprecatedCfgOpts[cfgOptSplit[0]] = cfgOptSplit[1] - } - - default: - panic("Config should be type pflag.SliceValue") - } - } - - helmMapOpts["serviceAccounts.cilium.name"] = 
defaults.AgentServiceAccountName - helmMapOpts["serviceAccounts.operator.name"] = defaults.OperatorServiceAccountName - // TODO(aanm) to keep the previous behavior unchanged we will set the number // of the operator replicas to 1. Ideally this should be the default in the helm chart helmMapOpts["operator.replicas"] = "1" - switch k.params.Encryption { - case encryptionIPsec: - helmMapOpts["encryption.enabled"] = "true" - helmMapOpts["encryption.type"] = "ipsec" - if k.params.NodeEncryption { - helmMapOpts["encryption.nodeEncryption"] = "true" - } - case encryptionWireguard: - helmMapOpts["encryption.enabled"] = "true" - helmMapOpts["encryption.type"] = "wireguard" - // TODO(gandro): Future versions of Cilium will remove the following - // two limitations, we will need to have set the config map values - // based on the installed Cilium version - if versioncheck.MustCompile("<1.14.0")(k.chartVersion) { - helmMapOpts["l7Proxy"] = "false" - k.Log("ℹī¸ L7 proxy disabled due to Wireguard encryption") - - if k.params.NodeEncryption { - k.Log("⚠ī¸ī¸ Wireguard does not support node encryption yet") - } - } - } - // Set nodeinit enabled option if needsNodeInit(k.flavor.Kind, k.chartVersion) { helmMapOpts["nodeinit.enabled"] = "true" @@ -261,24 +119,6 @@ func (k *K8sInstaller) getHelmValues() (map[string]interface{}, error) { helmMapOpts["cluster.name"] = k.params.ClusterName } - // TODO: remove when removing "cluster-id" flag (marked as deprecated), kept - // for backwards compatibility - if k.params.ClusterID != 0 { - helmMapOpts["cluster.id"] = strconv.FormatInt(int64(k.params.ClusterID), 10) - } - - // TODO: remove when removing "ipam" flag (marked as deprecated), kept for - // backwards compatibility - if k.params.IPAM != "" { - helmMapOpts["ipam.mode"] = k.params.IPAM - } - - // TODO: remove when removing "config" flag (marked as deprecated), kept - // for backwards compatibility - if k.bgpEnabled() { - helmMapOpts["bgp.enabled"] = "true" - } - // TODO: remove when removing "ipv4-native-routing-cidr" flag (marked as // deprecated), kept for backwards compatibility if k.params.IPv4NativeRoutingCIDR != "" { @@ -312,38 +152,3 @@ func (k *K8sInstaller) getHelmValues() (map[string]interface{}, error) { return helm.MergeVals(k.params.HelmOpts, helmMapOpts, nil, extraConfigMap) } - -func (k *K8sInstaller) getAPIVersions(ctx context.Context) []string { - // Pull APIVersions and filter for known needed CRDs, if not provided by the user. - // _Each value_ in apiVersions passed to helm.MergeVals will be logged in the `helm template` command, so - // pulling all values from the API server will add a ton of '--api-versions ' arguments to - // the printed command if filtering is not performed. - // Filtering reduces this output to a reasonable size for users, and works for now since there is a limited - // set of CRDs needed for helm template verification. 
- apiVersions := k.params.APIVersions - if len(apiVersions) == 0 { - gvs, err := k.client.ListAPIResources(ctx) - if err != nil { - k.Log("⚠ī¸ Unable to list kubernetes api resources, try --api-versions if needed: %s", err) - } - for _, gv := range gvs { - switch gv { - case "monitoring.coreos.com/v1": - apiVersions = append(apiVersions, gv) - } - } - } - return apiVersions -} - -func (k *K8sInstaller) getKubernetesVersion() (string, error) { - k8sVersionStr := k.params.K8sVersion - if k8sVersionStr != "" { - return k8sVersionStr, nil - } - k8sVersion, err := k.client.GetServerVersion() - if err != nil { - return "", fmt.Errorf("error getting Kubernetes version, try --k8s-version: %s", err) - } - return k8sVersion.String(), nil -} diff --git a/install/install.go b/install/install.go index 6262e8d786..eac6fe8ae8 100644 --- a/install/install.go +++ b/install/install.go @@ -4,16 +4,12 @@ package install import ( - "bytes" "context" "fmt" "io" - "regexp" - "strings" "time" "github.com/blang/semver/v4" - "github.com/spf13/pflag" "helm.sh/helm/v3/pkg/action" "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/cli" @@ -21,25 +17,14 @@ import ( "helm.sh/helm/v3/pkg/getter" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - rbacv1 "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/yaml" - "github.com/cilium/cilium/api/v1/models" - ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "github.com/cilium/cilium/pkg/versioncheck" - "github.com/cilium/cilium-cli/defaults" - "github.com/cilium/cilium-cli/internal/certs" "github.com/cilium/cilium-cli/internal/helm" "github.com/cilium/cilium-cli/internal/utils" "github.com/cilium/cilium-cli/k8s" - "github.com/cilium/cilium-cli/status" - jsonUtils "github.com/cilium/cilium-cli/utils/json" - yamlUtils "github.com/cilium/cilium-cli/utils/yaml" ) const ( @@ -68,226 +53,26 @@ const ( routingModeTunnel = "tunnel" ) -const ( - encryptionUnspecified = "" - encryptionDisabled = "disabled" - encryptionIPsec = "ipsec" - encryptionWireguard = "wireguard" -) - const ( Microk8sSnapPath = "/var/snap/microk8s/current" ) -func (k *K8sInstaller) generateAgentDaemonSet() *appsv1.DaemonSet { - var ( - dsFilename string - ) - - switch { - case versioncheck.MustCompile(">1.10.99")(k.chartVersion): - dsFilename = "templates/cilium-agent/daemonset.yaml" - case versioncheck.MustCompile(">=1.9.0")(k.chartVersion): - dsFilename = "templates/cilium-agent-daemonset.yaml" - } - - dsFile := k.manifests[dsFilename] - - var ds appsv1.DaemonSet - yamlUtils.MustUnmarshal([]byte(dsFile), &ds) - return &ds -} - -func (k *K8sInstaller) generateOperatorDeployment() *appsv1.Deployment { - var ( - deployFilename string - ) - - switch { - case versioncheck.MustCompile(">1.10.99")(k.chartVersion): - deployFilename = "templates/cilium-operator/deployment.yaml" - case versioncheck.MustCompile(">=1.9.0")(k.chartVersion): - deployFilename = "templates/cilium-operator-deployment.yaml" - } - - deployFile := k.manifests[deployFilename] - - var deploy appsv1.Deployment - yamlUtils.MustUnmarshal([]byte(deployFile), &deploy) - return &deploy -} - -func (k *K8sInstaller) generateIngressClass() *networkingv1.IngressClass { - var ( - ingressFileName string - ) - - switch { - case versioncheck.MustCompile(">=1.12.0")(k.chartVersion): - 
ingressFileName = "templates/cilium-ingress-class.yaml" - } - - ingressClassFile, exists := k.manifests[ingressFileName] - if !exists { - return nil - } - - var ingressClass networkingv1.IngressClass - yamlUtils.MustUnmarshal([]byte(ingressClassFile), &ingressClass) - return &ingressClass -} - -func (k *K8sInstaller) generateIngressService() *corev1.Service { - var ( - ingressServiceFilename string - ) - - switch { - case versioncheck.MustCompile(">=1.13.0")(k.chartVersion): - ingressServiceFilename = "templates/cilium-ingress-service.yaml" - } - - ingressServiceFile, exists := k.manifests[ingressServiceFilename] - if !exists { - return nil - } - - var ingressService corev1.Service - yamlUtils.MustUnmarshal([]byte(ingressServiceFile), &ingressService) - return &ingressService -} - -func (k *K8sInstaller) generateIngressEndpoint() *corev1.Endpoints { - var ( - ingressEndpointFilename string - ) - - switch { - case versioncheck.MustCompile(">=1.13.0")(k.chartVersion): - ingressEndpointFilename = "templates/cilium-ingress-service.yaml" - } - - _, exists := k.manifests[ingressEndpointFilename] - if !exists { - return nil - } - - // as the file templates/cilium-ingress-service.yaml is having multiple objects, - // using yamlUtils.MustUnmarshal will only unmarshal the first object. - // Hence, reconstructing the endpoint object here. - return &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cilium-ingress", - }, - Subsets: []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{{IP: "192.192.192.192"}}, - Ports: []corev1.EndpointPort{{Port: 9999}}, - }, - }, - } -} - -func (k *K8sInstaller) getSecretNamespace() string { - var ( - nsFilename string - ) - - switch { - case versioncheck.MustCompile(">1.11.99")(k.chartVersion): - nsFilename = "templates/cilium-secrets-namespace.yaml" - } - - nsFile, ok := k.manifests[nsFilename] - if !ok { - return "" - } - - var ns corev1.Namespace - yamlUtils.MustUnmarshal([]byte(nsFile), &ns) - return ns.GetName() -} - type k8sInstallerImplementation interface { - ClusterName() string GetAPIServerHostAndPort() (string, string) - ListNodes(ctx context.Context, options metav1.ListOptions) (*corev1.NodeList, error) - PatchNode(ctx context.Context, nodeName string, pt types.PatchType, data []byte) (*corev1.Node, error) - GetCiliumExternalWorkload(ctx context.Context, name string, opts metav1.GetOptions) (*ciliumv2.CiliumExternalWorkload, error) - CreateCiliumExternalWorkload(ctx context.Context, cew *ciliumv2.CiliumExternalWorkload, opts metav1.CreateOptions) (*ciliumv2.CiliumExternalWorkload, error) - DeleteCiliumExternalWorkload(ctx context.Context, name string, opts metav1.DeleteOptions) error - ListCiliumExternalWorkloads(ctx context.Context, opts metav1.ListOptions) (*ciliumv2.CiliumExternalWorkloadList, error) - CreateServiceAccount(ctx context.Context, namespace string, account *corev1.ServiceAccount, opts metav1.CreateOptions) (*corev1.ServiceAccount, error) - DeleteServiceAccount(ctx context.Context, namespace, name string, opts metav1.DeleteOptions) error - GetConfigMap(ctx context.Context, namespace, name string, opts metav1.GetOptions) (*corev1.ConfigMap, error) - CreateConfigMap(ctx context.Context, namespace string, config *corev1.ConfigMap, opts metav1.CreateOptions) (*corev1.ConfigMap, error) - DeleteConfigMap(ctx context.Context, namespace, name string, opts metav1.DeleteOptions) error - CreateClusterRole(ctx context.Context, config *rbacv1.ClusterRole, opts metav1.CreateOptions) (*rbacv1.ClusterRole, error) - 
DeleteClusterRole(ctx context.Context, name string, opts metav1.DeleteOptions) error - CreateClusterRoleBinding(ctx context.Context, role *rbacv1.ClusterRoleBinding, opts metav1.CreateOptions) (*rbacv1.ClusterRoleBinding, error) - DeleteClusterRoleBinding(ctx context.Context, name string, opts metav1.DeleteOptions) error - CreateRole(ctx context.Context, namespace string, role *rbacv1.Role, opts metav1.CreateOptions) (*rbacv1.Role, error) - UpdateRole(ctx context.Context, namespace string, role *rbacv1.Role, opts metav1.UpdateOptions) (*rbacv1.Role, error) - DeleteRole(ctx context.Context, namespace string, name string, opts metav1.DeleteOptions) error - CreateRoleBinding(ctx context.Context, namespace string, roleBinding *rbacv1.RoleBinding, opts metav1.CreateOptions) (*rbacv1.RoleBinding, error) - UpdateRoleBinding(ctx context.Context, namespace string, roleBinding *rbacv1.RoleBinding, opts metav1.UpdateOptions) (*rbacv1.RoleBinding, error) - DeleteRoleBinding(ctx context.Context, namespace, name string, opts metav1.DeleteOptions) error - CreateDaemonSet(ctx context.Context, namespace string, ds *appsv1.DaemonSet, opts metav1.CreateOptions) (*appsv1.DaemonSet, error) ListDaemonSet(ctx context.Context, namespace string, o metav1.ListOptions) (*appsv1.DaemonSetList, error) GetDaemonSet(ctx context.Context, namespace, name string, opts metav1.GetOptions) (*appsv1.DaemonSet, error) - DeleteDaemonSet(ctx context.Context, namespace, name string, opts metav1.DeleteOptions) error PatchDaemonSet(ctx context.Context, namespace, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions) (*appsv1.DaemonSet, error) - GetService(ctx context.Context, namespace, name string, opts metav1.GetOptions) (*corev1.Service, error) GetEndpoints(ctx context.Context, namespace, name string, opts metav1.GetOptions) (*corev1.Endpoints, error) - CreateEndpoints(ctx context.Context, namespace string, ep *corev1.Endpoints, opts metav1.CreateOptions) (*corev1.Endpoints, error) - DeleteEndpoints(ctx context.Context, namespace, name string, opts metav1.DeleteOptions) error - CreateService(ctx context.Context, namespace string, service *corev1.Service, opts metav1.CreateOptions) (*corev1.Service, error) - DeleteService(ctx context.Context, namespace, name string, opts metav1.DeleteOptions) error - DeleteDeployment(ctx context.Context, namespace, name string, opts metav1.DeleteOptions) error - CreateDeployment(ctx context.Context, namespace string, deployment *appsv1.Deployment, opts metav1.CreateOptions) (*appsv1.Deployment, error) - GetDeployment(ctx context.Context, namespace, name string, opts metav1.GetOptions) (*appsv1.Deployment, error) - PatchDeployment(ctx context.Context, namespace, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions) (*appsv1.Deployment, error) - CheckDeploymentStatus(ctx context.Context, namespace, deployment string) error - DeleteNamespace(ctx context.Context, namespace string, opts metav1.DeleteOptions) error - CreateNamespace(ctx context.Context, namespace *corev1.Namespace, opts metav1.CreateOptions) (*corev1.Namespace, error) - GetNamespace(ctx context.Context, namespace string, options metav1.GetOptions) (*corev1.Namespace, error) - ListPods(ctx context.Context, namespace string, options metav1.ListOptions) (*corev1.PodList, error) - DeletePod(ctx context.Context, namespace, name string, options metav1.DeleteOptions) error - ExecInPod(ctx context.Context, namespace, pod, container string, command []string) (bytes.Buffer, error) - CreateSecret(ctx 
context.Context, namespace string, secret *corev1.Secret, opts metav1.CreateOptions) (*corev1.Secret, error) - UpdateSecret(ctx context.Context, namespace string, secret *corev1.Secret, opts metav1.UpdateOptions) (*corev1.Secret, error) - DeleteSecret(ctx context.Context, namespace, name string, opts metav1.DeleteOptions) error - GetSecret(ctx context.Context, namespace, name string, opts metav1.GetOptions) (*corev1.Secret, error) - PatchSecret(ctx context.Context, namespace, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions) (*corev1.Secret, error) - CreateResourceQuota(ctx context.Context, namespace string, r *corev1.ResourceQuota, opts metav1.CreateOptions) (*corev1.ResourceQuota, error) - DeleteResourceQuota(ctx context.Context, namespace, name string, opts metav1.DeleteOptions) error AutodetectFlavor(ctx context.Context) k8s.Flavor ContextName() (name string) - CiliumStatus(ctx context.Context, namespace, pod string) (*models.StatusResponse, error) - CiliumDbgEndpoints(ctx context.Context, namespace, pod string) ([]*models.Endpoint, error) - ListCiliumEndpoints(ctx context.Context, namespace string, opts metav1.ListOptions) (*ciliumv2.CiliumEndpointList, error) - GetRunningCiliumVersion(ctx context.Context, namespace string) (string, error) - GetPlatform(ctx context.Context) (*k8s.Platform, error) - GetServerVersion() (*semver.Version, error) - CreateIngressClass(ctx context.Context, r *networkingv1.IngressClass, opts metav1.CreateOptions) (*networkingv1.IngressClass, error) - GetIngress(ctx context.Context, namespace string, ingressName string, opts metav1.GetOptions) (*networkingv1.Ingress, error) - CreateIngress(ctx context.Context, namespace string, ingress *networkingv1.Ingress, opts metav1.CreateOptions) (*networkingv1.Ingress, error) - DeleteIngressClass(ctx context.Context, name string, opts metav1.DeleteOptions) error - CiliumLogs(ctx context.Context, namespace, pod string, since time.Time, filter *regexp.Regexp) (string, error) - ListAPIResources(ctx context.Context) ([]string, error) - GetHelmState(ctx context.Context, namespace string, secretName string) (*helm.State, error) } type K8sInstaller struct { - client k8sInstallerImplementation - params Parameters - flavor k8s.Flavor - certManager *certs.CertManager - rollbackSteps []rollbackStep - manifests map[string]string - helmYAMLValues string - chartVersion semver.Version - chart *chart.Chart + client k8sInstallerImplementation + params Parameters + flavor k8s.Flavor + chartVersion semver.Version + chart *chart.Chart } type AzureParameters struct { @@ -301,60 +86,17 @@ type AzureParameters struct { IsBYOCNI bool } -var ( - // FlagsToHelmOpts maps the deprecated install flags to the helm - // options - FlagsToHelmOpts = map[string]string{ - "agent-image": "image.override", - "azure-client-id": "azure.clientID", - "azure-client-secret": "azure.clientSecret", - "azure-resource-group": "azure.resourceGroup", - "azure-subscription-id": "azure.subscriptionID", - "azure-tenant-id": "azure.tenantID", - "cluster-id": "cluster.id", - "cluster-name": "cluster.name", - "ipam": "ipam.mode", - "ipv4-native-routing-cidr": "ipv4NativeRoutingCIDR", - "node-encryption": "encryption.nodeEncryption", - "operator-image": "operator.image.override", - } - // FlagValues maps all FlagsToHelmOpts keys to their values - FlagValues = map[string]pflag.Value{} -) - type Parameters struct { Namespace string Writer io.Writer ClusterName string DisableChecks []string Version string - AgentImage string - OperatorImage string - 
RelayImage string - ClusterMeshAPIImage string - InheritCA string Wait bool WaitDuration time.Duration DatapathMode string IPv4NativeRoutingCIDR string - ClusterID int - IPAM string Azure AzureParameters - RestartUnmanagedPods bool - Encryption string - NodeEncryption bool - ConfigOverwrites []string - configOverwrites map[string]string - Rollback bool - - // CiliumReadyTimeout defines the wait timeout for Cilium to become ready - // after installing. - CiliumReadyTimeout time.Duration - - // K8sVersion is the Kubernetes version that will be used to generate the - // kubernetes manifests. If the auto-detection fails, this flag can be used - // as a workaround. - K8sVersion string // HelmChartDirectory points to the location of a helm chart directory. // Useful to test from upstream where a helm release is not available yet. @@ -364,10 +106,6 @@ type Parameters struct { // template. HelmOpts values.Options - // HelmGenValuesFile points to the file that will store the generated helm - // options. - HelmGenValuesFile string - // HelmResetValues if true, will reset helm values to the defaults found in the chart when upgrading HelmResetValues bool @@ -375,30 +113,12 @@ type Parameters struct { // specified by other flags. This options take precedence over the HelmResetValues option. HelmReuseValues bool - // ImageSuffix will set the suffix that should be set on all docker images - // generated by cilium-cli - ImageSuffix string - - // ImageTag will set the tags that will be set on all docker images - // generated by cilium-cli - ImageTag string - - // HelmValuesSecretName is the name of the secret where helm values will be - // stored. - HelmValuesSecretName string - // ListVersions lists all the available versions for install without actually installing. ListVersions bool // NodesWithoutCilium lists all nodes on which Cilium is not installed. NodesWithoutCilium []string - // APIVersions defines extra kubernetes api resources that can be passed to helm for capabilities validation, - // specifically for CRDs. - APIVersions []string - // UserSetKubeProxyReplacement will be set as true if user passes helm opt for the Kube-Proxy replacement. - UserSetKubeProxyReplacement bool - // DryRun writes resources to be installed to stdout without actually installing them. For Helm // installation mode only. DryRun bool @@ -415,58 +135,7 @@ func (p *Parameters) IsDryRun() bool { return p.DryRun || p.DryRunHelmValues } -func (p *Parameters) validate() error { - p.configOverwrites = map[string]string{} - for _, config := range p.ConfigOverwrites { - t := strings.SplitN(config, "=", 2) - if len(t) != 2 { - return fmt.Errorf("invalid config overwrite %q, must be in the form key=value", config) - } - - p.configOverwrites[t[0]] = t[1] - } - if utils.IsInHelmMode() { - // Version validation logic does not apply to Helm mode. 
- return nil - } else if p.AgentImage != "" || p.OperatorImage != "" || p.RelayImage != "" { - return nil - } else if err := utils.CheckVersion(p.Version); err != nil { - return err - } - - return nil -} - -func (k *K8sInstaller) fqAgentImage() string { - return utils.BuildImagePath(k.params.AgentImage, k.params.Version, defaults.AgentImage, defaults.Version) -} - -func (k *K8sInstaller) fqOperatorImage() string { - defaultImage := defaults.OperatorImage - switch k.params.DatapathMode { - case DatapathAwsENI: - defaultImage = defaults.OperatorImageAWS - case DatapathAzure: - defaultImage = defaults.OperatorImageAzure - } - - return utils.BuildImagePath(k.params.OperatorImage, k.params.Version, defaultImage, defaults.Version) -} - -func (k *K8sInstaller) fqRelayImage() string { - return utils.BuildImagePath(k.params.RelayImage, k.params.Version, defaults.RelayImage, defaults.Version) -} - -func (k *K8sInstaller) fqClusterMeshAPIImage() string { - return utils.BuildImagePath(k.params.ClusterMeshAPIImage, k.params.Version, defaults.ClusterMeshApiserverImage, defaults.Version) -} - func NewK8sInstaller(client k8sInstallerImplementation, p Parameters) (*K8sInstaller, error) { - if err := (&p).validate(); err != nil { - return nil, fmt.Errorf("invalid parameters: %w", err) - } - - cm := certs.NewCertManager(client, certs.Parameters{Namespace: p.Namespace}) chartVersion, helmChart, err := helm.ResolveHelmChartVersion(p.Version, p.HelmChartDirectory, p.HelmRepository) if err != nil { return nil, err @@ -475,7 +144,6 @@ func NewK8sInstaller(client k8sInstallerImplementation, p Parameters) (*K8sInsta return &K8sInstaller{ client: client, params: p, - certManager: cm, chartVersion: chartVersion, chart: helmChart, }, nil @@ -489,138 +157,6 @@ func (k *K8sInstaller) Exec(command string, args ...string) ([]byte, error) { return utils.Exec(k, command, args...) } -func (k *K8sInstaller) getImagesSHA() string { - ersion := strings.TrimPrefix(k.params.Version, "v") - _, err := versioncheck.Version(ersion) - // If we got an error then it means this is a commit SHA that the user - // wants to install on all images. 
- if err != nil { - return k.params.Version - } - return "" -} - -func (k *K8sInstaller) generateConfigMap() (*corev1.ConfigMap, error) { - var ( - cmFilename string - ) - - switch { - case versioncheck.MustCompile(">=1.9.0")(k.chartVersion): - cmFilename = "templates/cilium-configmap.yaml" - default: - return nil, fmt.Errorf("cilium version unsupported %s", k.chartVersion.String()) - } - - cmFile := k.manifests[cmFilename] - - var cm corev1.ConfigMap - yamlUtils.MustUnmarshal([]byte(cmFile), &cm) - k.Log("🚀 Creating ConfigMap for Cilium version %s...", k.chartVersion) - - for key, value := range k.params.configOverwrites { - k.Log("ℹī¸ Manual overwrite in ConfigMap: %s=%s", key, value) - cm.Data[key] = value - } - - if cm.Data["install-no-conntrack-iptables-rules"] == "true" { - switch k.params.DatapathMode { - case DatapathAwsENI: - return nil, fmt.Errorf("--install-no-conntrack-iptables-rules cannot be enabled on AWS EKS") - case DatapathGKE: - return nil, fmt.Errorf("--install-no-conntrack-iptables-rules cannot be enabled on Google GKE") - case DatapathAzure: - return nil, fmt.Errorf("--install-no-conntrack-iptables-rules cannot be enabled on Azure AKS") - } - - // The check for the legacy "tunnel" flag can be removed once we drop support for Cilium v1.14 - if cm.Data["tunnel"] != "disabled" || cm.Data["routing-mode"] != "native" { - return nil, fmt.Errorf("--install-no-conntrack-iptables-rules requires tunneling to be disabled") - } - - if cm.Data["kube-proxy-replacement"] != "strict" { - return nil, fmt.Errorf("--install-no-conntrack-iptables-rules requires kube-proxy replacement to be enabled") - } - - if cm.Data["enable-bpf-masquerade"] != "true" { - return nil, fmt.Errorf("--install-no-conntrack-iptables-rules requires eBPF masquerading to be enabled") - } - - if cm.Data["cni-chaining-mode"] != "" { - return nil, fmt.Errorf("--install-no-conntrack-iptables-rules cannot be enabled with CNI chaining") - } - } - - return &cm, nil -} - -func (k *K8sInstaller) generateResourceQuotas() []*corev1.ResourceQuota { - resoureceQuotasFilename := "templates/cilium-resource-quota.yaml" - resourceQuotasFile, exists := k.manifests[resoureceQuotasFilename] - if !exists { - return nil - } - resourceQuotas := yamlUtils.MustUnmarshalMulti[*corev1.ResourceQuota]([]byte(resourceQuotasFile)) - return resourceQuotas -} - -func (k *K8sInstaller) restartUnmanagedPods(ctx context.Context) error { - var printed bool - - pods, err := k.client.ListPods(ctx, "", metav1.ListOptions{}) - if err != nil { - return fmt.Errorf("unable to list pods: %w", err) - } - - // If not pods are running, skip. This avoids attemptingm to retrieve - // CiliumEndpoints if no pods are present at all. Cilium will not be - // running either. - if len(pods.Items) == 0 { - return nil - } - - cepMap := map[string]struct{}{} - ceps, err := k.client.ListCiliumEndpoints(ctx, "", metav1.ListOptions{}) - if err != nil { - // When the CEP has not been registered yet, it's impossible - // for any pods to be managed by Cilium. - if err.Error() != "the server could not find the requested resource (get ciliumendpoints.cilium.io)" { - return fmt.Errorf("unable to list cilium endpoints: %w", err) - } - } else { - for _, cep := range ceps.Items { - cepMap[cep.Namespace+"/"+cep.Name] = struct{}{} - } - } - - for _, pod := range pods.Items { - // PodSucceeded means that all containers in the pod have voluntarily terminated - // with a container exit code of 0, and the system is not going to restart any of these containers. 
- if pod.Status.Phase == corev1.PodSucceeded { - continue - } - if !pod.Spec.HostNetwork { - if _, ok := cepMap[pod.Namespace+"/"+pod.Name]; ok { - continue - } - - if !printed { - k.Log("â™ģī¸ Restarting unmanaged pods...") - printed = true - } - err := k.client.DeletePod(ctx, pod.Namespace, pod.Name, metav1.DeleteOptions{}) - if err != nil { - k.Log("⚠ī¸ Unable to restart pod %s/%s: %s", pod.Namespace, pod.Name, err) - } else { - k.Log("â™ģī¸ Restarted unmanaged pod %s/%s", pod.Namespace, pod.Name) - } - } - } - - return nil - -} - func (k *K8sInstaller) listVersions() error { // Print available versions and return. versions, err := helm.ListVersions() @@ -702,332 +238,6 @@ func (k *K8sInstaller) preinstall(ctx context.Context) error { return nil } -func (k *K8sInstaller) Install(ctx context.Context) error { - if k.params.ListVersions { - return k.listVersions() - } - if err := k.preinstall(ctx); err != nil { - return err - } - err := k.generateManifests(ctx) - if err != nil { - return err - } - - if k.params.HelmGenValuesFile != "" { - k.Log("ℹī¸ Generated helm values file %q successfully written", k.params.HelmGenValuesFile) - return nil - } - - k.Log("ℹī¸ Storing helm values file in %s/%s Secret", k.params.Namespace, k.params.HelmValuesSecretName) - - helmSecret := k8s.NewSecret(k.params.HelmValuesSecretName, k.params.Namespace, - map[string][]byte{ - defaults.HelmValuesSecretKeyName: []byte(k.helmYAMLValues), - defaults.HelmChartVersionSecretKeyName: []byte(k.chartVersion.String()), - }) - if _, err := k.client.GetSecret(ctx, k.params.Namespace, k.params.HelmValuesSecretName, metav1.GetOptions{}); err == nil { - if _, err := k.client.UpdateSecret(ctx, k.params.Namespace, helmSecret, metav1.UpdateOptions{}); err != nil { - k.Log("❌ Unable to store helm values file %s/%s Secret", k.params.Namespace, k.params.HelmValuesSecretName) - return err - } - } else { - if _, err := k.client.CreateSecret(ctx, k.params.Namespace, helmSecret, metav1.CreateOptions{}); err != nil { - k.Log("❌ Unable to store helm values file %s/%s Secret", k.params.Namespace, k.params.HelmValuesSecretName) - return err - } - } - - switch k.flavor.Kind { - case k8s.KindAKS: - // We only made the secret-based azure installation available in >= 1.12.0 - // Introduced in https://github.com/cilium/cilium/pull/18010 - // Additionally, secrets are only needed when using Azure IPAM - if k.params.DatapathMode == DatapathAzure && versioncheck.MustCompile(">=1.12.0")(k.chartVersion) { - if err := k.createAKSSecrets(ctx); err != nil { - return err - } - } - } - - if err := k.installCerts(ctx); err != nil { - return err - } - - for _, nodeName := range k.params.NodesWithoutCilium { - k.Log("🚀 Setting label %q on node %q to prevent Cilium from being scheduled on it...", defaults.CiliumNoScheduleLabel, nodeName) - label := jsonUtils.EscapePatchString(defaults.CiliumNoScheduleLabel) - labelPatch := fmt.Sprintf(`[{"op":"add","path":"/metadata/labels/%s","value":"true"}]`, label) - _, err = k.client.PatchNode(ctx, nodeName, types.JSONPatchType, []byte(labelPatch)) - if err != nil { - return err - } - } - - resourceQuotas := k.generateResourceQuotas() - for _, resourceQuota := range resourceQuotas { - k.Log("🚀 Creating resource quota %s...", resourceQuota.Name) - if _, err := k.client.CreateResourceQuota(ctx, k.params.Namespace, resourceQuota, metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteResourceQuota(ctx, k.params.Namespace, 
resourceQuota.Name, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s ResourceQuota: %s", resourceQuota.Name, err) - } - }) - } - - k.Log("🚀 Creating Service accounts...") - if _, err := k.client.CreateServiceAccount(ctx, k.params.Namespace, k.NewServiceAccount(defaults.AgentServiceAccountName), metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteServiceAccount(ctx, k.params.Namespace, defaults.AgentServiceAccountName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s ServiceAccount: %s", defaults.AgentServiceAccountName, err) - } - }) - - if _, err := k.client.CreateServiceAccount(ctx, k.params.Namespace, k.NewServiceAccount(defaults.OperatorServiceAccountName), metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteServiceAccount(ctx, k.params.Namespace, defaults.OperatorServiceAccountName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s ServiceAccount: %s", defaults.OperatorServiceAccountName, err) - } - }) - - k.Log("🚀 Creating Cluster roles...") - if _, err := k.client.CreateClusterRole(ctx, k.NewClusterRole(defaults.AgentClusterRoleName), metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteClusterRole(ctx, defaults.AgentClusterRoleName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s ClusterRole: %s", defaults.AgentClusterRoleName, err) - } - }) - - if _, err := k.client.CreateClusterRoleBinding(ctx, k.NewClusterRoleBinding(defaults.AgentClusterRoleName), metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteClusterRoleBinding(ctx, defaults.AgentClusterRoleName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s ClusterRoleBinding: %s", defaults.AgentClusterRoleName, err) - } - }) - - if _, err := k.client.CreateClusterRole(ctx, k.NewClusterRole(defaults.OperatorClusterRoleName), metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteClusterRole(ctx, defaults.OperatorClusterRoleName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s ClusterRole: %s", defaults.OperatorClusterRoleName, err) - } - }) - - if _, err := k.client.CreateClusterRoleBinding(ctx, k.NewClusterRoleBinding(defaults.OperatorClusterRoleName), metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteClusterRoleBinding(ctx, defaults.OperatorClusterRoleName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s ClusterRoleBinding: %s", defaults.OperatorClusterRoleName, err) - } - }) - - if k.params.Encryption == encryptionIPsec { - // TODO(aanm) automate this as well in form of helm chart - if err := k.createEncryptionSecret(ctx); err != nil { - return err - } - } - - ingressClass := k.generateIngressClass() - if ingressClass != nil { - if _, err := k.client.CreateIngressClass(ctx, ingressClass, metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteIngressClass(ctx, defaults.IngressClassName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s IngressClass: %s", defaults.IngressClassName, err) - } - }) - } - - ingressService := k.generateIngressService() - if ingressService != nil { - 
if _, err := k.client.CreateService(ctx, ingressService.GetNamespace(), ingressService, metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteService(ctx, ingressService.GetNamespace(), ingressService.GetName(), metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s Ingress Service: %s.%s", ingressService.GetNamespace(), ingressService.GetName(), err) - } - }) - } - - ingressEndpoint := k.generateIngressEndpoint() - if ingressEndpoint != nil { - if _, err := k.client.CreateEndpoints(ctx, ingressService.GetNamespace(), ingressEndpoint, metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteEndpoints(ctx, ingressEndpoint.GetNamespace(), ingressEndpoint.GetName(), metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s Ingress Endpoint: %s.%s", ingressEndpoint.GetNamespace(), ingressEndpoint.GetName(), err) - } - }) - } - - secretsNamespace := k.getSecretNamespace() - if len(secretsNamespace) != 0 { - namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: secretsNamespace}} - if _, err := k.client.CreateNamespace(ctx, namespace, metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteNamespace(ctx, secretsNamespace, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s Namespace: %s", secretsNamespace, err) - } - }) - } - - for _, roleName := range []string{defaults.AgentSecretsRoleName, defaults.OperatorSecretsRoleName} { - rs := k.NewRole(roleName) - - for _, r := range rs { - _, err = k.client.CreateRole(ctx, r.GetNamespace(), r, metav1.CreateOptions{}) - if apierrors.IsAlreadyExists(err) { - _, err = k.client.UpdateRole(ctx, r.GetNamespace(), r, metav1.UpdateOptions{}) - } - - if err != nil { - return err - } - - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteRole(ctx, r.GetNamespace(), r.GetName(), metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s Role: %s", r.GetName(), err) - } - }) - } - - rbs := k.NewRoleBinding(roleName) - for _, rb := range rbs { - _, err := k.client.CreateRoleBinding(ctx, rb.GetNamespace(), rb, metav1.CreateOptions{}) - if apierrors.IsAlreadyExists(err) { - _, err = k.client.UpdateRoleBinding(ctx, rb.GetNamespace(), rb, metav1.UpdateOptions{}) - } - if err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteRoleBinding(ctx, rb.GetNamespace(), rb.GetName(), metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s RoleBinding: %s/%s", rb.GetNamespace(), rb.GetName(), err) - } - }) - } - } - - configMap, err := k.generateConfigMap() - if err != nil { - return fmt.Errorf("cannot generate ConfigMap: %w", err) - } - - if _, err := k.client.CreateConfigMap(ctx, k.params.Namespace, configMap, metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteConfigMap(ctx, k.params.Namespace, defaults.ConfigMapName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s ConfigMap: %s", defaults.ConfigMapName, err) - } - }) - - // Create the node-init daemonset if one is required for the current kind. 
- if needsNodeInit(k.flavor.Kind, k.chartVersion) { - k.Log("🚀 Creating %s Node Init DaemonSet...", k.flavor.Kind.String()) - ds := k.generateNodeInitDaemonSet(k.flavor.Kind) - if _, err := k.client.CreateDaemonSet(ctx, k.params.Namespace, ds, metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteDaemonSet(ctx, k.params.Namespace, ds.Name, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s DaemonSet: %s", ds.Name, err) - } - }) - } - - k.Log("🚀 Creating Agent DaemonSet...") - if _, err := k.client.CreateDaemonSet(ctx, k.params.Namespace, k.generateAgentDaemonSet(), metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteDaemonSet(ctx, k.params.Namespace, defaults.AgentDaemonSetName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s DaemonSet: %s", defaults.AgentDaemonSetName, err) - } - }) - - k.Log("🚀 Creating Operator Deployment...") - if _, err := k.client.CreateDeployment(ctx, k.params.Namespace, k.generateOperatorDeployment(), metav1.CreateOptions{}); err != nil { - return err - } - k.pushRollbackStep(func(ctx context.Context) { - if err := k.client.DeleteDeployment(ctx, k.params.Namespace, defaults.OperatorDeploymentName, metav1.DeleteOptions{}); err != nil { - k.Log("Cannot delete %s Deployment: %s", defaults.OperatorDeploymentName, err) - } - }) - - if k.params.Wait || k.params.RestartUnmanagedPods { - // In case unmanaged pods should be restarted we need to make sure that Cilium - // DaemonSet is up and running to guarantee the CNI configuration and binary - // are deployed on the node. See https://github.com/cilium/cilium/issues/14128 - // for details. - k.Log("⌛ Waiting for Cilium to be installed and ready...") - collector, err := status.NewK8sStatusCollector(k.client, status.K8sStatusParameters{ - Namespace: k.params.Namespace, - Wait: true, - WaitDuration: k.params.WaitDuration, - WarningFreePods: []string{defaults.AgentDaemonSetName, defaults.OperatorDeploymentName}, - }) - if err != nil { - return err - } - - s, err := collector.Status(ctx) - if err != nil { - fmt.Print(s.Format()) - return err - } - } - - if k.params.RestartUnmanagedPods { - if err := k.restartUnmanagedPods(ctx); err != nil { - return err - } - } - - k.Log("✅ Cilium was successfully installed! Run 'cilium status' to view installation health") - - return nil -} - -type rollbackStep func(context.Context) - -func (k *K8sInstaller) pushRollbackStep(step rollbackStep) { - // Prepend the step to the steps slice so that, in case rollback is - // performed, steps are rolled back in the reverse order - k.rollbackSteps = append([]rollbackStep{step}, k.rollbackSteps...) 
-} - -func (k *K8sInstaller) RollbackInstallation(ctx context.Context) { - if !k.params.Rollback { - k.Log("ℹ️ Rollback disabled with '--rollback=false', leaving installed resources behind") - return - } - k.Log("↩️ Rolling back installation...") - - for _, r := range k.rollbackSteps { - r(ctx) - } -} - func (k *K8sInstaller) InstallWithHelm(ctx context.Context, k8sClient *k8s.Client) error { if k.params.ListVersions { return k.listVersions() diff --git a/install/node_init.go b/install/node_init.go index 193d05754d..95c119353e 100644 --- a/install/node_init.go +++ b/install/node_init.go @@ -5,12 +5,9 @@ package install import ( "github.com/blang/semver/v4" - appsv1 "k8s.io/api/apps/v1" - "github.com/cilium/cilium/pkg/versioncheck" "github.com/cilium/cilium-cli/k8s" - yamlUtils "github.com/cilium/cilium-cli/utils/yaml" ) func needsNodeInit(k k8s.Kind, version semver.Version) bool { @@ -25,22 +22,3 @@ func needsNodeInit(k k8s.Kind, version semver.Version) bool { } return false } - -func (k *K8sInstaller) generateNodeInitDaemonSet(_ k8s.Kind) *appsv1.DaemonSet { - var ( - dsFileName string - ) - - switch { - case versioncheck.MustCompile(">1.10.99")(k.chartVersion): - dsFileName = "templates/cilium-nodeinit/daemonset.yaml" - case versioncheck.MustCompile(">=1.9.0")(k.chartVersion): - dsFileName = "templates/cilium-nodeinit-daemonset.yaml" - } - - dsFile := k.manifests[dsFileName] - - var ds appsv1.DaemonSet - yamlUtils.MustUnmarshal([]byte(dsFile), &ds) - return &ds -} diff --git a/install/rbac.go b/install/rbac.go deleted file mode 100644 index 0ceab04a41..0000000000 --- a/install/rbac.go +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package install - -import ( - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - - "github.com/cilium/cilium/pkg/versioncheck" - - "github.com/cilium/cilium-cli/defaults" - yamlUtils "github.com/cilium/cilium-cli/utils/yaml" -) - -func (k *K8sInstaller) NewServiceAccount(name string) *corev1.ServiceAccount { - var ( - saFileName string - ) - - switch { - case versioncheck.MustCompile(">1.10.99")(k.chartVersion): - switch name { - case defaults.AgentServiceAccountName: - saFileName = "templates/cilium-agent/serviceaccount.yaml" - case defaults.OperatorServiceAccountName: - saFileName = "templates/cilium-operator/serviceaccount.yaml" - } - case versioncheck.MustCompile(">=1.9.0")(k.chartVersion): - switch name { - case defaults.AgentServiceAccountName: - saFileName = "templates/cilium-agent-serviceaccount.yaml" - case defaults.OperatorServiceAccountName: - saFileName = "templates/cilium-operator-serviceaccount.yaml" - } - } - - saFile := k.manifests[saFileName] - - var sa corev1.ServiceAccount - yamlUtils.MustUnmarshal([]byte(saFile), &sa) - return &sa -} - -func (k *K8sInstaller) NewClusterRole(name string) *rbacv1.ClusterRole { - var ( - crFileName string - ) - - switch { - case versioncheck.MustCompile(">1.10.99")(k.chartVersion): - switch name { - case defaults.AgentServiceAccountName: - crFileName = "templates/cilium-agent/clusterrole.yaml" - case defaults.OperatorServiceAccountName: - crFileName = "templates/cilium-operator/clusterrole.yaml" - } - case versioncheck.MustCompile(">=1.9.0")(k.chartVersion): - switch name { - case defaults.AgentServiceAccountName: - crFileName = "templates/cilium-agent-clusterrole.yaml" - case defaults.OperatorServiceAccountName: - crFileName = "templates/cilium-operator-clusterrole.yaml" - } - } - 
crFile := k.manifests[crFileName] - - var cr rbacv1.ClusterRole - yamlUtils.MustUnmarshal([]byte(crFile), &cr) - return &cr -} - -func (k *K8sInstaller) NewClusterRoleBinding(crbName string) *rbacv1.ClusterRoleBinding { - var ( - crbFileName string - ) - - switch { - case versioncheck.MustCompile(">1.10.99")(k.chartVersion): - switch crbName { - case defaults.AgentClusterRoleName: - crbFileName = "templates/cilium-agent/clusterrolebinding.yaml" - case defaults.OperatorClusterRoleName: - crbFileName = "templates/cilium-operator/clusterrolebinding.yaml" - } - case versioncheck.MustCompile(">=1.9.0")(k.chartVersion): - switch crbName { - case defaults.AgentClusterRoleName: - crbFileName = "templates/cilium-agent-clusterrolebinding.yaml" - case defaults.OperatorClusterRoleName: - crbFileName = "templates/cilium-operator-clusterrolebinding.yaml" - } - } - - crbFile := k.manifests[crbFileName] - - var crb rbacv1.ClusterRoleBinding - yamlUtils.MustUnmarshal([]byte(crbFile), &crb) - return &crb -} - -func (k *K8sInstaller) NewRole(name string) []*rbacv1.Role { - var ( - roleFileName string - ) - - switch { - case versioncheck.MustCompile(">1.11.99")(k.chartVersion): - switch name { - case defaults.AgentSecretsRoleName: - roleFileName = "templates/cilium-agent/role.yaml" - case defaults.OperatorSecretsRoleName: - roleFileName = "templates/cilium-operator/role.yaml" - } - } - - rFile, exists := k.manifests[roleFileName] - if !exists { - return nil - } - - roles := yamlUtils.MustUnmarshalMulti[*rbacv1.Role]([]byte(rFile)) - out := []*rbacv1.Role{} - for _, role := range roles { - if role != nil { - out = append(out, role) - } - } - return out -} - -func (k *K8sInstaller) NewRoleBinding(crbName string) []*rbacv1.RoleBinding { - var ( - rbFileName string - ) - - switch { - case versioncheck.MustCompile(">1.11.99")(k.chartVersion): - switch crbName { - case defaults.AgentSecretsRoleName: - rbFileName = "templates/cilium-agent/rolebinding.yaml" - case defaults.OperatorSecretsRoleName: - rbFileName = "templates/cilium-operator/rolebinding.yaml" - } - } - - rbFile, exists := k.manifests[rbFileName] - if !exists { - return nil - } - - rbs := yamlUtils.MustUnmarshalMulti[*rbacv1.RoleBinding]([]byte(rbFile)) - out := []*rbacv1.RoleBinding{} - for _, rb := range rbs { - if rb != nil { - out = append(out, rb) - } - } - return out -} diff --git a/install/testdata/kind.yaml b/install/testdata/kind.yaml index 60c537909a..91f2f34bf5 100644 --- a/install/testdata/kind.yaml +++ b/install/testdata/kind.yaml @@ -2,8 +2,3 @@ ipam: mode: kubernetes operator: replicas: 1 -serviceAccounts: - cilium: - name: cilium - operator: - name: cilium-operator diff --git a/install/uninstall.go b/install/uninstall.go index 4f3c3eccd9..e0e76aacff 100644 --- a/install/uninstall.go +++ b/install/uninstall.go @@ -10,16 +10,11 @@ import ( "strings" "time" - "github.com/blang/semver/v4" - "github.com/cilium/workerpool" "helm.sh/helm/v3/pkg/action" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "github.com/cilium/cilium-cli/clustermesh" "github.com/cilium/cilium-cli/defaults" - "github.com/cilium/cilium-cli/internal/utils" - "github.com/cilium/cilium-cli/k8s" ) type UninstallParameters struct { @@ -35,34 +30,15 @@ type UninstallParameters struct { } type K8sUninstaller struct { - client k8sInstallerImplementation - params UninstallParameters - flavor k8s.Flavor - version semver.Version + client k8sInstallerImplementation + params UninstallParameters } func 
NewK8sUninstaller(client k8sInstallerImplementation, p UninstallParameters) *K8sUninstaller { - uninstaller := &K8sUninstaller{ + return &K8sUninstaller{ client: client, params: p, } - - // Version detection / validation is unnecessary in Helm mode. - if utils.IsInHelmMode() { - return uninstaller - } - - ciliumVersion, err := client.GetRunningCiliumVersion(context.Background(), p.Namespace) - if err != nil { - uninstaller.Log("Error getting Cilium Version: %s", err) - } - version, err := semver.ParseTolerant(ciliumVersion) - if err != nil { - uninstaller.Log("Error parsing Cilium Version: %s", err) - } else { - uninstaller.version = version - } - return uninstaller } func (k *K8sUninstaller) Log(format string, a ...interface{}) { @@ -92,125 +68,3 @@ func (k *K8sUninstaller) undoAwsNodeNodeSelector(ctx context.Context) error { } return err } - -func (k *K8sUninstaller) Uninstall(ctx context.Context) error { - k.autodetect(ctx) - - k.Log("🔥 Enabling CNI cleanup...") - k.enableCNIUninstall(ctx) - k.Log("🔥 Deleting agent DaemonSet...") - k.client.DeleteDaemonSet(ctx, k.params.Namespace, defaults.AgentDaemonSetName, metav1.DeleteOptions{}) - // We need to wait for daemonset to be deleted before proceeding with further cleanups - // as pods' daemonsets might still need to contact API Server, for example to remove node annotations. - if k.params.Wait { - k.Log("⌛ Waiting for agent DaemonSet to be uninstalled...") - err := k.waitForPodsToBeDeleted(ctx) - if err != nil { - k.Log("❌ Error while waiting for deletion of agent DaemonSet: %v", err) - } else { - k.Log("🔥 Agent DaemonSet deleted successfully...") - } - } - k.Log("🔥 Deleting operator Deployment...") - k.client.DeleteDeployment(ctx, k.params.Namespace, defaults.OperatorDeploymentName, metav1.DeleteOptions{}) - k.Log("🔥 Deleting %s namespace...", defaults.IngressSecretsNamespace) - k.client.DeleteNamespace(ctx, defaults.IngressSecretsNamespace, metav1.DeleteOptions{}) - k.Log("🔥 Deleting ConfigMap...") - k.client.DeleteConfigMap(ctx, k.params.Namespace, defaults.ConfigMapName, metav1.DeleteOptions{}) - k.Log("🔥 Deleting Roles...") - k.client.DeleteRole(ctx, k.params.Namespace, defaults.AgentConfigRoleName, metav1.DeleteOptions{}) - k.client.DeleteRoleBinding(ctx, k.params.Namespace, defaults.AgentConfigRoleName, metav1.DeleteOptions{}) - k.Log("🔥 Deleting Cluster roles...") - k.client.DeleteClusterRole(ctx, defaults.AgentClusterRoleName, metav1.DeleteOptions{}) - k.client.DeleteClusterRoleBinding(ctx, defaults.AgentClusterRoleName, metav1.DeleteOptions{}) - k.client.DeleteClusterRole(ctx, defaults.OperatorClusterRoleName, metav1.DeleteOptions{}) - k.client.DeleteClusterRoleBinding(ctx, defaults.OperatorClusterRoleName, metav1.DeleteOptions{}) - k.Log("🔥 Deleting IngressClass...") - k.client.DeleteIngressClass(ctx, defaults.IngressClassName, metav1.DeleteOptions{}) - k.Log("🔥 Deleting Ingress Service...") - k.client.DeleteService(ctx, k.params.Namespace, defaults.IngressService, metav1.DeleteOptions{}) - k.Log("🔥 Deleting Ingress Endpoints...") - k.client.DeleteEndpoints(ctx, k.params.Namespace, defaults.IngressService, metav1.DeleteOptions{}) - k.client.DeleteService(ctx, k.params.Namespace, defaults.IngressService, metav1.DeleteOptions{}) - k.Log("🔥 Deleting Ingress Secret Namespace...") - k.client.DeleteNamespace(ctx, defaults.IngressSecretsNamespace, metav1.DeleteOptions{}) - - k.Log("🔥 Deleting Service accounts...") - k.client.DeleteServiceAccount(ctx, k.params.Namespace, 
defaults.AgentServiceAccountName, metav1.DeleteOptions{}) - k.client.DeleteServiceAccount(ctx, k.params.Namespace, defaults.OperatorServiceAccountName, metav1.DeleteOptions{}) - - clustermesh.NewK8sClusterMesh(k.client, clustermesh.Parameters{ - Namespace: k.params.Namespace, - Writer: k.params.Writer, - }).Disable(ctx) - - k.Log("🔥 Deleting certificates...") - k.uninstallCerts(ctx) - - switch k.flavor.Kind { - case k8s.KindEKS: - k.undoAwsNodeNodeSelector(ctx) - case k8s.KindGKE: - k.Log("🔥 Deleting resource quotas...") - k.client.DeleteResourceQuota(ctx, k.params.Namespace, defaults.AgentResourceQuota, metav1.DeleteOptions{}) - k.client.DeleteResourceQuota(ctx, k.params.Namespace, defaults.OperatorResourceQuota, metav1.DeleteOptions{}) - } - - if needsNodeInit(k.flavor.Kind, k.version) { - k.Log("🔥 Deleting node init daemonset...") - k.client.DeleteDaemonSet(ctx, k.params.Namespace, defaults.NodeInitDaemonSetName, metav1.DeleteOptions{}) - } - - k.Log("🔥 Deleting secret with the helm values configuration...") - k.client.DeleteSecret(ctx, k.params.Namespace, k.params.HelmValuesSecretName, metav1.DeleteOptions{}) - - k.Log("✅ Cilium was successfully uninstalled.") - - return nil -} - -func (k *K8sUninstaller) waitForPodsToBeDeleted(ctx context.Context) error { - for { - pods, err := k.client.ListPods(ctx, k.params.Namespace, metav1.ListOptions{LabelSelector: defaults.AgentPodSelector}) - if err != nil { - return err - } - - if len(pods.Items) > 0 { - select { - case <-ctx.Done(): - return fmt.Errorf("timeout waiting for pod deletion") - case <-time.After(defaults.WaitRetryInterval): - } - } else { - return nil - } - } -} - -func (k *K8sUninstaller) enableCNIUninstall(ctx context.Context) { - pods, err := k.client.ListPods(ctx, k.params.Namespace, metav1.ListOptions{LabelSelector: defaults.AgentPodSelector}) - if err != nil { - k.Log("❌ Failed to enable cni cleanup: %v", err) - return - } - wp := workerpool.NewWithContext(ctx, k.params.WorkerCount) - defer wp.Close() - - for _, pod := range pods.Items { - pod := pod - wp.Submit(pod.Name, func(ctx context.Context) error { - _, err := k.client.ExecInPod(ctx, pod.Namespace, pod.Name, defaults.AgentContainerName, - []string{ - "/bin/sh", - "-c", - "echo -n true > /tmp/cilium/config-map/cni-uninstall || true", - }) - if err != nil { - k.Log("❌ Failed to enable cni cleanup in pod %s: %v", pod.Name, err) - } - return nil - }) - } - wp.Drain() -} diff --git a/install/upgrade.go b/install/upgrade.go index fbbecbc696..6f6c1dccf3 100644 --- a/install/upgrade.go +++ b/install/upgrade.go @@ -6,154 +6,16 @@ package install import ( "context" "fmt" - "strings" "helm.sh/helm/v3/pkg/cli" "helm.sh/helm/v3/pkg/getter" - appsv1 "k8s.io/api/apps/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/yaml" "github.com/cilium/cilium-cli/defaults" "github.com/cilium/cilium-cli/internal/helm" "github.com/cilium/cilium-cli/k8s" - "github.com/cilium/cilium-cli/status" ) -func (k *K8sInstaller) Upgrade(ctx context.Context) error { - k.autodetect(ctx) - - if err := k.detectDatapathMode(); err != nil { - return err - } - - daemonSet, err := k.client.GetDaemonSet(ctx, k.params.Namespace, defaults.AgentDaemonSetName, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("unable to retrieve DaemonSet of cilium-agent: %s", err) - } - - deployment, err := k.client.GetDeployment(ctx, k.params.Namespace, defaults.OperatorDeploymentName, 
metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("unable to retrieve Deployment of cilium-operator: %s", err) - } - - var patched int - - if err = upgradeDeployment(ctx, k, upgradeDeploymentParams{ - deployment: deployment, - imageExcludeDigest: k.fqOperatorImage(), - containerName: defaults.OperatorContainerName, - }, &patched); err != nil { - return err - } - - agentImage := k.fqAgentImage() - var containerPatches []string - for _, c := range daemonSet.Spec.Template.Spec.Containers { - if c.Image != agentImage { - containerPatches = append(containerPatches, `{"name":"`+c.Name+`", "image":"`+agentImage+`"}`) - } - } - var initContainerPatches []string - for _, c := range daemonSet.Spec.Template.Spec.InitContainers { - if c.Image != agentImage { - initContainerPatches = append(initContainerPatches, `{"name":"`+c.Name+`", "image":"`+agentImage+`"}`) - } - } - - if len(containerPatches) == 0 && len(initContainerPatches) == 0 { - k.Log("✅ Cilium is already up to date") - } else { - k.Log("🚀 Upgrading cilium to version %s...", k.fqAgentImage()) - - patch := []byte(`{"spec":{"template":{"spec":{"containers":[` + strings.Join(containerPatches, ",") + `], "initContainers":[` + strings.Join(initContainerPatches, ",") + `]}}}}`) - _, err = k.client.PatchDaemonSet(ctx, k.params.Namespace, defaults.AgentDaemonSetName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) - if err != nil { - return fmt.Errorf("unable to patch DaemonSet %s with patch %q: %w", defaults.AgentDaemonSetName, patch, err) - } - - patched++ - } - - hubbleRelayDeployment, err := k.client.GetDeployment(ctx, k.params.Namespace, defaults.RelayDeploymentName, metav1.GetOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return fmt.Errorf("unable to retrieve Deployment of %s: %w", defaults.RelayDeploymentName, err) - } - - if err == nil { // only update if hubble relay deployment was found on the cluster - if err = upgradeDeployment(ctx, k, upgradeDeploymentParams{ - deployment: hubbleRelayDeployment, - imageExcludeDigest: k.fqRelayImage(), - containerName: defaults.RelayContainerName, - }, &patched); err != nil { - return err - } - } - - clustermeshAPIServerDeployment, err := k.client.GetDeployment(ctx, k.params.Namespace, defaults.ClusterMeshDeploymentName, metav1.GetOptions{}) - if err != nil && !k8serrors.IsNotFound(err) { - return fmt.Errorf("unable to retrieve Deployment of %s: %w", defaults.ClusterMeshDeploymentName, err) - } - - if err == nil { // only update clustermesh-apiserver if deployment was found on the cluster - if err = upgradeDeployment(ctx, k, upgradeDeploymentParams{ - deployment: clustermeshAPIServerDeployment, - imageExcludeDigest: k.fqClusterMeshAPIImage(), - containerName: defaults.ClusterMeshContainerName, - }, &patched); err != nil { - return err - } - } - - if patched > 0 && k.params.Wait { - k.Log("⌛ Waiting for Cilium to be upgraded...") - collector, err := status.NewK8sStatusCollector(k.client, status.K8sStatusParameters{ - Namespace: k.params.Namespace, - Wait: true, - WaitDuration: k.params.WaitDuration, - WarningFreePods: []string{defaults.AgentDaemonSetName, defaults.OperatorDeploymentName, defaults.RelayDeploymentName, defaults.ClusterMeshDeploymentName}, - }) - if err != nil { - return err - } - - s, err := collector.Status(ctx) - if err != nil { - fmt.Print(s.Format()) - return err - } - } - - return nil -} - -type upgradeDeploymentParams struct { - deployment *appsv1.Deployment - imageExcludeDigest string - containerName string -} - -func upgradeDeployment(ctx 
context.Context, k *K8sInstaller, params upgradeDeploymentParams, patched *int) error { - if params.deployment.Spec.Template.Spec.Containers[0].Image == params.imageExcludeDigest { - k.Log("✅ %s is already up to date", params.deployment.Name) - return nil - } - - k.Log("🚀 Upgrading %s to version %s...", params.deployment.Name, params.imageExcludeDigest) - containerPath := fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name": "%s", "image":"`, params.containerName) - patch := []byte(containerPath + params.imageExcludeDigest + `"}]}}}}`) - - _, err := k.client.PatchDeployment(ctx, k.params.Namespace, params.deployment.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) - if err != nil { - return fmt.Errorf("unable to patch Deployment %s with patch %q: %w", params.deployment.Name, patch, err) - } - - *patched++ - return nil -} - func (k *K8sInstaller) UpgradeWithHelm(ctx context.Context, k8sClient *k8s.Client) error { if k.params.ListVersions { return k.listVersions() diff --git a/internal/cli/cmd/cmd.go b/internal/cli/cmd/cmd.go index 913a812a55..41c55972ff 100644 --- a/internal/cli/cmd/cmd.go +++ b/internal/cli/cmd/cmd.go @@ -9,7 +9,6 @@ import ( "github.com/spf13/cobra" - "github.com/cilium/cilium-cli/internal/utils" "github.com/cilium/cilium-cli/k8s" ) @@ -88,20 +87,10 @@ cilium connectivity test`, newCmdStatus(), newCmdSysdump(hooks), newCmdVersion(), + newCmdInstallWithHelm(), + newCmdUninstallWithHelm(), + newCmdUpgradeWithHelm(), ) - if utils.IsInHelmMode() { - cmd.AddCommand( - newCmdInstallWithHelm(), - newCmdUninstallWithHelm(), - newCmdUpgradeWithHelm(), - ) - } else { - cmd.AddCommand( - newCmdInstall(), - newCmdUninstall(), - newCmdUpgrade(), - ) - } cmd.SetOut(os.Stdout) cmd.SetErr(os.Stderr) diff --git a/internal/cli/cmd/install.go b/internal/cli/cmd/install.go index e7f0aa75f2..53032bbbe9 100644 --- a/internal/cli/cmd/install.go +++ b/internal/cli/cmd/install.go @@ -8,215 +8,15 @@ import ( "fmt" "io" "os" - "runtime" - "strings" - "time" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/cilium/cilium-cli/connectivity/check" "github.com/cilium/cilium-cli/defaults" - "github.com/cilium/cilium-cli/hubble" "github.com/cilium/cilium-cli/install" ) -func newCmdInstall() *cobra.Command { - var params = install.Parameters{Writer: os.Stdout} - - cmd := &cobra.Command{ - Use: "install", - Short: "Install Cilium in a Kubernetes cluster", - Long: `Install Cilium in a Kubernetes cluster - -Examples: -# Install Cilium in current Kubernetes context with default parameters -cilium install - -# Install Cilium into Kubernetes context "kind-cluster1" and also set cluster -# name and ID to prepare for multi-cluster capabilties. 
-cilium install --context kind-cluster1 --cluster-id 1 --cluster-name cluster1 -`, - RunE: func(cmd *cobra.Command, _ []string) error { - params.Namespace = namespace - - cmd.Flags().Visit(func(f *pflag.Flag) { - if f.Name == "helm-set" && strings.Contains(f.Value.String(), "kubeProxyReplacement") { - params.UserSetKubeProxyReplacement = true - } - }) - - installer, err := install.NewK8sInstaller(k8sClient, params) - if err != nil { - return err - } - cmd.SilenceUsage = true - if err := installer.Install(context.Background()); err != nil { - installer.RollbackInstallation(context.Background()) - - fatalf("Unable to install Cilium: %s", err) - } - return nil - }, - } - - addCommonInstallFlags(cmd, &params) - addCommonHelmFlags(cmd, &params) - cmd.Flags().StringSliceVar(&params.DisableChecks, "disable-check", []string{}, "Disable a particular validation check") - // It can be deprecated since we have a helm option for it - cmd.Flags().StringVar(&params.IPAM, "ipam", "", "IP Address Management (IPAM) mode") - cmd.Flags().MarkDeprecated("ipam", "IPAM mode is autodetected depending on `datapath-mode`. If needed, this can now be overridden via `--set` (Helm value: `ipam.mode`).") - // It can be deprecated since we have a helm option for it - cmd.Flags().StringVar(&params.IPv4NativeRoutingCIDR, "ipv4-native-routing-cidr", "", "IPv4 CIDR within which native routing is possible") - cmd.Flags().MarkDeprecated("ipv4-native-routing-cidr", "This can now be overridden via `--set` (Helm value: `ipv4NativeRoutingCIDR`).") - // It can be deprecated since we have a helm option for it - cmd.Flags().IntVar(&params.ClusterID, "cluster-id", 0, "Unique cluster identifier for multi-cluster") - cmd.Flags().MarkDeprecated("cluster-id", "This can now be overridden via `--set` (Helm value: `cluster.id`).") - cmd.Flags().StringVar(&params.InheritCA, "inherit-ca", "", "Inherit/import CA from another cluster") - cmd.Flags().BoolVar(&params.RestartUnmanagedPods, "restart-unmanaged-pods", true, "Restart pods which are not being managed by Cilium") - cmd.Flags().StringVar(&params.Encryption, "encryption", "disabled", "Enable encryption of all workloads traffic { disabled | ipsec | wireguard }") - // It can be deprecated since we have a helm option for it - cmd.Flags().BoolVar(&params.NodeEncryption, "node-encryption", false, "Enable encryption of all node to node traffic") - // It can be deprecated since we have a helm option for it - cmd.Flags().StringSliceVar(&params.ConfigOverwrites, "config", []string{}, "Set ConfigMap entries { key=value[,key=value,..] }") - // It can be deprecated since we have a helm option for it - cmd.Flags().StringVar(&params.AgentImage, "agent-image", "", "Image path to use for Cilium agent") - // It can be deprecated since we have a helm option for it - cmd.Flags().StringVar(&params.OperatorImage, "operator-image", "", "Image path to use for Cilium operator") - cmd.Flags().DurationVar(&params.CiliumReadyTimeout, "cilium-ready-timeout", 5*time.Minute, - "Timeout for Cilium to become ready before restarting unmanaged pods") - cmd.Flags().BoolVar(&params.Rollback, "rollback", true, "Roll back installed resources on failure") - - // It can be deprecated since we have a helm option for it - cmd.Flags().StringVar(&params.Azure.ResourceGroupName, "azure-resource-group", "", "Azure resource group name the cluster is in (required)") - cmd.Flags().StringVar(&params.Azure.AKSNodeResourceGroup, "azure-node-resource-group", "", "Azure node resource group name the cluster is in. 
Bypasses `--azure-resource-group` if provided.") - cmd.Flags().MarkHidden("azure-node-resource-group") // intended for development purposes, notably CI usage, cf. azure.go - cmd.Flags().StringVar(&params.Azure.SubscriptionName, "azure-subscription", "", "Azure subscription name the cluster is in (default `az account show`)") - // It can be deprecated since we have a helm option for it - cmd.Flags().StringVar(&params.Azure.SubscriptionID, "azure-subscription-id", "", "Azure subscription ID. Bypasses auto-detection and `--azure-subscription` if provided.") - cmd.Flags().MarkHidden("azure-subscription-id") // intended for development purposes, notably CI usage, cf. azure.go - // It can be deprecated since we have a helm option for it - cmd.Flags().StringVar(&params.Azure.TenantID, "azure-tenant-id", "", "Tenant ID of Azure Service Principal to use for installing Cilium (will create one if none provided)") - // It can be deprecated since we have a helm option for it - cmd.Flags().StringVar(&params.Azure.ClientID, "azure-client-id", "", "Client (application) ID of Azure Service Principal to use for installing Cilium (will create one if none provided)") - // It can be deprecated since we have a helm option for it - cmd.Flags().StringVar(&params.Azure.ClientSecret, "azure-client-secret", "", "Client secret of Azure Service Principal to use for installing Cilium (will create one if none provided)") - cmd.Flags().StringVar(&params.K8sVersion, "k8s-version", "", "Kubernetes server version in case auto-detection fails") - - cmd.Flags().StringVar(&params.HelmGenValuesFile, "helm-auto-gen-values", "", "Write an auto-generated helm values into this file") - cmd.Flags().StringVar(&params.HelmValuesSecretName, "helm-values-secret-name", defaults.HelmValuesSecretName, "Secret name to store the auto-generated helm values file. The namespace is the same as where Cilium will be installed") - cmd.Flags().StringSliceVar(&params.APIVersions, "api-versions", []string{}, "Kubernetes API versions to use for helm's Capabilities.APIVersions in case discovery fails") - cmd.Flags().StringVar(&params.ImageSuffix, "image-suffix", "", "Set all generated images with this suffix") - cmd.Flags().StringVar(&params.ImageTag, "image-tag", "", "Set all images with this tag") - - for flagName := range install.FlagsToHelmOpts { - // TODO(aanm) Do not mark the flags as deprecated for now. 
- // msg := fmt.Sprintf("use --helm-set=%s<=value> instead", helmOpt) - // err := cmd.Flags().MarkDeprecated(flagName, msg) - // if err != nil { - // panic(err) - // } - install.FlagValues[flagName] = cmd.Flags().Lookup(flagName).Value - } - install.FlagValues["config"] = cmd.Flags().Lookup("config").Value - - return cmd -} - -func newCmdUninstall() *cobra.Command { - var params = install.UninstallParameters{Writer: os.Stdout} - - cmd := &cobra.Command{ - Use: "uninstall", - Short: "Uninstall Cilium", - Long: ``, - RunE: func(_ *cobra.Command, _ []string) error { - params.Namespace = namespace - ctx := context.Background() - - cc, err := check.NewConnectivityTest(k8sClient, check.Parameters{ - CiliumNamespace: namespace, - TestNamespace: params.TestNamespace, - FlowValidation: check.FlowValidationModeDisabled, - Writer: os.Stdout, - }, version) - if err != nil { - fmt.Printf("⚠️ Failed to initialize connectivity test uninstaller: %s\n", err) - } else { - cc.UninstallResources(ctx, params.Wait) - } - - h, err := hubble.NewK8sHubble(ctx, - k8sClient, hubble.Parameters{ - Namespace: params.Namespace, - HelmValuesSecretName: params.HelmValuesSecretName, - RedactHelmCertKeys: params.RedactHelmCertKeys, - Writer: params.Writer, - HelmChartDirectory: params.HelmChartDirectory, - }) - if err != nil { - fmt.Printf("⚠️ Failed to initialize Hubble uninstaller: %s\n", err) - } else if err = h.Disable(ctx, true); err != nil { - fmt.Printf("ℹ️ Failed to disable Hubble: %s\n", err) - } - uninstaller := install.NewK8sUninstaller(k8sClient, params) - if err := uninstaller.Uninstall(context.Background()); err != nil { - fatalf("Unable to uninstall Cilium: %s", err) - } - return nil - }, - } - - addCommonUninstallFlags(cmd, &params) - cmd.Flags().StringVar(&params.HelmChartDirectory, "chart-directory", "", "Helm chart directory") - cmd.Flags().StringVar(&params.HelmValuesSecretName, "helm-values-secret-name", defaults.HelmValuesSecretName, "Secret name to store the auto-generated helm values file. The namespace is the same as where Cilium will be installed") - cmd.Flags().BoolVar(&params.RedactHelmCertKeys, "redact-helm-certificate-keys", true, "Do not print in the terminal any certificate keys generated by helm. 
(Certificates will always be stored unredacted in the secret defined by 'helm-values-secret-name')") - cmd.Flags().IntVar(&params.WorkerCount, "worker-count", runtime.NumCPU(), "Number of workers to use for parallel operations") - - return cmd -} - -func newCmdUpgrade() *cobra.Command { - var params = install.Parameters{Writer: os.Stdout} - - cmd := &cobra.Command{ - Use: "upgrade", - Short: "Upgrade Cilium in a Kubernetes cluster", - Long: fmt.Sprintf(`Upgrade Cilium in a Kubernetes cluster -Examples: -# Upgrade Cilium to the latest patch release: -cilium upgrade - -# Upgrade Cilium to a specific version -cilium upgrade --version %s -`, defaults.Version), - RunE: func(cmd *cobra.Command, _ []string) error { - params.Namespace = namespace - - installer, err := install.NewK8sInstaller(k8sClient, params) - if err != nil { - return err - } - cmd.SilenceUsage = true - if err := installer.Upgrade(context.Background()); err != nil { - fatalf("Unable to upgrade Cilium: %s", err) - } - return nil - }, - } - - cmd.Flags().StringVar(&params.Version, "version", defaults.Version, "Cilium version to install") - cmd.Flags().BoolVar(&params.Wait, "wait", true, "Wait for status to report success (no errors)") - cmd.Flags().DurationVar(&params.WaitDuration, "wait-duration", defaults.StatusWaitDuration, "Maximum time to wait for status") - cmd.Flags().StringVar(&params.AgentImage, "agent-image", "", "Image path to use for Cilium agent") - cmd.Flags().StringVar(&params.OperatorImage, "operator-image", "", "Image path to use for Cilium operator") - cmd.Flags().StringVar(&params.RelayImage, "hubble-relay-image", "", "Image path to use for Hubble Relay") - cmd.Flags().StringVar(&params.ClusterMeshAPIImage, "clustermesh-apiserver-image", "", "Image path to use for cluster mesh API server") - - return cmd -} - // addCommonInstallFlags adds install command flags that are shared between classic and helm mode. func addCommonInstallFlags(cmd *cobra.Command, params *install.Parameters) { // We can't get rid of --cluster-name until we fix https://github.com/cilium/cilium-cli/issues/1347.
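For reference, the deleted helpers in `install/rbac.go` and `install/node_init.go` above all shared one mechanism: pick a template path based on the Helm chart version, then decode the pre-rendered manifest into a typed Kubernetes object. A rough, self-contained sketch of that mechanism follows; the stub manifest string and version numbers are placeholders standing in for the real chart output, and the upstream `blang/semver` and `sigs.k8s.io/yaml` packages are used in place of Cilium's own wrappers:

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/yaml"
)

// Minimal rendered manifest, standing in for the output of `helm template`.
const nodeInitYAML = `
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cilium-node-init
  namespace: kube-system
`

func main() {
	chartVersion := semver.MustParse("1.11.0")

	// Pick the template path by chart version, as the removed helpers did.
	newLayout, _ := semver.ParseRange(">1.10.99")
	path := "templates/cilium-nodeinit-daemonset.yaml"
	if newLayout(chartVersion) {
		path = "templates/cilium-nodeinit/daemonset.yaml"
	}

	// Decode the rendered YAML into a typed object before creating it.
	var ds appsv1.DaemonSet
	if err := yaml.Unmarshal([]byte(nodeInitYAML), &ds); err != nil {
		panic(err)
	}
	fmt.Printf("would create DaemonSet %q rendered from %s\n", ds.Name, path)
}
```

In `helm` mode this bookkeeping is no longer needed, since Helm renders and applies the chart templates itself.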
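With the classic code paths removed, `cilium install`, `cilium uninstall`, and `cilium upgrade` are always routed through the Helm-based commands, which drive the Helm SDK (`helm.sh/helm/v3/pkg/action`) rather than creating and deleting individual objects. As a rough sketch only, assuming a release named `cilium` in the `kube-system` namespace, a minimal Helm-SDK uninstall looks like the following; the CLI's own `UninstallWithHelm` builds on the same action package with additional waiting and cleanup logic:

```go
package main

import (
	"log"
	"os"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/cli"
)

func main() {
	settings := cli.New()

	// Point the Helm action configuration at the namespace holding the release.
	cfg := new(action.Configuration)
	if err := cfg.Init(settings.RESTClientGetter(), "kube-system", os.Getenv("HELM_DRIVER"), log.Printf); err != nil {
		log.Fatal(err)
	}

	// Uninstalling the release removes everything the chart created, which is
	// why the hand-written per-resource deletion above is no longer required.
	uninstall := action.NewUninstall(cfg)
	res, err := uninstall.Run("cilium")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uninstalled release %s", res.Release.Name)
}
```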