
Commit

Merge pull request #2495 from CecileRobertMichon/calico-helm
Use Helm to install Calico CNI in e2e tests instead of ClusterResourceSets
k8s-ci-robot authored Nov 30, 2022
2 parents 3cb02a8 + 4726be8 commit 1d3194a
Showing 96 changed files with 1,683 additions and 78,154 deletions.
9 changes: 4 additions & 5 deletions Makefile
@@ -290,10 +290,9 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create
$(KUBECTL) wait --for=condition=Available --timeout=5m -n capi-kubeadm-bootstrap-system deployment -l cluster.x-k8s.io/provider=bootstrap-kubeadm
$(KUBECTL) wait --for=condition=Available --timeout=5m -n capi-kubeadm-control-plane-system deployment -l cluster.x-k8s.io/provider=control-plane-kubeadm

# apply CNI ClusterResourceSets
source ./scripts/ci-configmap.sh

$(KUBECTL) apply -f templates/addons/calico-resource-set.yaml
# install Windows Calico cluster resource set
$(KUBECTL) create configmap calico-windows-addon --from-file="$(ADDONS_DIR)/windows/calico" --dry-run=client -o yaml | kubectl apply -f -
$(KUBECTL) apply -f templates/addons/windows/calico-resource-set.yaml

# Wait for CAPZ deployments
$(KUBECTL) wait --for=condition=Available --timeout=5m -n capz-system deployment -l cluster.x-k8s.io/provider=infrastructure-azure
@@ -488,7 +487,7 @@ generate-addons: fetch-calico-manifests ## Generate metric-server, calico calico
$(KUSTOMIZE) build $(ADDONS_DIR)/calico-dual-stack > $(ADDONS_DIR)/calico-dual-stack.yaml

# When updating this, make sure to also update the Windows image version in templates/addons/windows/calico.
CALICO_VERSION := v3.23.0
CALICO_VERSION := v3.24.5
# Where all downloaded Calico manifests are unpacked and stored.
CALICO_RELEASES := $(ARTIFACTS)/calico
# Path to manifests directory in a Calico release archive.
20 changes: 14 additions & 6 deletions Tiltfile
@@ -249,10 +249,6 @@ def create_identity_secret():

def create_crs():
# create config maps
local(kubectl_cmd + " delete configmaps calico-addon --ignore-not-found=true")
local(kubectl_cmd + " create configmap calico-addon --from-file=templates/addons/calico.yaml")
local(kubectl_cmd + " delete configmaps calico-ipv6-addon --ignore-not-found=true")
local(kubectl_cmd + " create configmap calico-ipv6-addon --from-file=templates/addons/calico-ipv6.yaml")
local(kubectl_cmd + " delete configmaps csi-proxy-addon --ignore-not-found=true")
local(kubectl_cmd + " create configmap csi-proxy-addon --from-file=templates/addons/windows/csi-proxy/csi-proxy.yaml")

@@ -261,7 +257,7 @@ def create_crs():
local(kubectl_cmd + " create configmap calico-windows-addon --from-file=templates/addons/windows/calico/ --dry-run=client -o yaml | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f -")

# set up crs
local(kubectl_cmd + " apply -f templates/addons/calico-resource-set.yaml")
local(kubectl_cmd + " apply -f templates/addons/windows/calico-resource-set.yaml")
local(kubectl_cmd + " apply -f templates/addons/windows/csi-proxy/csi-proxy-resource-set.yaml")

# create flavor resources from cluster-template files in the templates directory
@@ -350,8 +346,20 @@ def deploy_worker_templates(template, substitutions):
yaml = yaml.replace('"', '\\"') # add escape character to double quotes in yaml
flavor_name = os.path.basename(flavor)
flavor_cmd = "RANDOM=$(bash -c 'echo $RANDOM'); CLUSTER_NAME=" + flavor.replace("windows", "win") + "-$RANDOM; make generate-flavors; echo \"" + yaml + "\" > ./.tiltbuild/" + flavor + "; cat ./.tiltbuild/" + flavor + " | " + envsubst_cmd + " | " + kubectl_cmd + " apply -f - && echo \"Cluster \'$CLUSTER_NAME\' created, don't forget to delete\""

# wait for kubeconfig to be available
flavor_cmd += "; until " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig > /dev/null 2>&1; do sleep 5; done; " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath={.data.value} | base64 --decode > ./${CLUSTER_NAME}.kubeconfig; chmod 600 ./${CLUSTER_NAME}.kubeconfig; until " + kubectl_cmd + " --kubeconfig=./${CLUSTER_NAME}.kubeconfig get nodes > /dev/null 2>&1; do sleep 5; done"

# install calico
if "ipv6" in flavor_name:
calico_values = "./templates/addons/calico-ipv6/values.yaml"
elif "dual-stack" in flavor_name:
calico_values = "./templates/addons/calico-dual-stack/values.yaml"
else:
calico_values = "./templates/addons/calico/values.yaml"
flavor_cmd += "; " + helm_cmd + " repo add projectcalico https://projectcalico.docs.tigera.io/charts; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install calico projectcalico/tigera-operator -f " + calico_values + " --namespace tigera-operator --create-namespace; kubectl --kubeconfig ./${CLUSTER_NAME}.kubeconfig apply -f ./templates/addons/calico/felix-override.yaml"
if "external-cloud-provider" in flavor_name:
flavor_cmd += "; until " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig > /dev/null 2>&1; do sleep 5; done; " + kubectl_cmd + " get secret ${CLUSTER_NAME}-kubeconfig -o jsonpath={.data.value} | base64 --decode > ./${CLUSTER_NAME}.kubeconfig; chmod 600 ./${CLUSTER_NAME}.kubeconfig; until " + kubectl_cmd + " --kubeconfig=./${CLUSTER_NAME}.kubeconfig get nodes > /dev/null 2>&1; do sleep 5; done; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name --set infra.clusterName=${CLUSTER_NAME}"
flavor_cmd += "; " + helm_cmd + " --kubeconfig ./${CLUSTER_NAME}.kubeconfig install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name --set infra.clusterName=${CLUSTER_NAME}"
local_resource(
name = flavor_name,
cmd = flavor_cmd,
2 changes: 1 addition & 1 deletion docs/book/src/SUMMARY.md
@@ -6,6 +6,7 @@
- [Getting Started](./topics/getting-started.md)
- [Troubleshooting](./topics/troubleshooting.md)
- [AAD Integration](./topics/aad-integration.md)
- [Addons](./topics/addons.md)
- [API Server Endpoint](./topics/api-server-endpoint.md)
- [Cloud Provider Config](./topics/cloud-provider-config.md)
- [Control Plane Outbound Load Balancer](./topics/control-plane-outbound-lb.md)
@@ -16,7 +17,6 @@
- [Dual-Stack](./topics/dual-stack.md)
- [Externally managed Azure infrastructure](./topics/externally-managed-azure-infrastructure.md)
- [Failure Domains](./topics/failure-domains.md)
- [Flannel](./topics/flannel.md)
- [GPU-enabled Clusters](./topics/gpu.md)
- [Identity use cases](./topics/identities-use-cases.md)
- [IPv6](./topics/ipv6.md)
161 changes: 161 additions & 0 deletions docs/book/src/topics/addons.md
@@ -0,0 +1,161 @@
# CNI

By default, no CNI plugin is installed for self-managed clusters, so you have to [install your own](https://cluster-api.sigs.k8s.io/user/quick-start.html#deploy-a-cni-solution).

Some of the instructions below use [Helm](https://helm.sh) to install the addons. If you're not familiar with using Helm to manage Kubernetes applications as packages, there's lots of good [Helm documentation on the official website](https://helm.sh/docs/). You can install Helm by following the [official instructions](https://helm.sh/docs/intro/install/).
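Once Helm is installed, a quick sanity check confirms the client is available (the output varies by version):

```bash
helm version --short
```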

## Calico

To install [Calico](https://www.tigera.io/project-calico/) on a self-managed cluster using the official Calico Helm chart, run the commands that match your cluster's network configuration:

### For IPv4 clusters

```bash
IPV4_CIDR_BLOCK=<cluster ipv4 pod cidr block>
helm repo add projectcalico https://projectcalico.docs.tigera.io/charts && \
helm install calico projectcalico/tigera-operator -f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/calico/values.yaml --set-string installation.calicoNetwork.ipPools[0].cidr="$IPV4_CIDR_BLOCK" --namespace tigera-operator --create-namespace
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/calico/felix-override.yaml
```
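To verify the install, you can watch the Tigera operator roll Calico out; `tigerastatus` is a resource created by the operator, so these checks assume the chart installed cleanly:

```bash
# wait for the operator to report all Calico components as Available
kubectl get tigerastatus
# the calico-system namespace and its pods are created by the operator
kubectl get pods -n calico-system
```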

### For IPv6 clusters

```bash
IPV6_CIDR_BLOCK=<cluster ipv6 pod cidr block>
helm repo add projectcalico https://projectcalico.docs.tigera.io/charts && \
helm install calico projectcalico/tigera-operator -f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/calico-ipv6/values.yaml --set-string installation.calicoNetwork.ipPools[0].cidr="$IPV6_CIDR_BLOCK" --namespace tigera-operator --create-namespace
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/calico/felix-override.yaml
```

### For dual-stack (IPv4 + IPv6) clusters

```bash
IPV4_CIDR_BLOCK=<cluster ipv4 pod cidr block>
IPV6_CIDR_BLOCK=<cluster ipv6 pod cidr block>
helm repo add projectcalico https://projectcalico.docs.tigera.io/charts && \
helm install calico projectcalico/tigera-operator -f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/calico-dual-stack/values.yaml --set-string installation.calicoNetwork.ipPools[0].cidr="$IPV4_CIDR_BLOCK",installation.calicoNetwork.ipPools[1].cidr="$IPV6_CIDR_BLOCK" --namespace tigera-operator --create-namespace
kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/addons/calico/felix-override.yaml
```
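For the dual-stack case specifically, you can also confirm that both IP pools were created (`ippools` is a Calico CRD installed by the operator):

```bash
# expect one IPv4 pool and one IPv6 pool in the output
kubectl get ippools
```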

<aside class="note">

<h1> Note </h1>

For Windows nodes, you also need to copy the `kubeadm-config` ConfigMap to the `calico-system` namespace so the `calico-node-windows` DaemonSet can find it:

```bash
kubectl create ns calico-system
kubectl get configmap kubeadm-config --namespace=kube-system -o yaml \
| sed 's/namespace: kube-system/namespace: calico-system/' \
| kubectl create -f -
```

</aside>
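A quick check that the ConfigMap was copied before the Windows nodes join:

```bash
kubectl get configmap kubeadm-config --namespace=calico-system
```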


For more information, see the [official Calico documentation](https://projectcalico.docs.tigera.io/getting-started/kubernetes/helm).

## Flannel

This section describes how to use [Flannel](https://github.com/flannel-io/flannel) as your CNI solution.

### Modify the Cluster resources

Before deploying the cluster, change the `KubeadmControlPlane` value at `spec.kubeadmConfigSpec.clusterConfiguration.controllerManager.extraArgs.allocate-node-cidrs` to `"true"`:

```yaml
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
spec:
  kubeadmConfigSpec:
    clusterConfiguration:
      controllerManager:
        extraArgs:
          allocate-node-cidrs: "true"
```
### Modify Flannel config

_NOTE_: This is based on the instructions at https://github.com/flannel-io/flannel#deploying-flannel-manually.

You need to adjust the default Flannel configuration so that the Flannel network CIDR matches the CIDR inside your CAPZ cluster. View your capi-cluster.yaml and make note of the cluster network CIDR block. For example:
```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
        - 192.168.0.0/16
```
Download the file at `https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml` and modify the `kube-flannel-cfg` ConfigMap: set `data.net-conf.json.Network` to match your cluster network CIDR block.

```bash
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
```

Edit kube-flannel.yml and change this section so that the `Network` value matches your cluster CIDR:

```yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
data:
  net-conf.json: |
    {
      "Network": "192.168.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
```
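If you'd rather script the change, a one-line substitution also works; this sketch assumes the manifest still ships with Flannel's default `10.244.0.0/16` network and that GNU sed is available:

```bash
# swap Flannel's default network for the cluster's pod CIDR
sed -i 's|10.244.0.0/16|192.168.0.0/16|g' kube-flannel.yml
```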

Apply kube-flannel.yml:

```bash
kubectl apply -f kube-flannel.yml
```
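To confirm Flannel is healthy, check that a pod is running on each node; recent manifests deploy into the `kube-flannel` namespace (older ones used `kube-system`):

```bash
kubectl get pods -n kube-flannel -o wide
# nodes should move to Ready once the CNI is up
kubectl get nodes
```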

# External Cloud Provider

To deploy a cluster using the [external cloud provider](https://github.com/kubernetes-sigs/cloud-provider-azure), create a cluster configuration with the [external cloud provider template](https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/cluster-template-external-cloud-provider.yaml).

After the cluster has been provisioned, install the `cloud-provider-azure` components using the official Helm chart:

```bash
helm install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name --set infra.clusterName=${CLUSTER_NAME}
```

The Helm chart will pick the right version of `cloud-controller-manager` and `cloud-node-manager` to work with the version of Kubernetes your cluster is running.
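If you need reproducible installs, Helm's standard `--version` flag pins a specific chart release instead of taking the latest; the version below is only a placeholder:

```bash
helm install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name \
  --set infra.clusterName=${CLUSTER_NAME} \
  --version <chart version>
```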

After running `helm install`, you should eventually see a set of pods like these in a `Running` state:

```bash
kube-system cloud-controller-manager 1/1 Running 0 41s
kube-system cloud-node-manager-5pklx 1/1 Running 0 26s
kube-system cloud-node-manager-hbbqt 1/1 Running 0 30s
kube-system cloud-node-manager-mfsdg 1/1 Running 0 39s
kube-system cloud-node-manager-qrz74 1/1 Running 0 24s
```

For more information, see the official [`cloud-provider-azure` Helm chart documentation](https://github.com/kubernetes-sigs/cloud-provider-azure/tree/master/helm/cloud-provider-azure).

## Storage Drivers

### Azure File CSI Driver

To install the Azure File CSI driver, please refer to the [installation guide](https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/install-azurefile-csi-driver.md).

Repository: https://github.com/kubernetes-sigs/azurefile-csi-driver
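As a sketch, the driver can also be installed from its Helm chart; the repo URL and chart name below mirror the linked install guide, so verify them there before relying on this:

```bash
helm repo add azurefile-csi-driver https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts
helm install azurefile-csi-driver azurefile-csi-driver/azurefile-csi-driver --namespace kube-system
```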

### Azure Disk CSI Driver

To install the Azure Disk CSI driver, please refer to the [installation guide](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/install-azuredisk-csi-driver.md).

Repository: https://github.com/kubernetes-sigs/azuredisk-csi-driver
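The disk driver similarly publishes a Helm chart; as with the file driver, double-check the repo URL and chart name against the install guide before use:

```bash
helm repo add azuredisk-csi-driver https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts
helm install azuredisk-csi-driver azuredisk-csi-driver/azuredisk-csi-driver --namespace kube-system
```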
39 changes: 0 additions & 39 deletions docs/book/src/topics/cloud-provider-config.md
@@ -67,42 +67,3 @@ All cloud provider config values can be customized by creating the `${RESOURCE}-
</aside>


# External Cloud Provider

To deploy a cluster using [external cloud provider](https://github.com/kubernetes-sigs/cloud-provider-azure), create a cluster configuration with the [external cloud provider template](https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-azure/main/templates/cluster-template-external-cloud-provider.yaml).

After the cluster has provisioned, install the `cloud-provider-azure` components using the official helm chart:

```bash
helm install --repo https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo cloud-provider-azure --generate-name --set infra.clusterName=${CLUSTER_NAME}
```

The Helm chart will pick the right version of `cloud-controller-manager` and `cloud-node-manager` to work with the version of Kubernetes your cluster is running.

After running `helm install`, you should eventually see a set of pods like these in a `Running` state:

```bash
kube-system cloud-controller-manager 1/1 Running 0 41s
kube-system cloud-node-manager-5pklx 1/1 Running 0 26s
kube-system cloud-node-manager-hbbqt 1/1 Running 0 30s
kube-system cloud-node-manager-mfsdg 1/1 Running 0 39s
kube-system cloud-node-manager-qrz74 1/1 Running 0 24s
```

For more information see the official [`cloud-provider-azure` helm chart documentation](https://github.com/kubernetes-sigs/cloud-provider-azure/tree/master/helm/cloud-provider-azure).

If you're not familiar with using Helm to manage Kubernetes applications as packages, there's lots of good [Helm documentation on the official website](https://helm.sh/docs/).

## Storage Drivers

### Azure File CSI Driver

To install the Azure File CSI driver please refer to the [installation guide](https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/install-azurefile-csi-driver.md)

Repository: https://github.com/kubernetes-sigs/azurefile-csi-driver

### Azure Disk CSI Driver

To install the Azure Disk CSI driver please refer to the [installation guide](https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/install-azuredisk-csi-driver.md)

Repository: https://github.com/kubernetes-sigs/azuredisk-csi-driver
68 changes: 0 additions & 68 deletions docs/book/src/topics/flannel.md

This file was deleted.
