Check-in v1.3.0 manifests in test/e2e/data and bump versions
The v1.3.0 manifests are used to spin up point-in-time infrastructure from a
particular release. We also updated the versions to ensure there is a version
that the e2e tests can target, and added a 'release-blocker' comment everywhere
that needs to be modified so that a release does not break the e2e tests.
thunderboltsid committed Jan 18, 2024
1 parent 72058b8 commit d2a645a
Showing 16 changed files with 693 additions and 5 deletions.
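
For reviewers: a quick way to list every release-blocker marker touched by this
change (a sketch; paths are relative to the repository root):

grep -rn "TODO(release-blocker)" Makefile metadata.yaml test/e2e/config/nutanix.yaml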
9 changes: 6 additions & 3 deletions Makefile
@@ -16,8 +16,8 @@ IMG_TAG := latest
endif

ifeq (${LOCAL_PROVIDER_VERSION},latest)
-# Change this versions after release when required here and in e2e config (test/e2e/config/nutanix.yaml)
-LOCAL_PROVIDER_VERSION := v1.3.99
+# TODO(release-blocker): Change these versions after release when required here and in e2e config (test/e2e/config/nutanix.yaml)
+LOCAL_PROVIDER_VERSION := v1.4.99
endif
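
Any target that consumes LOCAL_PROVIDER_VERSION can be pinned from the command
line, which bypasses the ifeq default above; a sketch (the test-e2e target name
is illustrative and not shown in this diff):

make test-e2e LOCAL_PROVIDER_VERSION=v1.3.0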

# PLATFORMS is a list of platforms to build for.
@@ -304,11 +304,14 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi
##@ Templates

.PHONY: cluster-e2e-templates
-cluster-e2e-templates: $(KUSTOMIZE) cluster-e2e-templates-v1beta1 cluster-e2e-templates-v1alpha4 cluster-e2e-templates-v124 ## Generate cluster templates for all versions
+cluster-e2e-templates: $(KUSTOMIZE) cluster-e2e-templates-v1beta1 cluster-e2e-templates-v1alpha4 cluster-e2e-templates-v124 cluster-e2e-templates-v130 ## Generate cluster templates for all versions

cluster-e2e-templates-v124: $(KUSTOMIZE) ## Generate cluster templates for CAPX v1.2.4
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1.2.4/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1.2.4/cluster-template.yaml

+cluster-e2e-templates-v130: $(KUSTOMIZE) ## Generate cluster templates for CAPX v1.3.0
+	$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1.3.0/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1.3.0/cluster-template.yaml

cluster-e2e-templates-v1alpha4: $(KUSTOMIZE) ## Generate cluster templates for v1alpha4
$(KUSTOMIZE) build $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template --load-restrictor LoadRestrictionsNone > $(NUTANIX_E2E_TEMPLATES)/v1alpha4/cluster-template.yaml

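The new cluster-e2e-templates-v130 target can also be run on its own to
regenerate only the v1.3.0 template (assuming kustomize is available via the
repository's tooling):

make cluster-e2e-templates-v130
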
3 changes: 2 additions & 1 deletion metadata.yaml
@@ -2,7 +2,8 @@
# the contract version may change between minor or major versions, but *not*
# between patch versions.
#
-# update this file only when a new major or minor version is released
+# TODO(release-blocker): update this file only when a new major or minor version is released

apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
kind: Metadata
releaseSeries:
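
When the next minor version ships, releaseSeries gains a new entry; a
hypothetical sketch of the shape (values illustrative, following the clusterctl
metadata format):

cat >> metadata.yaml <<'EOF'
- major: 1
  minor: 4
  contract: v1beta1
EOF
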
10 changes: 9 additions & 1 deletion test/e2e/config/nutanix.yaml
@@ -244,7 +244,15 @@ providers:
- sourcePath: "../../../metadata.yaml"
- sourcePath: "../data/infrastructure-nutanix/v1.2.4/cluster-template.yaml"
- sourcePath: "../data/infrastructure-nutanix/ccm-update.yaml"
-  - name: v1.3.99 # next; use manifest from source files
+  - name: v1.3.0
+    type: url
+    value: https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases/download/v1.3.0/infrastructure-components.yaml
+    contract: v1beta1
+    files:
+      - sourcePath: "../../../metadata.yaml"
+      - sourcePath: "../data/infrastructure-nutanix/v1.3.0/cluster-template.yaml"
+      - sourcePath: "../data/infrastructure-nutanix/ccm-update.yaml"
+  - name: v1.4.99 # TODO(release-blocker): when you make a new release, bump this to a version higher than the latest release
type: kustomize
value: "../../../config/default"
contract: v1beta1
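
Before a run, the pinned release artifact referenced above can be
sanity-checked; a sketch:

curl -fsSLI https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases/download/v1.3.0/infrastructure-components.yaml
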
43 changes: 43 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/ccm-patch.yaml
@@ -0,0 +1,43 @@
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
name: "${CLUSTER_NAME}-kcp"
namespace: "${NAMESPACE}"
spec:
kubeadmConfigSpec:
clusterConfiguration:
apiServer:
extraArgs:
cloud-provider: external
controllerManager:
extraArgs:
cloud-provider: external
initConfiguration:
nodeRegistration:
kubeletExtraArgs:
cloud-provider: external
joinConfiguration:
nodeRegistration:
kubeletExtraArgs:
cloud-provider: external
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
name: "${CLUSTER_NAME}-kcfg-0"
namespace: "${NAMESPACE}"
spec:
template:
spec:
joinConfiguration:
nodeRegistration:
kubeletExtraArgs:
cloud-provider: external
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
labels:
ccm: "nutanix"
name: "${CLUSTER_NAME}"
namespace: "${NAMESPACE}"
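
Setting cloud-provider: external means kubelets register with the
node.cloudprovider.kubernetes.io/uninitialized taint until the Nutanix CCM
initializes the node and clears it; a sketch for spotting nodes still waiting
(kubectl against the workload cluster assumed):

kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.taints[*].key}{"\n"}{end}' | grep uninitialized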
200 changes: 200 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/cluster-with-kcp.yaml
@@ -0,0 +1,200 @@
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixCluster
metadata:
name: "${CLUSTER_NAME}"
namespace: "${NAMESPACE}"
spec:
prismCentral:
address: "${NUTANIX_ENDPOINT}"
port: ${NUTANIX_PORT=9440}
insecure: ${NUTANIX_INSECURE=false}
credentialRef:
name: "${CLUSTER_NAME}"
kind: Secret
additionalTrustBundle:
name: user-ca-bundle
kind: ConfigMap
controlPlaneEndpoint:
host: "${CONTROL_PLANE_ENDPOINT_IP}"
port: ${CONTROL_PLANE_ENDPOINT_PORT=6443}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
labels:
cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}"
name: "${CLUSTER_NAME}"
namespace: "${NAMESPACE}"
spec:
clusterNetwork:
services:
cidrBlocks: ["172.19.0.0/16"]
pods:
cidrBlocks: ["172.20.0.0/16"]
serviceDomain: "cluster.local"
controlPlaneRef:
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
name: "${CLUSTER_NAME}-kcp"
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixCluster
name: "${CLUSTER_NAME}"

---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
name: "${CLUSTER_NAME}-kcp"
namespace: "${NAMESPACE}"
spec:
replicas: ${CONTROL_PLANE_MACHINE_COUNT=1}
version: ${KUBERNETES_VERSION}
machineTemplate:
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixMachineTemplate
name: "${CLUSTER_NAME}-mt-0"
kubeadmConfigSpec:
clusterConfiguration:
apiServer:
certSANs:
- localhost
- 127.0.0.1
- 0.0.0.0
extraArgs:
tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
controllerManager:
extraArgs:
enable-hostpath-provisioner: "true"
tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
scheduler:
extraArgs:
tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
files:
- content: |
apiVersion: v1
kind: Pod
metadata:
name: kube-vip
namespace: kube-system
spec:
containers:
- name: kube-vip
image: ghcr.io/kube-vip/kube-vip:v0.6.4
imagePullPolicy: IfNotPresent
args:
- manager
env:
- name: vip_arp
value: "true"
- name: address
value: "${CONTROL_PLANE_ENDPOINT_IP}"
- name: port
value: "${CONTROL_PLANE_ENDPOINT_PORT=6443}"
- name: vip_cidr
value: "32"
- name: cp_enable
value: "true"
- name: cp_namespace
value: kube-system
- name: vip_ddns
value: "false"
- name: vip_leaderelection
value: "true"
- name: vip_leaseduration
value: "15"
- name: vip_renewdeadline
value: "10"
- name: vip_retryperiod
value: "2"
- name: svc_enable
value: "${KUBEVIP_SVC_ENABLE=false}"
- name: lb_enable
value: "${KUBEVIP_LB_ENABLE=false}"
- name: enableServicesElection
value: "${KUBEVIP_SVC_ELECTION=false}"
securityContext:
capabilities:
add:
- NET_ADMIN
- SYS_TIME
- NET_RAW
volumeMounts:
- mountPath: /etc/kubernetes/admin.conf
name: kubeconfig
resources: {}
hostNetwork: true
hostAliases:
- hostnames:
- kubernetes
ip: 127.0.0.1
volumes:
- name: kubeconfig
hostPath:
type: FileOrCreate
path: /etc/kubernetes/admin.conf
status: {}
owner: root:root
path: /etc/kubernetes/manifests/kube-vip.yaml
initConfiguration:
nodeRegistration:
kubeletExtraArgs:
# We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
# kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
#cgroup-driver: cgroupfs
eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10%
tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
joinConfiguration:
nodeRegistration:
kubeletExtraArgs:
# We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
# kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
#cgroup-driver: cgroupfs
eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10%
tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
users:
- name: capiuser
lockPassword: false
sudo: ALL=(ALL) NOPASSWD:ALL
sshAuthorizedKeys:
- ${NUTANIX_SSH_AUTHORIZED_KEY}
preKubeadmCommands:
- echo "before kubeadm call" > /var/log/prekubeadm.log
- hostnamectl set-hostname "{{ ds.meta_data.hostname }}"
postKubeadmCommands:
- echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc
- echo "after kubeadm call" > /var/log/postkubeadm.log
useExperimentalRetryJoin: true
verbosity: 10

---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
name: "${CLUSTER_NAME}-kcfg-0"
namespace: "${NAMESPACE}"
spec:
template:
spec:
joinConfiguration:
nodeRegistration:
kubeletExtraArgs:
# We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
# kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
#cgroup-driver: cgroupfs
eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10%
tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
users:
- name: capiuser
lockPassword: false
sudo: ALL=(ALL) NOPASSWD:ALL
sshAuthorizedKeys:
- ${NUTANIX_SSH_AUTHORIZED_KEY}
preKubeadmCommands:
- echo "before kubeadm call" > /var/log/prekubeadm.log
- hostnamectl set-hostname "{{ ds.meta_data.hostname }}"
postKubeadmCommands:
- echo "after kubeadm call" > /var/log/postkubeadm.log
verbosity: 10
#useExperimentalRetryJoin: true
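
The ${VAR=default} tokens above use clusterctl's variable substitution with
defaults; a local render of the assembled template might look like this (values
illustrative, and any remaining required variables must be exported the same
way):

export CLUSTER_NAME=demo NAMESPACE=default KUBERNETES_VERSION=v1.27.8
export NUTANIX_ENDPOINT=pc.example.com CONTROL_PLANE_ENDPOINT_IP=10.0.0.10
export NUTANIX_SSH_AUTHORIZED_KEY="ssh-ed25519 AAAA... user@example"
clusterctl generate yaml --from test/e2e/data/infrastructure-nutanix/v1.3.0/cluster-template.yaml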
8 changes: 8 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/cm.yaml
@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: user-ca-bundle
namespace: "${NAMESPACE}"
binaryData:
ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""}
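
binaryData requires base64 content; a sketch for populating the variable from a
PEM bundle (path illustrative, GNU base64 assumed for -w0):

export NUTANIX_ADDITIONAL_TRUST_BUNDLE="$(base64 -w0 < /path/to/pc-ca.crt)"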
@@ -0,0 +1,7 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
labels:
cni: ${CLUSTER_NAME}-crs-cni
name: "${CLUSTER_NAME}"
namespace: "${NAMESPACE}"
18 changes: 18 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/crs.yaml
@@ -0,0 +1,18 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: "cni-${CLUSTER_NAME}-crs-cni"
data: ${CNI_RESOURCES}
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
name: "${CLUSTER_NAME}-crs-cni"
spec:
strategy: ApplyOnce
clusterSelector:
matchLabels:
cni: "${CLUSTER_NAME}-crs-cni"
resources:
- name: "cni-${CLUSTER_NAME}-crs-cni"
kind: ConfigMap
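
The ClusterResourceSet applies the CNI ConfigMap once to any cluster carrying
the matching cni label (set by the patch above); a sketch for confirming the
binding after the cluster exists:

kubectl get clusterresourcesets,clusterresourcesetbindings -n "${NAMESPACE}"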
28 changes: 28 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/md.yaml
@@ -0,0 +1,28 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
labels:
cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
name: "${CLUSTER_NAME}-wmd"
namespace: "${NAMESPACE}"
spec:
clusterName: "${CLUSTER_NAME}"
replicas: ${WORKER_MACHINE_COUNT}
selector:
matchLabels: {}
template:
metadata:
labels:
cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}"
spec:
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: "${CLUSTER_NAME}-kcfg-0"
clusterName: "${CLUSTER_NAME}"
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixMachineTemplate
name: "${CLUSTER_NAME}-mt-0"
version: "${KUBERNETES_VERSION}"
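
Worker count comes from ${WORKER_MACHINE_COUNT}; since MachineDeployment
exposes the scale subresource, the pool can be resized after creation; a
sketch:

kubectl scale machinedeployment "${CLUSTER_NAME}-wmd" -n "${NAMESPACE}" --replicas=3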
31 changes: 31 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/mhc.yaml
@@ -0,0 +1,31 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck
metadata:
name: "${CLUSTER_NAME}-mhc"
namespace: "${NAMESPACE}"
spec:
clusterName: "${CLUSTER_NAME}"
maxUnhealthy: 40%
nodeStartupTimeout: 10m0s
selector:
matchLabels:
cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}"
unhealthyConditions:
- type: Ready
status: "False"
timeout: 5m0s
- type: Ready
status: Unknown
timeout: 5m0s
- type: MemoryPressure
status: "True"
timeout: 5m0s
- type: DiskPressure
status: "True"
timeout: 5m0s
- type: PIDPressure
status: "True"
timeout: 5m0s
- type: NetworkUnavailable
status: "True"
timeout: 5m0s
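
With maxUnhealthy at 40%, remediation pauses whenever more than 40% of the
matched machines are unhealthy at once; a sketch for watching the check's view
of the cluster:

kubectl get machinehealthcheck "${CLUSTER_NAME}-mhc" -n "${NAMESPACE}"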