Check-in v1.3.0 manifests in test/e2e/data
These manifests are used to spin up point-in-time infrastructure from a particular release.
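The templates below rely on clusterctl/envsubst-style variable substitution: ${VAR} is filled in from the test environment, and ${VAR=default} falls back to the given default when the variable is unset. A minimal sketch of how one fragment renders, assuming that substitution behavior and an illustrative endpoint value:

    # Template fragment (before substitution)
    prismCentral:
      address: "${NUTANIX_ENDPOINT}"        # required; no default
      port: ${NUTANIX_PORT=9440}            # optional; defaults to 9440
      insecure: ${NUTANIX_INSECURE=false}   # optional; defaults to false

    # Rendered result with only NUTANIX_ENDPOINT=10.0.0.10 exported (illustrative)
    prismCentral:
      address: "10.0.0.10"
      port: 9440
      insecure: false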
thunderboltsid committed Jan 18, 2024
1 parent 72058b8 commit 60e7ee8
Showing 13 changed files with 676 additions and 0 deletions.
43 changes: 43 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/ccm-patch.yaml
@@ -0,0 +1,43 @@
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: "${CLUSTER_NAME}-kcp"
  namespace: "${NAMESPACE}"
spec:
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        extraArgs:
          cloud-provider: external
      controllerManager:
        extraArgs:
          cloud-provider: external
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          cloud-provider: external
    joinConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          cloud-provider: external
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: "${CLUSTER_NAME}-kcfg-0"
  namespace: "${NAMESPACE}"
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            cloud-provider: external
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  labels:
    ccm: "nutanix"
  name: "${CLUSTER_NAME}"
  namespace: "${NAMESPACE}"
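This patch switches the API server, controller manager, and kubelet registration to cloud-provider: external and labels the Cluster with ccm: "nutanix", so cloud controller manager add-ons can be matched by that label. A minimal sketch of a ClusterResourceSet selecting on it (names are illustrative assumptions; the actual CCM delivery resources are not part of this file):

    apiVersion: addons.cluster.x-k8s.io/v1beta1
    kind: ClusterResourceSet
    metadata:
      name: nutanix-ccm-crs            # hypothetical name
    spec:
      clusterSelector:
        matchLabels:
          ccm: "nutanix"               # matches the label set by this patch
      resources:
        - name: nutanix-ccm            # hypothetical ConfigMap holding the CCM manifests
          kind: ConfigMap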
200 changes: 200 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/cluster-with-kcp.yaml
@@ -0,0 +1,200 @@
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixCluster
metadata:
  name: "${CLUSTER_NAME}"
  namespace: "${NAMESPACE}"
spec:
  prismCentral:
    address: "${NUTANIX_ENDPOINT}"
    port: ${NUTANIX_PORT=9440}
    insecure: ${NUTANIX_INSECURE=false}
    credentialRef:
      name: "${CLUSTER_NAME}"
      kind: Secret
    additionalTrustBundle:
      name: user-ca-bundle
      kind: ConfigMap
  controlPlaneEndpoint:
    host: "${CONTROL_PLANE_ENDPOINT_IP}"
    port: ${CONTROL_PLANE_ENDPOINT_PORT=6443}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  labels:
    cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}"
  name: "${CLUSTER_NAME}"
  namespace: "${NAMESPACE}"
spec:
  clusterNetwork:
    services:
      cidrBlocks: ["172.19.0.0/16"]
    pods:
      cidrBlocks: ["172.20.0.0/16"]
    serviceDomain: "cluster.local"
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: KubeadmControlPlane
    name: "${CLUSTER_NAME}-kcp"
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: NutanixCluster
    name: "${CLUSTER_NAME}"

---
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: KubeadmControlPlane
metadata:
  name: "${CLUSTER_NAME}-kcp"
  namespace: "${NAMESPACE}"
spec:
  replicas: ${CONTROL_PLANE_MACHINE_COUNT=1}
  version: ${KUBERNETES_VERSION}
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: NutanixMachineTemplate
      name: "${CLUSTER_NAME}-mt-0"
  kubeadmConfigSpec:
    clusterConfiguration:
      apiServer:
        certSANs:
          - localhost
          - 127.0.0.1
          - 0.0.0.0
        extraArgs:
          tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
      controllerManager:
        extraArgs:
          enable-hostpath-provisioner: "true"
          tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
      scheduler:
        extraArgs:
          tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
    files:
      - content: |
          apiVersion: v1
          kind: Pod
          metadata:
            name: kube-vip
            namespace: kube-system
          spec:
            containers:
              - name: kube-vip
                image: ghcr.io/kube-vip/kube-vip:v0.6.4
                imagePullPolicy: IfNotPresent
                args:
                  - manager
                env:
                  - name: vip_arp
                    value: "true"
                  - name: address
                    value: "${CONTROL_PLANE_ENDPOINT_IP}"
                  - name: port
                    value: "${CONTROL_PLANE_ENDPOINT_PORT=6443}"
                  - name: vip_cidr
                    value: "32"
                  - name: cp_enable
                    value: "true"
                  - name: cp_namespace
                    value: kube-system
                  - name: vip_ddns
                    value: "false"
                  - name: vip_leaderelection
                    value: "true"
                  - name: vip_leaseduration
                    value: "15"
                  - name: vip_renewdeadline
                    value: "10"
                  - name: vip_retryperiod
                    value: "2"
                  - name: svc_enable
                    value: "${KUBEVIP_SVC_ENABLE=false}"
                  - name: lb_enable
                    value: "${KUBEVIP_LB_ENABLE=false}"
                  - name: enableServicesElection
                    value: "${KUBEVIP_SVC_ELECTION=false}"
                securityContext:
                  capabilities:
                    add:
                      - NET_ADMIN
                      - SYS_TIME
                      - NET_RAW
                volumeMounts:
                  - mountPath: /etc/kubernetes/admin.conf
                    name: kubeconfig
                resources: {}
            hostNetwork: true
            hostAliases:
              - hostnames:
                  - kubernetes
                ip: 127.0.0.1
            volumes:
              - name: kubeconfig
                hostPath:
                  type: FileOrCreate
                  path: /etc/kubernetes/admin.conf
          status: {}
        owner: root:root
        path: /etc/kubernetes/manifests/kube-vip.yaml
    initConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
          # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
          #cgroup-driver: cgroupfs
          eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10%
          tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
    joinConfiguration:
      nodeRegistration:
        kubeletExtraArgs:
          # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
          # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
          #cgroup-driver: cgroupfs
          eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10%
          tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
    users:
      - name: capiuser
        lockPassword: false
        sudo: ALL=(ALL) NOPASSWD:ALL
        sshAuthorizedKeys:
          - ${NUTANIX_SSH_AUTHORIZED_KEY}
    preKubeadmCommands:
      - echo "before kubeadm call" > /var/log/prekubeadm.log
      - hostnamectl set-hostname "{{ ds.meta_data.hostname }}"
    postKubeadmCommands:
      - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc
      - echo "after kubeadm call" > /var/log/postkubeadm.log
    useExperimentalRetryJoin: true
    verbosity: 10

---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
  name: "${CLUSTER_NAME}-kcfg-0"
  namespace: "${NAMESPACE}"
spec:
  template:
    spec:
      joinConfiguration:
        nodeRegistration:
          kubeletExtraArgs:
            # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
            # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
            #cgroup-driver: cgroupfs
            eviction-hard: nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<15%,memory.available<100Mi,imagefs.inodesFree<10%
            tls-cipher-suites: "${TLS_CIPHER_SUITES=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}"
      users:
        - name: capiuser
          lockPassword: false
          sudo: ALL=(ALL) NOPASSWD:ALL
          sshAuthorizedKeys:
            - ${NUTANIX_SSH_AUTHORIZED_KEY}
      preKubeadmCommands:
        - echo "before kubeadm call" > /var/log/prekubeadm.log
        - hostnamectl set-hostname "{{ ds.meta_data.hostname }}"
      postKubeadmCommands:
        - echo "after kubeadm call" > /var/log/postkubeadm.log
      verbosity: 10
      #useExperimentalRetryJoin: true
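The templates above expect their ${...} variables to be supplied by the e2e configuration. A sketch of how they might be declared, assuming the standard Cluster API e2e config layout with a top-level variables map (names come from this file; values are illustrative and not taken from this commit):

    variables:
      KUBERNETES_VERSION: "v1.28.4"                       # illustrative version
      CONTROL_PLANE_MACHINE_COUNT: "1"
      CONTROL_PLANE_ENDPOINT_IP: "10.0.0.50"               # VIP announced by the kube-vip static pod
      NUTANIX_ENDPOINT: "prism-central.example.com"        # Prism Central address
      NUTANIX_SSH_AUTHORIZED_KEY: "ssh-ed25519 AAAA..."    # injected for the capiuser account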
8 changes: 8 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/cm.yaml
@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: user-ca-bundle
  namespace: "${NAMESPACE}"
binaryData:
  ca.crt: ${NUTANIX_ADDITIONAL_TRUST_BUNDLE=""}
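Because ca.crt sits under binaryData, NUTANIX_ADDITIONAL_TRUST_BUNDLE is expected to expand to a base64-encoded PEM bundle (it defaults to an empty string). An illustrative rendered result with a truncated, made-up value:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: user-ca-bundle
    binaryData:
      ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t...   # base64 of a PEM CA bundle (truncated, illustrative)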
@@ -0,0 +1,7 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  labels:
    cni: ${CLUSTER_NAME}-crs-cni
  name: "${CLUSTER_NAME}"
  namespace: "${NAMESPACE}"
18 changes: 18 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/crs.yaml
@@ -0,0 +1,18 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: "cni-${CLUSTER_NAME}-crs-cni"
data: ${CNI_RESOURCES}
---
apiVersion: addons.cluster.x-k8s.io/v1beta1
kind: ClusterResourceSet
metadata:
  name: "${CLUSTER_NAME}-crs-cni"
spec:
  strategy: ApplyOnce
  clusterSelector:
    matchLabels:
      cni: "${CLUSTER_NAME}-crs-cni"
  resources:
    - name: "cni-${CLUSTER_NAME}-crs-cni"
      kind: ConfigMap
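The clusterSelector here pairs with the cni label patched onto the Cluster above; once both render with the same ${CLUSTER_NAME}, the ClusterResourceSet applies the ConfigMap holding ${CNI_RESOURCES} to that cluster exactly once. For example, with CLUSTER_NAME=test-cluster (illustrative value):

    # Label on the Cluster (from the cni patch above)
    metadata:
      labels:
        cni: test-cluster-crs-cni
    # Selector on the ClusterResourceSet
    spec:
      clusterSelector:
        matchLabels:
          cni: test-cluster-crs-cni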
28 changes: 28 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/md.yaml
@@ -0,0 +1,28 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  labels:
    cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
  name: "${CLUSTER_NAME}-wmd"
  namespace: "${NAMESPACE}"
spec:
  clusterName: "${CLUSTER_NAME}"
  replicas: ${WORKER_MACHINE_COUNT}
  selector:
    matchLabels: {}
  template:
    metadata:
      labels:
        cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}"
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: KubeadmConfigTemplate
          name: "${CLUSTER_NAME}-kcfg-0"
      clusterName: "${CLUSTER_NAME}"
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: NutanixMachineTemplate
        name: "${CLUSTER_NAME}-mt-0"
      version: "${KUBERNETES_VERSION}"
31 changes: 31 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/mhc.yaml
@@ -0,0 +1,31 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck
metadata:
  name: "${CLUSTER_NAME}-mhc"
  namespace: "${NAMESPACE}"
spec:
  clusterName: "${CLUSTER_NAME}"
  maxUnhealthy: 40%
  nodeStartupTimeout: 10m0s
  selector:
    matchLabels:
      cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}"
  unhealthyConditions:
    - type: Ready
      status: "False"
      timeout: 5m0s
    - type: Ready
      status: Unknown
      timeout: 5m0s
    - type: MemoryPressure
      status: "True"
      timeout: 5m0s
    - type: DiskPressure
      status: "True"
      timeout: 5m0s
    - type: PIDPressure
      status: "True"
      timeout: 5m0s
    - type: NetworkUnavailable
      status: "True"
      timeout: 5m0s
40 changes: 40 additions & 0 deletions test/e2e/data/infrastructure-nutanix/v1.3.0/base/nmt.yaml
@@ -0,0 +1,40 @@
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: NutanixMachineTemplate
metadata:
  name: "${CLUSTER_NAME}-mt-0"
  namespace: "${NAMESPACE}"
spec:
  template:
    spec:
      providerID: "nutanix://${CLUSTER_NAME}-m1"
      # Supported options for boot type: legacy and uefi
      # Defaults to legacy if not set
      bootType: ${NUTANIX_MACHINE_BOOT_TYPE=legacy}
      vcpusPerSocket: ${NUTANIX_MACHINE_VCPU_PER_SOCKET=1}
      vcpuSockets: ${NUTANIX_MACHINE_VCPU_SOCKET=2}
      memorySize: "${NUTANIX_MACHINE_MEMORY_SIZE=4Gi}"
      systemDiskSize: "${NUTANIX_SYSTEMDISK_SIZE=40Gi}"
      image:
        type: name
        name: "${NUTANIX_MACHINE_TEMPLATE_IMAGE_NAME}"
      cluster:
        type: name
        name: "${NUTANIX_PRISM_ELEMENT_CLUSTER_NAME}"
      subnet:
        - type: name
          name: "${NUTANIX_SUBNET_NAME}"
      # Adds additional categories to the virtual machines.
      # Note: Categories must already be present in Prism Central
      # additionalCategories:
      #   - key: AppType
      #     value: Kubernetes
      # Adds the cluster virtual machines to a project defined in Prism Central.
      # Replace NUTANIX_PROJECT_NAME with the correct project defined in Prism Central
      # Note: Project must already be present in Prism Central.
      # project:
      #   type: name
      #   name: "NUTANIX_PROJECT_NAME"
      # gpus:
      #   - type: name
      #     name: "GPU NAME"
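For reference, the commented-out optional fields above take the following shape when enabled; the category, project, and GPU names below are placeholders and must already exist in Prism Central:

    additionalCategories:
      - key: AppType
        value: Kubernetes
    project:
      type: name
      name: "example-project"      # placeholder project name
    gpus:
      - type: name
        name: "example-gpu"        # placeholder GPU name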