diff --git a/README.md b/README.md
index 1518a6ac..a3c6abe0 100644
--- a/README.md
+++ b/README.md
@@ -259,10 +259,72 @@ Pre-requisites:
 - 12 service(15 ports each) with 8 pod endpoints, 12 service(15 ports each) with 6 pod endpoints, 12 service(15 ports each) with 5 pod endpoints
 - 29 service(15 ports each) with 4 pod endpoints, 29 service(15 ports each) with 6 pod endpoints
 
-## Workers Scale
+## Core RDS workloads
+
+The telco core reference design specification (RDS) describes OpenShift Container Platform clusters running on commodity hardware that can support large-scale telco applications, including control plane and some centralized data plane functions. It captures the recommended, tested, and supported configurations needed to get reliable and repeatable performance for clusters running the telco core profile.
+
+Pre-requisites:
+- A **PerformanceProfile** with isolated and reserved cores, 1G hugepages, and `topologyPolicy=single-numa-node`. Hugepages should be allocated on the first NUMA node (the one used by the DPDK deployments):
+  ```yaml
+  hugepages:
+    defaultHugepagesSize: 1G
+    pages:
+      - count: 160
+        node: 0
+        size: 1G
+      - count: 6
+        node: 1
+        size: 1G
+  ```
+- **MetalLB operator** limiting speaker pods to specific nodes (approx. 10%, i.e. 12 in the case of 120 iterations/nodes) carrying the corresponding ***worker-metallb*** label:
+  ```yaml
+  apiVersion: metallb.io/v1beta1
+  kind: MetalLB
+  metadata:
+    name: metallb
+    namespace: metallb-system
+  spec:
+    nodeSelector:
+      node-role.kubernetes.io/worker-metallb: ""
+    speakerTolerations:
+      - key: "Example"
+        operator: "Exists"
+        effect: "NoExecute"
+  ```
+- **SRIOV operator** with its corresponding *SriovNetworkNodePolicy* (a minimal example policy is sketched below, after this README hunk)
+- Some nodes (e.g., 25% of them) with the ***worker-dpdk*** label to host the DPDK pods, e.g.:
+  ```
+  $ kubectl label node worker1 node-role.kubernetes.io/worker-dpdk=
+  ```
+
+Object count:
+
+| Iterations / nodes / namespaces   | 1    | 120                                 |
+| --------------------------------- | ---- | ----------------------------------- |
+| configmaps                        | 30   | 3600                                |
+| deployments_best_effort           | 25   | 3000                                |
+| deployments_dpdk                  | 2    | 240 (assuming 24 worker-dpdk nodes) |
+| endpoints (210 x service)         | 4200 | 504000                              |
+| endpoints lb (90 x service)       | 90   | 10800                               |
+| networkPolicy                     | 3    | 360                                 |
+| namespaces                        | 1    | 120                                 |
+| pods_best_effort (2 x deployment) | 50   | 6000                                |
+| pods_dpdk (1 x deployment)        | 2    | 240 (assuming 24 worker-dpdk nodes) |
+| route                             | 2    | 240                                 |
+| services                          | 20   | 2400                                |
+| services (lb)                     | 1    | 120                                 |
+| secrets                           | 42   | 5040                                |
+
+Input parameters specific to the workload:
+
+| Parameter           | Description                                                                                     | Default value |
+| ------------------- | ----------------------------------------------------------------------------------------------- | ------------- |
+| dpdk-cores          | Number of cores assigned to each DPDK pod (should fill all the isolated cores of one NUMA node) | 2             |
+| performance-profile | Name of the performance profile implemented on the cluster                                      | default       |
+
+## Workers Scale
 
 As a day2 operation, we can use this option to scale our cluster's worker nodes to a desired count and capture their bootup times.
 
-!!! Note
+!!! Note
     This is only supported for openshift clusters hosted on AWS at the moment.
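The SRIOV prerequisite above references a *SriovNetworkNodePolicy* without showing one, so here is a minimal sketch. The policy name, `numVfs`, and `pfNames` values are assumptions to be adapted to the target hardware; only `resourceName: intelnics2` and the ***worker-dpdk*** selector are taken from the `sriov-network.yml` template and node label used in this change:

```yaml
apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetworkNodePolicy
metadata:
  name: sriov-nnp-dpdk                  # assumption: any policy name works
  namespace: openshift-sriov-network-operator
spec:
  deviceType: vfio-pci                  # userspace driver typically used by DPDK pods
  resourceName: intelnics2              # must match resourceName in sriov-network.yml
  numVfs: 16                            # assumption: size to the expected number of DPDK pods per node
  nicSelector:
    pfNames: ["ens1f0"]                 # assumption: replace with the actual physical function
  nodeSelector:
    node-role.kubernetes.io/worker-dpdk: ""
```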
diff --git a/cmd/config/rds-core/bgpadvertisement.yml b/cmd/config/rds-core/bgpadvertisement.yml
new file mode 100644
index 00000000..6250fb6b
--- /dev/null
+++ b/cmd/config/rds-core/bgpadvertisement.yml
@@ -0,0 +1,8 @@
+apiVersion: metallb.io/v1beta1
+kind: BGPAdvertisement
+metadata:
+  name: bgpadvertisement-basic
+  namespace: metallb-system
+spec:
+  ipAddressPools:
+    - address-pool
diff --git a/cmd/config/rds-core/bgppeer.yml b/cmd/config/rds-core/bgppeer.yml
new file mode 100644
index 00000000..e15dfff8
--- /dev/null
+++ b/cmd/config/rds-core/bgppeer.yml
@@ -0,0 +1,10 @@
+apiVersion: metallb.io/v1beta2
+kind: BGPPeer
+metadata:
+  namespace: metallb-system
+  name: bgp-peer
+spec:
+  peerAddress: 10.0.0.1
+  peerASN: 64501
+  myASN: 64500
+  routerID: 10.10.10.10
diff --git a/cmd/config/rds-core/configmap.yml b/cmd/config/rds-core/configmap.yml
new file mode 100644
index 00000000..9c43ee50
--- /dev/null
+++ b/cmd/config/rds-core/configmap.yml
@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{.JobName}}-{{.Replica}}
+data:
+  key1: "{{randAlphaNum 2048}}"
diff --git a/cmd/config/rds-core/deployment-client.yml b/cmd/config/rds-core/deployment-client.yml
new file mode 100644
index 00000000..95f590dc
--- /dev/null
+++ b/cmd/config/rds-core/deployment-client.yml
@@ -0,0 +1,121 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: client-{{.Replica}}
+spec:
+  replicas: {{.podReplicas}}
+  selector:
+    matchLabels:
+      name: client-{{.Replica}}
+  template:
+    metadata:
+      labels:
+        name: client-{{.Replica}}
+        app: client
+    spec:
+      topologySpreadConstraints:
+      - maxSkew: 1
+        topologyKey: kubernetes.io/hostname
+        whenUnsatisfiable: ScheduleAnyway
+        labelSelector:
+          matchLabels:
+            app: client
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: node-role.kubernetes.io/worker
+                operator: Exists
+              - key: node-role.kubernetes.io/infra
+                operator: DoesNotExist
+              - key: node-role.kubernetes.io/workload
+                operator: DoesNotExist
+      containers:
+      - name: client-app
+        image: quay.io/cloud-bulldozer/curl:latest
+        command: ["sleep", "inf"]
+        resources:
+          requests:
+            memory: "10Mi"
+            cpu: "10m"
+        imagePullPolicy: IfNotPresent
+        securityContext:
+          privileged: false
+        readinessProbe:
+          exec:
+            command:
+            - "/bin/sh"
+            - "-c"
+            - "curl --fail -sS ${SERVICE_ENDPOINT} -o /dev/null && curl --fail -sSk ${ROUTE_ENDPOINT} -o /dev/null"
+          periodSeconds: 10
+          timeoutSeconds: 5
+          failureThreshold: 3
+        volumeMounts:
+        - name: secret-1
+          mountPath: /secret1
+        - name: secret-2
+          mountPath: /secret2
+        - name: secret-3
+          mountPath: /secret3
+        - name: secret-4
+          mountPath: /secret4
+        - name: configmap-1
+          mountPath: /configmap1
+        - name: configmap-2
+          mountPath: /configmap2
+        - name: configmap-3
+          mountPath: /configmap3
+        - name: configmap-4
+          mountPath: /configmap4
+        - name: podinfo
+          mountPath: /etc/podlabels
+        env:
+        - name: ENVVAR1
+          value: "{{randAlphaNum 250}}"
+        - name: ENVVAR2
+          value: "{{randAlphaNum 250}}"
+        - name: ENVVAR3
+          value: "{{randAlphaNum 250}}"
+        - name: ENVVAR4
+          value: "{{randAlphaNum 250}}"
+        - name: ROUTE_ENDPOINT
+          value: "https://rds-{{randInt 1 2}}-rds-{{.Iteration}}.{{ .ingressDomain }}/256.html"
+        - name: SERVICE_ENDPOINT
+          value: "http://rds-{{randInt 1 22}}/256.html"
+      volumes:
+      - name: secret-1
+        secret:
+          secretName: {{.JobName}}-1
+      - name: secret-2
+        secret:
+          secretName: {{.JobName}}-2
+      - name: secret-3
+        secret:
+          secretName: {{.JobName}}-3
+      - name: secret-4
+        secret:
+          secretName: {{.JobName}}-4
+      - name: configmap-1
+        configMap:
+          name: {{.JobName}}-1
+      - name: configmap-2
+        configMap:
+          name: {{.JobName}}-2
+      - name: configmap-3
+        configMap:
+          name: {{.JobName}}-3
+      - name: configmap-4
+        configMap:
+          name: {{.JobName}}-4
+      - name: podinfo
+        downwardAPI:
+          items:
+          - path: "labels"
+            fieldRef:
+              fieldPath: metadata.labels
+      restartPolicy: Always
+  strategy:
+    type: RollingUpdate
+
diff --git a/cmd/config/rds-core/deployment-dpdk.yml b/cmd/config/rds-core/deployment-dpdk.yml
new file mode 100644
index 00000000..ab99cf48
--- /dev/null
+++ b/cmd/config/rds-core/deployment-dpdk.yml
@@ -0,0 +1,75 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: dpdk-{{.Replica}}
+  labels:
+    group: load
+    svc: dpdk-{{.Replica}}
+spec:
+  replicas: {{.podReplicas}}
+  selector:
+    matchLabels:
+      name: dpdk-{{.Replica}}
+  template:
+    metadata:
+      labels:
+        group: load
+        name: dpdk-{{.Replica}}
+      annotations:
+        irq-load-balancing.crio.io: "disable"
+        cpu-load-balancing.crio.io: "disable"
+        cpu-quota.crio.io: "disable"
+        k8s.v1.cni.cncf.io/networks: '[
+          { "name": "sriov-net-{{ .Iteration }}-1" },
+          { "name": "sriov-net-{{ .Iteration }}-2" }
+        ]'
+    spec:
+      runtimeClassName: performance-{{.perf_profile}}
+      containers:
+      - name: dpdk
+        image: ghcr.io/abraham2512/fedora-stress-ng:master
+        imagePullPolicy: Always
+        # Requests and limits must be identical for the pod to be assigned the Guaranteed QoS class
+        resources:
+          requests:
+            cpu: {{.dpdk_cores}}
+            memory: 1024M
+            hugepages-1Gi: 16Gi
+          limits:
+            cpu: {{.dpdk_cores}}
+            memory: 1024M
+            hugepages-1Gi: 16Gi
+        env:
+        - name: stress_cpu
+          value: "4"
+        - name: stress_vm
+          value: "1"
+        - name: stress_vm-bytes
+          value: "512M"
+        volumeMounts:
+        - mountPath: /hugepages
+          name: hugepage
+      dnsPolicy: Default
+      terminationGracePeriodSeconds: 1
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: node-role.kubernetes.io/worker-dpdk
+                operator: Exists
+      # Add not-ready/unreachable tolerations for 15 minutes so that node
+      # failure doesn't trigger pod deletion.
+      tolerations:
+      - key: "node.kubernetes.io/not-ready"
+        operator: "Exists"
+        effect: "NoExecute"
+        tolerationSeconds: 900
+      - key: "node.kubernetes.io/unreachable"
+        operator: "Exists"
+        effect: "NoExecute"
+        tolerationSeconds: 900
+      volumes:
+      - name: hugepage
+        emptyDir:
+          medium: HugePages
\ No newline at end of file
diff --git a/cmd/config/rds-core/deployment-server.yml b/cmd/config/rds-core/deployment-server.yml
new file mode 100644
index 00000000..d1494b3e
--- /dev/null
+++ b/cmd/config/rds-core/deployment-server.yml
@@ -0,0 +1,106 @@
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: server-{{.Replica}}
+spec:
+  replicas: {{.podReplicas}}
+  selector:
+    matchLabels:
+      name: rds-{{.Replica}}
+  template:
+    metadata:
+      labels:
+        name: rds-{{.Replica}}
+        app: nginx
+    spec:
+      topologySpreadConstraints:
+      - maxSkew: 1
+        topologyKey: kubernetes.io/hostname
+        whenUnsatisfiable: ScheduleAnyway
+        labelSelector:
+          matchLabels:
+            app: nginx
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: node-role.kubernetes.io/worker
+                operator: Exists
+              - key: node-role.kubernetes.io/infra
+                operator: DoesNotExist
+              - key: node-role.kubernetes.io/workload
+                operator: DoesNotExist
+      containers:
+      - name: nginx # container name is required; "nginx" chosen to match the app label
+        image: quay.io/cloud-bulldozer/nginx:latest
+        resources:
+          requests:
+            memory: "25Mi"
+            cpu: "25m"
+        volumeMounts:
+        - name: secret-1
+          mountPath: /secret1
+        - name: secret-2
+          mountPath: /secret2
+        - name: secret-3
+          mountPath: /secret3
+        - name: secret-4
+          mountPath: /secret4
+        - name: configmap-1
+          mountPath: /configmap1
+        - name: configmap-2
+          mountPath: /configmap2
+        - name: configmap-3
+          mountPath: /configmap3
+        - name: configmap-4
+          mountPath: /configmap4
+        - name: podinfo
+          mountPath: /etc/podlabels
+        imagePullPolicy: IfNotPresent
+        ports:
+        - containerPort: 8080
+          protocol: TCP
+        - containerPort: 8443
+          protocol: TCP
+          name: rds
+        env:
+        - name: ENVVAR1
+          value: "{{randAlphaNum 250}}"
+        - name: ENVVAR2
+          value: "{{randAlphaNum 250}}"
+        - name: ENVVAR3
+          value: "{{randAlphaNum 250}}"
+        - name: ENVVAR4
+          value: "{{randAlphaNum 250}}"
+      volumes:
+      - name: secret-1
+        secret:
+          secretName: {{.JobName}}-1
+      - name: secret-2
+        secret:
+          secretName: {{.JobName}}-2
+      - name: secret-3
+        secret:
+          secretName: {{.JobName}}-3
+      - name: secret-4
+        secret:
+          secretName: {{.JobName}}-4
+      - name: configmap-1
+        configMap:
+          name: {{.JobName}}-1
+      - name: configmap-2
+        configMap:
+          name: {{.JobName}}-2
+      - name: configmap-3
+        configMap:
+          name: {{.JobName}}-3
+      - name: configmap-4
+        configMap:
+          name: {{.JobName}}-4
+      - name: podinfo
+        downwardAPI:
+          items:
+          - path: "labels"
+            fieldRef:
+              fieldPath: metadata.labels
diff --git a/cmd/config/rds-core/ipaddresspool.yml b/cmd/config/rds-core/ipaddresspool.yml
new file mode 100644
index 00000000..b1d05746
--- /dev/null
+++ b/cmd/config/rds-core/ipaddresspool.yml
@@ -0,0 +1,9 @@
+apiVersion: metallb.io/v1beta1
+kind: IPAddressPool
+metadata:
+  namespace: metallb-system
+  name: address-pool
+spec:
+  addresses:
+    - 203.0.113.200/20
+    - fc00:f853:ccd:e799::/120
diff --git a/cmd/config/rds-core/np-allow-from-clients.yml b/cmd/config/rds-core/np-allow-from-clients.yml
new file mode 100644
index 00000000..83523db1
--- /dev/null
+++ b/cmd/config/rds-core/np-allow-from-clients.yml
@@ -0,0 +1,22 @@
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: allow-from-clients-{{.Replica}}
+spec:
+  podSelector:
+    matchLabels:
+      app: nginx
+  ingress:
+  - from:
+    - namespaceSelector:
+        matchLabels:
+          kubernetes.io/metadata.name: rds-{{.Iteration}}
+      podSelector:
+        matchLabels:
+          app: client
+    - namespaceSelector:
+        matchLabels:
+          kubernetes.io/metadata.name: kube-burner-service-latency
+    ports:
+    - protocol: TCP
+      port: 8080
diff --git a/cmd/config/rds-core/np-allow-from-ingress.yml b/cmd/config/rds-core/np-allow-from-ingress.yml
new file mode 100644
index 00000000..8ca4a19a
--- /dev/null
+++ b/cmd/config/rds-core/np-allow-from-ingress.yml
@@ -0,0 +1,13 @@
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: allow-from-openshift-ingress
+spec:
+  podSelector: {} # apply to all pods in the namespace, matching np-deny-all
+  ingress:
+  - from:
+    - namespaceSelector:
+        matchLabels:
+          network.openshift.io/policy-group: ingress
+    ports:
+    - protocol: TCP
+      port: 8080
diff --git a/cmd/config/rds-core/np-deny-all.yml b/cmd/config/rds-core/np-deny-all.yml
new file mode 100644
index 00000000..e5a9a99d
--- /dev/null
+++ b/cmd/config/rds-core/np-deny-all.yml
@@ -0,0 +1,7 @@
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: deny-all
+spec:
+  podSelector: {}
+  ingress: []
diff --git a/cmd/config/rds-core/rds-core.yml b/cmd/config/rds-core/rds-core.yml
new file mode 100644
index 00000000..ad45e8e1
--- /dev/null
+++ b/cmd/config/rds-core/rds-core.yml
@@ -0,0 +1,117 @@
+---
+global:
+  gc: {{.GC}}
+  gcMetrics: {{.GC_METRICS}}
+  measurements:
+    - name: podLatency
+      thresholds:
+        - conditionType: Ready
+          metric: P99
+          threshold: {{.POD_READY_THRESHOLD}}
+{{ if eq .SVC_LATENCY "true" }}
+    - name: serviceLatency
+      svcTimeout: 1m
+{{ end }}
+metricsEndpoints:
+{{ if .ES_SERVER }}
+  - metrics: [{{.METRICS}}]
+    alerts: [{{.ALERTS}}]
+    indexer:
+      esServers: ["{{.ES_SERVER}}"]
+      insecureSkipVerify: true
+      defaultIndex: {{.ES_INDEX}}
+      type: opensearch
+{{ end }}
+{{ if eq .LOCAL_INDEXING "true" }}
+  - metrics: [{{.METRICS}}]
+    alerts: [{{.ALERTS}}]
+    indexer:
+      type: local
+      metricsDirectory: collected-metrics-{{.UUID}}
+{{ end }}
+
+jobs:
+  - name: bgp-setup
+    namespace: metallb-system
+    jobIterations: 1
+    qps: {{.QPS}}
+    burst: {{.BURST}}
+    namespacedIterations: false
+    objects:
+      - objectTemplate: ipaddresspool.yml
+        replicas: 1
+
+      - objectTemplate: bgpadvertisement.yml
+        replicas: 1
+
+      - objectTemplate: bgppeer.yml
+        replicas: 1
+
+  - name: rds
+    namespace: rds
+    jobIterations: {{.JOB_ITERATIONS}}
+    qps: {{.QPS}}
+    burst: {{.BURST}}
+    namespacedIterations: true
+    podWait: false
+    waitWhenFinished: true
+    preLoadImages: true
+    preLoadPeriod: 15s
+    churn: {{.CHURN}}
+    churnCycles: {{.CHURN_CYCLES}}
+    churnDuration: {{.CHURN_DURATION}}
+    churnPercent: {{.CHURN_PERCENT}}
+    churnDelay: {{.CHURN_DELAY}}
+    churnDeletionStrategy: {{.CHURN_DELETION_STRATEGY}}
+    namespaceLabels:
+      security.openshift.io/scc.podSecurityLabelSync: false
+      pod-security.kubernetes.io/enforce: privileged
+      pod-security.kubernetes.io/audit: privileged
+      pod-security.kubernetes.io/warn: privileged
+    objects:
+
+      - objectTemplate: secret.yml
+        replicas: 42
+
+      - objectTemplate: configmap.yml
+        replicas: 30
+
+      - objectTemplate: np-deny-all.yml
+        replicas: 1
+
+      - objectTemplate: np-allow-from-clients.yml
+        replicas: 1
+
+      - objectTemplate: np-allow-from-ingress.yml
+        replicas: 1
+
+      - objectTemplate: sriov-network.yml
+        replicas: 2
+
+      - objectTemplate: service.yml
+        replicas: 20
+
+      - objectTemplate: service-lb.yml
+        replicas: 1
+
+      - objectTemplate: route.yml
+        replicas: 2
+
+      - objectTemplate: deployment-server.yml
+        replicas: 15
+        inputVars:
+          podReplicas: 2
+
+      - objectTemplate: deployment-client.yml
+        replicas: 10
+        inputVars:
+          podReplicas: 2
+          ingressDomain: {{.INGRESS_DOMAIN}}
+
+      - objectTemplate: deployment-dpdk.yml
+        replicas: 2
+        inputVars:
+          podReplicas: 1
+          dpdk_cores: {{.DPDK_CORES}}
+          perf_profile: {{.PERF_PROFILE}}
+
diff --git a/cmd/config/rds-core/route.yml b/cmd/config/rds-core/route.yml
new file mode 100644
index 00000000..95421bbf
--- /dev/null
+++ b/cmd/config/rds-core/route.yml
@@ -0,0 +1,11 @@
+---
+kind: Route
+apiVersion: route.openshift.io/v1
+metadata:
+  name: rds-{{.Replica}}
+spec:
+  to:
+    kind: Service
+    name: rds-{{.Replica}}
+  tls:
+    termination: edge
diff --git a/cmd/config/rds-core/secret.yml b/cmd/config/rds-core/secret.yml
new file mode 100644
index 00000000..c8df8b9d
--- /dev/null
+++ b/cmd/config/rds-core/secret.yml
@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{.JobName}}-{{.Replica}}
+data:
+  top-secret: "{{randAlphaNum 2048}}"
diff --git a/cmd/config/rds-core/service-lb.yml b/cmd/config/rds-core/service-lb.yml
new file mode 100644
index 00000000..ec5d70d7
--- /dev/null
+++ b/cmd/config/rds-core/service-lb.yml
@@ -0,0 +1,25 @@
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: rds-{{.Replica}}
+  annotations:
+    metallb.universe.tf/address-pool: address-pool
+    kube-burner.io/service-latency: "false"
+spec:
+  selector:
+    app: nginx
+  ports:
+    - name: http
+      protocol: TCP
+      port: 80
+      targetPort: 8080
+    - name: http1
+      protocol: TCP
+      port: 81
+      targetPort: 8080
+    - name: http2
+      protocol: TCP
+      port: 82
+      targetPort: 8080
+  type: LoadBalancer
diff --git a/cmd/config/rds-core/service.yml b/cmd/config/rds-core/service.yml
new file mode 100644
index 00000000..fa9ede04
--- /dev/null
+++ b/cmd/config/rds-core/service.yml
@@ -0,0 +1,38 @@
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: rds-{{add .Replica 1}}
+spec:
+  selector:
+    app: nginx
+  type: ClusterIP
+  ports:
+    - name: http
+      protocol: TCP
+      port: 80
+      targetPort: 8080
+    - name: http1
+      protocol: TCP
+      port: 81
+      targetPort: 8080
+    - name: http2
+      protocol: TCP
+      port: 82
+      targetPort: 8080
+    - name: http3
+      protocol: TCP
+      port: 83
+      targetPort: 8080
+    - name: http4
+      protocol: TCP
+      port: 84
+      targetPort: 8080
+    - name: http5
+      protocol: TCP
+      port: 85
+      targetPort: 8080
+    - name: http6
+      protocol: TCP
+      port: 86
+      targetPort: 8080
diff --git a/cmd/config/rds-core/sriov-network.yml b/cmd/config/rds-core/sriov-network.yml
new file mode 100644
index 00000000..4641c077
--- /dev/null
+++ b/cmd/config/rds-core/sriov-network.yml
@@ -0,0 +1,19 @@
+apiVersion: sriovnetwork.openshift.io/v1
+kind: SriovNetwork
+metadata:
+  name: sriov-net-{{ .Iteration }}-{{ .Replica }}
+  namespace: openshift-sriov-network-operator
+spec:
+  ipam: |
+    {
+      "type": "static",
+      "addresses": [
+        {
+          "address": "10.1.54.0/24"
+        }
+      ]
+    }
+  spoofChk: "off"
+  trust: "on"
+  resourceName: intelnics2
+  networkNamespace: rds-{{ .Iteration }}
diff --git a/cmd/ocp.go b/cmd/ocp.go
index 7934efc8..f99b1e8c 100644
--- a/cmd/ocp.go
+++ b/cmd/ocp.go
@@ -118,6 +118,7 @@ func openShiftCmd() *cobra.Command {
 		ocp.NewIndex(&wh.MetricsEndpoint, &wh.MetadataAgent),
 		ocp.NewWorkersScale(&wh.MetricsEndpoint, &wh.MetadataAgent),
 		ocp.NewPVCDensity(&wh),
+		ocp.NewRDSCore(&wh),
 		ocp.NewWebBurner(&wh, "web-burner-init"),
 		ocp.NewWebBurner(&wh, "web-burner-node-density"),
 		ocp.NewWebBurner(&wh, "web-burner-cluster-density"),
diff --git a/rds-core.go b/rds-core.go
new file mode 100644
index 00000000..2b93adbd
--- /dev/null
+++ b/rds-core.go
@@ -0,0 +1,80 @@
+// Copyright 2024 The Kube-burner Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocp
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"time"
+
+	"github.com/kube-burner/kube-burner/pkg/workloads"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/spf13/cobra"
+)
+
+// NewRDSCore holds the rds-core workload
+func NewRDSCore(wh *workloads.WorkloadHelper) *cobra.Command {
+	var iterations, churnPercent, churnCycles, dpdkCores int
+	var churn, svcLatency bool
+	var churnDelay, churnDuration, podReadyThreshold time.Duration
+	var churnDeletionStrategy, perfProfile string
+	var metricsProfiles []string
+	var rc int
+	cmd := &cobra.Command{
+		Use:          "rds-core",
+		Short:        "Runs rds-core workload",
+		SilenceUsage: true,
+		PreRun: func(cmd *cobra.Command, args []string) {
+			os.Setenv("CHURN", fmt.Sprint(churn))
+			os.Setenv("CHURN_CYCLES", fmt.Sprintf("%v", churnCycles))
+			os.Setenv("CHURN_DURATION", fmt.Sprintf("%v", churnDuration))
+			os.Setenv("CHURN_DELAY", fmt.Sprintf("%v", churnDelay))
+			os.Setenv("CHURN_PERCENT", fmt.Sprint(churnPercent))
+			os.Setenv("CHURN_DELETION_STRATEGY", churnDeletionStrategy)
+			os.Setenv("DPDK_CORES", fmt.Sprint(dpdkCores))
+			os.Setenv("JOB_ITERATIONS", fmt.Sprint(iterations))
+			os.Setenv("PERF_PROFILE", perfProfile)
+			os.Setenv("POD_READY_THRESHOLD", fmt.Sprintf("%v", podReadyThreshold))
+			os.Setenv("SVC_LATENCY", strconv.FormatBool(svcLatency))
+			ingressDomain, err := wh.MetadataAgent.GetDefaultIngressDomain()
+			if err != nil {
+				log.Fatal("Error obtaining default ingress domain: ", err.Error())
+			}
+			os.Setenv("INGRESS_DOMAIN", ingressDomain)
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			setMetrics(cmd, metricsProfiles)
+			rc = wh.Run(cmd.Name())
+		},
+		PostRun: func(cmd *cobra.Command, args []string) {
+			os.Exit(rc)
+		},
+	}
+	cmd.Flags().BoolVar(&churn, "churn", true, "Enable churning")
+	cmd.Flags().IntVar(&churnCycles, "churn-cycles", 0, "Churn cycles to execute")
+	cmd.Flags().DurationVar(&churnDuration, "churn-duration", 1*time.Hour, "Churn duration")
+	cmd.Flags().DurationVar(&churnDelay, "churn-delay", 2*time.Minute, "Time to wait between each churn")
+	cmd.Flags().IntVar(&churnPercent, "churn-percent", 10, "Percentage of job iterations that kube-burner will churn each round")
+	cmd.Flags().StringVar(&churnDeletionStrategy, "churn-deletion-strategy", "default", "Churn deletion strategy to use")
+	cmd.Flags().IntVar(&dpdkCores, "dpdk-cores", 2, "Number of cores per DPDK pod")
+	cmd.Flags().IntVar(&iterations, "iterations", 0, "Number of iterations/namespaces")
+	cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics.yml"}, "Comma separated list of metrics profiles to use")
+	cmd.Flags().StringVar(&perfProfile, "perf-profile", "default", "Performance profile implemented in the cluster")
+	cmd.Flags().DurationVar(&podReadyThreshold, "pod-ready-threshold", 2*time.Minute, "Pod ready timeout threshold")
+	cmd.Flags().BoolVar(&svcLatency, "service-latency", false, "Enable service latency measurement")
+	return cmd
+}
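For reference, a usage sketch of the new subcommand, assuming the wrapper binary is invoked as `kube-burner-ocp`; the flags are the ones registered above and the values are purely illustrative:

```console
$ kube-burner-ocp rds-core --iterations=120 \
    --dpdk-cores=2 --perf-profile=default \
    --churn-duration=1h --churn-percent=10 \
    --service-latency
```

`--iterations` drives `JOB_ITERATIONS` (one `rds-N` namespace per iteration), while `--dpdk-cores` and `--perf-profile` are exported to the templates as `DPDK_CORES` and `PERF_PROFILE` for the DPDK deployments.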