diff --git a/.github/workflows/e2e-v1beta1-bluegreen-1.19.yaml b/.github/workflows/e2e-v1beta1-bluegreen-1.19.yaml new file mode 100644 index 00000000..36e7e5fd --- /dev/null +++ b/.github/workflows/e2e-v1beta1-bluegreen-1.19.yaml @@ -0,0 +1,146 @@ +name: E2E-V1Beta1-BlueGreen-1.19 + +on: + push: + branches: + - master + - release-* + pull_request: {} + workflow_dispatch: {} + +env: + # Common versions + GO_VERSION: '1.19' + KIND_IMAGE: 'kindest/node:v1.19.16' + KIND_CLUSTER_NAME: 'ci-testing' + +jobs: + + rollout: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + with: + submodules: true + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup Kind Cluster + uses: helm/kind-action@v1.2.0 + with: + node_image: ${{ env.KIND_IMAGE }} + cluster_name: ${{ env.KIND_CLUSTER_NAME }} + config: ./test/kind-conf.yaml + - name: Build image + run: | + export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}" + docker build --pull --no-cache . -t $IMAGE + kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; } + - name: Install Kruise + run: | + set -ex + kubectl cluster-info + make helm + helm repo add openkruise https://openkruise.github.io/charts/ + helm repo update + helm install kruise openkruise/kruise + for ((i=1;i<10;i++)); + do + set +e + PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l) + set -e + if [ "$PODS" -eq "2" ]; then + break + fi + sleep 3 + done + set +e + PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l) + set -e + if [ "$PODS" -eq "2" ]; then + echo "Wait for kruise-manager ready successfully" + else + echo "Timeout to wait for kruise-manager ready" + exit 1 + fi + - name: Install Kruise Rollout + run: | + set -ex + kubectl cluster-info + IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh + for ((i=1;i<10;i++)); + do + set +e + PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l) + set -e + if [ "$PODS" -eq "1" ]; then + break + fi + sleep 3 + done + set +e + PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l) + kubectl get node -o yaml + kubectl get all -n kruise-rollout -o yaml + set -e + if [ "$PODS" -eq "1" ]; then + echo "Wait for kruise-rollout ready successfully" + else + echo "Timeout to wait for kruise-rollout ready" + exit 1 + fi + - name: Bluegreen Release Disable HPA + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='bluegreen disable hpa test case - autoscaling/v1 for v1.19' test/e2e + retVal=$? + # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal + - name: Deployment Bluegreen Release + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Deployment - Ingress' test/e2e + retVal=$? 
+ # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal + - name: CloneSet Bluegreen Release + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Cloneset - Ingress' test/e2e + retVal=$? + # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal diff --git a/.github/workflows/e2e-v1beta1-bluegreen-1.23.yaml b/.github/workflows/e2e-v1beta1-bluegreen-1.23.yaml new file mode 100644 index 00000000..07035edb --- /dev/null +++ b/.github/workflows/e2e-v1beta1-bluegreen-1.23.yaml @@ -0,0 +1,146 @@ +name: E2E-V1Beta1-BlueGreen-1.23 + +on: + push: + branches: + - master + - release-* + pull_request: {} + workflow_dispatch: {} + +env: + # Common versions + GO_VERSION: '1.19' + KIND_IMAGE: 'kindest/node:v1.23.3' + KIND_CLUSTER_NAME: 'ci-testing' + +jobs: + + rollout: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + with: + submodules: true + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup Kind Cluster + uses: helm/kind-action@v1.2.0 + with: + node_image: ${{ env.KIND_IMAGE }} + cluster_name: ${{ env.KIND_CLUSTER_NAME }} + config: ./test/kind-conf.yaml + - name: Build image + run: | + export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}" + docker build --pull --no-cache . 
-t $IMAGE + kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; } + - name: Install Kruise + run: | + set -ex + kubectl cluster-info + make helm + helm repo add openkruise https://openkruise.github.io/charts/ + helm repo update + helm install kruise openkruise/kruise + for ((i=1;i<10;i++)); + do + set +e + PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l) + set -e + if [ "$PODS" -eq "2" ]; then + break + fi + sleep 3 + done + set +e + PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l) + set -e + if [ "$PODS" -eq "2" ]; then + echo "Wait for kruise-manager ready successfully" + else + echo "Timeout to wait for kruise-manager ready" + exit 1 + fi + - name: Install Kruise Rollout + run: | + set -ex + kubectl cluster-info + IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh + for ((i=1;i<10;i++)); + do + set +e + PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l) + set -e + if [ "$PODS" -eq "1" ]; then + break + fi + sleep 3 + done + set +e + PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l) + kubectl get node -o yaml + kubectl get all -n kruise-rollout -o yaml + set -e + if [ "$PODS" -eq "1" ]; then + echo "Wait for kruise-rollout ready successfully" + else + echo "Timeout to wait for kruise-rollout ready" + exit 1 + fi + - name: Bluegreen Release Disable HPA + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='bluegreen delete rollout case - autoscaling/v2 for v1.23' test/e2e + retVal=$? + # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal + - name: Deployment Bluegreen Release + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Deployment - Ingress' test/e2e + retVal=$? + # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal + - name: CloneSet Bluegreen Release + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Cloneset - Ingress' test/e2e + retVal=$? 
+ # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal \ No newline at end of file diff --git a/.github/workflows/e2e-v1beta1-1.19.yaml b/.github/workflows/e2e-v1beta1-jump-1.19.yaml similarity index 99% rename from .github/workflows/e2e-v1beta1-1.19.yaml rename to .github/workflows/e2e-v1beta1-jump-1.19.yaml index 6ec0e39d..9b0d9c7e 100644 --- a/.github/workflows/e2e-v1beta1-1.19.yaml +++ b/.github/workflows/e2e-v1beta1-jump-1.19.yaml @@ -1,4 +1,4 @@ -name: E2E-V1Beta1-1.19 +name: E2E-V1Beta1-JUMP-1.19 on: push: diff --git a/.github/workflows/e2e-v1beta1-1.23.yaml b/.github/workflows/e2e-v1beta1-jump-1.23.yaml similarity index 99% rename from .github/workflows/e2e-v1beta1-1.23.yaml rename to .github/workflows/e2e-v1beta1-jump-1.23.yaml index 8dad2a8b..fd56bf70 100644 --- a/.github/workflows/e2e-v1beta1-1.23.yaml +++ b/.github/workflows/e2e-v1beta1-jump-1.23.yaml @@ -1,4 +1,4 @@ -name: E2E-V1Beta1-1.23 +name: E2E-V1Beta1-JUMP-1.23 on: push: diff --git a/api/v1alpha1/conversion.go b/api/v1alpha1/conversion.go index 28ad4a9c..442218ff 100644 --- a/api/v1alpha1/conversion.go +++ b/api/v1alpha1/conversion.go @@ -172,7 +172,9 @@ func (dst *Rollout) ConvertFrom(src conversion.Hub) error { srcV1beta1 := src.(*v1beta1.Rollout) dst.ObjectMeta = srcV1beta1.ObjectMeta if !srcV1beta1.Spec.Strategy.IsCanaryStragegy() { - return fmt.Errorf("v1beta1 Rollout with %s strategy cannot be converted to v1alpha1", srcV1beta1.Spec.Strategy.GetRollingStyle()) + // only v1beta1 supports bluegreen strategy + // Don't log the message because it will print too often + return nil } // spec dst.Spec = RolloutSpec{ diff --git a/api/v1beta1/batchrelease_plan_types.go b/api/v1beta1/batchrelease_plan_types.go index e72a9941..a1472946 100644 --- a/api/v1beta1/batchrelease_plan_types.go +++ b/api/v1beta1/batchrelease_plan_types.go @@ -117,6 +117,8 @@ type BatchReleaseStatus struct { // Phase is the release plan phase, which indicates the current state of release // plan state machine in BatchRelease controller. Phase RolloutPhase `json:"phase,omitempty"` + // Message provides details on why the rollout is in its current phase + Message string `json:"message,omitempty"` } type BatchReleaseCanaryStatus struct { diff --git a/api/v1beta1/deployment_types.go b/api/v1beta1/deployment_types.go index 9975e989..5002fd82 100644 --- a/api/v1beta1/deployment_types.go +++ b/api/v1beta1/deployment_types.go @@ -62,31 +62,6 @@ type DeploymentStrategy struct { Partition intstr.IntOrString `json:"partition,omitempty"` } -// OriginalDeploymentStrategy stores part of the fileds of a workload, -// so that it can be restored when finalizing. -// It is only used for BlueGreen Release -// Similar to DeploymentStrategy, it is an annotation used in workload -// However, unlike DeploymentStrategy, it is only used to store and restore the user's strategy -type OriginalDeploymentStrategy struct { - // The deployment strategy to use to replace existing pods with new ones. 
- // +optional - // +patchStrategy=retainKeys - Strategy *apps.DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. Defaults to 600s. - ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` -} - type RollingStyleType string const ( @@ -138,44 +113,3 @@ func SetDefaultDeploymentStrategy(strategy *DeploymentStrategy) { } } } - -func SetDefaultSetting(setting *OriginalDeploymentStrategy) { - if setting.ProgressDeadlineSeconds == nil { - setting.ProgressDeadlineSeconds = new(int32) - *setting.ProgressDeadlineSeconds = 600 - } - if setting.Strategy == nil { - setting.Strategy = &apps.DeploymentStrategy{} - } - if setting.Strategy.Type == "" { - setting.Strategy.Type = apps.RollingUpdateDeploymentStrategyType - } - if setting.Strategy.Type == apps.RecreateDeploymentStrategyType { - return - } - strategy := setting.Strategy - if strategy.RollingUpdate == nil { - strategy.RollingUpdate = &apps.RollingUpdateDeployment{} - } - if strategy.RollingUpdate.MaxUnavailable == nil { - // Set MaxUnavailable as 25% by default - maxUnavailable := intstr.FromString("25%") - strategy.RollingUpdate.MaxUnavailable = &maxUnavailable - } - if strategy.RollingUpdate.MaxSurge == nil { - // Set MaxSurge as 25% by default - maxSurge := intstr.FromString("25%") - strategy.RollingUpdate.MaxUnavailable = &maxSurge - } - - // Cannot allow maxSurge==0 && MaxUnavailable==0, otherwise, no pod can be updated when rolling update. - maxSurge, _ := intstr.GetScaledValueFromIntOrPercent(strategy.RollingUpdate.MaxSurge, 100, true) - maxUnavailable, _ := intstr.GetScaledValueFromIntOrPercent(strategy.RollingUpdate.MaxUnavailable, 100, true) - if maxSurge == 0 && maxUnavailable == 0 { - strategy.RollingUpdate = &apps.RollingUpdateDeployment{ - MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: 0}, - MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, - } - } - -} diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index a75b92b7..0cbeea32 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -422,31 +422,6 @@ func (in *ObjectRef) DeepCopy() *ObjectRef { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OriginalDeploymentStrategy) DeepCopyInto(out *OriginalDeploymentStrategy) { - *out = *in - if in.Strategy != nil { - in, out := &in.Strategy, &out.Strategy - *out = new(v1.DeploymentStrategy) - (*in).DeepCopyInto(*out) - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginalDeploymentStrategy. -func (in *OriginalDeploymentStrategy) DeepCopy() *OriginalDeploymentStrategy { - if in == nil { - return nil - } - out := new(OriginalDeploymentStrategy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PatchPodTemplateMetadata) DeepCopyInto(out *PatchPodTemplateMetadata) { *out = *in diff --git a/config/crd/bases/rollouts.kruise.io_batchreleases.yaml b/config/crd/bases/rollouts.kruise.io_batchreleases.yaml index c14d26ca..34d8a03d 100644 --- a/config/crd/bases/rollouts.kruise.io_batchreleases.yaml +++ b/config/crd/bases/rollouts.kruise.io_batchreleases.yaml @@ -507,6 +507,10 @@ spec: - type type: object type: array + message: + description: Message provides details on why the rollout is in its + current phase + type: string observedGeneration: description: ObservedGeneration is the most recent generation observed for this BatchRelease. It corresponds to this BatchRelease's generation, diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 2a094c61..dbe28b37 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -161,6 +161,18 @@ rules: - get - patch - update +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: diff --git a/pkg/controller/batchrelease/batchrelease_controller.go b/pkg/controller/batchrelease/batchrelease_controller.go index 2eaf2faa..1cfcdbbb 100644 --- a/pkg/controller/batchrelease/batchrelease_controller.go +++ b/pkg/controller/batchrelease/batchrelease_controller.go @@ -148,6 +148,7 @@ type BatchReleaseReconciler struct { // +kubebuilder:rbac:groups=apps.kruise.io,resources=statefulsets/status,verbs=get;update;patch // +kubebuilder:rbac:groups=apps.kruise.io,resources=daemonsets,verbs=get;list;watch;update;patch // +kubebuilder:rbac:groups=apps.kruise.io,resources=daemonsets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete // Reconcile reads that state of the cluster for a Rollout object and makes changes based on the state read // and what is in the Rollout.Spec diff --git a/pkg/controller/batchrelease/batchrelease_executor.go b/pkg/controller/batchrelease/batchrelease_executor.go index 5082817d..2f54fc3c 100644 --- a/pkg/controller/batchrelease/batchrelease_executor.go +++ b/pkg/controller/batchrelease/batchrelease_executor.go @@ -24,6 +24,9 @@ import ( appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" "github.com/openkruise/rollouts/api/v1beta1" "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle" + bgcloneset "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/cloneset" + bgdeplopyment 
"github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/deployment" "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/canarystyle" canarydeployment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/canarystyle/deployment" "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle" @@ -32,6 +35,7 @@ import ( partitiondeployment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle/deployment" "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle/statefulset" "github.com/openkruise/rollouts/pkg/util" + "github.com/openkruise/rollouts/pkg/util/errors" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -145,7 +149,11 @@ func (r *Executor) progressBatches(release *v1beta1.BatchRelease, newStatus *v1b switch { case err == nil: result = reconcile.Result{RequeueAfter: DefaultDuration} + removeProgressingCondition(newStatus) newStatus.CanaryStatus.CurrentBatchState = v1beta1.VerifyingBatchState + case errors.IsFatal(err): + progressingStateTransition(newStatus, v1.ConditionTrue, v1beta1.ProgressingReasonInRolling, err.Error()) + fallthrough default: klog.Warningf("Failed to upgrade %v, err %v", klog.KObj(release), err) } @@ -204,14 +212,14 @@ func (r *Executor) getReleaseController(release *v1beta1.BatchRelease, newStatus klog.Infof("BatchRelease(%v) using %s-style release controller for this batch release", klog.KObj(release), rollingStyle) switch rollingStyle { case v1beta1.BlueGreenRollingStyle: - // if targetRef.APIVersion == appsv1alpha1.GroupVersion.String() && targetRef.Kind == reflect.TypeOf(appsv1alpha1.CloneSet{}).Name() { - // klog.InfoS("Using CloneSet bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace) - // return partitionstyle.NewControlPlane(cloneset.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil - // } - // if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() { - // klog.InfoS("Using Deployment bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace) - // return bluegreenstyle.NewControlPlane(deployment.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil - // } + if targetRef.APIVersion == appsv1alpha1.GroupVersion.String() && targetRef.Kind == reflect.TypeOf(appsv1alpha1.CloneSet{}).Name() { + klog.InfoS("Using CloneSet bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace) + return bluegreenstyle.NewControlPlane(bgcloneset.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil + } + if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() { + klog.InfoS("Using Deployment bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace) + return bluegreenstyle.NewControlPlane(bgdeplopyment.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil + } case v1beta1.CanaryRollingStyle: if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() { @@ -257,3 +265,23 @@ 
func isPartitioned(release *v1beta1.BatchRelease) bool {
 	return release.Spec.ReleasePlan.BatchPartition != nil &&
 		*release.Spec.ReleasePlan.BatchPartition <= release.Status.CanaryStatus.CurrentBatch
 }
+
+func progressingStateTransition(status *v1beta1.BatchReleaseStatus, condStatus v1.ConditionStatus, reason, message string) {
+	cond := util.GetBatchReleaseCondition(*status, v1beta1.RolloutConditionProgressing)
+	if cond == nil {
+		cond = util.NewRolloutCondition(v1beta1.RolloutConditionProgressing, condStatus, reason, message)
+	} else {
+		cond.Status = condStatus
+		cond.Reason = reason
+		if message != "" {
+			cond.Message = message
+		}
+	}
+	util.SetBatchReleaseCondition(status, *cond)
+	status.Message = cond.Message
+}
+
+func removeProgressingCondition(status *v1beta1.BatchReleaseStatus) {
+	util.RemoveBatchReleaseCondition(status, v1beta1.RolloutConditionProgressing)
+	status.Message = ""
+}
diff --git a/pkg/controller/batchrelease/context/context.go b/pkg/controller/batchrelease/context/context.go
index 2c428180..6ad325b0 100644
--- a/pkg/controller/batchrelease/context/context.go
+++ b/pkg/controller/batchrelease/context/context.go
@@ -61,6 +61,9 @@ type BatchContext struct {
 	Pods []*corev1.Pod `json:"-"`
 	// filter or sort pods before patch label
 	FilterFunc FilterFuncType `json:"-"`
+	// the next two fields are only used for bluegreen style
+	CurrentSurge intstr.IntOrString `json:"currentSurge,omitempty"`
+	DesiredSurge intstr.IntOrString `json:"desiredSurge,omitempty"`
 }
 
 type FilterFuncType func(pods []*corev1.Pod, ctx *BatchContext) []*corev1.Pod
diff --git a/pkg/controller/batchrelease/control/apis.go b/pkg/controller/batchrelease/control/apis.go
new file mode 100644
index 00000000..43ccd268
--- /dev/null
+++ b/pkg/controller/batchrelease/control/apis.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2022 The Kruise Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package control
+
+import "k8s.io/apimachinery/pkg/util/intstr"
+
+// OriginalDeploymentStrategy stores part of the fields of a workload,
+// so that it can be restored when finalizing.
+// It is only used for BlueGreen Release.
+// Similar to DeploymentStrategy, it is stored in a workload annotation;
+// however, unlike DeploymentStrategy, it is only used to store and restore the user's strategy.
+type OriginalDeploymentStrategy struct {
+	// +optional
+	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+	// +optional
+	MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its container crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"`
+	// The maximum time in seconds for a deployment to make progress before it
+	// is considered to be failed. 
The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control.go b/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control.go new file mode 100644 index 00000000..d52d6638 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control.go @@ -0,0 +1,239 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloneset + +import ( + "context" + "fmt" + + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + "github.com/openkruise/rollouts/api/v1beta1" + batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/hpa" + "github.com/openkruise/rollouts/pkg/util" + "github.com/openkruise/rollouts/pkg/util/errors" + "github.com/openkruise/rollouts/pkg/util/patch" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type realController struct { + *util.WorkloadInfo + client client.Client + pods []*corev1.Pod + key types.NamespacedName + object *kruiseappsv1alpha1.CloneSet +} + +func NewController(cli client.Client, key types.NamespacedName, _ schema.GroupVersionKind) bluegreenstyle.Interface { + return &realController{ + key: key, + client: cli, + } +} + +func (rc *realController) GetWorkloadInfo() *util.WorkloadInfo { + return rc.WorkloadInfo +} + +func (rc *realController) BuildController() (bluegreenstyle.Interface, error) { + if rc.object != nil { + return rc, nil + } + object := &kruiseappsv1alpha1.CloneSet{} + if err := rc.client.Get(context.TODO(), rc.key, object); err != nil { + return rc, err + } + rc.object = object + rc.WorkloadInfo = util.ParseWorkload(object) + return rc, nil +} + +func (rc *realController) ListOwnedPods() ([]*corev1.Pod, error) { + if rc.pods != nil { + return rc.pods, nil + } + var err error + rc.pods, err = util.ListOwnedPods(rc.client, rc.object) + return rc.pods, err +} + +func (rc *realController) Initialize(release *v1beta1.BatchRelease) error { + klog.Info("Initialize cloneset controller") + if rc.object == nil || control.IsControlledByBatchRelease(release, rc.object) { + return nil + } + + // disable the hpa + if err 
:= hpa.DisableHPA(rc.client, rc.object); err != nil {
+		return err
+	}
+	klog.InfoS("Initialize: disable hpa for cloneset successfully", "cloneset", klog.KObj(rc.object))
+
+	// patch the cloneset
+	setting, err := control.GetOriginalSetting(rc.object)
+	if err != nil {
+		return errors.NewFatalError(fmt.Errorf("cannot get original setting for cloneset %v: %s from annotation", klog.KObj(rc.object), err.Error()))
+	}
+	control.InitOriginalSetting(&setting, rc.object)
+	patchData := patch.NewClonesetPatch()
+	patchData.InsertAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation, util.DumpJSON(&setting))
+	patchData.InsertAnnotation(util.BatchReleaseControlAnnotation, util.DumpJSON(metav1.NewControllerRef(
+		release, release.GetObjectKind().GroupVersionKind())))
+	// we use partition = 100% to function as "paused" instead of setting the paused field to true;
+	// it is mainly to keep consistency with partition style (partition is already set as 100% in webhook)
+	patchData.UpdatePaused(false)
+	maxSurge := intstr.FromInt(1) // select the minimum positive number as initial value
+	maxUnavailable := intstr.FromInt(0)
+	patchData.UpdateMaxSurge(&maxSurge)
+	patchData.UpdateMaxUnavailable(&maxUnavailable)
+	patchData.UpdateMinReadySeconds(v1beta1.MaxReadySeconds)
+	klog.InfoS("Initialize: try to update cloneset", "cloneset", klog.KObj(rc.object), "patchData", patchData.String())
+	return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
+}
+
+func (rc *realController) UpgradeBatch(ctx *batchcontext.BatchContext) error {
+	if err := control.ValidateReadyForBlueGreenRelease(rc.object); err != nil {
+		return errors.NewFatalError(fmt.Errorf("cannot upgrade batch, because cloneset %v doesn't satisfy conditions: %s", klog.KObj(rc.object), err.Error()))
+	}
+	desired, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.DesiredSurge, int(ctx.Replicas), true)
+	current, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.CurrentSurge, int(ctx.Replicas), true)
+	if current >= desired {
+		klog.Infof("No need to upgrade batch for cloneset %v: because current %d >= desired %d", klog.KObj(rc.object), current, desired)
+		return nil
+	} else {
+		klog.Infof("Will update batch for cloneset %v: current %d < desired %d", klog.KObj(rc.object), current, desired)
+	}
+	patchData := patch.NewClonesetPatch()
+	// avoid interference from partition
+	patchData.UpdatePartiton(nil)
+	patchData.UpdateMaxSurge(&ctx.DesiredSurge)
+	return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
+}
+
+func (rc *realController) Finalize(release *v1beta1.BatchRelease) error {
+	if rc.finalized() {
+		return nil // No need to finalize again
+	}
+
+	if release.Spec.ReleasePlan.BatchPartition != nil {
+		// continuous release (not supported yet)
+		/*
+			patchData := patch.NewClonesetPatch()
+			patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation)
+			return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData)
+		*/
+		klog.Warningf("continuous release is not supported yet for bluegreen style release")
+		return nil
+	}
+
+	c := util.GetEmptyObjectWithKey(rc.object)
+	setting, err := control.GetOriginalSetting(rc.object)
+	if err != nil {
+		return errors.NewFatalError(fmt.Errorf("cannot get original setting for cloneset %v: %s from annotation", klog.KObj(rc.object), err.Error()))
+	}
+	patchData := patch.NewClonesetPatch()
+	// why do we need a simple MinReadySeconds-based state machine? (i.e. 
the if-else block)
+	// It's possible for Finalize to be called multiple times, if the returned error is not nil.
+	// if we do all needed operations in a single code block, like A->B->C, when C needs a retry,
+	// both A and B will be executed as well; however, operations like restoreHPA cost a lot (which calls the LIST API)
+	if rc.object.Spec.MinReadySeconds != setting.MinReadySeconds {
+		// restore the hpa
+		if err := hpa.RestoreHPA(rc.client, rc.object); err != nil {
+			return err
+		}
+		// restore the original setting
+		patchData.UpdateMinReadySeconds(setting.MinReadySeconds)
+		patchData.UpdateMaxSurge(setting.MaxSurge)
+		patchData.UpdateMaxUnavailable(setting.MaxUnavailable)
+		if err := rc.client.Patch(context.TODO(), c, patchData); err != nil {
+			return err
+		}
+		// we should return an error to trigger re-enqueue, so that we can go to the next if-else branch in the next reconcile
+		return errors.NewBenignError(fmt.Errorf("cloneset bluegreen: we should wait all pods updated and available"))
+	} else {
+		klog.InfoS("Finalize: cloneset bluegreen release: wait all pods updated and ready", "cloneset", klog.KObj(rc.object))
+		// wait all pods updated and ready
+		if rc.object.Status.ReadyReplicas != rc.object.Status.UpdatedReadyReplicas {
+			return errors.NewBenignError(fmt.Errorf("cloneset %v finalize not done, readyReplicas %d != updatedReadyReplicas %d, current policy %s",
+				klog.KObj(rc.object), rc.object.Status.ReadyReplicas, rc.object.Status.UpdatedReadyReplicas, release.Spec.ReleasePlan.FinalizingPolicy))
+		}
+		klog.InfoS("Finalize: cloneset bluegreen release: all pods updated and ready")
+		// restore annotation
+		patchData.DeleteAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation)
+		patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation)
+		return rc.client.Patch(context.TODO(), c, patchData)
+	}
+}
+
+func (rc *realController) finalized() bool {
+	if rc.object == nil || rc.object.DeletionTimestamp != nil {
+		return true
+	}
+	if rc.object.Annotations == nil || len(rc.object.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]) == 0 {
+		return true
+	}
+	return false
+}
+
+// bluegreen doesn't support rollback in batch, because:
+// - bluegreen supports traffic rollback instead, so rollback in batch is not necessary
+// - it's difficult for both Deployment and CloneSet to support rollback in batch, with the "minReadySeconds" implementation
+func (rc *realController) CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error) {
+	// current batch index
+	currentBatch := release.Status.CanaryStatus.CurrentBatch
+	// the number of expected updated pods
+	desiredSurge := release.Spec.ReleasePlan.Batches[currentBatch].CanaryReplicas
+	// the number of current updated pods
+	currentSurge := intstr.FromInt(0)
+	if rc.object.Spec.UpdateStrategy.MaxSurge != nil {
+		currentSurge = *rc.object.Spec.UpdateStrategy.MaxSurge
+		if currentSurge == intstr.FromInt(1) {
+			// currentSurge == intstr.FromInt(1) means that currentSurge is the initial value
+			// if the value is indeed set by the user, setting it to 0 still does no harm
+			currentSurge = intstr.FromInt(0)
+		}
+	}
+	desired, _ := intstr.GetScaledValueFromIntOrPercent(&desiredSurge, int(rc.Replicas), true)
+
+	batchContext := &batchcontext.BatchContext{
+		Pods:           rc.pods,
+		RolloutID:      release.Spec.ReleasePlan.RolloutID,
+		CurrentBatch:   currentBatch,
+		UpdateRevision: release.Status.UpdateRevision,
+		DesiredSurge:   desiredSurge,
+		CurrentSurge:   currentSurge,
+		// the following fields are used to check if the batch is ready
+		
Replicas: rc.Replicas, + UpdatedReplicas: rc.Status.UpdatedReplicas, + UpdatedReadyReplicas: rc.Status.UpdatedReadyReplicas, + DesiredUpdatedReplicas: int32(desired), + PlannedUpdatedReplicas: int32(desired), + } + // the number of no need update pods that marked before rollout + // if noNeedUpdate := release.Status.CanaryStatus.NoNeedUpdateReplicas; noNeedUpdate != nil { + // batchContext.FilterFunc = labelpatch.FilterPodsForUnorderedUpdate + // } + return batchContext, nil +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control_test.go b/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control_test.go new file mode 100644 index 00000000..0df06492 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control_test.go @@ -0,0 +1,394 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloneset + +import ( + "context" + "encoding/json" + "reflect" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + rolloutapi "github.com/openkruise/rollouts/api" + "github.com/openkruise/rollouts/api/v1beta1" + batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context" + control "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/util" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + scheme = runtime.NewScheme() + + cloneKey = types.NamespacedName{ + Namespace: "default", + Name: "cloneset", + } + cloneDemo = &kruiseappsv1alpha1.CloneSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: cloneKey.Name, + Namespace: cloneKey.Namespace, + Generation: 1, + Labels: map[string]string{ + "app": "busybox", + }, + Annotations: map[string]string{ + "type": "unit-test", + }, + }, + Spec: kruiseappsv1alpha1.CloneSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "busybox", + }, + }, + Replicas: pointer.Int32(10), + UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{ + Paused: true, + Partition: &intstr.IntOrString{Type: intstr.String, StrVal: "0%"}, + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "busybox", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox:latest", + }, + }, + }, + }, + }, + Status: kruiseappsv1alpha1.CloneSetStatus{ + Replicas: 10, + UpdatedReplicas: 0, + ReadyReplicas: 10, + 
AvailableReplicas: 10, + UpdatedReadyReplicas: 0, + UpdateRevision: "version-2", + CurrentRevision: "version-1", + ObservedGeneration: 1, + CollisionCount: pointer.Int32Ptr(1), + }, + } + + releaseDemo = &v1beta1.BatchRelease{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rollouts.kruise.io/v1alpha1", + Kind: "BatchRelease", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "release", + Namespace: cloneKey.Namespace, + UID: uuid.NewUUID(), + }, + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + Batches: []v1beta1.ReleaseBatch{ + { + CanaryReplicas: intstr.FromString("10%"), + }, + { + CanaryReplicas: intstr.FromString("50%"), + }, + { + CanaryReplicas: intstr.FromString("100%"), + }, + }, + }, + WorkloadRef: v1beta1.ObjectRef{ + APIVersion: cloneDemo.APIVersion, + Kind: cloneDemo.Kind, + Name: cloneDemo.Name, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 0, + }, + }, + } +) + +func init() { + apps.AddToScheme(scheme) + rolloutapi.AddToScheme(scheme) + kruiseappsv1alpha1.AddToScheme(scheme) +} + +func TestCalculateBatchContext(t *testing.T) { + RegisterFailHandler(Fail) + cases := map[string]struct { + workload func() *kruiseappsv1alpha1.CloneSet + release func() *v1beta1.BatchRelease + result *batchcontext.BatchContext + }{ + "normal case batch0": { + workload: func() *kruiseappsv1alpha1.CloneSet { + return &kruiseappsv1alpha1.CloneSet{ + Spec: kruiseappsv1alpha1.CloneSetSpec{ + Replicas: pointer.Int32Ptr(10), + UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{ + MaxSurge: func() *intstr.IntOrString { p := intstr.FromInt(1); return &p }(), + }, + }, + Status: kruiseappsv1alpha1.CloneSetStatus{ + Replicas: 10, + UpdatedReplicas: 0, + UpdatedReadyReplicas: 0, + AvailableReplicas: 10, + }, + } + }, + release: func() *v1beta1.BatchRelease { + r := &v1beta1.BatchRelease{ + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + Batches: []v1beta1.ReleaseBatch{ + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + }, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 0, + }, + UpdateRevision: "update-version", + }, + } + return r + }, + result: &batchcontext.BatchContext{ + CurrentBatch: 0, + DesiredSurge: intstr.FromString("50%"), + CurrentSurge: intstr.FromInt(0), + Replicas: 10, + UpdatedReplicas: 0, + UpdatedReadyReplicas: 0, + UpdateRevision: "update-version", + PlannedUpdatedReplicas: 5, + DesiredUpdatedReplicas: 5, + }, + }, + + "normal case batch1": { + workload: func() *kruiseappsv1alpha1.CloneSet { + return &kruiseappsv1alpha1.CloneSet{ + Spec: kruiseappsv1alpha1.CloneSetSpec{ + Replicas: pointer.Int32(10), + UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{ + MaxSurge: func() *intstr.IntOrString { p := intstr.FromString("50%"); return &p }(), + }, + }, + Status: kruiseappsv1alpha1.CloneSetStatus{ + Replicas: 15, + UpdatedReplicas: 5, + UpdatedReadyReplicas: 5, + AvailableReplicas: 10, + }, + } + }, + release: func() *v1beta1.BatchRelease { + r := &v1beta1.BatchRelease{ + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + Batches: []v1beta1.ReleaseBatch{ + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + 
{CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + }, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 1, + }, + UpdateRevision: "update-version", + }, + } + return r + }, + result: &batchcontext.BatchContext{ + CurrentBatch: 1, + DesiredSurge: intstr.FromString("100%"), + CurrentSurge: intstr.FromString("50%"), + Replicas: 10, + UpdatedReplicas: 5, + UpdatedReadyReplicas: 5, + UpdateRevision: "update-version", + PlannedUpdatedReplicas: 10, + DesiredUpdatedReplicas: 10, + }, + }, + "normal case batch2": { + workload: func() *kruiseappsv1alpha1.CloneSet { + return &kruiseappsv1alpha1.CloneSet{ + Spec: kruiseappsv1alpha1.CloneSetSpec{ + Replicas: pointer.Int32Ptr(10), + UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{ + MaxSurge: func() *intstr.IntOrString { p := intstr.FromString("100%"); return &p }(), + }, + }, + Status: kruiseappsv1alpha1.CloneSetStatus{ + Replicas: 20, + UpdatedReplicas: 10, + UpdatedReadyReplicas: 10, + AvailableReplicas: 10, + ReadyReplicas: 20, + }, + } + }, + release: func() *v1beta1.BatchRelease { + r := &v1beta1.BatchRelease{ + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + Batches: []v1beta1.ReleaseBatch{ + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + }, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 2, + }, + UpdateRevision: "update-version", + }, + } + return r + }, + result: &batchcontext.BatchContext{ + CurrentBatch: 2, + UpdateRevision: "update-version", + DesiredSurge: intstr.FromString("100%"), + CurrentSurge: intstr.FromString("100%"), + Replicas: 10, + UpdatedReplicas: 10, + UpdatedReadyReplicas: 10, + PlannedUpdatedReplicas: 10, + DesiredUpdatedReplicas: 10, + }, + }, + } + + for name, cs := range cases { + t.Run(name, func(t *testing.T) { + control := realController{ + object: cs.workload(), + WorkloadInfo: util.ParseWorkload(cs.workload()), + } + got, err := control.CalculateBatchContext(cs.release()) + Expect(err).NotTo(HaveOccurred()) + Expect(got.Log()).Should(Equal(cs.result.Log())) + }) + } +} + +func TestRealController(t *testing.T) { + RegisterFailHandler(Fail) + + release := releaseDemo.DeepCopy() + clone := cloneDemo.DeepCopy() + // for unit test we should set some default value since no webhook or controller is working + clone.Spec.UpdateStrategy.Type = kruiseappsv1alpha1.RecreateCloneSetUpdateStrategyType + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(release, clone).Build() + // build new controller + c := NewController(cli, cloneKey, clone.GroupVersionKind()).(*realController) + controller, err := c.BuildController() + Expect(err).NotTo(HaveOccurred()) + // call Initialize + err = controller.Initialize(release) + Expect(err).NotTo(HaveOccurred()) + fetch := &kruiseappsv1alpha1.CloneSet{} + Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred()) + // check strategy + Expect(fetch.Spec.UpdateStrategy.Type).Should(Equal(kruiseappsv1alpha1.RecreateCloneSetUpdateStrategyType)) + // partition is set to 100% in webhook, therefore we cannot observe it in unit test + // Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.Partition, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + 
Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + // check annotations + Expect(fetch.Annotations[util.BatchReleaseControlAnnotation]).Should(Equal(getControlInfo(release))) + Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{ + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "0%"}, + MinReadySeconds: 0, + }))) + + c.object = fetch // mock + + for { + batchContext, err := controller.CalculateBatchContext(release) + Expect(err).NotTo(HaveOccurred()) + err = controller.UpgradeBatch(batchContext) + fetch = &kruiseappsv1alpha1.CloneSet{} + // mock + Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred()) + c.object = fetch + if err == nil { + break + } + } + + fetch = &kruiseappsv1alpha1.CloneSet{} + Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred()) + Expect(fetch.Spec.UpdateStrategy.MaxSurge.StrVal).Should(Equal("10%")) + Expect(fetch.Spec.UpdateStrategy.MaxUnavailable.IntVal).Should(Equal(int32(0))) + Expect(fetch.Spec.UpdateStrategy.Paused).Should(Equal(false)) + Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{ + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "0%"}, + MinReadySeconds: 0, + }))) + + controller.Finalize(release) + fetch = &kruiseappsv1alpha1.CloneSet{} + Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred()) + Expect(fetch.Spec.UpdateStrategy.MaxSurge.StrVal).Should(Equal("0%")) + Expect(fetch.Spec.UpdateStrategy.MaxUnavailable.IntVal).Should(Equal(int32(1))) + Expect(fetch.Spec.UpdateStrategy.Paused).Should(Equal(false)) + Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(0))) +} + +func getControlInfo(release *v1beta1.BatchRelease) string { + owner, _ := json.Marshal(metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind())) + return string(owner) +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/control_plane.go b/pkg/controller/batchrelease/control/bluegreenstyle/control_plane.go new file mode 100644 index 00000000..6f597370 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/control_plane.go @@ -0,0 +1,178 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bluegreenstyle + +import ( + "github.com/openkruise/rollouts/api/v1beta1" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/labelpatch" + "github.com/openkruise/rollouts/pkg/util" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type realBatchControlPlane struct { + Interface + client.Client + record.EventRecorder + patcher labelpatch.LabelPatcher + release *v1beta1.BatchRelease + newStatus *v1beta1.BatchReleaseStatus +} + +type NewInterfaceFunc func(cli client.Client, key types.NamespacedName, gvk schema.GroupVersionKind) Interface + +// NewControlPlane creates a new release controller with bluegreen-style to drive batch release state machine +func NewControlPlane(f NewInterfaceFunc, cli client.Client, recorder record.EventRecorder, release *v1beta1.BatchRelease, newStatus *v1beta1.BatchReleaseStatus, key types.NamespacedName, gvk schema.GroupVersionKind) *realBatchControlPlane { + return &realBatchControlPlane{ + Client: cli, + EventRecorder: recorder, + newStatus: newStatus, + Interface: f(cli, key, gvk), + release: release.DeepCopy(), + patcher: labelpatch.NewLabelPatcher(cli, klog.KObj(release)), + } +} + +func (rc *realBatchControlPlane) Initialize() error { + controller, err := rc.BuildController() + if err != nil { + return err + } + + // claim workload under our control + err = controller.Initialize(rc.release) + if err != nil { + return err + } + + // record revision and replicas + workloadInfo := controller.GetWorkloadInfo() + rc.newStatus.StableRevision = workloadInfo.Status.StableRevision + rc.newStatus.UpdateRevision = workloadInfo.Status.UpdateRevision + rc.newStatus.ObservedWorkloadReplicas = workloadInfo.Replicas + return err +} + +func (rc *realBatchControlPlane) UpgradeBatch() error { + controller, err := rc.BuildController() + if err != nil { + return err + } + + if controller.GetWorkloadInfo().Replicas == 0 { + return nil + } + + batchContext, err := controller.CalculateBatchContext(rc.release) + if err != nil { + return err + } + klog.Infof("BatchRelease %v calculated context when upgrade batch: %s", + klog.KObj(rc.release), batchContext.Log()) + + err = controller.UpgradeBatch(batchContext) + if err != nil { + return err + } + + return nil +} + +func (rc *realBatchControlPlane) CheckBatchReady() error { + controller, err := rc.BuildController() + if err != nil { + return err + } + + if controller.GetWorkloadInfo().Replicas == 0 { + return nil + } + + // do not countAndUpdateNoNeedUpdateReplicas when checking, + // the target calculated should be consistent with UpgradeBatch. 
+ batchContext, err := controller.CalculateBatchContext(rc.release) + if err != nil { + return err + } + + klog.Infof("BatchRelease %v calculated context when check batch ready: %s", + klog.KObj(rc.release), batchContext.Log()) + + return batchContext.IsBatchReady() +} + +func (rc *realBatchControlPlane) Finalize() error { + controller, err := rc.BuildController() + if err != nil { + return client.IgnoreNotFound(err) + } + + // release workload control info and clean up resources if it needs + return controller.Finalize(rc.release) +} + +func (rc *realBatchControlPlane) SyncWorkloadInformation() (control.WorkloadEventType, *util.WorkloadInfo, error) { + // ignore the sync if the release plan is deleted + if rc.release.DeletionTimestamp != nil { + return control.WorkloadNormalState, nil, nil + } + + controller, err := rc.BuildController() + if err != nil { + if errors.IsNotFound(err) { + return control.WorkloadHasGone, nil, err + } + return control.WorkloadUnknownState, nil, err + } + + workloadInfo := controller.GetWorkloadInfo() + if !workloadInfo.IsStable() { + klog.Infof("Workload(%v) still reconciling, waiting for it to complete, generation: %v, observed: %v", + workloadInfo.LogKey, workloadInfo.Generation, workloadInfo.Status.ObservedGeneration) + return control.WorkloadStillReconciling, workloadInfo, nil + } + + if workloadInfo.IsPromoted() { + klog.Infof("Workload(%v) has been promoted, no need to rollout again actually, replicas: %v, updated: %v", + workloadInfo.LogKey, workloadInfo.Replicas, workloadInfo.Status.UpdatedReadyReplicas) + return control.WorkloadNormalState, workloadInfo, nil + } + + if workloadInfo.IsScaling(rc.newStatus.ObservedWorkloadReplicas) { + klog.Warningf("Workload(%v) replicas is modified, replicas from: %v to -> %v", + workloadInfo.LogKey, rc.newStatus.ObservedWorkloadReplicas, workloadInfo.Replicas) + return control.WorkloadReplicasChanged, workloadInfo, nil + } + + if workloadInfo.IsRollback(rc.newStatus.StableRevision, rc.newStatus.UpdateRevision) { + klog.Warningf("Workload(%v) is rolling back", workloadInfo.LogKey) + return control.WorkloadRollbackInBatch, workloadInfo, nil + } + + if workloadInfo.IsRevisionNotEqual(rc.newStatus.UpdateRevision) { + klog.Warningf("Workload(%v) updateRevision is modified, updateRevision from: %v to -> %v", + workloadInfo.LogKey, rc.newStatus.UpdateRevision, workloadInfo.Status.UpdateRevision) + return control.WorkloadPodTemplateChanged, workloadInfo, nil + } + + return control.WorkloadNormalState, workloadInfo, nil +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control.go b/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control.go new file mode 100644 index 00000000..3c33b968 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control.go @@ -0,0 +1,303 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package deployment + +import ( + "context" + "fmt" + + "github.com/openkruise/rollouts/api/v1alpha1" + "github.com/openkruise/rollouts/api/v1beta1" + batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/hpa" + deploymentutil "github.com/openkruise/rollouts/pkg/controller/deployment/util" + "github.com/openkruise/rollouts/pkg/util" + "github.com/openkruise/rollouts/pkg/util/errors" + "github.com/openkruise/rollouts/pkg/util/patch" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" + utilpointer "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type realController struct { + *util.WorkloadInfo + client client.Client + pods []*corev1.Pod + key types.NamespacedName + object *apps.Deployment +} + +func NewController(cli client.Client, key types.NamespacedName, _ schema.GroupVersionKind) bluegreenstyle.Interface { + return &realController{ + key: key, + client: cli, + } +} + +func (rc *realController) GetWorkloadInfo() *util.WorkloadInfo { + return rc.WorkloadInfo +} + +func (rc *realController) BuildController() (bluegreenstyle.Interface, error) { + if rc.object != nil { + return rc, nil + } + object := &apps.Deployment{} + if err := rc.client.Get(context.TODO(), rc.key, object); err != nil { + return rc, err + } + rc.object = object + rc.WorkloadInfo = rc.getWorkloadInfo(object) + return rc, nil +} + +func (rc *realController) ListOwnedPods() ([]*corev1.Pod, error) { + if rc.pods != nil { + return rc.pods, nil + } + var err error + rc.pods, err = util.ListOwnedPods(rc.client, rc.object) + return rc.pods, err +} + +// Add OriginalDeploymentStrategyAnnotation to workload +func (rc *realController) Initialize(release *v1beta1.BatchRelease) error { + if rc.object == nil || control.IsControlledByBatchRelease(release, rc.object) { + return nil + } + // disable the hpa + if err := hpa.DisableHPA(rc.client, rc.object); err != nil { + return err + } + klog.InfoS("Initialize: disable hpa for deployment successfully", "deployment", klog.KObj(rc.object)) + // update the deployment + setting, err := control.GetOriginalSetting(rc.object) + if err != nil { + return errors.NewFatalError(fmt.Errorf("cannot get original setting for cloneset %v: %s from annotation", klog.KObj(rc.object), err.Error())) + } + control.InitOriginalSetting(&setting, rc.object) + klog.InfoS("Initialize deployment", "deployment", klog.KObj(rc.object), "setting", util.DumpJSON(&setting)) + + patchData := patch.NewDeploymentPatch() + patchData.InsertAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation, util.DumpJSON(&setting)) + patchData.InsertAnnotation(util.BatchReleaseControlAnnotation, util.DumpJSON(metav1.NewControllerRef( + release, release.GetObjectKind().GroupVersionKind()))) + // update: MinReadySeconds, ProgressDeadlineSeconds, MaxSurge, MaxUnavailable + patchData.UpdateStrategy(apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ + MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, 
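+ // maxSurge=1 is only the initial placeholder (CalculateBatchContext treats a surge of 1 as the pre-batch value); maxUnavailable=0 keeps every stable pod running while new-revision pods surge in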
+ MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 0}, + }, + }) + patchData.UpdateMinReadySeconds(v1beta1.MaxReadySeconds) + patchData.UpdateProgressDeadlineSeconds(utilpointer.Int32(v1beta1.MaxProgressSeconds)) + return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData) +} + +func (rc *realController) UpgradeBatch(ctx *batchcontext.BatchContext) error { + if err := control.ValidateReadyForBlueGreenRelease(rc.object); err != nil { + return errors.NewFatalError(fmt.Errorf("cannot upgrade batch, because deployment %v doesn't satisfy conditions: %s", klog.KObj(rc.object), err.Error())) + } + desired, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.DesiredSurge, int(ctx.Replicas), true) + current, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.CurrentSurge, int(ctx.Replicas), true) + + if current >= desired { + klog.Infof("No need to upgrade batch for deployment %v: because current %d >= desired %d", klog.KObj(rc.object), current, desired) + return nil + } + klog.Infof("Ready to upgrade batch for deployment %v: current %d < desired %d", klog.KObj(rc.object), current, desired) + patchData := patch.NewDeploymentPatch() + // unlike a canary release, blue-green does not need to pause the Deployment during the rollout + patchData.UpdatePaused(false) + patchData.UpdateStrategy(apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ + MaxSurge: &ctx.DesiredSurge, + MaxUnavailable: &intstr.IntOrString{}, + }, + }) + return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData) +} + +// set pause to false, restore the original setting, delete annotation +func (rc *realController) Finalize(release *v1beta1.BatchRelease) error { + if rc.finalized() { + return nil // No need to finalize again. + } + if release.Spec.ReleasePlan.BatchPartition != nil { + // continuous release (not supported yet) + /* + patchData := patch.NewDeploymentPatch() + patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation) + if err := rc.client.Patch(context.TODO(), d, patchData); err != nil { + return err + } + */ + klog.Warningf("continuous release is not supported yet for bluegreen style release") + return nil + } + + d := util.GetEmptyObjectWithKey(rc.object) + setting, err := control.GetOriginalSetting(rc.object) + if err != nil { + return errors.NewFatalError(fmt.Errorf("cannot get original setting for deployment %v: %s from annotation", klog.KObj(rc.object), err.Error())) + } + patchData := patch.NewDeploymentPatch() + // Why do we need a simple MinReadySeconds-based state machine (i.e. the if-else block below)? + // Finalize may be called multiple times if it returns a non-nil error.
+ // If we did everything in a single block, like A->B->C, then whenever C needed a retry, + // A and B would be executed again as well; operations like RestoreHPA are expensive (they call the LIST API). + if rc.object.Spec.MinReadySeconds != setting.MinReadySeconds { + // restore the HPA + if err := hpa.RestoreHPA(rc.client, rc.object); err != nil { + return err + } + // restore the original setting + patchData.UpdatePaused(false) + patchData.UpdateMinReadySeconds(setting.MinReadySeconds) + patchData.UpdateProgressDeadlineSeconds(setting.ProgressDeadlineSeconds) + patchData.UpdateMaxSurge(setting.MaxSurge) + patchData.UpdateMaxUnavailable(setting.MaxUnavailable) + if err := rc.client.Patch(context.TODO(), d, patchData); err != nil { + return err + } + // return an error to trigger a re-enqueue, so that the next reconcile can proceed to the other branch + return errors.NewBenignError(fmt.Errorf("deployment bluegreen: waiting for all pods to be updated and available")) + } else { + klog.InfoS("Finalize: deployment bluegreen release: waiting for all pods to be updated and ready", "deployment", klog.KObj(rc.object)) + // wait until all pods are updated and ready + if err := waitAllUpdatedAndReady(d.(*apps.Deployment)); err != nil { + return errors.NewBenignError(err) + } + klog.InfoS("Finalize: deployment is ready to resume, restore the original setting", "deployment", klog.KObj(rc.object)) + // restore label and annotation + patchData.DeleteAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation) + patchData.DeleteLabel(v1alpha1.DeploymentStableRevisionLabel) + patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation) + return rc.client.Patch(context.TODO(), d, patchData) + } +} + +func (rc *realController) finalized() bool { + if rc.object == nil || rc.object.DeletionTimestamp != nil { + return true + } + if rc.object.Annotations == nil || len(rc.object.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]) == 0 { + return true + } + return false +} + +func (rc *realController) CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error) { + currentBatch := release.Status.CanaryStatus.CurrentBatch + desiredSurge := release.Spec.ReleasePlan.Batches[currentBatch].CanaryReplicas + PlannedUpdatedReplicas := deploymentutil.NewRSReplicasLimit(desiredSurge, rc.object) + currentSurge := intstr.FromInt(0) + if rc.object.Spec.Strategy.RollingUpdate != nil && rc.object.Spec.Strategy.RollingUpdate.MaxSurge != nil { + currentSurge = *rc.object.Spec.Strategy.RollingUpdate.MaxSurge + if currentSurge == intstr.FromInt(1) { + // currentSurge == intstr.FromInt(1) means that currentSurge is the initial value + // if the value was indeed set by the user, resetting it to 0 still does no harm + currentSurge = intstr.FromInt(0) + } + } + return &batchcontext.BatchContext{ + Pods: rc.pods, + RolloutID: release.Spec.ReleasePlan.RolloutID, + CurrentBatch: currentBatch, + CurrentSurge: currentSurge, + DesiredSurge: desiredSurge, + UpdateRevision: release.Status.UpdateRevision, + + Replicas: rc.Replicas, + UpdatedReplicas: rc.Status.UpdatedReplicas, + UpdatedReadyReplicas: rc.Status.UpdatedReadyReplicas, + PlannedUpdatedReplicas: PlannedUpdatedReplicas, + DesiredUpdatedReplicas: PlannedUpdatedReplicas, + }, nil +} + +func (rc *realController) getWorkloadInfo(d *apps.Deployment) *util.WorkloadInfo { + workloadInfo := util.ParseWorkload(d) + workloadInfo.Status.UpdatedReadyReplicas = 0 + if res, err := rc.getUpdatedReadyReplicas(d); err == nil { + workloadInfo.Status.UpdatedReadyReplicas = res + } + 
workloadInfo.Status.StableRevision = d.Labels[v1alpha1.DeploymentStableRevisionLabel] + return workloadInfo +} + +func (rc *realController) getUpdatedReadyReplicas(d *apps.Deployment) (int32, error) { + rss := &apps.ReplicaSetList{} + listOpts := []client.ListOption{ + client.InNamespace(d.Namespace), + client.MatchingLabels(d.Spec.Selector.MatchLabels), + client.UnsafeDisableDeepCopy, + } + if err := rc.client.List(context.TODO(), rss, listOpts...); err != nil { + klog.Warningf("getWorkloadInfo failed, because"+"%s", err.Error()) + return -1, err + } + allRSs := rss.Items + // select rs owner by current deployment + ownedRSs := make([]*apps.ReplicaSet, 0) + for i := range allRSs { + rs := &allRSs[i] + if !rs.DeletionTimestamp.IsZero() { + continue + } + + if metav1.IsControlledBy(rs, d) { + ownedRSs = append(ownedRSs, rs) + } + } + newRS := deploymentutil.FindNewReplicaSet(d, ownedRSs) + updatedReadyReplicas := int32(0) + // if newRS is nil, it means the replicaset hasn't been created (because the deployment is paused) + // therefore we can return 0 directly + if newRS != nil { + updatedReadyReplicas = newRS.Status.ReadyReplicas + } + return updatedReadyReplicas, nil +} + +func waitAllUpdatedAndReady(deployment *apps.Deployment) error { + if deployment.Spec.Paused { + return fmt.Errorf("deployment should not be paused") + } + + // ALL pods updated AND ready + if deployment.Status.ReadyReplicas != deployment.Status.UpdatedReplicas { + return fmt.Errorf("all ready replicas should be updated, and all updated replicas should be ready") + } + + availableReplicas := deployment.Status.AvailableReplicas + allowedUnavailable := util.DeploymentMaxUnavailable(deployment) + if allowedUnavailable+availableReplicas < deployment.Status.Replicas { + return fmt.Errorf("ready replicas should satisfy maxUnavailable") + } + return nil +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control_test.go b/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control_test.go new file mode 100644 index 00000000..e076c101 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control_test.go @@ -0,0 +1,541 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strconv" + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + rolloutapi "github.com/openkruise/rollouts/api" + "github.com/openkruise/rollouts/api/v1beta1" + batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context" + control "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/util" + "github.com/openkruise/rollouts/pkg/util/errors" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + scheme = runtime.NewScheme() + + deploymentKey = types.NamespacedName{ + Name: "deployment", + Namespace: "default", + } + + deploymentDemo = &apps.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentKey.Name, + Namespace: deploymentKey.Namespace, + Generation: 1, + Labels: map[string]string{ + "app": "busybox", + }, + Annotations: map[string]string{ + "type": "unit-test", + }, + }, + Spec: apps.DeploymentSpec{ + Paused: true, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "busybox", + }, + }, + Replicas: pointer.Int32(10), + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "20%"}, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "busybox", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox:latest", + }, + }, + }, + }, + }, + Status: apps.DeploymentStatus{ + Replicas: 10, + UpdatedReplicas: 0, + ReadyReplicas: 10, + AvailableReplicas: 10, + CollisionCount: pointer.Int32Ptr(1), + ObservedGeneration: 1, + }, + } + + deploymentDemo2 = &apps.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: apps.SchemeGroupVersion.String(), + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment", + Namespace: "default", + UID: types.UID("87076677"), + Generation: 2, + Labels: map[string]string{ + "app": "busybox", + apps.DefaultDeploymentUniqueLabelKey: "update-pod-hash", + }, + }, + Spec: apps.DeploymentSpec{ + Replicas: pointer.Int32Ptr(10), + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ + MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)}, + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(0)}, + }, + }, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "busybox", + }, + }, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: containers("v2"), + }, + }, + }, + Status: apps.DeploymentStatus{ + Replicas: 10, + ReadyReplicas: 10, + UpdatedReplicas: 0, + AvailableReplicas: 10, + }, + } + + releaseDemo = &v1beta1.BatchRelease{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rollouts.kruise.io/v1alpha1", + Kind: "BatchRelease", + }, + 
ObjectMeta: metav1.ObjectMeta{ + Name: "release", + Namespace: deploymentKey.Namespace, + UID: uuid.NewUUID(), + }, + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType, + Batches: []v1beta1.ReleaseBatch{ + { + CanaryReplicas: intstr.FromString("10%"), + }, + { + CanaryReplicas: intstr.FromString("50%"), + }, + { + CanaryReplicas: intstr.FromString("100%"), + }, + }, + }, + WorkloadRef: v1beta1.ObjectRef{ + APIVersion: deploymentDemo.APIVersion, + Kind: deploymentDemo.Kind, + Name: deploymentDemo.Name, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 1, + }, + }, + } +) + +func init() { + apps.AddToScheme(scheme) + rolloutapi.AddToScheme(scheme) + kruiseappsv1alpha1.AddToScheme(scheme) +} + +func TestCalculateBatchContext(t *testing.T) { + RegisterFailHandler(Fail) + cases := map[string]struct { + workload func() []client.Object + release func() *v1beta1.BatchRelease + result *batchcontext.BatchContext + }{ + "noraml case": { + workload: func() []client.Object { + deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment) + deployment.Status = apps.DeploymentStatus{ + Replicas: 15, + UpdatedReplicas: 5, + AvailableReplicas: 12, + ReadyReplicas: 12, + } + // current partition, ie. maxSurge + deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "50%"} + deployment.Spec.Replicas = pointer.Int32Ptr(10) + newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet) + newRss.Status.ReadyReplicas = 2 + return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)} + }, + + release: func() *v1beta1.BatchRelease { + r := &v1beta1.BatchRelease{ + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType, + Batches: []v1beta1.ReleaseBatch{ + { + CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}, + }, + { + CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}, + }, + }, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 1, + }, + UpdateRevision: "version-2", + }, + } + return r + }, + result: &batchcontext.BatchContext{ + CurrentBatch: 1, + UpdateRevision: "version-2", + DesiredSurge: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}, + CurrentSurge: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}, + Replicas: 10, + UpdatedReplicas: 5, + UpdatedReadyReplicas: 2, + PlannedUpdatedReplicas: 10, + DesiredUpdatedReplicas: 10, + }, + }, + "maxSurge=99%, replicas=5": { + workload: func() []client.Object { + deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment) + deployment.Status = apps.DeploymentStatus{ + Replicas: 9, + UpdatedReplicas: 4, + AvailableReplicas: 9, + ReadyReplicas: 9, + } + deployment.Spec.Replicas = pointer.Int32Ptr(5) + // current partition, ie. 
maxSurge + deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "90%"} + newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet) + newRss.Status.ReadyReplicas = 4 + return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)} + }, + release: func() *v1beta1.BatchRelease { + r := &v1beta1.BatchRelease{ + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType, + Batches: []v1beta1.ReleaseBatch{ + { + CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "90%"}, + }, + { + CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "99%"}, + }, + }, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 1, + }, + UpdateRevision: "version-2", + }, + } + return r + }, + result: &batchcontext.BatchContext{ + CurrentBatch: 1, + UpdateRevision: "version-2", + DesiredSurge: intstr.FromString("99%"), + CurrentSurge: intstr.FromString("90%"), + Replicas: 5, + UpdatedReplicas: 4, + UpdatedReadyReplicas: 4, + PlannedUpdatedReplicas: 4, + DesiredUpdatedReplicas: 4, + }, + }, + + // test case for continuous release + // "maxSurge=100%, but it is initialized value": { + // workload: func() []client.Object { + // deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment) + // deployment.Status = apps.DeploymentStatus{ + // Replicas: 10, + // UpdatedReplicas: 0, + // AvailableReplicas: 10, + // ReadyReplicas: 10, + // } + // // current partition, ie. maxSurge + // deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"} + // newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet) + // newRss.Status.ReadyReplicas = 0 + // return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)} + // }, + // release: func() *v1beta1.BatchRelease { + // r := &v1beta1.BatchRelease{ + // Spec: v1beta1.BatchReleaseSpec{ + // ReleasePlan: v1beta1.ReleasePlan{ + // FailureThreshold: &percent, + // FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType, + // Batches: []v1beta1.ReleaseBatch{ + // { + // CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}, + // }, + // }, + // }, + // }, + // Status: v1beta1.BatchReleaseStatus{ + // CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + // CurrentBatch: 0, + // }, + // UpdateRevision: "version-2", + // }, + // } + // return r + // }, + // result: &batchcontext.BatchContext{ + // CurrentBatch: 0, + // UpdateRevision: "version-2", + // DesiredPartition: intstr.FromString("50%"), + // FailureThreshold: &percent, + // CurrentPartition: intstr.FromString("0%"), // mainly check this + // Replicas: 10, + // UpdatedReplicas: 0, + // UpdatedReadyReplicas: 0, + // PlannedUpdatedReplicas: 5, + // DesiredUpdatedReplicas: 5, + // }, + // }, + } + + for name, cs := range cases { + t.Run(name, func(t *testing.T) { + cliBuilder := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cs.workload()...) 
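+ // the fake client is seeded with the Deployment plus the canary and stable ReplicaSets built by cs.workload(), so BuildController and CalculateBatchContext can read them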
+ cli := cliBuilder.Build() + control := realController{ + client: cli, + key: deploymentKey, + } + _, err := control.BuildController() + Expect(err).NotTo(HaveOccurred()) + got, err := control.CalculateBatchContext(cs.release()) + Expect(err).NotTo(HaveOccurred()) + fmt.Printf("expect %s, but got %s", cs.result.Log(), got.Log()) + Expect(got.Log()).Should(Equal(cs.result.Log())) + }) + } +} + +func TestRealController(t *testing.T) { + RegisterFailHandler(Fail) + + release := releaseDemo.DeepCopy() + clone := deploymentDemo.DeepCopy() + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(release, clone).Build() + // build new controller + c := NewController(cli, deploymentKey, clone.GroupVersionKind()).(*realController) + controller, err := c.BuildController() + Expect(err).NotTo(HaveOccurred()) + // call Initialize + err = controller.Initialize(release) + Expect(err).NotTo(HaveOccurred()) + fetch := &apps.Deployment{} + Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred()) + // check strategy + Expect(fetch.Spec.Paused).Should(BeTrue()) + Expect(fetch.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*fetch.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + // check annotations + Expect(fetch.Annotations[util.BatchReleaseControlAnnotation]).Should(Equal(getControlInfo(release))) + fmt.Println(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]) + Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{ + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "20%"}, + MinReadySeconds: 0, + ProgressDeadlineSeconds: pointer.Int32(600), + }))) + + c.object = fetch // mock + + for { + batchContext, err := controller.CalculateBatchContext(release) + Expect(err).NotTo(HaveOccurred()) + err = controller.UpgradeBatch(batchContext) + fetch := &apps.Deployment{} + // mock + Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred()) + c.object = fetch + if err == nil { + break + } + } + fetch = &apps.Deployment{} + Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred()) + // currentBatch is 1, which means br is in the second batch, maxSurge is 50% + Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + + release.Spec.ReleasePlan.BatchPartition = nil + err = controller.Finalize(release) + Expect(errors.IsBenign(err)).Should(BeTrue()) + fetch = &apps.Deployment{} + Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred()) + // check workload strategy + Expect(fetch.Spec.Paused).Should(BeFalse()) + Expect(fetch.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "20%"})).Should(BeTrue()) + Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, 
IntVal: 1})).Should(BeTrue()) + Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(*fetch.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600))) +} +func getControlInfo(release *v1beta1.BatchRelease) string { + owner, _ := json.Marshal(metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind())) + return string(owner) +} + +func makeCanaryReplicaSets(d client.Object) client.Object { + deploy := d.(*apps.Deployment) + labels := deploy.Spec.Selector.DeepCopy().MatchLabels + labels[apps.DefaultDeploymentUniqueLabelKey] = util.ComputeHash(&deploy.Spec.Template, nil) + return &apps.ReplicaSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: apps.SchemeGroupVersion.String(), + Kind: "ReplicaSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: deploy.Name + rand.String(5), + Namespace: deploy.Namespace, + UID: uuid.NewUUID(), + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deploy, deploy.GroupVersionKind()), + }, + }, + Spec: apps.ReplicaSetSpec{ + Replicas: deploy.Spec.Replicas, + Selector: deploy.Spec.Selector.DeepCopy(), + Template: *deploy.Spec.Template.DeepCopy(), + }, + } + +} + +func makeStableReplicaSets(d client.Object) client.Object { + deploy := d.(*apps.Deployment) + stableTemplate := deploy.Spec.Template.DeepCopy() + stableTemplate.Spec.Containers = containers("v1") + labels := deploy.Spec.Selector.DeepCopy().MatchLabels + labels[apps.DefaultDeploymentUniqueLabelKey] = util.ComputeHash(stableTemplate, nil) + return &apps.ReplicaSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: apps.SchemeGroupVersion.String(), + Kind: "ReplicaSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: deploy.Name + rand.String(5), + Namespace: deploy.Namespace, + UID: uuid.NewUUID(), + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deploy, deploy.GroupVersionKind()), + }, + }, + Spec: apps.ReplicaSetSpec{ + Replicas: deploy.Spec.Replicas, + Selector: deploy.Spec.Selector.DeepCopy(), + Template: *stableTemplate, + }, + } +} + +func containers(version string) []corev1.Container { + return []corev1.Container{ + { + Name: "busybox", + Image: fmt.Sprintf("busybox:%v", version), + }, + } +} + +func getStableWithReady(workload client.Object, version string) client.Object { + switch workload.(type) { + case *apps.Deployment: + deploy := workload.(*apps.Deployment) + d := deploy.DeepCopy() + d.Spec.Paused = true + d.ResourceVersion = strconv.Itoa(rand.Intn(100000000000)) + d.Spec.Template.Spec.Containers = containers(version) + d.Status.ObservedGeneration = deploy.Generation + return d + + case *kruiseappsv1alpha1.CloneSet: + clone := workload.(*kruiseappsv1alpha1.CloneSet) + c := clone.DeepCopy() + c.ResourceVersion = strconv.Itoa(rand.Intn(100000000000)) + c.Spec.UpdateStrategy.Paused = true + c.Spec.UpdateStrategy.Partition = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"} + c.Spec.Template.Spec.Containers = containers(version) + c.Status.ObservedGeneration = clone.Generation + return c + } + return nil +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa.go b/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa.go new file mode 100644 index 00000000..1e71a3a2 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa.go @@ -0,0 +1,106 @@ +package hpa + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + 
"sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + HPADisableSuffix = "-DisableByRollout" +) + +func DisableHPA(cli client.Client, object client.Object) error { + hpa := findHPAForWorkload(cli, object) + if hpa == nil { + return nil + } + targetRef, found, err := unstructured.NestedFieldCopy(hpa.Object, "spec", "scaleTargetRef") + if err != nil || !found { + return fmt.Errorf("get HPA targetRef for workload %v failed, because %s", klog.KObj(object), err.Error()) + } + ref := targetRef.(map[string]interface{}) + name, version, kind := ref["name"].(string), ref["apiVersion"].(string), ref["kind"].(string) + if !strings.HasSuffix(name, HPADisableSuffix) { + body := fmt.Sprintf(`{"spec":{"scaleTargetRef":{"apiVersion": "%s", "kind": "%s", "name": "%s"}}}`, version, kind, addSuffix(name)) + if err = cli.Patch(context.TODO(), hpa, client.RawPatch(types.MergePatchType, []byte(body))); err != nil { + return fmt.Errorf("failed to disable HPA %v for workload %v, because %s", klog.KObj(hpa), klog.KObj(object), err.Error()) + } + } + return nil +} + +func RestoreHPA(cli client.Client, object client.Object) error { + hpa := findHPAForWorkload(cli, object) + if hpa == nil { + return nil + } + targetRef, found, err := unstructured.NestedFieldCopy(hpa.Object, "spec", "scaleTargetRef") + if err != nil || !found { + return fmt.Errorf("get HPA targetRef for workload %v failed, because %s", klog.KObj(object), err.Error()) + } + ref := targetRef.(map[string]interface{}) + name, version, kind := ref["name"].(string), ref["apiVersion"].(string), ref["kind"].(string) + if strings.HasSuffix(name, HPADisableSuffix) { + body := fmt.Sprintf(`{"spec":{"scaleTargetRef":{"apiVersion": "%s", "kind": "%s", "name": "%s"}}}`, version, kind, removeSuffix(name)) + if err = cli.Patch(context.TODO(), hpa, client.RawPatch(types.MergePatchType, []byte(body))); err != nil { + return fmt.Errorf("failed to restore HPA %v for workload %v, because %s", klog.KObj(hpa), klog.KObj(object), err.Error()) + } + } + return nil +} + +func findHPAForWorkload(cli client.Client, object client.Object) *unstructured.Unstructured { + hpa := findHPA(cli, object, "v2") + if hpa != nil { + return hpa + } + return findHPA(cli, object, "v1") +} + +func findHPA(cli client.Client, object client.Object, version string) *unstructured.Unstructured { + unstructuredList := &unstructured.UnstructuredList{} + hpaGvk := schema.GroupVersionKind{Group: "autoscaling", Kind: "HorizontalPodAutoscaler", Version: version} + unstructuredList.SetGroupVersionKind(hpaGvk) + if err := cli.List(context.TODO(), unstructuredList, &client.ListOptions{Namespace: object.GetNamespace()}); err != nil { + klog.Warningf("Get HPA for workload %v failed, because %s", klog.KObj(object), err.Error()) + return nil + } + klog.Infof("Get %d HPA with %s in namespace %s in total", len(unstructuredList.Items), version, object.GetNamespace()) + for _, item := range unstructuredList.Items { + scaleTargetRef, found, err := unstructured.NestedFieldCopy(item.Object, "spec", "scaleTargetRef") + if err != nil || !found { + continue + } + ref := scaleTargetRef.(map[string]interface{}) + name, version, kind := ref["name"].(string), ref["apiVersion"].(string), ref["kind"].(string) + if version == object.GetObjectKind().GroupVersionKind().GroupVersion().String() && + kind == object.GetObjectKind().GroupVersionKind().Kind && + removeSuffix(name) == object.GetName() { + return &item + } + } + klog.Infof("No HPA found for workload %v", klog.KObj(object)) + return nil +} + +func addSuffix(HPARefName 
string) string { + if strings.HasSuffix(HPARefName, HPADisableSuffix) { + return HPARefName + } + return HPARefName + HPADisableSuffix +} + +func removeSuffix(HPARefName string) string { + refName := HPARefName + for strings.HasSuffix(refName, HPADisableSuffix) { + refName = refName[:len(refName)-len(HPADisableSuffix)] + } + return refName +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa_test.go b/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa_test.go new file mode 100644 index 00000000..a2e2f643 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa_test.go @@ -0,0 +1,151 @@ +package hpa + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + // "github.com/openkruise/rollouts/api/v1alpha1" + // import hpa v1 +) + +var ( + scheme = runtime.NewScheme() +) + +func TestHPAPackage(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "HPA Package Suite") +} + +var _ = Describe("HPA Operations", func() { + var ( + cli client.Client + object *unstructured.Unstructured + ) + + BeforeEach(func() { + object = &unstructured.Unstructured{} + object.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + }) + object.SetNamespace("default") + object.SetName("my-deployment") + + cli = fake.NewClientBuilder().WithScheme(scheme).WithObjects(object).Build() + }) + + Context("when disabling and restoring HPA", func() { + It("should disable and restore HPA successfully", func() { + // Create a fake HPA + hpa := &unstructured.Unstructured{} + hpa.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "autoscaling", + Version: "v2", + Kind: "HorizontalPodAutoscaler", + }) + hpa.SetNamespace("default") + hpa.SetName("my-hpa") + unstructured.SetNestedField(hpa.Object, map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "name": "my-deployment", + }, "spec", "scaleTargetRef") + + Expect(cli.Create(context.TODO(), hpa)).To(Succeed()) + + // Disable HPA + DisableHPA(cli, object) + + fetchedHPA := &unstructured.Unstructured{} + fetchedHPA.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "autoscaling", + Version: "v2", + Kind: "HorizontalPodAutoscaler", + }) + Expect(cli.Get(context.TODO(), types.NamespacedName{ + Namespace: "default", + Name: "my-hpa", + }, fetchedHPA)).To(Succeed()) + + targetRef, found, err := unstructured.NestedFieldCopy(fetchedHPA.Object, "spec", "scaleTargetRef") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + + ref := targetRef.(map[string]interface{}) + Expect(ref["name"]).To(Equal("my-deployment" + HPADisableSuffix)) + + // Restore HPA + RestoreHPA(cli, object) + + Expect(cli.Get(context.TODO(), types.NamespacedName{ + Namespace: "default", + Name: "my-hpa", + }, fetchedHPA)).To(Succeed()) + + targetRef, found, err = unstructured.NestedFieldCopy(fetchedHPA.Object, "spec", "scaleTargetRef") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + + ref = targetRef.(map[string]interface{}) + Expect(ref["name"]).To(Equal("my-deployment")) + }) + }) + + Context("when finding HPA for workload", func() { + It("should find the correct HPA", func() { + // Create a fake HPA v2 + hpaV2 := 
&unstructured.Unstructured{} + hpaV2.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "autoscaling", + Version: "v2", + Kind: "HorizontalPodAutoscaler", + }) + hpaV2.SetNamespace("default") + hpaV2.SetName("my-hpa-v2") + unstructured.SetNestedField(hpaV2.Object, map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "name": "my-deployment", + }, "spec", "scaleTargetRef") + + // Create a fake HPA v1 + hpaV1 := &unstructured.Unstructured{} + hpaV1.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "autoscaling", + Version: "v1", + Kind: "HorizontalPodAutoscaler", + }) + hpaV1.SetNamespace("default") + hpaV1.SetName("my-hpa-v1") + unstructured.SetNestedField(hpaV1.Object, map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "name": "my-deployment", + }, "spec", "scaleTargetRef") + + Expect(cli.Create(context.TODO(), hpaV2)).To(Succeed()) + Expect(cli.Create(context.TODO(), hpaV1)).To(Succeed()) + + // Test finding HPA for workload + foundHPA := findHPAForWorkload(cli, object) + Expect(foundHPA).NotTo(BeNil()) + Expect(foundHPA.GetName()).To(Equal("my-hpa-v2")) + + // Delete v2 HPA and check if v1 is found + Expect(cli.Delete(context.TODO(), hpaV2)).To(Succeed()) + foundHPA = findHPAForWorkload(cli, object) + Expect(foundHPA).NotTo(BeNil()) + Expect(foundHPA.GetName()).To(Equal("my-hpa-v1")) + }) + }) +}) diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/interface.go b/pkg/controller/batchrelease/control/bluegreenstyle/interface.go new file mode 100644 index 00000000..7ae602f5 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/interface.go @@ -0,0 +1,48 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bluegreenstyle + +import ( + "github.com/openkruise/rollouts/api/v1beta1" + batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context" + "github.com/openkruise/rollouts/pkg/util" + corev1 "k8s.io/api/core/v1" +) + +type Interface interface { + // BuildController will get workload object and parse workload info, + // and return a initialized controller for workload. + BuildController() (Interface, error) + // GetWorkloadInfo return workload information. + GetWorkloadInfo() *util.WorkloadInfo + // ListOwnedPods fetch the pods owned by the workload. + // Note that we should list pod only if we really need it. + // reserved for future use + ListOwnedPods() ([]*corev1.Pod, error) + // CalculateBatchContext calculate current batch context + // according to release plan and current status of workload. + CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error) + // Initialize do something before rolling out, for example: + // - pause the workload + // - update: MinReadySeconds, ProgressDeadlineSeconds, Strategy + Initialize(release *v1beta1.BatchRelease) error + // UpgradeBatch upgrade workload according current batch context. 
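+ // For the Deployment implementation in this package, this means patching spec.strategy.rollingUpdate.maxSurge up to the batch's desired surge.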
+ UpgradeBatch(ctx *batchcontext.BatchContext) error + // Finalize do something after rolling out, for example: + // - set pause to false, restore the original setting, delete annotation + Finalize(release *v1beta1.BatchRelease) error +} diff --git a/pkg/controller/batchrelease/control/util.go b/pkg/controller/batchrelease/control/util.go index 0c0e36bd..b9928fce 100644 --- a/pkg/controller/batchrelease/control/util.go +++ b/pkg/controller/batchrelease/control/util.go @@ -21,8 +21,10 @@ import ( "fmt" "strings" + appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" "github.com/openkruise/rollouts/api/v1beta1" "github.com/openkruise/rollouts/pkg/util" + apps "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -63,6 +65,42 @@ func IsControlledByBatchRelease(release *v1beta1.BatchRelease, object client.Obj return false } +// only when IsReadyForBlueGreenRelease returns true, can we go on to the next batch +func ValidateReadyForBlueGreenRelease(object client.Object) error { + // check the annotation + if object.GetAnnotations()[util.BatchReleaseControlAnnotation] == "" { + return fmt.Errorf("workload has no control info annotation") + } + switch o := object.(type) { + case *apps.Deployment: + // must be RollingUpdate + if len(o.Spec.Strategy.Type) > 0 && o.Spec.Strategy.Type != apps.RollingUpdateDeploymentStrategyType { + return fmt.Errorf("deployment strategy type is not RollingUpdate") + } + if o.Spec.Strategy.RollingUpdate == nil { + return fmt.Errorf("deployment strategy rollingUpdate is nil") + } + // MinReadySeconds and ProgressDeadlineSeconds must be set + if o.Spec.MinReadySeconds != v1beta1.MaxReadySeconds || o.Spec.ProgressDeadlineSeconds == nil || *o.Spec.ProgressDeadlineSeconds != v1beta1.MaxProgressSeconds { + return fmt.Errorf("deployment strategy minReadySeconds or progressDeadlineSeconds is not MaxReadySeconds or MaxProgressSeconds") + } + + case *appsv1alpha1.CloneSet: + // must be ReCreate + if len(o.Spec.UpdateStrategy.Type) > 0 && o.Spec.UpdateStrategy.Type != appsv1alpha1.RecreateCloneSetUpdateStrategyType { + return fmt.Errorf("cloneSet strategy type is not ReCreate") + } + // MinReadySeconds and ProgressDeadlineSeconds must be set + if o.Spec.MinReadySeconds != v1beta1.MaxReadySeconds { + return fmt.Errorf("cloneSet strategy minReadySeconds is not MaxReadySeconds") + } + + default: + panic("unsupported workload type to ValidateReadyForBlueGreenRelease function") + } + return nil +} + // BuildReleaseControlInfo return a NewControllerRef of release with escaped `"`. 
func BuildReleaseControlInfo(release *v1beta1.BatchRelease) string { owner, _ := json.Marshal(metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind())) @@ -112,3 +150,101 @@ func IsCurrentMoreThanOrEqualToDesired(current, desired intstr.IntOrString) bool desiredNum, _ := intstr.GetScaledValueFromIntOrPercent(&desired, 10000000, true) return currentNum >= desiredNum } + +// GetDeploymentStrategy decode the strategy object for advanced deployment +// from the annotation "rollouts.kruise.io/original-deployment-strategy" +func GetOriginalSetting(object client.Object) (OriginalDeploymentStrategy, error) { + setting := OriginalDeploymentStrategy{} + settingStr := object.GetAnnotations()[v1beta1.OriginalDeploymentStrategyAnnotation] + if settingStr == "" { + return setting, nil + } + err := json.Unmarshal([]byte(settingStr), &setting) + return setting, err +} + +// InitOriginalSetting will update the original setting based on the workload object +// note: update the maxSurge and maxUnavailable only when MaxSurge and MaxUnavailable are nil, +// which means they should keep unchanged in continuous release (though continuous release isn't supported for now) +func InitOriginalSetting(setting *OriginalDeploymentStrategy, object client.Object) { + var changeLogs []string + switch o := object.(type) { + case *apps.Deployment: + if setting.MaxSurge == nil { + setting.MaxSurge = getMaxSurgeFromDeployment(o.Spec.Strategy.RollingUpdate) + changeLogs = append(changeLogs, fmt.Sprintf("maxSurge changed from nil to %s", setting.MaxSurge.String())) + } + if setting.MaxUnavailable == nil { + setting.MaxUnavailable = getMaxUnavailableFromDeployment(o.Spec.Strategy.RollingUpdate) + changeLogs = append(changeLogs, fmt.Sprintf("maxUnavailable changed from nil to %s", setting.MaxUnavailable.String())) + } + if setting.ProgressDeadlineSeconds == nil { + setting.ProgressDeadlineSeconds = getIntPtrOrDefault(o.Spec.ProgressDeadlineSeconds, 600) + changeLogs = append(changeLogs, fmt.Sprintf("progressDeadlineSeconds changed from nil to %d", *setting.ProgressDeadlineSeconds)) + } + if setting.MinReadySeconds == 0 { + setting.MinReadySeconds = o.Spec.MinReadySeconds + changeLogs = append(changeLogs, fmt.Sprintf("minReadySeconds changed from 0 to %d", setting.MinReadySeconds)) + } + case *appsv1alpha1.CloneSet: + if setting.MaxSurge == nil { + setting.MaxSurge = getMaxSurgeFromCloneset(o.Spec.UpdateStrategy) + changeLogs = append(changeLogs, fmt.Sprintf("maxSurge changed from nil to %s", setting.MaxSurge.String())) + } + if setting.MaxUnavailable == nil { + setting.MaxUnavailable = getMaxUnavailableFromCloneset(o.Spec.UpdateStrategy) + changeLogs = append(changeLogs, fmt.Sprintf("maxUnavailable changed from nil to %s", setting.MaxUnavailable.String())) + } + if setting.ProgressDeadlineSeconds == nil { + // cloneset is planned to support progressDeadlineSeconds field + } + if setting.MinReadySeconds == 0 { + setting.MinReadySeconds = o.Spec.MinReadySeconds + changeLogs = append(changeLogs, fmt.Sprintf("minReadySeconds changed from 0 to %d", setting.MinReadySeconds)) + } + default: + panic(fmt.Errorf("unsupported object type %T", o)) + } + if len(changeLogs) == 0 { + klog.InfoS("InitOriginalSetting: original setting unchanged", "object", object.GetName()) + return + } + klog.InfoS("InitOriginalSetting: original setting updated", "object", object.GetName(), "changes", strings.Join(changeLogs, ";")) +} + +func getMaxSurgeFromDeployment(ru *apps.RollingUpdateDeployment) *intstr.IntOrString { + defaultMaxSurge := 
intstr.FromString("25%") + if ru == nil || ru.MaxSurge == nil { + return &defaultMaxSurge + } + return ru.MaxSurge +} +func getMaxUnavailableFromDeployment(ru *apps.RollingUpdateDeployment) *intstr.IntOrString { + defaultMaxAnavailale := intstr.FromString("25%") + if ru == nil || ru.MaxUnavailable == nil { + return &defaultMaxAnavailale + } + return ru.MaxUnavailable +} + +func getMaxSurgeFromCloneset(us appsv1alpha1.CloneSetUpdateStrategy) *intstr.IntOrString { + defaultMaxSurge := intstr.FromString("0%") + if us.MaxSurge == nil { + return &defaultMaxSurge + } + return us.MaxSurge +} +func getMaxUnavailableFromCloneset(us appsv1alpha1.CloneSetUpdateStrategy) *intstr.IntOrString { + defaultMaxUnavailable := intstr.FromString("20%") + if us.MaxUnavailable == nil { + return &defaultMaxUnavailable + } + return us.MaxUnavailable +} + +func getIntPtrOrDefault(ptr *int32, defaultVal int32) *int32 { + if ptr == nil { + return &defaultVal + } + return ptr +} diff --git a/pkg/controller/rollout/rollout_bluegreen.go b/pkg/controller/rollout/rollout_bluegreen.go index 562172f0..7cfeaa82 100644 --- a/pkg/controller/rollout/rollout_bluegreen.go +++ b/pkg/controller/rollout/rollout_bluegreen.go @@ -392,7 +392,10 @@ func (m *blueGreenReleaseManager) syncBatchRelease(br *v1beta1.BatchRelease, blu // TODO: optimize the logic to better understand blueGreenStatus.Message = fmt.Sprintf("BatchRelease is at state %s, rollout-id %s, step %d", br.Status.CanaryStatus.CurrentBatchState, br.Status.ObservedRolloutID, br.Status.CanaryStatus.CurrentBatch+1) - + // br.Status.Message records messages that help users to understand what is going wrong + if len(br.Status.Message) > 0 { + blueGreenStatus.Message += fmt.Sprintf(", %s", br.Status.Message) + } // sync rolloutId from blueGreenStatus to BatchRelease if blueGreenStatus.ObservedRolloutID != br.Spec.ReleasePlan.RolloutID { body := fmt.Sprintf(`{"spec":{"releasePlan":{"rolloutID":"%s"}}}`, blueGreenStatus.ObservedRolloutID) diff --git a/pkg/controller/rollout/rollout_canary.go b/pkg/controller/rollout/rollout_canary.go index f6d3c8cd..cc64c0ef 100644 --- a/pkg/controller/rollout/rollout_canary.go +++ b/pkg/controller/rollout/rollout_canary.go @@ -457,6 +457,10 @@ func (m *canaryReleaseManager) syncBatchRelease(br *v1beta1.BatchRelease, canary // TODO: optimize the logic to better understand canaryStatus.Message = fmt.Sprintf("BatchRelease is at state %s, rollout-id %s, step %d", br.Status.CanaryStatus.CurrentBatchState, br.Status.ObservedRolloutID, br.Status.CanaryStatus.CurrentBatch+1) + // br.Status.Message records messages that help users to understand what is going wrong + if len(br.Status.Message) > 0 { + canaryStatus.Message += fmt.Sprintf(", %s", br.Status.Message) + } // sync rolloutId from canaryStatus to BatchRelease if canaryStatus.ObservedRolloutID != br.Spec.ReleasePlan.RolloutID { diff --git a/pkg/util/condition.go b/pkg/util/condition.go index b1ed7075..1cca8c32 100644 --- a/pkg/util/condition.go +++ b/pkg/util/condition.go @@ -45,6 +45,16 @@ func GetRolloutCondition(status v1beta1.RolloutStatus, condType v1beta1.RolloutC return nil } +func GetBatchReleaseCondition(status v1beta1.BatchReleaseStatus, condType v1beta1.RolloutConditionType) *v1beta1.RolloutCondition { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Type == condType { + return &c + } + } + return nil +} + // SetRolloutCondition updates the rollout to include the provided condition. 
If the condition that // we are about to add already exists and has the same status and reason, then we are not going to update // by returning false. Returns true if the condition was updated @@ -63,6 +73,21 @@ func SetRolloutCondition(status *v1beta1.RolloutStatus, condition v1beta1.Rollou return true } +func SetBatchReleaseCondition(status *v1beta1.BatchReleaseStatus, condition v1beta1.RolloutCondition) bool { + currentCond := GetBatchReleaseCondition(*status, condition.Type) + if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason && + currentCond.Message == condition.Message { + return false + } + // Do not update lastTransitionTime if the status of the condition doesn't change. + if currentCond != nil && currentCond.Status == condition.Status { + condition.LastTransitionTime = currentCond.LastTransitionTime + } + newConditions := filterOutCondition(status.Conditions, condition.Type) + status.Conditions = append(newConditions, condition) + return true +} + // filterOutCondition returns a new slice of rollout conditions without conditions with the provided type. func filterOutCondition(conditions []v1beta1.RolloutCondition, condType v1beta1.RolloutConditionType) []v1beta1.RolloutCondition { var newConditions []v1beta1.RolloutCondition @@ -78,3 +103,7 @@ func filterOutCondition(conditions []v1beta1.RolloutCondition, condType v1beta1. func RemoveRolloutCondition(status *v1beta1.RolloutStatus, condType v1beta1.RolloutConditionType) { status.Conditions = filterOutCondition(status.Conditions, condType) } + +func RemoveBatchReleaseCondition(status *v1beta1.BatchReleaseStatus, condType v1beta1.RolloutConditionType) { + status.Conditions = filterOutCondition(status.Conditions, condType) +} diff --git a/pkg/util/errors/types.go b/pkg/util/errors/types.go new file mode 100644 index 00000000..59f2f6a6 --- /dev/null +++ b/pkg/util/errors/types.go @@ -0,0 +1,70 @@ +package errors + +import ( + "errors" + "fmt" +) + +// BenignError represents a benign error that can be handled or ignored by the caller. +// It encapsulates information that is non-critical and does not require immediate attention. +type BenignError struct { + Err error +} + +// Error implements the error interface for BenignError. +// It returns the error message of the encapsulated error or a default message. +func (e *BenignError) Error() string { + if e.Err != nil { + return fmt.Sprintf("[benign]: %s", e.Err.Error()) + } + return "benign error" +} + +// NewBenignError creates a new instance of BenignError. +// If the provided err is nil, it signifies a benign condition without a specific error message. +func NewBenignError(err error) *BenignError { + return &BenignError{Err: err} +} + +func IsBenign(err error) bool { + var benignErr *BenignError + return errors.As(err, &benignErr) +} + +func AsBenign(err error, target **BenignError) bool { + return errors.As(err, target) +} + +// FatalError represents a fatal error that requires special handling. +// Such errors are critical and may necessitate logging, alerts, or even program termination. +type FatalError struct { + Err error +} + +// Error implements the error interface for FatalError. +// It returns the error message of the encapsulated error or a default message. +func (e *FatalError) Error() string { + if e.Err != nil { + return e.Err.Error() + } + return "fatal error" +} + +// NewFatalError creates a new instance of FatalError. +// It encapsulates the provided error, marking it as critical. 
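+// For example, the deployment control in this patch wraps validation failures with NewFatalError so callers can detect them via IsFatal and stop retrying, while NewBenignError marks wait-and-requeue situations during Finalize.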
+func NewFatalError(err error) *FatalError { + return &FatalError{Err: err} +} + +// IsFatal checks whether the provided error is of type FatalError. +// It returns true if the error is a FatalError or wraps a FatalError, false otherwise. +func IsFatal(err error) bool { + var fatalErr *FatalError + return AsFatal(err, &fatalErr) +} + +// AsFatal attempts to cast the provided error to a FatalError. +// It returns true if the casting is successful, allowing the caller to handle it accordingly. +func AsFatal(err error, target **FatalError) bool { + return errors.As(err, target) +} diff --git a/pkg/util/patch/patch_utils.go b/pkg/util/patch/patch_utils.go index 42fd8a6b..b38eed91 100644 --- a/pkg/util/patch/patch_utils.go +++ b/pkg/util/patch/patch_utils.go @@ -23,6 +23,8 @@ import ( apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -222,3 +224,156 @@ func (s *DeploymentPatch) UpdatePaused(paused bool) *DeploymentPatch { } return s } + +func (s *DeploymentPatch) UpdateMinReadySeconds(seconds int32) *DeploymentPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + spec["minReadySeconds"] = seconds + } + return s +} + +func (s *DeploymentPatch) UpdateProgressDeadlineSeconds(seconds *int32) *DeploymentPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + spec["progressDeadlineSeconds"] = seconds + } + return s +} + +func (s *DeploymentPatch) UpdateMaxSurge(maxSurge *intstr.IntOrString) *DeploymentPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["strategy"]; !ok { + spec["strategy"] = make(map[string]interface{}) + } + strategy := spec["strategy"].(map[string]interface{}) + if _, ok := strategy["rollingUpdate"]; !ok { + strategy["rollingUpdate"] = make(map[string]interface{}) + } + rollingUpdate := strategy["rollingUpdate"].(map[string]interface{}) + rollingUpdate["maxSurge"] = maxSurge + } + return s +} + +func (s *DeploymentPatch) UpdateMaxUnavailable(maxUnavailable *intstr.IntOrString) *DeploymentPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["strategy"]; !ok { + spec["strategy"] = make(map[string]interface{}) + } + strategy := spec["strategy"].(map[string]interface{}) + if _, ok := strategy["rollingUpdate"]; !ok { + strategy["rollingUpdate"] = make(map[string]interface{}) + } + rollingUpdate := strategy["rollingUpdate"].(map[string]interface{}) + rollingUpdate["maxUnavailable"] = maxUnavailable + } + return s +} + +type ClonesetPatch struct { + CommonPatch +} + +func NewClonesetPatch() *ClonesetPatch { + return &ClonesetPatch{CommonPatch{PatchType: types.MergePatchType, PatchData: make(map[string]interface{})}} +} + +func (s *ClonesetPatch) 
UpdateMinReadySeconds(seconds int32) *ClonesetPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + klog.Infof("updateMinReadySeconds to %v", seconds) + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + spec["minReadySeconds"] = seconds + } + return s +} + +func (s *ClonesetPatch) UpdatePaused(paused bool) *ClonesetPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + klog.Infof("updatePaused to %v", paused) + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["updateStrategy"]; !ok { + spec["updateStrategy"] = make(map[string]interface{}) + } + updateStrategy := spec["updateStrategy"].(map[string]interface{}) + updateStrategy["paused"] = paused + } + return s +} + +func (s *ClonesetPatch) UpdatePartiton(partition *intstr.IntOrString) *ClonesetPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + klog.Infof("updatePartition to %v", partition) + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["updateStrategy"]; !ok { + spec["updateStrategy"] = make(map[string]interface{}) + } + updateStrategy := spec["updateStrategy"].(map[string]interface{}) + updateStrategy["partition"] = partition + } + return s +} + +func (s *ClonesetPatch) UpdateMaxSurge(maxSurge *intstr.IntOrString) *ClonesetPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + klog.Infof("updateMaxSurge to %v", maxSurge) + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["updateStrategy"]; !ok { + spec["updateStrategy"] = make(map[string]interface{}) + } + updateStrategy := spec["updateStrategy"].(map[string]interface{}) + updateStrategy["maxSurge"] = maxSurge + } + return s +} + +func (s *ClonesetPatch) UpdateMaxUnavailable(maxUnavailable *intstr.IntOrString) *ClonesetPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + klog.Infof("updateMaxUnavailable to %v", maxUnavailable) + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["updateStrategy"]; !ok { + spec["updateStrategy"] = make(map[string]interface{}) + } + updateStrategy := spec["updateStrategy"].(map[string]interface{}) + updateStrategy["maxUnavailable"] = maxUnavailable + } + return s +} diff --git a/pkg/util/workloads_utils.go b/pkg/util/workloads_utils.go index 86bc8659..219c22e8 100644 --- a/pkg/util/workloads_utils.go +++ b/pkg/util/workloads_utils.go @@ -154,7 +154,7 @@ func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int32) string { func SafeEncodeString(s string) string { r := make([]byte, len(s)) for i, b := range []rune(s) { - r[i] = alphanums[(int(b) % len(alphanums))] + r[i] = alphanums[int(b)%len(alphanums)] } return string(r) } @@ -329,11 +329,11 @@ func IsWorkloadType(object client.Object, t WorkloadType) bool { // DeploymentMaxUnavailable returns the maximum unavailable pods a rolling deployment can take. 
func DeploymentMaxUnavailable(deployment *apps.Deployment) int32 { strategy := deployment.Spec.Strategy - if strategy.Type != apps.RollingUpdateDeploymentStrategyType || *(deployment.Spec.Replicas) == 0 { + if strategy.Type != apps.RollingUpdateDeploymentStrategyType || *deployment.Spec.Replicas == 0 { return int32(0) } // Error caught by validation - _, maxUnavailable, _ := resolveFenceposts(strategy.RollingUpdate.MaxSurge, strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas)) + _, maxUnavailable, _ := resolveFenceposts(strategy.RollingUpdate.MaxSurge, strategy.RollingUpdate.MaxUnavailable, *deployment.Spec.Replicas) if maxUnavailable > *deployment.Spec.Replicas { return *deployment.Spec.Replicas } diff --git a/test/e2e/rollout_v1beta1_test.go b/test/e2e/rollout_v1beta1_test.go index cd2f67c4..a3c3e3e1 100644 --- a/test/e2e/rollout_v1beta1_test.go +++ b/test/e2e/rollout_v1beta1_test.go @@ -19,6 +19,7 @@ package e2e import ( "context" "fmt" + "reflect" "sort" "strings" "time" @@ -28,8 +29,11 @@ import ( appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" appsv1beta1 "github.com/openkruise/kruise-api/apps/v1beta1" "github.com/openkruise/rollouts/api/v1beta1" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" "github.com/openkruise/rollouts/pkg/util" apps "k8s.io/api/apps/v1" + scalingV1 "k8s.io/api/autoscaling/v1" + scalingV2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/api/core/v1" netv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -41,11 +45,6 @@ import ( "k8s.io/klog/v2" utilpointer "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" - // "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - // "k8s.io/apimachinery/pkg/util/intstr" - // gatewayv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" - // "github.com/openkruise/rollouts/api/v1alpha1" - // "k8s.io/apimachinery/pkg/api/errors" ) var _ = SIGDescribe("Rollout v1beta1", func() { @@ -113,6 +112,25 @@ var _ = SIGDescribe("Rollout v1beta1", func() { return clone } + // continuous release is not allowed for now, therefor we expect failure when updating + UpdateDeploymentFailed := func(object *apps.Deployment) *apps.Deployment { + var clone *apps.Deployment + Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { + clone = &apps.Deployment{} + err := GetObject(object.Name, clone) + if err != nil { + return err + } + clone.Spec.Replicas = utilpointer.Int32(*object.Spec.Replicas) + clone.Spec.Template = *object.Spec.Template.DeepCopy() + clone.Labels = mergeMap(clone.Labels, object.Labels) + clone.Annotations = mergeMap(clone.Annotations, object.Annotations) + clone.Spec.Paused = object.Spec.Paused + return k8sClient.Update(context.TODO(), clone) + })).To(HaveOccurred()) + + return clone + } UpdateCloneSet := func(object *appsv1alpha1.CloneSet) *appsv1alpha1.CloneSet { var clone *appsv1alpha1.CloneSet @@ -132,6 +150,25 @@ var _ = SIGDescribe("Rollout v1beta1", func() { return clone } + // continuous release is not allowed for now, therefor we expect failure when updating + UpdateCloneSetFail := func(object *appsv1alpha1.CloneSet) *appsv1alpha1.CloneSet { + var clone *appsv1alpha1.CloneSet + Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { + clone = &appsv1alpha1.CloneSet{} + err := GetObject(object.Name, clone) + if err != nil { + return err + } + clone.Spec.Replicas = utilpointer.Int32(*object.Spec.Replicas) + clone.Spec.Template = *object.Spec.Template.DeepCopy() + clone.Labels = 
mergeMap(clone.Labels, object.Labels) + clone.Annotations = mergeMap(clone.Annotations, object.Annotations) + return k8sClient.Update(context.TODO(), clone) + })).To(HaveOccurred()) + + return clone + } + // UpdateDaemonSet := func(object *appsv1alpha1.DaemonSet) *appsv1alpha1.DaemonSet { // var daemon *appsv1alpha1.DaemonSet // Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -202,51 +239,61 @@ var _ = SIGDescribe("Rollout v1beta1", func() { return clone } - ResumeRolloutCanary := func(name string) { + UpdateRolloutFail := func(object *v1beta1.Rollout) *v1beta1.Rollout { + var clone *v1beta1.Rollout + // still ignore the conflict error + Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { + clone = &v1beta1.Rollout{} + err := GetObject(object.Name, clone) + if err != nil { + return err + } + clone.Spec = *object.Spec.DeepCopy() + return k8sClient.Update(context.TODO(), clone) + })).To(HaveOccurred()) + return clone + } + + ResumeRollout := func(name string) { + clone := &v1beta1.Rollout{} + Expect(GetObject(name, clone)).NotTo(HaveOccurred()) + currentIndex := clone.Status.GetSubStatus().CurrentStepIndex Eventually(func() bool { clone := &v1beta1.Rollout{} Expect(GetObject(name, clone)).NotTo(HaveOccurred()) - if clone.Status.CanaryStatus.CurrentStepState != v1beta1.CanaryStepStatePaused { + if clone.Status.GetSubStatus().CurrentStepIndex == currentIndex && clone.Status.GetSubStatus().CurrentStepState == v1beta1.CanaryStepStatePaused { + klog.Info("patch to stepReady") + body := fmt.Sprintf(`{"status":{"canaryStatus":{"currentStepState":"%s"}}}`, v1beta1.CanaryStepStateReady) + if clone.Spec.Strategy.IsBlueGreenRelease() { + body = fmt.Sprintf(`{"status":{"blueGreenStatus":{"currentStepState":"%s"}}}`, v1beta1.CanaryStepStateReady) + } + Expect(k8sClient.Status().Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, []byte(body)))).NotTo(HaveOccurred()) + return false + } else { fmt.Println("resume rollout success, and CurrentStepState", util.DumpJSON(clone.Status)) return true } - - body := fmt.Sprintf(`{"status":{"canaryStatus":{"currentStepState":"%s"}}}`, v1beta1.CanaryStepStateReady) - Expect(k8sClient.Status().Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, []byte(body)))).NotTo(HaveOccurred()) - return false }, 10*time.Second, time.Millisecond*500).Should(BeTrue()) } - RolloutJumpCanaryStep := func(name string, target int) { + JumpRolloutStep := func(name string, target int) { Eventually(func() bool { clone := &v1beta1.Rollout{} Expect(GetObject(name, clone)).NotTo(HaveOccurred()) - if clone.Status.CanaryStatus.CurrentStepState != v1beta1.CanaryStepStatePaused { + if clone.Status.GetSubStatus().CurrentStepState != v1beta1.CanaryStepStatePaused { fmt.Println("Jump successfully, and current status ", util.DumpJSON(clone.Status)) return true } body := fmt.Sprintf(`{"status":{"canaryStatus":{"nextStepIndex":%d}}}`, target) + if clone.Spec.Strategy.IsBlueGreenRelease() { + body = fmt.Sprintf(`{"status":{"blueGreenStatus":{"nextStepIndex":%d}}}`, target) + } Expect(k8sClient.Status().Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, []byte(body)))).NotTo(HaveOccurred()) return false }, 10*time.Second, time.Second).Should(BeTrue()) } - // RolloutJumpBlueGreenStep := func(name string, target int) { - // Eventually(func() bool { - // clone := &v1alpha1.Rollout{} - // Expect(GetObject(name, clone)).NotTo(HaveOccurred()) - // if clone.Status.CanaryStatus.CurrentStepState !=v1beta1.CanaryStepStatePaused { - // 
fmt.Println("Jump successfully, and current status ", util.DumpJSON(clone.Status)) - // return true - // } - - // body := fmt.Sprintf(`{"status":{"blueGreenStatus":{"nextStepIndex":"%d"}}}`, target) - // Expect(k8sClient.Status().Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, []byte(body)))).NotTo(HaveOccurred()) - // return false - // }, 10*time.Second, time.Second).Should(BeTrue()) - // } - WaitDeploymentAllPodsReady := func(deployment *apps.Deployment) { Eventually(func() bool { clone := &apps.Deployment{} @@ -293,7 +340,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // }, 5*time.Minute, time.Second).Should(BeTrue()) // } - // WaitDeploymentReplicas := func(deployment *apps.Deployment) { + // WaitDeploymentCanaryReplicas := func(deployment *apps.Deployment) { // Eventually(func() bool { // clone := &apps.Deployment{} // Expect(GetObject(deployment.Name, clone)).NotTo(HaveOccurred()) @@ -302,7 +349,42 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // }, 10*time.Minute, time.Second).Should(BeTrue()) // } - WaitRolloutCanaryStepPaused := func(name string, stepIndex int32) { + WaitDeploymentBlueGreenReplicas := func(deployment *apps.Deployment) { + Eventually(func() bool { + clone := &apps.Deployment{} + Expect(GetObject(deployment.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.ObservedGeneration == clone.Generation && + *clone.Spec.Replicas == clone.Status.AvailableReplicas && clone.Status.ReadyReplicas == clone.Status.Replicas + }, 10*time.Minute, time.Second).Should(BeTrue()) + } + + // WaitClonesetBlueGreenReplicas := func(cloneset *appsv1alpha1.CloneSet) { + // Eventually(func() bool { + // clone := &appsv1alpha1.CloneSet{} + // Expect(GetObject(cloneset.Name, clone)).NotTo(HaveOccurred()) + // return clone.Status.ObservedGeneration == clone.Generation && + // *clone.Spec.Replicas == clone.Status.AvailableReplicas && clone.Status.ReadyReplicas == clone.Status.Replicas + // }, 10*time.Minute, time.Second).Should(BeTrue()) + // } + + // WaitRolloutStepUpgrade := func(name string, stepIndex int32) { + // start := time.Now() + // Eventually(func() bool { + // if start.Add(time.Minute * 5).Before(time.Now()) { + // DumpAllResources() + // Expect(true).Should(BeFalse()) + // } + // clone := &v1beta1.Rollout{} + // Expect(GetObject(name, clone)).NotTo(HaveOccurred()) + // if clone.Status.GetSubStatus() == nil { + // return false + // } + // klog.Infof("current step:%v target step:%v current step state %v", clone.Status.GetSubStatus().CurrentStepIndex, stepIndex, clone.Status.GetSubStatus().CurrentStepState) + // return clone.Status.GetSubStatus().CurrentStepIndex == stepIndex && clone.Status.GetSubStatus().CurrentStepState == v1beta1.CanaryStepStateUpgrade + // }, 20*time.Minute, time.Second).Should(BeTrue()) + // } + + WaitRolloutStepPaused := func(name string, stepIndex int32) { start := time.Now() Eventually(func() bool { if start.Add(time.Minute * 5).Before(time.Now()) { @@ -311,11 +393,11 @@ var _ = SIGDescribe("Rollout v1beta1", func() { } clone := &v1beta1.Rollout{} Expect(GetObject(name, clone)).NotTo(HaveOccurred()) - if clone.Status.CanaryStatus == nil { + if clone.Status.GetSubStatus() == nil { return false } - klog.Infof("current step:%v target step:%v current step state %v", clone.Status.CanaryStatus.CurrentStepIndex, stepIndex, clone.Status.CanaryStatus.CurrentStepState) - return clone.Status.CanaryStatus.CurrentStepIndex == stepIndex && clone.Status.CanaryStatus.CurrentStepState == v1beta1.CanaryStepStatePaused + klog.Infof("current 
step:%v target step:%v current step state %v", clone.Status.GetSubStatus().CurrentStepIndex, stepIndex, clone.Status.GetSubStatus().CurrentStepState) + return clone.Status.GetSubStatus().CurrentStepIndex == stepIndex && clone.Status.GetSubStatus().CurrentStepState == v1beta1.CanaryStepStatePaused }, 20*time.Minute, time.Second).Should(BeTrue()) } @@ -331,7 +413,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Eventually(func() bool { clone := &v1beta1.Rollout{} Expect(GetObject(name, clone)).NotTo(HaveOccurred()) - return clone.Status.CanaryStatus.ObservedWorkloadGeneration == generation + return clone.Status.GetSubStatus().ObservedWorkloadGeneration == generation }, time.Minute, time.Second).Should(BeTrue()) } @@ -532,7 +614,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { time.Sleep(time.Second * 3) // wait step 1 complete By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 2)) @@ -550,8 +632,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 2 complete By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 3)) @@ -566,9 +648,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 3 complete By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) // rollout - WaitRolloutCanaryStepPaused(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 4)) // canary workload @@ -582,8 +664,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 4 complete By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 5)) @@ -611,12 +693,12 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 3 By("Jump to step 3") - RolloutJumpCanaryStep(rollout.Name, 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + JumpRolloutStep(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 4)) - // canary workload (won't scale down indeed) + // canary workload (won't scale down) cWorkload, err = GetCanaryDeployment(workload) Expect(err).NotTo(HaveOccurred()) canaryRevision = crss[0].Labels[apps.DefaultDeploymentUniqueLabelKey] @@ -687,14 +769,14 @@ var _ = SIGDescribe("Rollout v1beta1", func() { rollout = UpdateRollout(rollout) By("update rollout configuration, and wait rollout re-run current step(3)") time.Sleep(time.Second * 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) // batch release batch := &v1beta1.BatchRelease{} Expect(GetObject(rollout.Name, batch)).NotTo(HaveOccurred()) // rollout Expect(GetObject(rollout.Name, 
rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 4)) - // canary workload (won't scale down indeed) + // canary workload (won't scale down) cWorkload, err = GetCanaryDeployment(workload) Expect(err).NotTo(HaveOccurred()) canaryRevision = crss[0].Labels[apps.DefaultDeploymentUniqueLabelKey] @@ -715,12 +797,12 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 2 By("Jump to step 2") - RolloutJumpCanaryStep(rollout.Name, 2) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + JumpRolloutStep(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 3)) - // canary workload (won't scale down indeed) + // canary workload (won't scale down) cWorkload, err = GetCanaryDeployment(workload) Expect(err).NotTo(HaveOccurred()) canaryRevision = crss[0].Labels[apps.DefaultDeploymentUniqueLabelKey] @@ -741,12 +823,12 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 1 By("Jump to step 1") - RolloutJumpCanaryStep(rollout.Name, 1) - WaitRolloutCanaryStepPaused(rollout.Name, 1) + JumpRolloutStep(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 2)) - // canary workload (won't scale down indeed) + // canary workload (won't scale down) cWorkload, err = GetCanaryDeployment(workload) Expect(err).NotTo(HaveOccurred()) canaryRevision = crss[0].Labels[apps.DefaultDeploymentUniqueLabelKey] @@ -767,7 +849,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 5 By("Jump to step 5") - RolloutJumpCanaryStep(rollout.Name, 5) + JumpRolloutStep(rollout.Name, 5) // wait rollout complete WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) klog.Infof("rollout(%s) completed, and check", namespace) @@ -841,7 +923,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 1 complete By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -853,7 +935,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus := util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 1)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -881,8 +963,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 2 complete By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -894,7 +976,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 2)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused 
success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -909,8 +991,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { CheckIngressRestored(service.Name) // wait step 3 complete By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -922,7 +1004,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -938,8 +1020,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 4 complete By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -951,7 +1033,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -967,8 +1049,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 3 By("Jump to step 3") - RolloutJumpCanaryStep(rollout.Name, 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + JumpRolloutStep(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -981,7 +1063,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1012,7 +1094,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { rollout = UpdateRollout(rollout) By("update rollout configuration, and wait rollout re-run current step(3)") time.Sleep(time.Second * 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) // batch release batch := &v1beta1.BatchRelease{} Expect(GetObject(rollout.Name, batch)).NotTo(HaveOccurred()) @@ -1028,7 +1110,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check 
workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1056,8 +1138,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 2 By("Jump to step 2") - RolloutJumpCanaryStep(rollout.Name, 2) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + JumpRolloutStep(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1069,7 +1151,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1085,8 +1167,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 1 By("Jump to step 1") - RolloutJumpCanaryStep(rollout.Name, 1) - WaitRolloutCanaryStepPaused(rollout.Name, 1) + JumpRolloutStep(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1098,7 +1180,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1126,7 +1208,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 5 By("Jump to step 5") - RolloutJumpCanaryStep(rollout.Name, 5) + JumpRolloutStep(rollout.Name, 5) // wait rollout complete WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhase(v1beta1.RolloutPhaseHealthy)) klog.Infof("rollout(%s) completed, and check", namespace) @@ -1200,7 +1282,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 1 complete By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 1)) @@ -1233,8 +1315,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 2 complete By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 2)) @@ -1255,8 +1337,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 3 complete By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) 
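// CloneSet canaries are updated in place, so batch progress is steered through
// spec.updateStrategy.partition instead of a second workload. A minimal, illustrative sketch
// with the new ClonesetPatch builder (assuming pkg/util/patch is imported as "patch"):
//   p := patch.NewClonesetPatch()
//   partition := intstr.FromInt(2) // pods to keep on the stable revision
//   p.UpdatePartiton(&partition).UpdatePaused(false)
//   // merge-patch body, roughly: {"spec":{"updateStrategy":{"partition":2,"paused":false}}}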
Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) @@ -1292,7 +1374,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { rollout = UpdateRollout(rollout) By("update rollout configuration, and wait rollout re-run current step(3)") time.Sleep(time.Second * 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) // batch release batch := &v1beta1.BatchRelease{} Expect(GetObject(rollout.Name, batch)).NotTo(HaveOccurred()) @@ -1328,8 +1410,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 4 complete By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) @@ -1350,8 +1432,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 3 By("Jump to step 3") - RolloutJumpCanaryStep(rollout.Name, 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + JumpRolloutStep(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) @@ -1384,8 +1466,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 2 By("Jump to step 2") - RolloutJumpCanaryStep(rollout.Name, 2) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + JumpRolloutStep(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) @@ -1406,8 +1488,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 1 By("Jump to step 1") - RolloutJumpCanaryStep(rollout.Name, 1) - WaitRolloutCanaryStepPaused(rollout.Name, 1) + JumpRolloutStep(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) @@ -1440,7 +1522,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 5 By("Jump to step 5") - RolloutJumpCanaryStep(rollout.Name, 5) + JumpRolloutStep(rollout.Name, 5) // wait rollout complete WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhase(v1beta1.RolloutPhaseHealthy)) klog.Infof("rollout(%s) completed, and check", namespace) @@ -1470,6 +1552,2414 @@ var _ = SIGDescribe("Rollout v1beta1", func() { WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) }) + + // step1-> 2-> 3-> 4-> remove 2-4 steps + It("V1->V2: Deployment, Canary, remove 2-4 steps", func() { + finder := util.NewControllerFinder(k8sClient) + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_canary_base.yaml", rollout)).ToNot(HaveOccurred()) + CreateObject(rollout) + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + 
Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + rss, err := finder.GetReplicaSetsForDeployment(workload) + Expect(err).NotTo(HaveOccurred()) + Expect(len(rss)).Should(BeNumerically("==", 1)) + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update deployment image from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // canary workload + cWorkload, err := GetCanaryDeployment(workload) + Expect(err).NotTo(HaveOccurred()) + crss, err := finder.GetReplicaSetsForDeployment(cWorkload) + Expect(err).NotTo(HaveOccurred()) + Expect(len(crss)).Should(BeNumerically("==", 1)) + Expect(cWorkload.Status.AvailableReplicas).Should(BeNumerically("==", 1)) + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 0)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + + // Jump to step 3 + By("Jump to step 3") + JumpRolloutStep(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 4)) + // canary workload + cWorkload, err = GetCanaryDeployment(workload) + Expect(err).NotTo(HaveOccurred()) + canaryRevision := crss[0].Labels[apps.DefaultDeploymentUniqueLabelKey] + Expect(cWorkload.Status.AvailableReplicas).Should(BeNumerically("==", 3)) + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 0)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + // canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.Canary.Steps[2].Traffic))) + + // remove step 2 3 4 + By("Remove step 2 3 4") + Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // update rollout step configuration + rollout.Spec.Strategy.Canary.Steps = []v1beta1.CanaryStep{ + { + TrafficRoutingStrategy: v1beta1.TrafficRoutingStrategy{ + Traffic: utilpointer.String("20%"), + }, + Replicas: &intstr.IntOrString{Type: intstr.String, StrVal: "20%"}, + Pause: v1beta1.RolloutPause{}, + }, + } + // now modifying the amount of steps is forbidden in webhook, we expect an error + _ = UpdateRolloutFail(rollout) + }) + }) + + KruiseDescribe("Bluegreen Release - Deployment - Ingress", func() 
{ + It("bluegreen rolling with traffic case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + setting, _ := control.GetOriginalSetting(workload) + Expect(setting.MinReadySeconds).Should(BeNumerically("==", int32(0))) + Expect(*setting.ProgressDeadlineSeconds).Should(BeNumerically("==", int32(600))) + Expect(reflect.DeepEqual(setting.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(setting.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + By("check workload status & paused success") + + // check rollout status + Expect(GetObject(workload.Name, 
workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(util.ComputeHash(&workload.Spec.Template, nil))) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(GetCanaryRSRevision(workload))) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + // when traffic is 0%, ingress canary won't create and annotation won't be set (for ingress-nginx) + // cIngress := &netv1.Ingress{} + // Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + // Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + // Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[0].Traffic))) + + // ------ step 2: replicas: 100%, traffic: 0% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(2)") + WaitRolloutStepPaused(rollout.Name, 2) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + 
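// The Ready/Available split asserted above is what keeps the Deployment "in flight" during the
// blue-green release: minReadySeconds is raised to v1beta1.MaxReadySeconds (and
// progressDeadlineSeconds to v1beta1.MaxProgressSeconds), so updated pods become Ready but are
// never counted as Available. That is why readyReplicas reaches 10 (stable + updated) while
// availableReplicas stays at 5; the original values captured by control.GetOriginalSetting are
// expected to be restored when the release is finalized.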
Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // ------ step 3: replicas: 100%, traffic: 50% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(3)") + WaitRolloutStepPaused(rollout.Name, 3) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[2].Traffic))) + + // ------ step 4: replicas: 100%, traffic: 100% ------ + // resume rollout + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(4)") + WaitRolloutStepPaused(rollout.Name, 4) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + 
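// The spec fields asserted just above and below can be assembled with the new DeploymentPatch
// helpers; a hedged sketch, assuming dp is a *patch.DeploymentPatch built analogously to
// NewClonesetPatch() (the actual controller wiring may differ):
//   maxSurge := intstr.FromString("100%")
//   maxUnavailable := intstr.FromInt(0)
//   dp.UpdateMinReadySeconds(int32(v1beta1.MaxReadySeconds)).
//       UpdateProgressDeadlineSeconds(utilpointer.Int32(int32(v1beta1.MaxProgressSeconds))).
//       UpdateMaxSurge(&maxSurge).UpdateMaxUnavailable(&maxUnavailable)
//   // patch body, roughly: {"spec":{"minReadySeconds":...,"progressDeadlineSeconds":...,
//   //   "strategy":{"rollingUpdate":{"maxSurge":"100%","maxUnavailable":0}}}}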
Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + // canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[3].Traffic))) + + // ------ Final approval ------ + // resume rollout + ResumeRollout(rollout.Name) + By("resume rollout, and wait to Finalise") + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitDeploymentAllPodsReady(workload) + By("rollout completed, and check") + + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress = &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService = &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + time.Sleep(time.Second * 3) + + // check progressing succeed + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(cond.Reason).Should(Equal(v1beta1.ProgressingReasonCompleted)) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + cond = getRolloutCondition(rollout.Status, v1beta1.RolloutConditionSucceeded) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionTrue))) + WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) + + // scale up replicas 5 -> 6 + workload.Spec.Replicas = utilpointer.Int32(6) + 
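// With the rollout already Healthy, an ordinary scale-up is expected to pass straight through
// to the Deployment (applied by UpdateDeployment below); the rollout should only record the new
// workload generation, which is all the final wait checks.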
UpdateDeployment(workload) + By("Update workload replicas from(5) -> to(6)") + time.Sleep(time.Second * 2) + + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) + }) + + It("bluegreen rollback case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + setting, _ := control.GetOriginalSetting(workload) + Expect(setting.MinReadySeconds).Should(BeNumerically("==", int32(0))) + Expect(*setting.ProgressDeadlineSeconds).Should(BeNumerically("==", int32(600))) + Expect(reflect.DeepEqual(setting.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(setting.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: 
intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + By("check workload status & paused success") + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(util.ComputeHash(&workload.Spec.Template, nil))) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(GetCanaryRSRevision(workload))) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + // when traffic is 0%, ingress canary won't create and annotation won't be set (for ingress-nginx) + // cIngress := &netv1.Ingress{} + // Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + // Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + // Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[0].Traffic))) + + // ------ step 2: replicas: 100%, traffic: 0% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(2)") + WaitRolloutStepPaused(rollout.Name, 2) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + 
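// currentStepIndex reports the step being executed, while nextStepIndex is the writable knob:
// JumpRolloutStep (used for the rollbacks below) simply patches it on the status, e.g.
//   body := `{"status":{"blueGreenStatus":{"nextStepIndex":2}}}`
//   k8sClient.Status().Patch(ctx, rollout, client.RawPatch(types.MergePatchType, []byte(body)))
// after which the controller reconciles replicas and traffic for that step without touching the
// pod template. (Illustrative only; context and error handling omitted.)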
Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // ------ step 3: replicas: 100%, traffic: 50% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(3)") + WaitRolloutStepPaused(rollout.Name, 3) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[2].Traffic))) + + // ------ step 4: replicas: 100%, traffic: 100% ------ + // resume rollout + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(4)") + WaitRolloutStepPaused(rollout.Name, 4) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + 
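// Because both revisions stay fully scaled during blue-green, the jumps exercised below
// (step 4 -> 3 -> 2) only need to move the ingress canary-weight back; no pods have to be
// created or deleted, which is what makes the traffic rollback effectively instantaneous.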
Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + // canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[3].Traffic))) + + // ------ Rollback: traffic switch ------ + By("Jump to step 3") + JumpRolloutStep(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, 
service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[2].Traffic))) + + By("Jump to step 2") + JumpRolloutStep(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // ------ Rollback: PaaS rollback ------ + By("update workload env NODE_NAME from(version2) -> to(version1)") + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version1"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + + 
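+ // rolling back to the stable template is expected to finish the rollout (Healthy) and clean up the canary Service/Ingress, as asserted below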
WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitDeploymentAllPodsReady(workload) + + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(string(cond.Reason)).Should(Equal(string(v1beta1.CanaryStepStateCompleted))) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + // canary ingress and canary service should be deleted + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).To(HaveOccurred()) + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).To(HaveOccurred()) + + // check service update + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + }) + + It("bluegreen deployment continuous rolling case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + setting, _ := control.GetOriginalSetting(workload) + Expect(setting.MinReadySeconds).Should(BeNumerically("==", int32(0))) + Expect(*setting.ProgressDeadlineSeconds).Should(BeNumerically("==", int32(600))) + Expect(reflect.DeepEqual(setting.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 
0})).Should(BeTrue()) + Expect(reflect.DeepEqual(setting.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + By("check workload status & paused success") + // ----- Continuous Release ------ + updatedRevision := rollout.Status.BlueGreenStatus.UpdatedRevision + By(updatedRevision) + By("update workload env NODE_NAME from(version2) -> to(version3)") + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version3"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeploymentFailed(workload) + // a second template change during a blue-green release is expected to be rejected (hence UpdateDeploymentFailed above); the code below exercises the continuous release scenario and is kept commented out in case we need it in the future + /* + UpdateDeployment(workload) + // from step 1 to step 1, we additionally need to check the step upgrade state to distinguish the two steps + WaitRolloutStepUpgrade(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) + // stable revision shouldn't change + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).ShouldNot(Equal(updatedRevision)) + Expect(workload.Labels[v1beta1.DeploymentStableRevisionLabel]).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + setting, _ = control.GetOriginalSetting(workload) + Expect(setting.MinReadySeconds).Should(BeNumerically("==", int32(0))) + Expect(*setting.ProgressDeadlineSeconds).Should(BeNumerically("==", int32(600))) + Expect(reflect.DeepEqual(setting.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(setting.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + 
Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(util.ComputeHash(&workload.Spec.Template, nil))) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(GetCanaryRSRevision(workload))) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // ------ step 2: replicas: 100%, traffic: 0% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(2)") + WaitRolloutStepPaused(rollout.Name, 2) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + // ----- Continuous Release, AGAIN------ + updatedRevision = rollout.Status.BlueGreenStatus.UpdatedRevision + By("update workload env NODE_NAME from(version3) -> to(version4)") + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version4"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + WaitRolloutStepPaused(rollout.Name, 1) + // stable revision shouldn't change + 
Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).ShouldNot(Equal(updatedRevision)) + Expect(workload.Labels[v1beta1.DeploymentStableRevisionLabel]).Should(Equal(stableRevision)) + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + + // ------ step 4: replicas: 100%, traffic: 100% ------ + // resume rollout canary + By("Jump to step 4") + JumpRolloutStep(rollout.Name, 4) + WaitRolloutStepPaused(rollout.Name, 4) + + // ------ Final approval ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, final approval") + // wait rollout complete + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhase(v1beta1.RolloutPhaseHealthy)) + klog.Infof("rollout(%s) completed, and check", namespace) + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", -1)) + // check service & ingress + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress := &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService = &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", *workload.Spec.Replicas)) + Expect(workload.Status.Replicas).Should(BeNumerically("==", *workload.Spec.Replicas)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", *workload.Spec.Replicas)) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version4")) + } + } + // check progressing succeed + 
Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(cond.Reason).Should(Equal(v1beta1.ProgressingReasonCompleted)) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + cond = getRolloutCondition(rollout.Status, v1beta1.RolloutConditionSucceeded) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionTrue))) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) + */ + }) + + It("bluegreen scale up and down", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + 
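+ // scaling the Deployment up and down below should be tolerated mid-release: the step index stays at 1 while the green ReplicaSet tracks the step's 50% ratio of spec.replicas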
Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + // ------ 50% maxSurge, scale up: from 5 to 6 ------ + workload.Spec.Replicas = utilpointer.Int32(6) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 6)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 9)) + + // ------ scale up: from 6 to 7 ------ + workload.Spec.Replicas = utilpointer.Int32(7) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 7)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 11)) + + // ------ scale up: from 7 to 8 ------ + workload.Spec.Replicas = utilpointer.Int32(8) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 12)) + + // ------ scale down: from 8 to 4 ------ + workload.Spec.Replicas = utilpointer.Int32(4) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 2)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + 
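+ // 50% of the 4 desired replicas => 2 green pods after scaling down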
Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 2)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 2)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 6)) + + // ------ step 2: replicas: 100%, traffic: 0% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(2)") + WaitRolloutStepPaused(rollout.Name, 2) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) + + // ------ scale up: from 4 to 7 ------ + workload.Spec.Replicas = utilpointer.Int32(7) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 7)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 7)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 7)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 7)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 7)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 14)) + + // ------ scale up: from 7 to 8 ------ + workload.Spec.Replicas = utilpointer.Int32(8) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 8)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 8)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + 
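+ // in step 2 (100% maxSurge) the green ReplicaSet matches spec.replicas, so 8 green plus 8 blue pods are ready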
Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 16)) + + // ------ scale down: from 8 to 4 ------ + workload.Spec.Replicas = utilpointer.Int32(4) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + }) + + It("bluegreen delete rollout case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + 
Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + + By("delete rollout and check deployment") + k8sClient.Delete(context.TODO(), rollout) + WaitRolloutNotFound(rollout.Name) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check spec + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress := &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService := &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + WaitDeploymentAllPodsReady(workload) + // status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + }) + + It("bluegreen disable rollout case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, 
workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + + // By("before disable rollout") + By("disable rollout and check deployment") + rollout.Spec.Disabled = true + UpdateRollout(rollout) + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseDisabled) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check spec + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600))) + + // wait until the original MaxUnavailable/MaxSurge have been written back after disabling the rollout + limit := 0 + for !reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0}) { + By(fmt.Sprintf("workload.Spec.Strategy.RollingUpdate.MaxUnavailable: %v, workload.Spec.Strategy.RollingUpdate.MaxSurge: %v", workload.Spec.Strategy.RollingUpdate.MaxUnavailable, workload.Spec.Strategy.RollingUpdate.MaxSurge)) + time.Sleep(time.Second * 5) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + if limit > 10 { + Expect(false).To(BeTrue()) + } + limit++ + time.Sleep(time.Second) + } + + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 
1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress := &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService := &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + WaitDeploymentAllPodsReady(workload) + // status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + }) + }) + + KruiseDescribe("Bluegreen Release - Deployment - HPA disable", func() { + It("bluegreen disable hpa test case - autoscaling/v1 for v1.19", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + By("Creating v1 HPA...") + hpa := &scalingV1.HorizontalPodAutoscaler{} + Expect(ReadYamlToObject("./test_data/rollout/hpa_v1.yaml", hpa)).ToNot(HaveOccurred()) + CreateObject(hpa) + time.Sleep(time.Second * 3) + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + 
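+ // while the blue-green release is in progress the referenced HPA is expected to be disabled by renaming its scaleTargetRef (the "-DisableByRollout" suffix checked further below), and restored once the rollout is disabled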
Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + + // check hpa + HPADisableSuffix := "-DisableByRollout" + Expect(GetObject(hpa.Name, hpa)).NotTo(HaveOccurred()) + Expect(hpa.Spec.ScaleTargetRef.Name).Should(Equal(workload.Name + HPADisableSuffix)) + + By("disable rollout and check deployment") + rollout.Spec.Disabled = true + UpdateRollout(rollout) + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseDisabled) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check spec + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress := &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService := &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + WaitDeploymentAllPodsReady(workload) + // status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + + // check hpa + Expect(GetObject(hpa.Name, hpa)).NotTo(HaveOccurred()) + Expect(hpa.Spec.ScaleTargetRef.Name).Should(Equal(workload.Name)) + }) + + It("bluegreen disable hpa test case - autoscaling/v2 for v1.23", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating 
workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + By("Creating v2 HPA...") + hpa := &scalingV2.HorizontalPodAutoscaler{} + Expect(ReadYamlToObject("./test_data/rollout/hpa_v2.yaml", hpa)).ToNot(HaveOccurred()) + CreateObject(hpa) + time.Sleep(time.Second * 3) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + + // check hpa + HPADisableSuffix := "-DisableByRollout" + Expect(GetObject(hpa.Name, hpa)).NotTo(HaveOccurred()) + Expect(hpa.Spec.ScaleTargetRef.Name).Should(Equal(workload.Name + HPADisableSuffix)) + + By("delete rollout and check deployment") + k8sClient.Delete(context.TODO(), rollout) + WaitRolloutNotFound(rollout.Name) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check spec + 
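+ // the original strategy saved in the (now removed) annotation should have been written back to the Deployment: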
Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress := &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService := &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + WaitDeploymentAllPodsReady(workload) + // status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + // check hpa + Expect(GetObject(hpa.Name, hpa)).NotTo(HaveOccurred()) + Expect(hpa.Spec.ScaleTargetRef.Name).Should(Equal(workload.Name)) + }) + }) + + // test for cloneset + KruiseDescribe("Bluegreen Release - Cloneset - Ingress", func() { + It("bluegreen rolling with traffic case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.BlueGreenStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME 
from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // wait step 2 complete + By("wait step(2) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", 
cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // wait step 3 complete + By("wait step(3) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", -1)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[2].Traffic))) + + // ------ Final approval ------ + // resume rollout + ResumeRollout(rollout.Name) + By("resume rollout, and wait to Finalise") + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitCloneSetAllPodsReady(workload) + By("rollout completed, and check") + + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress = &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService = &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + time.Sleep(time.Second * 3) + + // check progressing succeed + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + 
Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(cond.Reason).Should(Equal(v1beta1.ProgressingReasonCompleted)) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + cond = getRolloutCondition(rollout.Status, v1beta1.RolloutConditionSucceeded) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionTrue))) + WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) + }) + + It("bluegreen rollback case for cloneset", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.BlueGreenStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + 
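// Illustrative sketch (not part of this diff): getRolloutCondition, used just above to read the
// Progressing and Succeeded conditions, presumably scans status.Conditions for the requested
// condition type and returns a pointer to it (nil when absent). The field names below follow the
// v1beta1 types referenced elsewhere in this file and are assumptions, not the canonical helper.
func getRolloutConditionSketch(status v1beta1.RolloutStatus, condType v1beta1.RolloutConditionType) *v1beta1.RolloutCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			return &status.Conditions[i]
		}
	}
	return nil
}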
Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // wait step 2 complete + By("wait step(2) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // wait step 3 complete + By("wait step(3) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + 
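// Illustrative sketch (not part of this diff): the assertions above repeatedly use
// rev[strings.LastIndex(rev, "-")+1:] to reduce a ControllerRevision name such as
// "echoserver-7d9f8c9b5" to its hash suffix "7d9f8c9b5" before comparing it with the
// BlueGreenStatus revisions. A small helper expressing the same idea:
func revisionHashSuffixSketch(revisionName string) string {
	if i := strings.LastIndex(revisionName, "-"); i >= 0 {
		return revisionName[i+1:]
	}
	return revisionName
}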
Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", -1)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[2].Traffic))) + + // ------ Rollback: traffic switch ------ + By("Jump to step 2") + JumpRolloutStep(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // ------ Rollback: traffic switch ------ + By("Jump to step 1") + JumpRolloutStep(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check 
rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[0].Traffic))) + + // ------ Rollback: PaaS rollback ------ + By("update workload env NODE_NAME from(version2) -> to(version1)") + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version1"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitCloneSetAllPodsReady(workload) + + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(string(cond.Reason)).Should(Equal(string(v1beta1.CanaryStepStateCompleted))) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + CheckIngressRestored(service.Name) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + }) + + It("bluegreen continuous rolling case for cloneset", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, 
rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.BlueGreenStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // wait step 2 complete + By("wait step(2) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + 
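// Illustrative sketch (not part of this diff): mergeEnvVar, as called above, is assumed to upsert
// an env var into the container's env list (replace the entry with the same name if present,
// otherwise append it), which is how these cases flip NODE_NAME between version1/version2/version3
// to trigger a new revision. v1 here is the corev1 alias already used throughout this file.
func mergeEnvVarSketch(original []v1.EnvVar, add v1.EnvVar) []v1.EnvVar {
	merged := make([]v1.EnvVar, 0, len(original)+1)
	for _, env := range original {
		if env.Name != add.Name {
			merged = append(merged, env)
		}
	}
	return append(merged, add)
}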
Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // ----- Continuous Release ------ + updatedRevision := rollout.Status.BlueGreenStatus.UpdatedRevision + By(updatedRevision) + By("update workload env NODE_NAME from(version2) -> to(version3)") + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version3"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSetFail(workload) + }) + + // cloneset now only support single step, keep this case for future + // It("bluegreen scale up and down for cloneset", func() { + // By("Creating Rollout...") + // rollout := &v1beta1.Rollout{} + // Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + // rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + // APIVersion: "apps.kruise.io/v1alpha1", + // Kind: "CloneSet", + // Name: "echoserver", + // } + // CreateObject(rollout) + + // By("Creating workload and waiting for all pods ready...") + // // service + // service := &v1.Service{} + // Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + // CreateObject(service) + // // ingress + // ingress := &netv1.Ingress{} + // Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + // CreateObject(ingress) + // // workload + // workload := &appsv1alpha1.CloneSet{} + // Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + // CreateObject(workload) + // WaitCloneSetAllPodsReady(workload) + + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + // Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + // stableRevision := rollout.Status.BlueGreenStatus.StableRevision + // By("check rollout status & paused success") + + // // v1 -> v2, start rollout action + // newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + // workload.Spec.Template.Spec.Containers[0].Env = newEnvs + // UpdateCloneSet(workload) + // By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + // time.Sleep(time.Second * 3) + + // // wait step 1 complete + // 
By("wait step(1) pause") + // WaitRolloutStepPaused(rollout.Name, 1) + // // check workload status & paused + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + // Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + // Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + // Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + // By("check cloneSet status & paused success") + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + // Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + // Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + // canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + // Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + // Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // // check stable, canary service & ingress + // // stable service + // Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + // Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + // //canary service + // cService := &v1.Service{} + // Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + // Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // // ------ 50% maxSurge, scale up: from 5 to 6 ------ + // By("scale up: from 5 to 6") + // workload.Spec.Replicas = utilpointer.Int32(6) + // UpdateCloneSet(workload) + // time.Sleep(time.Second * 3) + // WaitClonesetBlueGreenReplicas(workload) + + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + // Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 6)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 6)) + // // check workload status + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 6)) + // Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 6)) + // Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 12)) + + // // ------ scale up: from 6 to 7 ------ + // By("scale up: from 6 to 7") + // workload.Spec.Replicas = utilpointer.Int32(7) + // UpdateCloneSet(workload) + // time.Sleep(time.Second * 3) + // WaitClonesetBlueGreenReplicas(workload) + + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + // Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // 
Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 7)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 7)) + // // check workload status + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 7)) + // Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 7)) + // Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 14)) + + // // ------ scale up: from 7 to 8 ------ + // By("scale up: from 7 to 8") + // workload.Spec.Replicas = utilpointer.Int32(8) + // UpdateCloneSet(workload) + // time.Sleep(time.Second * 3) + // WaitClonesetBlueGreenReplicas(workload) + + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + // Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 8)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 8)) + // // check workload status + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 8)) + // Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 8)) + // Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 16)) + + // // ------ scale down: from 8 to 4 ------ + // By("scale down: from 8 to 4") + // workload.Spec.Replicas = utilpointer.Int32(4) + // UpdateCloneSet(workload) + // time.Sleep(time.Second * 3) + // WaitClonesetBlueGreenReplicas(workload) + + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + // Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 4)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) + // // check workload status + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) + // Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 4)) + // Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // }) + + It("bluegreen delete rollout case for cloneset", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + 
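// Illustrative sketch (not part of this diff): ReadYamlToObject, used for every fixture above,
// presumably just reads a manifest from test_data and decodes it into the given object. A minimal
// version, assuming the "os" package and sigs.k8s.io/yaml are available to the suite, could be:
func readYamlToObjectSketch(path string, obj interface{}) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	// sigs.k8s.io/yaml converts YAML to JSON first, so the json tags on the API types apply.
	return yaml.Unmarshal(data, obj)
}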
CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.BlueGreenStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // wait step 2 complete + By("wait step(2) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + 
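// Illustrative sketch (not part of this diff): wait helpers such as WaitCloneSetAllPodsReady,
// used throughout these cases, are assumed to poll with Gomega's Eventually until the workload
// has converged. Roughly (the timeout and interval below are guesses, not the suite's values):
func waitCloneSetAllPodsReadySketch(workload *appsv1alpha1.CloneSet) {
	Eventually(func() bool {
		clone := &appsv1alpha1.CloneSet{}
		if err := GetObject(workload.Name, clone); err != nil {
			return false
		}
		// Converged means the controller has observed the latest spec and every replica is ready.
		return clone.Status.ObservedGeneration == clone.Generation &&
			*clone.Spec.Replicas == clone.Status.ReadyReplicas &&
			*clone.Spec.Replicas == clone.Status.UpdatedReadyReplicas
	}, 10*time.Minute, time.Second).Should(BeTrue())
}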
Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // ------ delete rollout ------ + By("delete rollout and check deployment") + k8sClient.Delete(context.TODO(), rollout) + WaitRolloutNotFound(rollout.Name) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check workload annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check workload spec + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(reflect.DeepEqual(workload.Spec.UpdateStrategy.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.UpdateStrategy.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress = &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService = &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + time.Sleep(time.Second * 3) + + // check progressing succeed + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).To(HaveOccurred()) + }) + + It("bluegreen disable rollout case for cloneset", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // 
service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.BlueGreenStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // wait step 2 complete + By("wait step(2) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + 
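// Illustrative sketch (not part of this diff): mutation helpers such as UpdateCloneSet and
// UpdateRollout, used above, typically re-fetch the object and retry on conflicts instead of
// writing a possibly stale copy. A plausible shape, assuming the controller-runtime k8sClient
// used elsewhere in this file plus k8s.io/client-go/util/retry and apimachinery's types package:
func updateCloneSetSketch(desired *appsv1alpha1.CloneSet) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		current := &appsv1alpha1.CloneSet{}
		key := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name}
		if err := k8sClient.Get(context.TODO(), key, current); err != nil {
			return err
		}
		// Copy only the spec so status and metadata written by the controller are preserved.
		current.Spec = *desired.Spec.DeepCopy()
		return k8sClient.Update(context.TODO(), current)
	})
}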
Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + By("disable rollout and check deployment") + rollout.Spec.Disabled = true + UpdateRollout(rollout) + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseDisabled) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check spec + // check workload annotation + settingStr = workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check workload spec + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(reflect.DeepEqual(workload.Spec.UpdateStrategy.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.UpdateStrategy.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress = &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService = &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + time.Sleep(time.Second * 3) + }) }) KruiseDescribe("CloneSet 
canary rollout with Ingress", func() { @@ -1526,7 +4016,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -1552,9 +4042,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check stable, canary service & ingress CheckIngressRestored(service.Name) @@ -1565,7 +4055,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitCloneSetAllPodsReady(workload) By("rollout completed, and check") @@ -1671,7 +4161,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // rollback -> v1 @@ -1753,7 +4243,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -1774,7 +4264,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // v1 -> v2 -> v3, continuous release @@ -1785,7 +4275,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { time.Sleep(time.Second * 10) // wait step 0 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -1805,7 +4295,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { service: service, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("check rollout canary status success, resume rollout, and wait rollout canary complete") WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitCloneSetAllPodsReady(workload) @@ -1873,7 +4363,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -1895,17 +4385,17 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // v1 -> v2 -> v1, continuous release By("Update cloneSet env NODE_NAME from(version2) -> to(version1)") // resume rollout 
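// Illustrative sketch (not part of this diff): the hunks in this part of the change rename
// WaitRolloutCanaryStepPaused/ResumeRolloutCanary to the mode-agnostic
// WaitRolloutStepPaused/ResumeRollout. The likely reason is that a v1beta1 Rollout now reports
// progress under either CanaryStatus or BlueGreenStatus, so a shared helper has to read whichever
// sub-status is populated, roughly along these lines (field layout assumed from usage above):
func currentStepIndexSketch(ro *v1beta1.Rollout) int32 {
	if ro.Status.BlueGreenStatus != nil {
		return ro.Status.BlueGreenStatus.CurrentStepIndex
	}
	if ro.Status.CanaryStatus != nil {
		return ro.Status.CanaryStatus.CurrentStepIndex
	}
	return 0
}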
canary - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version1"}) workload.Spec.Template.Spec.Containers[0].Env = newEnvs UpdateCloneSet(workload) // make sure CloneSet is rolling back in batch By("Wait step 1 paused") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) By("Wait step 2 paused") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("check rollout canary status success, resume rollout, and wait rollout canary complete") WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitCloneSetAllPodsReady(workload) @@ -1981,7 +4471,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -2001,7 +4491,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitCloneSetAllPodsReady(workload) By("rollout completed, and check") @@ -2091,7 +4581,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateDeployment(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -2123,9 +4613,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { service: service, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check stable, canary service & ingress CheckIngressRestored(service.Name) @@ -2138,7 +4628,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(strategy.Paused).Should(BeFalse()) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitDeploymentAllPodsReady(workload) By("rollout completed, and check") @@ -2226,7 +4716,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateDeployment(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) @@ -2241,9 +4731,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { By("check workload status & paused success") // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) 
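// Illustrative sketch (not part of this diff): CheckPodBatchLabel, exercised heavily in the
// rollout-id hunks below, is assumed to count pods carrying the rollout-id and batch-id labels
// that kruise-rollout stamps on updated pods. The label keys and the controller-runtime list
// options here are assumptions for illustration, not the suite's actual implementation.
func checkPodBatchLabelSketch(namespace string, selector *metav1.LabelSelector, rolloutID, batchID string, expected int) {
	sel, err := metav1.LabelSelectorAsSelector(selector)
	Expect(err).NotTo(HaveOccurred())
	pods := &v1.PodList{}
	Expect(k8sClient.List(context.TODO(), pods,
		client.InNamespace(namespace),
		client.MatchingLabelsSelector{Selector: sel})).NotTo(HaveOccurred())
	count := 0
	for i := range pods.Items {
		labels := pods.Items[i].Labels
		if labels["rollouts.kruise.io/rollout-id"] == rolloutID &&
			labels["rollouts.kruise.io/rollout-batch-id"] == batchID {
			count++
		}
	}
	Expect(count).Should(BeNumerically("==", expected))
}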
Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) @@ -2257,7 +4747,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { workload.Spec.Template.Spec.Containers[0].Env = newEnvs UpdateDeployment(workload) - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision = workload.Labels[v1beta1.DeploymentStableRevisionLabel] Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) @@ -2272,9 +4762,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { By("check workload status & paused success") // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) @@ -2332,7 +4822,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateDeployment(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) @@ -2347,9 +4837,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { By("check workload status & paused success") // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) @@ -2415,7 +4905,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateDeployment(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) @@ -2486,7 +4976,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateDeployment(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) @@ -2519,9 +5009,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { }, 5*time.Minute, time.Second).Should(BeTrue()) By("rolling deployment to be completed") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) WaitDeploymentAllPodsReady(workload) }) }) @@ -2566,17 +5056,17 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 1 complete By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - 
WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 1) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("check rollout canary status success, resume rollout, and wait rollout canary complete") WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitAdvancedStatefulSetPodsReady(workload) @@ -2652,7 +5142,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 2 complete By("wait step(2) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 1) @@ -2674,7 +5164,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // resume rollout canary By("check rollout canary status success, resume rollout, and wait rollout canary complete") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitCloneSetAllPodsReady(workload) @@ -2737,17 +5227,17 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 1) By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "3", 1) @@ -2762,26 +5252,26 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // make sure disable quickly rollback policy By("Wait step (1) paused") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "3", 1) By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "4", 1) By("Wait rollout complete") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) @@ -2840,17 +5330,17 @@ var _ = SIGDescribe("Rollout 
v1beta1", func() { UpdateCloneSet(workload) By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 1) By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "3", 1) By("Only update rollout id = '2', and check batch label again") @@ -2858,19 +5348,19 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("wait step(3) pause again") - WaitRolloutCanaryStepPaused(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) time.Sleep(30 * time.Second) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "3", 1) By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "4", 1) By("Wait rollout complete") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) @@ -2929,7 +5419,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) By("Only update rollout id = '2', and check batch label again") @@ -2940,26 +5430,26 @@ var _ = SIGDescribe("Rollout v1beta1", func() { time.Sleep(30 * time.Second) By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "3", 1) By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "4", 1) By("Wait rollout complete") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", 
"2", 1) @@ -3028,7 +5518,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { workload.Spec.Template.Spec.Containers[0].Env = newEnvs UpdateNativeStatefulSet(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3054,9 +5544,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { selectorKey: apps.ControllerRevisionHashLabelKey, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check stable, canary service & ingress CheckIngressRestored(service.Name) @@ -3067,7 +5557,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(*workload.Spec.UpdateStrategy.RollingUpdate.Partition).Should(BeNumerically("==", *workload.Spec.Replicas-3)) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitNativeStatefulSetPodsReady(workload) By("rollout completed, and check") @@ -3152,7 +5642,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateNativeStatefulSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3171,7 +5661,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // v1 -> v2 -> v3, continuous release @@ -3182,7 +5672,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { time.Sleep(time.Second * 10) // wait step 0 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3202,7 +5692,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { selectorKey: apps.ControllerRevisionHashLabelKey, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("check rollout canary status success, resume rollout, and wait rollout canary complete") WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitNativeStatefulSetPodsReady(workload) @@ -3297,7 +5787,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // rollback -> v1 @@ -3386,7 +5876,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateNativeStatefulSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3404,7 +5894,7 @@ var _ = SIGDescribe("Rollout v1beta1", 
func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitNativeStatefulSetPodsReady(workload) By("rollout completed, and check") @@ -3496,7 +5986,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { workload.Spec.Template.Spec.Containers[0].Env = newEnvs UpdateAdvancedStatefulSet(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3522,9 +6012,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { selectorKey: apps.ControllerRevisionHashLabelKey, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check stable, canary service & ingress CheckIngressRestored(service.Name) @@ -3536,7 +6026,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(*workload.Spec.UpdateStrategy.RollingUpdate.Partition).Should(BeNumerically("==", *workload.Spec.Replicas-3)) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitAdvancedStatefulSetPodsReady(workload) By("rollout completed, and check") @@ -3621,7 +6111,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateAdvancedStatefulSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3640,7 +6130,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // v1 -> v2 -> v3, continuous release @@ -3651,7 +6141,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { time.Sleep(time.Second * 10) // wait step 0 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3671,7 +6161,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { selectorKey: apps.ControllerRevisionHashLabelKey, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("check rollout canary status success, resume rollout, and wait rollout canary complete") WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitAdvancedStatefulSetPodsReady(workload) @@ -3766,7 +6256,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // rollback -> v1 @@ -3855,7 +6345,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { 
UpdateAdvancedStatefulSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3873,7 +6363,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitAdvancedStatefulSetPodsReady(workload) By("rollout completed, and check") @@ -3962,19 +6452,19 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) Expect(checkUpdateReadyPods(1, 1)).Should(BeTrue()) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) Expect(checkUpdateReadyPods(2, 3)).Should(BeTrue()) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 2) By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) Expect(checkUpdateReadyPods(4, 6)).Should(BeTrue()) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "3", 3) @@ -4030,7 +6520,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Spec.Paused).Should(BeTrue()) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.CanaryReplicas).Should(BeNumerically("==", 1)) @@ -4051,9 +6541,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-by-header-value", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("pc")) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("Resume rollout, and wait next step(2), routing 50% traffic to new version pods") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.CanaryReplicas).Should(BeNumerically("==", 2)) @@ -4072,7 +6562,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-by-header-value", nginxIngressAnnotationDefaultPrefix)]).Should(BeEmpty()) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) By("rollout completed, and check") // check ingress & service & virtualservice & deployment diff --git a/test/e2e/test_data/rollout/hpa_v1.yaml b/test/e2e/test_data/rollout/hpa_v1.yaml new file mode 100644 index 00000000..653b87a8 --- /dev/null +++ b/test/e2e/test_data/rollout/hpa_v1.yaml @@ -0,0 +1,12 @@ +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: hpa-dp +spec: + scaleTargetRef: + apiVersion: apps/v1 + 
kind: Deployment + name: echoserver + minReplicas: 2 + maxReplicas: 6 + targetCPUUtilizationPercentage: 1 \ No newline at end of file diff --git a/test/e2e/test_data/rollout/hpa_v2.yaml b/test/e2e/test_data/rollout/hpa_v2.yaml new file mode 100644 index 00000000..2391b960 --- /dev/null +++ b/test/e2e/test_data/rollout/hpa_v2.yaml @@ -0,0 +1,24 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: hpa-dp +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: echoserver + behavior: + scaleDown: + stabilizationWindowSeconds: 10 + # selectPolicy: Disabled + # scaleUp: + # selectPolicy: Disabled + minReplicas: 2 + maxReplicas: 6 + metrics: + - type: Resource + resource: + name: cpu + target: + type: AverageValue + averageValue: '1m' diff --git a/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_base.yaml b/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_base.yaml index 0959f065..d8ce5fa0 100644 --- a/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_base.yaml +++ b/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_base.yaml @@ -10,21 +10,15 @@ spec: strategy: blueGreen: steps: - - traffic: 20% - replicas: 20% + - replicas: 50% + traffic: 0% pause: {} - - traffic: 40% - replicas: 40% - pause: {duration: 10} - - traffic: 60% - replicas: 60% - pause: {duration: 10} - - traffic: 80% - replicas: 80% - pause: {duration: 10} - - traffic: 100% - replicas: 100% - pause: {duration: 0} + - replicas: 100% + traffic: 0% + - replicas: 100% + traffic: 50% + - replicas: 100% + traffic: 100% trafficRoutings: - service: echoserver ingress: diff --git a/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml b/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml new file mode 100644 index 00000000..6fc10f5d --- /dev/null +++ b/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml @@ -0,0 +1,24 @@ +apiVersion: rollouts.kruise.io/v1beta1 # we use v1beta1 +kind: Rollout +metadata: + name: rollouts-demo +spec: + workloadRef: + apiVersion: apps.kruise.io/v1alpha1 + kind: CloneSet + name: echoserver + strategy: + blueGreen: + steps: + - replicas: 100% + traffic: 0% + pause: {} + - replicas: 100% + traffic: 50% + - replicas: 100% + traffic: 100% + trafficRoutings: + - service: echoserver + ingress: + classType: nginx + name: echoserver \ No newline at end of file