Do not promote when not ready on skip analysis #695
@@ -0,0 +1,19 @@

#!/usr/bin/env bash

# This script sets up the scenarios for Istio tests by creating a Kubernetes namespace,
# installing the load tester and a test workload (podinfo)
# Prerequisites: Kubernetes Kind and Istio

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)

echo '>>> Creating test namespace'
kubectl create namespace test
kubectl label namespace test istio-injection=enabled

echo '>>> Installing the load tester'
kubectl apply -k ${REPO_ROOT}/kustomize/tester
kubectl -n test rollout status deployment/flagger-loadtester

echo '>>> Deploy podinfo'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml
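This setup script is meant to run once before the test script that follows. A minimal runner sketch, assuming placeholder filenames (the actual paths are not shown in this diff):

# Hypothetical invocation; both filenames are assumptions, not taken from this PR.
./test/e2e-istio-setup.sh                # namespace, load tester, podinfo workload
./test/e2e-istio-skip-analysis.sh        # the skip-analysis e2e test below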
@@ -0,0 +1,138 @@

#!/usr/bin/env bash

# This script runs e2e tests for when the canary analysis is skipped
# Prerequisites: Kubernetes Kind and Istio

set -o errexit

echo '>>> Initialising canary'
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
    portDiscovery: true
  skipAnalysis: true
  analysis:
    interval: 15s
    threshold: 15
    maxWeight: 30
    stepWeight: 10
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 10m -q 10 -c 2 http://podinfo.test:9898/"
          logCmdOutput: "true"
EOF
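With skipAnalysis: true applied, a quick sanity check that the field actually landed on the cluster object (a hedged one-liner; it prints "true" when set):

kubectl -n test get canary/podinfo -o jsonpath='{.spec.skipAnalysis}'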
echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
    sleep 5
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done
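This poll-until loop recurs four times in the script with only the probe command and sleep interval changing; a reusable helper is one way to cut the repetition (a sketch only, not part of this PR; the function name is invented):

# Hypothetical refactor of the repeated wait loops in this script.
wait_or_die() {
    local probe="$1" delay="${2:-5}" retries=50 count=0 ok=false
    until ${ok}; do
        eval "${probe}" && ok=true || ok=false
        sleep "${delay}"
        count=$((count + 1))
        if [[ ${count} -eq ${retries} ]]; then
            kubectl -n istio-system logs deployment/flagger
            echo "No more retries left"
            exit 1
        fi
    done
}

# Usage: wait_or_die "kubectl -n test get canary/podinfo | grep 'Initialized'"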
echo '✔ Canary initialization test passed'

echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1

echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false
    sleep 10
    kubectl -n istio-system logs deployment/flagger --tail 1
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n test describe deployment/podinfo
        kubectl -n test describe deployment/podinfo-primary
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '>>> Waiting for canary finalization'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Succeeded' && ok=true || ok=false
    sleep 5
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '✔ Canary promotion test passed'

if [[ "$1" = "canary" ]]; then
    exit 0
fi
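The positional argument gives the script two modes; assuming it is saved as run-tests.sh (a placeholder name), invocation would look like:

./run-tests.sh canary    # stop after the init and promotion tests
./run-tests.sh           # also run the bad-release checks below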
echo '>>> Triggering canary deployment with a bad release (non-existent docker image)'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/potato:1.0.0

echo '>>> Waiting for canary to fail'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl get canary/podinfo -n test -o=jsonpath='{.status.phase}' | grep 'Failed' && ok=true || ok=false
    sleep 10
    kubectl -n istio-system logs deployment/flagger --tail 1
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n test describe deployment/podinfo
        kubectl -n test describe deployment/podinfo-primary
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '>>> Confirm primary pod is still running and with correct version'
retries=50
count=0
# Both flags must start as false: left unset, they expand to empty commands
# that succeed, the until condition passes immediately, and the whole check
# is skipped.
okRunning=false
okImage=false
until ${okImage} && ${okRunning}; do
    kubectl get deployment podinfo-primary -n test -o jsonpath='{.spec.replicas}' | grep 1 && okRunning=true || okRunning=false
    # The primary was promoted to 3.1.1 above, so that is the version that
    # must survive the failed release.
    kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && okImage=true || okImage=false
    sleep 5
    count=$(($count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n istio-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

kubectl -n istio-system logs deployment/flagger

echo '✔ All tests passed'
@@ -5,19 +5,6 @@

 set -o errexit

Review comment on this hunk: "Can you please add back"
Reply from the author: "Of course. PS: I don't know if this new structure of tests makes sense. I'm happy to change this if you want."
-REPO_ROOT=$(git rev-parse --show-toplevel)
-
-echo '>>> Creating test namespace'
-kubectl create namespace test
-kubectl label namespace test istio-injection=enabled
-
-echo '>>> Installing the load tester'
-kubectl apply -k ${REPO_ROOT}/kustomize/tester
-kubectl -n test rollout status deployment/flagger-loadtester
-
-echo '>>> Deploy podinfo'
-kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml

 echo '>>> Create latency metric template'
 cat <<EOF | kubectl apply -f -
 apiVersion: flagger.app/v1beta1
Author's comment: "without this block, flagger immediately tries to promote the new version, which at this point has 0 healthy pods."
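The failure mode described here can be observed directly; a hedged check (resource names follow the test manifests above), since a freshly created primary reports zero ready replicas until its pods pass their probes:

# Illustration only: inspect the primary readiness that the wait loop guards on.
kubectl -n test get deployment podinfo-primary -o jsonpath='{.status.readyReplicas}'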