-
Notifications
You must be signed in to change notification settings - Fork 3
152 lines (130 loc) · 5.7 KB
/
tests.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
name: "Integration tests"

# Run on every push to main, and on non-draft PR activity targeting main
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
    types: [opened, synchronize, reopened, ready_for_review]

# Every `run:` step uses bash (GitHub adds `-eo pipefail` for explicit bash)
defaults:
  run:
    shell: bash

# Cancel previous running actions for the same PR; never cancel main builds
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
jobs:
  # Static checks: Python formatting plus Rust fmt/clippy across all crates
  checks:
    # Skip checks while the PR is still a draft
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-24.04
    steps:
      - name: "Checkout code"
        uses: actions/checkout@v4
      # Formatting checks
      - name: "Python formatting check"
        run: ./bin/inv_wrapper.sh format-code --check
      # Rust formatting checks
      - name: "Run cargo lints"
        run: |
          for dir in "./vm-cache/" "./tools/check-kata-hashes" "./tools/tee-detect" "./tools/purge-containerd" "./tools/purge-k8s"; do
            # Quote the dir and truncate-redirect (>) — appending (>>) to
            # /dev/null works but is misleading
            pushd "${dir}" > /dev/null
            cargo fmt --all -- --check
            cargo clippy -- -D warnings
            popd > /dev/null
          done
      - name: "Check Kata hashes match"
        run: cargo run --release
        working-directory: ./tools/check-kata-hashes
integration-tests:
if: github.event.pull_request.draft == false
strategy:
fail-fast: false
matrix:
include:
- tee: snp
runtime_classes: "qemu qemu-coco-dev qemu-snp qemu-snp-sc2"
- tee: tdx
runtime_classes: "qemu qemu-coco-dev qemu-tdx qemu-tdx-sc2"
runs-on: [self-hosted, "${{ matrix.tee }}"]
env:
KUBECONFIG: .config/kubeadm_kubeconfig
steps:
- name: "Checkout code"
uses: actions/checkout@v4
- name: "Install SC2"
run: ./bin/inv_wrapper.sh sc2.deploy --clean
- name: "Run python hello world"
run: |
for runtime_class in ${{ matrix.runtime_classes }}; do
echo "Running test for ${runtime_class}..."
export SC2_RUNTIME_CLASS=${runtime_class}
envsubst < ./demo-apps/helloworld-py/deployment.yaml | ./bin/kubectl apply -f -
# Wait for pod to be ready
until [ "$(./bin/kubectl get pods -l ${{ env.POD_LABEL }} -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}')" = "True" ]; do echo "Waiting for pod to be ready..."; sleep 2; done
sleep 1
# Get the pod's IP
service_ip=$(./bin/kubectl get services -o jsonpath='{.items[?(@.metadata.name=="coco-helloworld-py-node-port")].spec.clusterIP}')
[ "$(curl --retry 3 -X GET ${service_ip}:8080)" = "Hello World!" ]
envsubst < ./demo-apps/helloworld-py/deployment.yaml | ./bin/kubectl delete -f -
# Wait for pod to be deleted
./bin/kubectl wait --for=delete -l ${{ env.POD_LABEL }} pod --timeout=30s
# Extra cautionary sleep
sleep 5
echo "Test for ${runtime_class} successful!"
done
env:
POD_LABEL: apps.sc2.io/name=helloworld-py
- name: "Run Knative hello world"
run: |
for runtime_class in ${{ matrix.runtime_classes }}; do
echo "Running test for ${runtime_class}..."
export SC2_RUNTIME_CLASS=${runtime_class}
envsubst < ./demo-apps/helloworld-knative/service.yaml | ./bin/kubectl apply -f -
sleep 1
# Get the service URL
service_url=$(./bin/kubectl get ksvc helloworld-knative --output=custom-columns=URL:.status.url --no-headers)
[ "$(curl --retry 3 ${service_url})" = "Hello World!" ]
# Wait for pod to be deleted
envsubst < ./demo-apps/helloworld-knative/service.yaml | ./bin/kubectl delete -f -
./bin/kubectl wait --for=delete -l ${{ env.POD_LABEL }} pod --timeout=60s
# Extra cautionary sleep
sleep 5
echo "Test for ${runtime_class} successful!"
done
env:
POD_LABEL: apps.sc2.io/name=helloworld-py
- name: "Enable default-memory annotation"
run: |
for runtime_class in ${{ matrix.runtime_classes }}; do
./bin/inv_wrapper.sh kata.enable-annotation default_memory --runtime ${runtime_class}
# Here we benefit that the last variable is the one we want to use
# for vm-cache
export SC2_RUNTIME_CLASS=${runtime_class}
done
# Aftre changing the annotation of the qemu-snp-sc2 runtime class we
# need to restart the VM cache
sudo -E ./vm-cache/target/release/vm-cache stop
sudo -E ./vm-cache/target/release/vm-cache background
- name: "Run knative chaining demo"
run: |
for runtime_class in ${{ matrix.runtime_classes }}; do
echo "Running test for ${runtime_class}..."
export SC2_RUNTIME_CLASS=${runtime_class}
envsubst < ./demo-apps/knative-chaining/chaining.yaml | ./bin/kubectl apply -f -
sleep 1
# Curl the channel URL
./demo-apps/knative-chaining/curl_cmd.sh
# Wait for pod 3 to be scaled down
until [ "$(kubectl -n ${{ env.NAMESPACE }} logs -l ${{ env.POD_LABEL_THREE }} | grep 'cloudevent(s3): done!' | wc -l)" = "1" ]; do echo "Waiting for chain to finish..."; sleep 2; done
# Finally, clear-up
envsubst < ./demo-apps/knative-chaining/chaining.yaml | ./bin/kubectl delete -f -
# Extra cautionary sleep
sleep 5
echo "Test for ${runtime_class} successful!"
done
env:
NAMESPACE: chaining-test
POD_LABEL_ONE: apps.sc2.io/name=knative-chaining-one
POD_LABEL_THREE: apps.sc2.io/name=knative-chaining-three
- name: "Clean-up"
if: always()
run: ./bin/inv_wrapper.sh sc2.destroy