Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[CI] Use Kubernetes GC to clean kubevirt VMs (packet-* jobs) #11530

Merged
merged 10 commits into from
Nov 14, 2024
Prev Previous commit
Next Next commit
CI: use kubevirt.core dynamic inventory
This allows a single source of truth for the virtual machines in a
kubevirt ci-run.

`etcd_member_name` must be handled correctly in kubespray-defaults so that
the recovery test cases keep working.
VannTen committed Nov 14, 2024

Verified

This commit was created on GitHub.com and signed with GitHub’s verified signature. The key has expired.
commit 329ffd45f0e607dfdac81690d7687960d59d512a
2 changes: 1 addition & 1 deletion pipeline.Dockerfile
Original file line number Diff line number Diff line change
@@ -60,4 +60,4 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 \
&& vagrant plugin install vagrant-libvirt \
# Install Kubernetes collections
&& pip install --no-compile --no-cache-dir kubernetes \
&& ansible-galaxy collection install kubernetes.core
&& ansible-galaxy collection install kubernetes.core kubevirt.core
31 changes: 8 additions & 23 deletions tests/cloud_playbooks/roles/packet-ci/tasks/create-vms.yml
Original file line number Diff line number Diff line change
@@ -1,24 +1,11 @@
---
- name: "Create temp dir /tmp/{{ test_name }} for CI files"
file:
path: "/tmp/{{ test_name }}"
state: directory
mode: "0755"

- name: Template vm files for CI job
set_fact:
vms_files: "{{ vms_files + [lookup('ansible.builtin.template', 'vm.yml.j2') | from_yaml] }}"
vars:
vms_files: []
loop: "{{ range(1, vm_count | int + 1, 1) | list }}"
loop_control:
index_var: vm_id

- name: Start vms for CI job
vars:
tvars:
kubespray_groups: "{{ item }}"
kubernetes.core.k8s:
definition: "{{ item }}"
changed_when: false
loop: "{{ vms_files }}"
definition: "{{ lookup('template', 'vm.yml.j2', template_vars=tvars) }}"
loop: "{{ scenarios[mode | d('default')] }}"

- name: Wait for vms to have IP addresses
kubernetes.core.k8s_info:
@@ -34,10 +21,8 @@
retries: 30
delay: 10

- name: "Create inventory for CI test in file /tmp/{{ test_name }}/inventory"
- name: "Create inventory for CI tests"
template:
src: "inventory.j2"
dest: "{{ inventory_path }}"
src: "inv.kubevirt.yml.j2"
dest: "{{ inventory_path }}/inv.kubevirt.yml"
mode: "0644"
vars:
vms: "{{ vm_ips }}"
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Dynamic inventory template: discovers the CI kubevirt VMs from the cluster
# instead of relying on a statically templated inventory file.
# NOTE(review): indentation below reflects the page scrape; in the real file
# the nested keys (namespaces entries, compose body, keyed_groups entries)
# are indented per YAML rules — confirm against the repository copy.
plugin: kubevirt.core.kubevirt
# Restrict discovery to the namespace this CI job runs in.
namespaces:
- {{ pod_namespace }}
# Only pick up VMs labelled with this job's id (set in vm.yml.j2).
label_selector: ci_job_id={{ ci_job_id }}
# Let the plugin create groups from VM labels (label_* group names).
create_groups: true
compose:
# Recover the kubespray group names from the label-derived group names:
# a VM labelled kubespray.io/<group>: "true" (see vm.yml.j2) shows up in a
# group named label_kubespray_io_<group>_true; strip the wrapper to get
# the bare <group> name.
ci_groups: |
group_names |
select('ansible.builtin.match', 'label_kubespray_io*') |
map('regex_replace', 'label_kubespray_io_(.*)_true', '\1')
# Connect to the VM IPs directly rather than through a Service object.
use_service: false
# Use the plain VM name as the inventory hostname.
host_format: "{name}"
keyed_groups:
# Turn each entry of ci_groups into a plain inventory group
# (no prefix/separator, so the group is named exactly <group>).
- key: ci_groups
prefix: ""
separator: ""
98 changes: 0 additions & 98 deletions tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2

This file was deleted.

3 changes: 3 additions & 0 deletions tests/cloud_playbooks/roles/packet-ci/templates/vm.yml.j2
Original file line number Diff line number Diff line change
@@ -12,6 +12,9 @@ metadata:
kubevirt.io/domain: "{{ test_name }}"
ci_job_id: "{{ ci_job_id }}"
ci_job_name: "{{ ci_job_name }}"
{% for group in kubespray_groups -%}
kubespray.io/{{ group }}: "true"
{% endfor -%}
# leverage the Kubernetes GC for resources cleanup
ownerReferences:
- apiVersion: v1
38 changes: 28 additions & 10 deletions tests/cloud_playbooks/roles/packet-ci/vars/main.yml
Original file line number Diff line number Diff line change
@@ -1,14 +1,32 @@
---
_vm_count_dict:
separate: 3
ha: 3
ha-recover: 3
ha-recover-noquorum: 3
all-in-one: 1
node-etcd-client: 4
default: 2

vm_count: "{{ _vm_count_dict[mode | d('default')] }}"
# Cluster layouts for each CI scenario: one list entry per VM, each entry
# being the list of Ansible groups that VM belongs to. Consumed by
# create-vms.yml (one k8s resource per entry, groups passed to vm.yml.j2
# which turns them into kubespray.io/<group> labels), then regrouped by the
# kubevirt dynamic inventory.
# NOTE(review): indentation reflects the page scrape; confirm nesting
# against the repository copy of vars/main.yml.
scenarios:
# 3 VMs, each role on its own dedicated node.
separate:
- ['kube_control_plane']
- ['kube_node']
- ['etcd']
# 3 VMs, 2 control-plane nodes, etcd co-located on every node.
ha:
- ['kube_control_plane', 'etcd']
- ['kube_control_plane', 'etcd']
- ['kube_node', 'etcd']
# 2 VMs: the default layout used when no mode is given.
default:
- ['kube_control_plane', 'etcd']
- ['kube_node']
# Single VM carrying every role.
all-in-one:
- ['kube_control_plane', 'etcd', 'kube_node']
# HA layout with one member marked broken_* for the recovery test case.
ha-recover:
- ['kube_control_plane', 'etcd']
- ['kube_control_plane', 'etcd', 'broken_kube_control_plane', 'broken_etcd']
- ['kube_node', 'etcd']
# As above, but two broken members, so etcd has lost quorum.
ha-recover-noquorum:
- ['kube_control_plane', 'etcd', 'broken_kube_control_plane', 'broken_etcd']
- ['kube_control_plane', 'etcd', 'broken_kube_control_plane', 'broken_etcd']
- ['kube_node', 'etcd']
# 4 VMs for testing nodes that are etcd clients but not etcd members.
node-etcd-client:
- ['kube_node', 'kube_control_plane', 'etcd']
- ['kube_node', 'etcd']
- ['kube_node', 'etcd']
- ['kube_node']

# Get pod metadata / CI vars from environment