diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index e8e45b9b80e..df2962fefcc 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -42,6 +42,7 @@
   before_script:
     - packet
   image: quay.io/kubespray/kubespray:$KUBESPRAY_VERSION
   artifacts:
+    when: always
    paths:
      - cluster-dump/
diff --git a/.gitlab-ci/packet.yml b/.gitlab-ci/packet.yml
index 05a3feb0302..b2b20b9adac 100644
--- a/.gitlab-ci/packet.yml
+++ b/.gitlab-ci/packet.yml
@@ -63,6 +63,11 @@ packet_ubuntu16-kube-router-sep:
   extends: .packet
   when: manual

+packet_ubuntu16-kube-router-svc-proxy:
+  stage: deploy-part2
+  extends: .packet
+  when: manual
+
 packet_debian10-containerd:
   stage: deploy-part2
   extends: .packet
diff --git a/.gitlab-ci/terraform.yml b/.gitlab-ci/terraform.yml
index d3565400b6b..3f0e4665558 100644
--- a/.gitlab-ci/terraform.yml
+++ b/.gitlab-ci/terraform.yml
@@ -38,6 +38,7 @@
   when: manual
   only: [/^pr-.*$/]
   artifacts:
+    when: always
    paths:
      - cluster-dump/
   variables:
diff --git a/extra_playbooks/build-cephfs-provisioner.yml b/extra_playbooks/build-cephfs-provisioner.yml
deleted file mode 100644
index d0d87c9ea39..00000000000
--- a/extra_playbooks/build-cephfs-provisioner.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-
-- hosts: localhost
-  tasks:
-    - name: CephFS Provisioner | Install pip packages
-      pip:
-        name: "{{ item.name }}"
-        version: "{{ item.version }}"
-        state: "{{ item.state }}"
-        extra_args: "{{ pip_extra_args | default(omit) }}"
-      with_items:
-        - { state: "present", name: "docker", version: "3.4.1" }
-        - { state: "present", name: "docker-compose", version: "1.21.2" }
-
-    - name: CephFS Provisioner | Check Go version
-      shell: |
-        go version
-      ignore_errors: yes
-      register: go_version_result
-
-    - name: CephFS Provisioner | Install Go 1.9
-      shell: |
-        add-apt-repository -y ppa:gophers/archive
-        apt-get update
-        apt-get install -y golang-1.9
-        ln -fs /usr/lib/go-1.9/bin/* /usr/local/bin/
-      when: 'go_version_result.rc != 0 or "go version go1.9" not in go_version_result.stdout'
-
-    - name: CephFS Provisioner | Check if image exists
-      shell: |
-        docker image list | grep 'cephfs-provisioner'
-      ignore_errors: yes
-      register: check_image_result
-
-    - block:
-        - name: CephFS Provisioner | Clone repo
-          git:
-            repo: https://github.com/kubernetes-incubator/external-storage.git
-            dest: "~/go/src/github.com/kubernetes-incubator/external-storage"
-            version: 06fddbe2
-            clone: yes
-            update: yes
-
-        - name: CephFS Provisioner | Build image
-          shell: |
-            cd ~/go/src/github.com/kubernetes-incubator/external-storage
-            REGISTRY=quay.io/kubespray/ VERSION=06fddbe2 make ceph/cephfs
-
-        - name: CephFS Provisioner | Push image
-          docker_image:
-            name: quay.io/kubespray/cephfs-provisioner:06fddbe2
-            push: yes
-          register: docker_image
-          retries: 10
-          until: docker_image is succeeded
-
-      when: check_image_result.rc != 0
diff --git a/roles/container-engine/cri-o/tasks/crio_repo.yml b/roles/container-engine/cri-o/tasks/crio_repo.yml
index 60cfae99f23..d5999b651bb 100644
--- a/roles/container-engine/cri-o/tasks/crio_repo.yml
+++ b/roles/container-engine/cri-o/tasks/crio_repo.yml
@@ -10,6 +10,10 @@
     url: "https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/{{ crio_kubic_debian_repo_name }}/Release.key"
     state: present
   when: crio_kubic_debian_repo_name is defined
+  register: apt_key_download
+  until: apt_key_download is succeeded
+  retries: 4
+  delay: "{{ retry_stagger | d(3) }}"

 - name: Add CRI-O kubic repo
   apt_repository:
@@ -38,15 +42,17 @@

 - name: Enable modular repos for CRI-O
   ini_file:
-    path: "/etc/yum.repos.d/{{ item }}.repo"
-    section: "{{ item }}"
+    path: "/etc/yum.repos.d/{{ item.repo }}.repo"
+    section: "{{ item.section }}"
     option: enabled
     value: 1
   become: true
   when: is_ostree
   loop:
-    - "fedora-updates-modular"
-    - "fedora-modular"
+    - repo: "fedora-updates-modular"
+      section: "updates-modular"
+    - repo: "fedora-modular"
+      section: "fedora-modular"

 - name: Enable CRI-O module
   command: "dnf -y module enable cri-o:{{ crio_version }}"
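Note: the register/until/retries/delay quartet added to both CRI-O tasks above is Ansible's standard retry idiom for flaky network operations. A minimal standalone sketch — the task name, URL, and register variable here are illustrative, not part of the diff:

    - name: Fetch a signing key, retrying transient failures
      apt_key:
        url: https://example.com/Release.key      # hypothetical URL
        state: present
      register: key_download                      # capture the task result
      until: key_download is succeeded            # re-run until it succeeds...
      retries: 4                                  # ...at most 4 extra attempts
      delay: "{{ retry_stagger | d(3) }}"         # seconds between attempts (default 3)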
"/etc/yum.repos.d/{{ item }}.repo" - section: "{{ item }}" + path: "/etc/yum.repos.d/{{ item.repo }}.repo" + section: "{{ item.section }}" option: enabled value: 1 become: true when: is_ostree loop: - - "fedora-updates-modular" - - "fedora-modular" + - repo: "fedora-updates-modular" + section: "updates-modular" + - repo: "fedora-modular" + section: "fedora-modular" - name: Enable CRI-O module command: "dnf -y module enable cri-o:{{ crio_version }}" diff --git a/roles/container-engine/cri-o/tasks/main.yaml b/roles/container-engine/cri-o/tasks/main.yaml index 095206270dc..af0ecb92eec 100644 --- a/roles/container-engine/cri-o/tasks/main.yaml +++ b/roles/container-engine/cri-o/tasks/main.yaml @@ -47,6 +47,10 @@ when: not is_ostree with_items: "{{ crio_packages }}" notify: restart crio + register: package_install + until: package_install is succeeded + retries: 4 + delay: "{{ retry_stagger | d(3) }}" - name: Gather the rpm package facts package_facts: diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 48dbc6573a9..4e785219034 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -682,7 +682,7 @@ downloads: - k8s-cluster cilium: - enabled: "{{ kube_network_plugin == 'cilium' }}" + enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" container: true repo: "{{ cilium_image_repo }}" tag: "{{ cilium_image_tag }}" @@ -691,7 +691,7 @@ downloads: - k8s-cluster cilium_init: - enabled: "{{ kube_network_plugin == 'cilium' }}" + enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" container: true repo: "{{ cilium_init_image_repo }}" tag: "{{ cilium_init_image_tag }}" @@ -700,7 +700,7 @@ downloads: - k8s-cluster cilium_operator: - enabled: "{{ kube_network_plugin == 'cilium' }}" + enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" container: true repo: "{{ cilium_operator_image_repo }}" tag: "{{ cilium_operator_image_tag }}" diff --git a/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 index f0c80f51747..ead631e07a0 100644 --- a/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/dns-autoscaler.yml.j2 @@ -65,6 +65,11 @@ spec: requests: cpu: {{ dns_autoscaler_cpu_requests }} memory: {{ dns_autoscaler_memory_requests }} + readinessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP command: - /cluster-proportional-autoscaler - --namespace=kube-system diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 index 2ca3e4486b4..4dbaf4e17c3 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 +++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin-rbac.yml.j2 @@ -16,16 +16,19 @@ metadata: rules: - apiGroups: [""] resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "patch"] - apiGroups: [""] resources: ["nodes"] verbs: ["get", "list", "watch"] - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] + verbs: ["get", "list", "watch", "patch"] - apiGroups: ["storage.k8s.io"] resources: ["csinodes"] verbs: ["get", "list", "watch"] + - 
apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] --- diff --git a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 index ad7abefbc51..f6fe0f6240f 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 +++ b/roles/kubernetes-apps/csi_driver/cinder/templates/cinder-csi-controllerplugin.yml.j2 @@ -27,7 +27,6 @@ spec: - "--csi-address=$(ADDRESS)" {% if cinder_csi_controller_replicas is defined and cinder_csi_controller_replicas > 1 %} - --leader-election - - --leader-election-type=leases - --leader-election-namespace=kube-system {% endif %} env: diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/basedirs.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/basedirs.yml new file mode 100644 index 00000000000..7add2dac539 --- /dev/null +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/basedirs.yml @@ -0,0 +1,12 @@ +--- +# include to workaround mitogen issue +# https://github.com/dw/mitogen/issues/663 + +- name: "Local Volume Provisioner | Ensure base dir {{ delegate_host_base_dir.1 }} is created on {{ delegate_host_base_dir.0 }}" + file: + path: "{{ local_volume_provisioner_storage_classes[delegate_host_base_dir.1].host_dir }}" + state: directory + owner: root + group: root + mode: "{{ local_volume_provisioner_directory_mode }}" + delegate_to: "{{ delegate_host_base_dir.0 }}" diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml index 862084b7b98..b4c4f68eb96 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml @@ -1,15 +1,10 @@ --- + - name: Local Volume Provisioner | Ensure base dir is created on all hosts - file: - path: "{{ local_volume_provisioner_storage_classes[item.1].host_dir }}" - state: directory - owner: root - group: root - mode: "{{ local_volume_provisioner_directory_mode }}" - delegate_to: "{{ item[0] }}" - with_nested: - - "{{ groups['k8s-cluster'] }}" - - "{{ local_volume_provisioner_storage_classes.keys() | list }}" + include_tasks: basedirs.yml + loop_control: + loop_var: delegate_host_base_dir + loop: "{{ groups['k8s-cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list }}" - name: Local Volume Provisioner | Create addon dir file: diff --git a/roles/kubernetes-apps/metallb/OWNERS b/roles/kubernetes-apps/metallb/OWNERS new file mode 100644 index 00000000000..b64c7bc7acf --- /dev/null +++ b/roles/kubernetes-apps/metallb/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +reviewers: + - oomichi diff --git a/roles/kubernetes-apps/network_plugin/meta/main.yml b/roles/kubernetes-apps/network_plugin/meta/main.yml index c208839d375..b5d1c04734a 100644 --- a/roles/kubernetes-apps/network_plugin/meta/main.yml +++ b/roles/kubernetes-apps/network_plugin/meta/main.yml @@ -1,7 +1,7 @@ --- dependencies: - role: kubernetes-apps/network_plugin/cilium - when: kube_network_plugin == 'cilium' + when: kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool tags: - cilium diff --git a/roles/kubernetes/kubeadm/defaults/main.yml 
diff --git a/roles/kubernetes-apps/metallb/OWNERS b/roles/kubernetes-apps/metallb/OWNERS
new file mode 100644
index 00000000000..b64c7bc7acf
--- /dev/null
+++ b/roles/kubernetes-apps/metallb/OWNERS
@@ -0,0 +1,5 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+reviewers:
+  - oomichi
diff --git a/roles/kubernetes-apps/network_plugin/meta/main.yml b/roles/kubernetes-apps/network_plugin/meta/main.yml
index c208839d375..b5d1c04734a 100644
--- a/roles/kubernetes-apps/network_plugin/meta/main.yml
+++ b/roles/kubernetes-apps/network_plugin/meta/main.yml
@@ -1,7 +1,7 @@
 ---
 dependencies:
   - role: kubernetes-apps/network_plugin/cilium
-    when: kube_network_plugin == 'cilium'
+    when: kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool
     tags:
       - cilium

diff --git a/roles/kubernetes/kubeadm/defaults/main.yml b/roles/kubernetes/kubeadm/defaults/main.yml
index 988cbc594ad..9dc577edfda 100644
--- a/roles/kubernetes/kubeadm/defaults/main.yml
+++ b/roles/kubernetes/kubeadm/defaults/main.yml
@@ -4,9 +4,6 @@
 discovery_timeout: 60s
 kubeadm_join_timeout: 120s

-# Optionally remove kube_proxy installed by kubeadm
-kube_proxy_remove: false
-
 # If non-empty, will use this string as identification instead of the actual hostname
 kube_override_hostname: >-
   {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
diff --git a/roles/kubernetes/kubeadm/tasks/main.yml b/roles/kubernetes/kubeadm/tasks/main.yml
index 3b6fe89740b..bf2c2687972 100644
--- a/roles/kubernetes/kubeadm/tasks/main.yml
+++ b/roles/kubernetes/kubeadm/tasks/main.yml
@@ -163,7 +163,6 @@
   delegate_to: "{{ groups['kube-master']|first }}"
   when:
     - kube_proxy_remove
-    - kubeadm_discovery_address != kube_apiserver_endpoint | replace("https://", "")  # When scaling/adding nodes in the existing k8s cluster, kube-proxy wouldn't be created, as `kubeadm init` wouldn't run.
   ignore_errors: true
   tags:
diff --git a/roles/kubernetes/master/tasks/kubeadm-upgrade.yml b/roles/kubernetes/master/tasks/kubeadm-upgrade.yml
index 0d37540ea0c..7ee893cad98 100644
--- a/roles/kubernetes/master/tasks/kubeadm-upgrade.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-upgrade.yml
@@ -17,9 +17,7 @@
     --config={{ kube_config_dir }}/kubeadm-config.yaml
     --ignore-preflight-errors=all
     --allow-experimental-upgrades
-    --allow-release-candidate-upgrades
-    --etcd-upgrade=false
-    --certificate-renewal=true
+    --etcd-upgrade={{ etcd_kubeadm_enabled | bool | lower }}
     --force
   register: kubeadm_upgrade
 # Retry is because upload config sometimes fails
@@ -39,9 +37,7 @@
     --config={{ kube_config_dir }}/kubeadm-config.yaml
     --ignore-preflight-errors=all
     --allow-experimental-upgrades
-    --allow-release-candidate-upgrades
-    --etcd-upgrade=false
-    --certificate-renewal=true
+    --etcd-upgrade={{ etcd_kubeadm_enabled | bool | lower }}
     --force
   register: kubeadm_upgrade
   when: inventory_hostname != groups['kube-master']|first
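Note: replacing the hard-coded --etcd-upgrade=false with --etcd-upgrade={{ etcd_kubeadm_enabled | bool | lower }} lets kubeadm upgrade etcd only when kubeadm actually manages it; the "| bool | lower" chain renders a Jinja boolean as the lowercase literal the CLI expects. Illustrative renderings:

    # etcd_kubeadm_enabled: true   ->  kubeadm upgrade apply ... --etcd-upgrade=true
    # etcd_kubeadm_enabled: false  ->  kubeadm upgrade apply ... --etcd-upgrade=false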
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 53adbdc8944..9ebf325109a 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -22,9 +22,6 @@ kubelet_kubelet_cgroups: "/systemd/system.slice"
 ### fail with swap on (default true)
 kubelet_fail_swap_on: true

-# Optionally remove kube_proxy installed by kubeadm
-kube_proxy_remove: false
-
 # Reserve this space for kube resources
 kube_memory_reserved: 256M
 kube_cpu_reserved: 100m
diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
index d558fff7a9b..987a4643a54 100644
--- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
+++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml
@@ -128,9 +128,9 @@
 - name: Stop if kernel version is too low
   assert:
-    that: ansible_kernel.split('-')[0] is version('4.8', '>=')
+    that: ansible_kernel.split('-')[0] is version('4.9.17', '>=')
   when:
-    - kube_network_plugin == 'cilium'
+    - kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool
     - not ignore_assert_errors

 - name: Stop if bad hostname
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index c8abe8d1917..223e484d052 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -57,6 +57,8 @@ spec:
               name: host-local-net-dir
             - mountPath: /host/opt/cni/bin
               name: cni-bin-dir
+          securityContext:
+            privileged: true
 {% endif %}
       # This container installs the Calico CNI binaries
       # and CNI network config file on each node.
@@ -88,6 +90,8 @@ spec:
               name: cni-net-dir
             - mountPath: /host/opt/cni/bin
               name: cni-bin-dir
+          securityContext:
+            privileged: true
 {% endif %}
       containers:
 {% if calico_version is version('v3.3.0', '>=') and calico_version is version('v3.4.0', '<') %}
diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml
index fa4361fb18b..dce905deec1 100755
--- a/roles/network_plugin/cilium/defaults/main.yml
+++ b/roles/network_plugin/cilium/defaults/main.yml
@@ -33,3 +33,7 @@ cilium_monitor_aggregation: medium
 cilium_preallocate_bpf_maps: false
 cilium_tofqdns_enable_poller: false
 cilium_enable_legacy_services: false
+
+# Deploy cilium even if kube_network_plugin is not cilium.
+# This makes it possible to deploy cilium alongside another CNI in order to replace kube-proxy.
+cilium_deploy_additionally: false
diff --git a/roles/network_plugin/meta/main.yml b/roles/network_plugin/meta/main.yml
index 779bdfc5d8f..66b283e3360 100644
--- a/roles/network_plugin/meta/main.yml
+++ b/roles/network_plugin/meta/main.yml
@@ -1,7 +1,7 @@
 ---
 dependencies:
   - role: network_plugin/cilium
-    when: kube_network_plugin == 'cilium'
+    when: kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool
     tags:
       - cilium

diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml
index a0dbcd6fe2c..6896ecba1e1 100644
--- a/roles/remove-node/post-remove/tasks/main.yml
+++ b/roles/remove-node/post-remove/tasks/main.yml
@@ -1,5 +1,5 @@
 ---
 - name: Delete node
-  command: "{{ bin_dir }}/kubectl delete node {{ inventory_hostname }}"
+  command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube-master']|first }}"
   ignore_errors: yes
diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml
index 26cf42003f0..f287aa3dd95 100644
--- a/roles/remove-node/pre-remove/tasks/main.yml
+++ b/roles/remove-node/pre-remove/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: cordon-node | Mark all nodes as unschedulable before drain
   command: >-
-    {{ bin_dir }}/kubectl cordon {{ item }}
+    {{ bin_dir }}/kubectl cordon {{ hostvars[item]['kube_override_hostname']|default(item) }}
   with_items:
     - "{{ node.split(',') | default(groups['kube-node']) }}"
   failed_when: false
@@ -16,7 +16,7 @@
     --ignore-daemonsets
     --grace-period {{ drain_grace_period }}
     --timeout {{ drain_timeout }}
-    --delete-local-data {{ item }}
+    --delete-local-data {{ hostvars[item]['kube_override_hostname']|default(item) }}
   with_items:
     - "{{ node.split(',') | default(groups['kube-node']) }}"
   failed_when: false
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index d5d59025c82..5e6309e174a 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Uncordon node
-  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf uncordon {{ inventory_hostname }}"
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube-master'][0] }}"
   when:
     - needs_cordoning|default(false)
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index f47954b1c39..e67c4a52e82 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -3,7 +3,7 @@
 # Node NotReady: type = ready, status = Unknown
 - name: See if node is in ready state
   shell: >-
-    {{ bin_dir }}/kubectl get node {{ inventory_hostname }}
+    {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
     -o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
   register: kubectl_node_ready
   delegate_to: "{{ groups['kube-master'][0] }}"
@@ -14,7 +14,7 @@
 # else unschedulable key doesn't exist
 - name: See if node is schedulable
   shell: >-
-    {{ bin_dir }}/kubectl get node {{ inventory_hostname }}
+    {{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
     -o jsonpath='{ .spec.unschedulable }'
   register: kubectl_node_schedulable
   delegate_to: "{{ groups['kube-master'][0] }}"
@@ -31,7 +31,7 @@
     {%- endif %}

 - name: Cordon node
-  command: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}"
+  command: "{{ bin_dir }}/kubectl cordon {{ kube_override_hostname|default(inventory_hostname) }}"
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: needs_cordoning
@@ -61,7 +61,7 @@
     --ignore-daemonsets
     --grace-period {{ drain_grace_period }}
     --timeout {{ drain_timeout }}
-    --delete-local-data {{ inventory_hostname }}
+    --delete-local-data {{ kube_override_hostname|default(inventory_hostname) }}
     {% if drain_pod_selector %}--pod-selector '{{ drain_pod_selector }}'{% endif %}
   delegate_to: "{{ groups['kube-master'][0] }}"
   when:
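Note: every kubectl invocation in the remove-node and upgrade paths now resolves the node name through kube_override_hostname|default(inventory_hostname) (via hostvars[item][...] inside loops), so nodes registered under an overridden name — e.g. the cloud-provider hostname on AWS — are cordoned, drained, uncordoned, and deleted under the name the API server actually knows. Illustrative resolution (hostnames here are hypothetical):

    # inventory_hostname: node1
    # hostvars['node1']['kube_override_hostname']: ip-10-0-0-1.ec2.internal
    #   -> kubectl drain ... ip-10-0-0-1.ec2.internal
    # with no override set, the expression falls back to plain node1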
diff --git a/roles/win_nodes/kubernetes_patch/defaults/main.yml b/roles/win_nodes/kubernetes_patch/defaults/main.yml
index ab979d4c8b2..ad598883073 100644
--- a/roles/win_nodes/kubernetes_patch/defaults/main.yml
+++ b/roles/win_nodes/kubernetes_patch/defaults/main.yml
@@ -1,7 +1,5 @@
 ---
 kubernetes_user_manifests_path: "{{ ansible_env.HOME }}/kube-manifests"
-# Optionally remove kube_proxy installed by kubeadm
-kube_proxy_remove: false

 # nodeselector for kube-proxy ds is beta until 1.18
 kube_proxy_nodeselector: "{{ 'kubernetes.io/os' if kube_version is version('v1.18.0', '>=') else 'beta.kubernetes.io/os' }}"
diff --git a/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml b/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml
index 4798b539dd1..30cf414d307 100644
--- a/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/defaults/main.yml
@@ -36,6 +36,12 @@ images:
     checksum: sha256:e3c1b309d9203604922d6e255c2c5d098a309c2d46215d8fc026954f3c5c27a0
     converted: true

+  fedora-coreos:
+    filename: fedora-coreos-32.20200601.3.0-openstack.x86_64.qcow2.xz
+    url: https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/32.20200601.3.0/x86_64/fedora-coreos-32.20200601.3.0-openstack.x86_64.qcow2.xz
+    checksum: sha256:fe78c348189d745eb5f6f80ff9eb2af67da8e84880d264f4301faaf7c2a72646
+    converted: true
+
   centos-7:
     filename: CentOS-7-x86_64-GenericCloud-1809.qcow2
     url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1809.qcow2
diff --git a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
index 270a39f7b47..aae85e4092f 100644
--- a/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
+++ b/test-infra/image-builder/roles/kubevirt-images/tasks/main.yml
@@ -13,16 +13,22 @@
   with_dict:
     - "{{ images }}"

+- name: Unxz compressed images
+  command: unxz --force {{ images_dir }}/{{ item.value.filename }}
+  with_dict:
+    - "{{ images }}"
+  when:
+    - item.value.filename.endswith('.xz')
+
 - name: Convert images which is not in qcow2 format
-  command: qemu-img convert -O qcow2 {{ images_dir }}/{{ item.value.filename }} {{ images_dir }}/{{ item.key }}.qcow2
+  command: qemu-img convert -O qcow2 {{ images_dir }}/{{ item.value.filename.rstrip('.xz') }} {{ images_dir }}/{{ item.key }}.qcow2
   with_dict:
     - "{{ images }}"
   when:
     - not (item.value.converted|bool)
-  register: converted

 - name: Make sure all images are ending with qcow2
-  command: cp {{ images_dir }}/{{ item.value.filename }} {{ images_dir }}/{{ item.key }}.qcow2
+  command: cp {{ images_dir }}/{{ item.value.filename.rstrip('.xz') }} {{ images_dir }}/{{ item.key }}.qcow2
   with_dict:
     - "{{ images }}"
   when:
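Note: rstrip('.xz') strips a trailing *character set* ('.', 'x', 'z'), not the literal suffix. It is safe here because every filename in the images dict ends in qcow2.xz (the '2' stops the stripping), but it is a classic pitfall worth knowing:

    # 'fedora-coreos-32...-openstack.x86_64.qcow2.xz'.rstrip('.xz')  ->  '...qcow2'  (intended)
    # 'linux.xz'.rstrip('.xz')                                       ->  'linu'      (surprise: the 'x' of 'linux' is stripped too)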
diff --git a/tests/files/packet_ubuntu16-kube-router-svc-proxy.yml b/tests/files/packet_ubuntu16-kube-router-svc-proxy.yml
new file mode 100644
index 00000000000..8be6ef3f537
--- /dev/null
+++ b/tests/files/packet_ubuntu16-kube-router-svc-proxy.yml
@@ -0,0 +1,12 @@
+---
+# Instance settings
+cloud_image: ubuntu-1604
+mode: separate
+
+# Kubespray settings
+bootstrap_os: ubuntu
+kube_network_plugin: kube-router
+deploy_netchecker: true
+dns_min_replicas: 1
+
+kube_router_run_service_proxy: true
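Note: this test scenario backs the new packet_ubuntu16-kube-router-svc-proxy CI job added above. Reading the variable name, kube_router_run_service_proxy: true appears to make kube-router's built-in service proxy handle Services so kube-proxy is not needed; the kube-router template change itself is not shown in this diff. A minimal local equivalent (illustrative group_vars):

    kube_network_plugin: kube-router
    kube_router_run_service_proxy: true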