Commit
Fix outdated tag and experimental ansible-lint rules (kubernetes-sigs#10254)

* project: fix outdated tag and experimental

Signed-off-by: Arthur Outhenin-Chalandre <[email protected]>

* project: remove no longer useful noqa 301

Signed-off-by: Arthur Outhenin-Chalandre <[email protected]>

* project: replace unnamed-task by name[missing]

Signed-off-by: Arthur Outhenin-Chalandre <[email protected]>

* project: fix daemon-reload -> daemon_reload

Signed-off-by: Arthur Outhenin-Chalandre <[email protected]>

---------

Signed-off-by: Arthur Outhenin-Chalandre <[email protected]>
MrFreezeex authored and pedromcpedro committed May 8, 2024
1 parent b389728 commit 7209904
Showing 56 changed files with 101 additions and 122 deletions.
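For context: recent ansible-lint releases dropped the old numeric rule IDs in favor of named rules, so numeric tags left in # noqa comments or in skip_list now raise warning[outdated-tag]. The sketch below summarizes the renames as they appear in this diff; treat it as illustrative and check it against the ansible-lint version the project pins.

# Illustrative sketch only (not part of the diff) of the tag renames used below:
#   '301' -> no-changed-when          (inline noqa simply dropped in this commit)
#   '303' -> command-instead-of-module
#   '305' -> command-instead-of-shell
#   '306' -> risky-shell-pipe
#   '503' -> no-handler
#   unnamed-task -> name[missing]
- name: Example task written in the new named-noqa style  # noqa command-instead-of-shell
  shell: "some_command | grep something"  # hypothetical command; the pipe is why shell is kept
  register: example_result
  changed_when: false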
17 changes: 0 additions & 17 deletions .ansible-lint
@@ -7,24 +7,11 @@ skip_list:

# These rules are intentionally skipped:
#
-# [E204]: "Lines should be no longer than 160 chars"
-# This could be re-enabled with a major rewrite in the future.
-# For now, there's not enough value gain from strictly limiting line length.
-# (Disabled in May 2019)
-- '204'
-
-# [E701]: "meta/main.yml should contain relevant info"
-# Roles in Kubespray are not intended to be used/imported by Ansible Galaxy.
-# While it can be useful to have these metadata available, they are also available in the existing documentation.
-# (Disabled in May 2019)
-- '701'
-
# [role-name] "meta/main.yml" Role name role-name does not match ``^+$`` pattern
# Meta roles in Kubespray don't need proper names
# (Disabled in June 2021)
- 'role-name'

-- 'experimental'
# [var-naming] "defaults/main.yml" File defines variable 'apiVersion' that violates variable naming standards
# In Kubespray we use variables that use camelCase to match their k8s counterparts
# (Disabled in June 2021)
@@ -65,10 +52,6 @@ skip_list:
# Disable run-once check with free strategy
# (Disabled in June 2023 after ansible upgrade; FIXME)
- 'run-once[task]'

-# Disable outdated-tag check
-# (Disabled in June 2023 after ansible upgrade; FIXME)
-- 'warning[outdated-tag]'
exclude_paths:
# Generated files
- tests/files/custom_cni/cilium.yaml
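With the numeric entries removed from skip_list above and the numeric # noqa tags rewritten in the task files below, nothing should trigger warning[outdated-tag] anymore, which is presumably why that blanket skip can also go. A minimal sketch of what the warning reacts to (illustrative, not taken from the repo):

# Sketch: on recent ansible-lint the first tag form raises warning[outdated-tag];
# the named form does not.
- name: Read-only query, old-style numeric tag  # noqa 301
  command: /usr/bin/true
- name: Read-only query, named tag  # noqa no-changed-when
  command: /usr/bin/true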
2 changes: 1 addition & 1 deletion contrib/azurerm/roles/generate-inventory/tasks/main.yml
@@ -1,6 +1,6 @@
---

-- name: Query Azure VMs # noqa 301
+- name: Query Azure VMs
command: azure vm list-ip-address --json {{ azure_resource_group }}
register: vm_list_cmd

6 changes: 3 additions & 3 deletions contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
@@ -1,14 +1,14 @@
---

-- name: Query Azure VMs IPs # noqa 301
+- name: Query Azure VMs IPs
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
register: vm_ip_list_cmd

-- name: Query Azure VMs Roles # noqa 301
+- name: Query Azure VMs Roles
command: az vm list -o json --resource-group {{ azure_resource_group }}
register: vm_list_cmd

-- name: Query Azure Load Balancer Public IP # noqa 301
+- name: Query Azure Load Balancer Public IP
command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
register: lb_pubip_cmd

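The dropped # noqa 301 comments suppressed the old numeric form of what is now no-changed-when. They can simply be deleted here, presumably because the renamed rule is handled elsewhere in .ansible-lint (that part of the config is not shown in this diff). For reference, a sketch of the noqa-free way to satisfy the rule for read-only queries like these:

- name: Query Azure VMs IPs
  command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
  register: vm_ip_list_cmd
  changed_when: false  # read-only query, so it never reports a change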
3 changes: 1 addition & 2 deletions contrib/dind/roles/dind-host/tasks/main.yaml
@@ -69,7 +69,7 @@

# Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
# handle manually
-- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
+- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
raw: |
echo {{ item | hash('sha1') }} > /etc/machine-id.new
mv -b /etc/machine-id.new /etc/machine-id
@@ -79,7 +79,6 @@
with_items: "{{ containers.results }}"

- name: Early hack image install to adapt for DIND
-# noqa 302 - this task uses the raw module intentionally
raw: |
rm -fv /usr/bin/udevadm /usr/sbin/udevadm
delegate_to: "{{ item._ansible_item_label|default(item.item) }}"
@@ -7,7 +7,7 @@
register: glusterfs_ppa_added
when: glusterfs_ppa_use

-- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
+- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa no-handler
apt:
name: "{{ item }}"
state: absent
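Rule 503 is now called no-handler: a task that runs only when another task reported changed should normally be a handler. Kubespray keeps the when: ...changed pattern and just renames the tag; for comparison, a rough sketch of the handler-based shape the rule points toward (assumed role layout and variable names, not the project's actual code):

# tasks/main.yml (sketch)
- name: Add GlusterFS PPA
  apt_repository:
    repo: "{{ glusterfs_ppa_repo }}"  # hypothetical variable
  notify: Reinstall GlusterFS client

# handlers/main.yml (sketch)
- name: Reinstall GlusterFS client
  apt:
    name: glusterfs-client
    state: latest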
@@ -28,7 +28,7 @@
name: "{{ gluster_volume_node_mount_dir }}"
src: "{{ disk_volume_device_1 }}"
fstype: xfs
-state: mounted"
+state: mounted

# Setup/install tasks.
- include_tasks: setup-RedHat.yml
@@ -7,7 +7,7 @@
register: glusterfs_ppa_added
when: glusterfs_ppa_use

-- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
+- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa no-handler
apt:
name: "{{ item }}"
state: absent
@@ -6,7 +6,7 @@
- name: "Delete bootstrap Heketi."
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
-- name: "Ensure there is nothing left over." # noqa 301
+- name: "Ensure there is nothing left over."
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
@@ -14,7 +14,7 @@
- name: "Copy topology configuration into container."
changed_when: false
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology." # noqa 503
+- name: "Load heketi topology." # noqa no-handler
when: "render.changed"
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
register: "load_heketi"
@@ -18,7 +18,7 @@
- name: "Provision database volume."
command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
when: "heketi_database_volume_exists is undefined"
-- name: "Copy configuration from pod." # noqa 301
+- name: "Copy configuration from pod."
become: true
command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
- name: "Get heketi volume ids."
@@ -11,10 +11,10 @@
src: "topology.json.j2"
dest: "{{ kube_config_dir }}/topology.json"
mode: 0644
-- name: "Copy topology configuration into container." # noqa 503
+- name: "Copy topology configuration into container." # noqa no-handler
when: "rendering.changed"
command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology." # noqa 503
+- name: "Load heketi topology." # noqa no-handler
when: "rendering.changed"
command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
- name: "Get heketi topology."
@@ -22,15 +22,15 @@
ignore_errors: true # noqa ignore-errors
changed_when: false

-- name: "Remove volume groups." # noqa 301
+- name: "Remove volume groups."
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true
command: "vgremove {{ volume_group }} --yes"
with_items: "{{ volume_groups.stdout_lines }}"
loop_control: { loop_var: "volume_group" }

-- name: "Remove physical volume from cluster disks." # noqa 301
+- name: "Remove physical volume from cluster disks."
environment:
PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
become: true
22 changes: 11 additions & 11 deletions contrib/network-storage/heketi/roles/tear-down/tasks/main.yml
@@ -1,43 +1,43 @@
---
-- name: Remove storage class. # noqa 301
+- name: Remove storage class.
command: "{{ bin_dir }}/kubectl delete storageclass gluster"
ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi. # noqa 301
+- name: Tear down heketi.
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
ignore_errors: true # noqa ignore-errors
-- name: Tear down heketi. # noqa 301
+- name: Tear down heketi.
command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
ignore_errors: true # noqa ignore-errors
- name: Tear down bootstrap.
include_tasks: "../../provision/tasks/bootstrap/tear-down.yml"
-- name: Ensure there is nothing left over. # noqa 301
+- name: Ensure there is nothing left over.
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60
delay: 5
-- name: Ensure there is nothing left over. # noqa 301
+- name: Ensure there is nothing left over.
command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
register: "heketi_result"
until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
retries: 60
delay: 5
-- name: Tear down glusterfs. # noqa 301
+- name: Tear down glusterfs.
command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
ignore_errors: true # noqa ignore-errors
-- name: Remove heketi storage service. # noqa 301
+- name: Remove heketi storage service.
command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
ignore_errors: true # noqa ignore-errors
-- name: Remove heketi gluster role binding # noqa 301
+- name: Remove heketi gluster role binding
command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
ignore_errors: true # noqa ignore-errors
-- name: Remove heketi config secret # noqa 301
+- name: Remove heketi config secret
command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
ignore_errors: true # noqa ignore-errors
-- name: Remove heketi db backup # noqa 301
+- name: Remove heketi db backup
command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
ignore_errors: true # noqa ignore-errors
-- name: Remove heketi service account # noqa 301
+- name: Remove heketi service account
command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
ignore_errors: true # noqa ignore-errors
- name: Get secrets
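The ignore_errors: true # noqa ignore-errors lines above are untouched here; that rule is still active, so its noqa stays. A noqa-free alternative for these "delete it if it exists" clean-up commands would be an explicit failed_when, sketched below (illustrative only):

- name: Remove storage class.
  command: "{{ bin_dir }}/kubectl delete storageclass gluster"
  register: delete_sc_result
  changed_when: delete_sc_result.rc == 0
  failed_when:
    - delete_sc_result.rc != 0
    - "'NotFound' not in delete_sc_result.stderr"  # tolerate an already-absent object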
4 changes: 2 additions & 2 deletions extra_playbooks/migrate_openstack_provider.yml
@@ -16,13 +16,13 @@
src: get_cinder_pvs.sh
dest: /tmp
mode: u+rwx
-- name: Get PVs provisioned by in-tree cloud provider # noqa 301
+- name: Get PVs provisioned by in-tree cloud provider
command: /tmp/get_cinder_pvs.sh
register: pvs
- name: Remove get_cinder_pvs.sh
file:
path: /tmp/get_cinder_pvs.sh
state: absent
-- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301
+- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
loop: "{{ pvs.stdout_lines | list }}"
1 change: 0 additions & 1 deletion roles/container-engine/containerd/tasks/reset.yml
@@ -22,7 +22,6 @@
name: containerd
daemon_reload: true
enabled: false
masked: true
state: stopped
tags:
- reset_containerd
2 changes: 1 addition & 1 deletion roles/container-engine/cri-o/tasks/main.yaml
@@ -196,7 +196,7 @@
register: service_start

- name: cri-o | trigger service restart only when needed
-service: # noqa 503
+service:
name: crio
state: restarted
when:
1 change: 0 additions & 1 deletion roles/container-engine/cri-o/tasks/reset.yml
@@ -63,7 +63,6 @@
name: crio
daemon_reload: true
enabled: false
masked: true
state: stopped
tags:
- reset_crio
2 changes: 1 addition & 1 deletion roles/container-engine/docker/tasks/main.yml
@@ -143,7 +143,7 @@
state: started
when: docker_task_result is not changed
rescue:
-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
msg: "Docker start failed. Try to remove our config"
- name: remove kubespray generated config
file:
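unnamed-task is the old tag for what current ansible-lint calls name[missing]; the rule still applies, so the comment is renamed rather than removed. The noqa-free alternative would simply be to name the rescue debug task, e.g. (sketch):

- name: Report that Docker failed to start
  debug:
    msg: "Docker start failed. Try to remove our config"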
2 changes: 1 addition & 1 deletion roles/container-engine/docker/tasks/reset.yml
@@ -101,6 +101,6 @@
- /etc/docker
ignore_errors: true # noqa ignore-errors

-- name: Docker | systemctl daemon-reload # noqa 503
+- name: Docker | systemctl daemon-reload # noqa no-handler
systemd:
daemon_reload: true
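On the commit message's "fix daemon-reload -> daemon_reload" item: daemon_reload (underscore) is the canonical parameter name of the systemd module, as already used above; the hunks that actually fix the hyphenated spelling are presumably among the files not expanded on this page. Minimal sketch of the canonical form, for reference:

- name: Reload systemd unit files after editing a service definition
  systemd:
    daemon_reload: true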
2 changes: 1 addition & 1 deletion roles/container-engine/docker/tasks/set_facts_dns.yml
@@ -26,7 +26,7 @@
check_mode: no

- name: check system search domains
-# noqa 306 - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
+# noqa risky-shell-pipe - if resolf.conf has no search domain, grep will exit 1 which would force us to add failed_when: false
# Therefore -o pipefail is not applicable in this specific instance
shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
args:
2 changes: 1 addition & 1 deletion roles/container-engine/docker/tasks/systemd.yml
@@ -14,7 +14,7 @@
when: http_proxy is defined or https_proxy is defined

- name: get systemd version
-# noqa 303 - systemctl is called intentionally here
+# noqa command-instead-of-module - systemctl is called intentionally here
shell: set -o pipefail && systemctl --version | head -n 1 | cut -d " " -f 2
args:
executable: /bin/bash
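Rule 303 is now command-instead-of-module: calling a CLI that has a dedicated module. The task above keeps the shell call (and the noqa) because it only needs systemctl's version string; where the rule can be honored, the module-based pattern looks roughly like this (generic sketch, not from the role):

- name: Restart a unit through the systemd module instead of calling systemctl
  systemd:
    name: docker
    state: restarted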
2 changes: 1 addition & 1 deletion roles/download/tasks/check_pull_required.yml
@@ -1,7 +1,7 @@
---
# The image_info_command depends on the Container Runtime and will output something like the following:
# nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required | Generate a list of information about the images on a node # noqa 305 image_info_command contains a pipe, therefore requiring shell
+- name: check_pull_required | Generate a list of information about the images on a node # noqa command-instead-of-shell - image_info_command contains a pipe, therefore requiring shell
shell: "{{ image_info_command }}"
register: docker_images
changed_when: false
6 changes: 3 additions & 3 deletions roles/download/tasks/download_container.yml
@@ -18,7 +18,7 @@
when:
- not download_always_pull

-- debug: # noqa unnamed-task
+- debug: # noqa name[missing]
msg: "Pull {{ image_reponame }} required is: {{ pull_required }}"

- name: download_container | Determine if image is in cache
@@ -68,7 +68,7 @@
- not image_is_cached

- name: download_container | Save and compress image
-shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa 305 image_save_command_on_localhost contains a pipe, therefore requires shell
+shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}" # noqa command-instead-of-shell - image_save_command_on_localhost contains a pipe, therefore requires shell
delegate_to: "{{ download_delegate }}"
delegate_facts: no
register: container_save_status
@@ -108,7 +108,7 @@
- download_force_cache

- name: download_container | Load image into the local container registry
-shell: "{{ image_load_command }}" # noqa 305 image_load_command uses pipes, therefore requires shell
+shell: "{{ image_load_command }}" # noqa command-instead-of-shell - image_load_command uses pipes, therefore requires shell
register: container_load_status
failed_when: container_load_status is failed
when:
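Rule 305 maps to command-instead-of-shell: use shell only when shell features (pipes, redirection, chaining) are required. The templated image_*_command strings contain pipes, so shell stays and only the tag is renamed. Where no shell features are involved, command is the lint-clean choice, e.g. (hypothetical example, not from this role):

- name: Query the container runtime directly (no pipes, so command is enough)
  command: "{{ bin_dir }}/nerdctl images -q"  # hypothetical binary and path
  register: image_list
  changed_when: false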
4 changes: 2 additions & 2 deletions roles/download/tasks/prep_download.yml
@@ -21,7 +21,7 @@
- asserts

- name: prep_download | On localhost, check if user has access to the container runtime without using sudo
-shell: "{{ image_info_command_on_localhost }}" # noqa 305 image_info_command_on_localhost contains pipe, therefore requires shell
+shell: "{{ image_info_command_on_localhost }}" # noqa command-instead-of-shell - image_info_command_on_localhost contains pipe, therefore requires shell
delegate_to: localhost
connection: local
run_once: true
@@ -57,7 +57,7 @@
- asserts

- name: prep_download | Register docker images info
-shell: "{{ image_info_command }}" # noqa 305 image_info_command contains pipe therefore requires shell
+shell: "{{ image_info_command }}" # noqa command-instead-of-shell - image_info_command contains pipe therefore requires shell
no_log: "{{ not (unsafe_show_logs|bool) }}"
register: docker_images
failed_when: false
2 changes: 1 addition & 1 deletion roles/etcd/tasks/join_etcd-events_member.yml
@@ -1,5 +1,5 @@
---
-- name: Join Member | Add member to etcd-events cluster # noqa 301 305
+- name: Join Member | Add member to etcd-events cluster
command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
register: member_add_result
until: member_add_result.rc == 0
2 changes: 1 addition & 1 deletion roles/etcd/tasks/join_etcd_member.yml
@@ -1,5 +1,5 @@
---
-- name: Join Member | Add member to etcd cluster # noqa 301 305
+- name: Join Member | Add member to etcd cluster
command: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
register: member_add_result
until: member_add_result.rc == 0 or 'Peer URLs already exists' in member_add_result.stderr
6 changes: 3 additions & 3 deletions roles/etcd/tasks/upd_ca_trust.yml
@@ -24,14 +24,14 @@
mode: 0640
register: etcd_ca_cert

-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa 503
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Flatcar) # noqa no-handler
command: update-ca-certificates
when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"]

-- name: Gen_certs | update ca-certificates (RedHat) # noqa 503
+- name: Gen_certs | update ca-certificates (RedHat) # noqa no-handler
command: update-ca-trust extract
when: etcd_ca_cert.changed and ansible_os_family == "RedHat"

-- name: Gen_certs | update ca-certificates (ClearLinux) # noqa 503
+- name: Gen_certs | update ca-certificates (ClearLinux) # noqa no-handler
command: clrtrust add "{{ ca_cert_path }}"
when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux"
4 changes: 2 additions & 2 deletions roles/helm-apps/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Add Helm repositories
-kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}"
+kubernetes.core.helm_repository: "{{ helm_repository_defaults | combine(item) }}" # noqa args[module]
loop: "{{ repositories }}"

- name: Update Helm repositories
@@ -15,5 +15,5 @@
- helm_update

- name: Install Helm Applications
-kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}"
+kubernetes.core.helm: "{{ helm_defaults | combine(release_common_opts, item) }}" # noqa args[module]
loop: "{{ releases }}"
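The added # noqa args[module] comments acknowledge that ansible-lint cannot validate module arguments when the whole argument dictionary is passed as one Jinja expression, as these tasks do. The expanded form the linter can check would look roughly like this (illustrative values only):

- name: Add a Helm repository (expanded, lint-checkable form)
  kubernetes.core.helm_repository:
    name: example-repo                       # illustrative values
    repo_url: https://example.invalid/charts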