Skip to content
This repository has been archived by the owner on Feb 29, 2024. It is now read-only.

Commit

Permalink
Normalise all pacemaker resource upgrade tasks for staged upgrades
Browse files Browse the repository at this point in the history
To follow on from I2e88dc34fa59624523de4c52a1873438c78e972f we now
normalise all the resource upgrade tasks to improve idempotency and
speed up the process of verifying and upgrading images as necessary.

In doing so we clean up a few things as well:

1. There were some unnecessary blocks present without conditions
   or any shared properties for the tasks.
2. Use 'failed_when: false' rather than 'ignore_errors: true'
   because ignoring errors shows a failed task which is confusing
   to users.
3. Some tasks had an empty conditional.

Change-Id: I8b5b25d03b86b2c44b2d47e5a0624e7dd13873da
Related-Bug: #1838971
Closes: rhbz#1758578
  • Loading branch information
odyssey4me committed Oct 16, 2019
1 parent 7ee240c commit dfd8b73
Show file tree
Hide file tree
Showing 7 changed files with 280 additions and 224 deletions.
109 changes: 61 additions & 48 deletions deployment/cinder/cinder-backup-pacemaker-puppet.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -227,65 +227,76 @@ outputs:
container_image: {get_param: ContainerCinderBackupImage}
container_image_latest: *cinder_backup_image_pcmklatest
update_tasks:
- name: Cinder-Backup fetch and retag container image for pacemaker
- name: cinder_backup fetch and retag container image for pacemaker
when: step|int == 2
block: &cinder_backup_fetch_retag_container_tasks
- name: Get docker Cinder-Backup image
- name: Get container cinder_backup image
set_fact:
docker_image: {get_param: ContainerCinderBackupImage}
docker_image_latest: *cinder_backup_image_pcmklatest
- name: Get previous Cinder-Backup image id
shell: "{{container_cli}} images | awk '/cinder-backup.* pcmklatest/{print $3}' | uniq"
register: cinder_backup_image_id
cinder_backup_image: {get_param: ContainerCinderBackupImage}
cinder_backup_image_latest: *cinder_backup_image_pcmklatest
- name: Pull latest cinder_backup images
command: "{{container_cli}} pull {{cinder_backup_image}}"
- name: Get previous cinder_backup image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{cinder_backup_image_latest}}"
register: old_cinder_backup_image_id
failed_when: false
- name: Get new cinder_backup image id
shell: "{{container_cli}} inspect --format '{{'{{'}}.Id{{'}}'}}' {{cinder_backup_image}}"
register: new_cinder_backup_image_id
- name: Retag pcmklatest to latest cinder_backup image
include_role:
name: tripleo-container-tag
vars:
container_image: "{{cinder_backup_image}}"
container_image_latest: "{{cinder_backup_image_latest}}"
when:
- old_cinder_backup_image_id.stdout != new_cinder_backup_image_id.stdout
- block:
- name: Get a list of container using Cinder-Backup image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{cinder_backup_image_id.stdout}}'"
- name: Get a list of containers using cinder_backup image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{old_cinder_backup_image_id.stdout}}'"
register: cinder_backup_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same Cinder-Backup image
- name: Remove any containers using the same cinder_backup image
shell: "{{container_cli}} rm -fv {{item}}"
with_items: "{{ cinder_backup_containers_to_destroy.stdout_lines }}"
- name: Remove previous Cinder-Backup images
shell: "{{container_cli}} rmi -f {{cinder_backup_image_id.stdout}}"
- name: Remove previous cinder_backup images
shell: "{{container_cli}} rmi -f {{old_cinder_backup_image_id.stdout}}"
when:
- cinder_backup_image_id.stdout != ''
- name: Pull latest Cinder-Backup images
command: "{{container_cli}} pull {{docker_image}}"
- name: Retag pcmklatest to latest Cinder-Backup image
import_role:
name: tripleo-container-tag
vars:
container_image: "{{docker_image}}"
container_image_latest: "{{docker_image_latest}}"
# Got to check that pacemaker_is_active is working fine with bundle.
# TODO: pacemaker_is_active resource doesn't support bundle.
- old_cinder_backup_image_id.stdout != ''
- old_cinder_backup_image_id.stdout != new_cinder_backup_image_id.stdout

upgrade_tasks:
- when: step|int == 0
tags: common
- name: Prepare switch of cinder_backup image name
when:
- step|int == 0
block:
- name: Get docker Cinder-Backup image
- name: Get cinder_backup image id currently used by pacemaker
shell: "pcs resource config openstack-cinder-backup | grep -Eo 'image=[^ ]+' | awk -F= '{print $2;}'"
register: cinder_backup_image_current_res
failed_when: false
- name: cinder_backup image facts
set_fact:
cinder_backup_docker_image_latest: *cinder_backup_image_pcmklatest
- name: Prepare the switch to new cinder_backup container image name in pacemaker
when: cinder_backup_containerized|bool
block:
- name: Get cinder_backup image id currently used by pacemaker
shell: "{{container_cli}} images | awk '/cinder-backup.* pcmklatest/{print $3}' | uniq"
register: cinder_backup_current_pcmklatest_id
- name: Temporarily tag the current cinder_backup image id with the upgraded image name
when: cinder_backup_current_pcmklatest_id.stdout != ''
import_role:
name: tripleo-container-tag
vars:
container_image: "{{cinder_backup_current_pcmklatest_id.stdout}}"
container_image_latest: "{{cinder_backup_docker_image_latest}}"
pull_image: false
cinder_backup_image_latest: *cinder_backup_image_pcmklatest
cinder_backup_image_current: "{{cinder_backup_image_current_res.stdout}}"
- name: Temporarily tag the current cinder_backup image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{cinder_backup_current_pcmklatest_id.stdout}}"
container_image_latest: "{{cinder_backup_docker_image_latest}}"
pull_image: false
when:
- cinder_backup_image_current != ''
- cinder_backup_image_current != cinder_backup_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check openstack-cinder-backup cluster resource status
pacemaker_resource:
resource: openstack-cinder-backup
state: show
check_mode: false
ignore_errors: true
shell: pcs resource config openstack-cinder-backup
failed_when: false
changed_when: false
register: cinder_backup_pcs_res_result
- name: Set fact cinder_backup_pcs_res
set_fact:
Expand All @@ -298,6 +309,7 @@ outputs:
- step|int == 1
- is_cinder_backup_bootstrap_node
- cinder_backup_pcs_res|bool
- cinder_backup_image_current != cinder_backup_image_latest
block:
- name: Disable the cinder_backup cluster resource before container upgrade
pacemaker_resource:
Expand All @@ -308,7 +320,7 @@ outputs:
retries: 5
until: output.rc == 0
- name: Update the cinder_backup bundle to use the new container image name
command: "pcs resource bundle update openstack-cinder-backup container image={{cinder_backup_docker_image_latest}}"
command: "pcs resource bundle update openstack-cinder-backup container image={{cinder_backup_image_latest}}"
- name: Enable the cinder_backup cluster resource
pacemaker_resource:
resource: openstack-cinder-backup
Expand All @@ -321,6 +333,7 @@ outputs:
when:
- step|int == 3
block: *cinder_backup_fetch_retag_container_tasks

fast_forward_upgrade_tasks:
- when:
- step|int == 0
Expand All @@ -332,7 +345,7 @@ outputs:
resource: openstack-cinder-backup
state: show
check_mode: false
ignore_errors: true
failed_when: false
register: cinder_backup_res_result
- name: Set fact cinder_backup_res
set_fact:
Expand Down
33 changes: 18 additions & 15 deletions deployment/cinder/cinder-volume-pacemaker-puppet.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -238,11 +238,11 @@ outputs:
when:
- old_cinder_volume_image_id.stdout != new_cinder_volume_image_id.stdout
- block:
- name: Get a list of container using cinder_volume image
- name: Get a list of containers using cinder_volume image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{old_cinder_volume_image_id.stdout}}'"
register: cinder_volume_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same cinder_volume image
- name: Remove any containers using the same cinder_volume image
shell: "{{container_cli}} rm -fv {{item}}"
with_items: "{{ cinder_volume_containers_to_destroy.stdout_lines }}"
- name: Remove previous cinder_volume images
Expand All @@ -264,20 +264,24 @@ outputs:
set_fact:
cinder_volume_image_latest: *cinder_volume_image_pcmklatest
cinder_volume_image_current: "{{cinder_volume_image_current_res.stdout}}"
- name: Prepare the switch to new cinder_volume container image name in pacemaker
block:
- name: Temporarily tag the current cinder_volume image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{cinder_volume_image_current}}"
container_image_latest: "{{cinder_volume_image_latest}}"
pull_image: false
when:
- cinder_volume_image_current != ''
- cinder_volume_image_current != cinder_volume_image_latest
- name: Temporarily tag the current cinder_volume image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{cinder_volume_image_current}}"
container_image_latest: "{{cinder_volume_image_latest}}"
pull_image: false
when:
- cinder_volume_image_current != ''
- cinder_volume_image_current != cinder_volume_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check openstack-cinder-volume cluster resource status
shell: pcs resource config openstack-cinder-volume
changed_when: false
failed_when: false
register: cinder_volume_pcs_res_result
- name: Set fact cinder_volume_pcs_res
Expand All @@ -304,7 +308,6 @@ outputs:
- name: pcs resource bundle update cinder_volume for new container image name
command: "pcs resource bundle update openstack-cinder-volume container image={{cinder_volume_image_latest}}"
- name: Enable the cinder_volume cluster resource
when:
pacemaker_resource:
resource: openstack-cinder-volume
state: enable
Expand Down
28 changes: 16 additions & 12 deletions deployment/database/mysql-pacemaker-puppet.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -418,21 +418,25 @@ outputs:
set_fact:
galera_image_latest: *mysql_image_pcmklatest
galera_image_current: "{{galera_image_current_res.stdout}}"
- name: Prepare the switch to new galera container image name in pacemaker
block:
- name: Temporarily tag the current galera image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{galera_image_current}}"
container_image_latest: "{{galera_image_latest}}"
pull_image: false
when:
- galera_image_current != ''
- galera_image_current != galera_image_latest
- name: Temporarily tag the current galera image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{galera_image_current}}"
container_image_latest: "{{galera_image_latest}}"
pull_image: false
when:
- galera_image_current != ''
- galera_image_current != galera_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check galera cluster resource status
shell: pcs resource config galera-bundle
failed_when: false
changed_when: false
register: galera_pcs_res_result
- name: Set fact galera_pcs_res
set_fact:
Expand Down
32 changes: 18 additions & 14 deletions deployment/database/redis-pacemaker-puppet.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -330,11 +330,11 @@ outputs:
when:
- old_redis_image_id.stdout != new_redis_image_id.stdout
- block:
- name: Get a list of container using redis image
- name: Get a list of containers using redis image
shell: "{{container_cli}} ps -a -q -f 'ancestor={{old_redis_image_id.stdout}}'"
register: redis_containers_to_destroy
# It will be recreated with the deploy step.
- name: Remove any container using the same redis image
- name: Remove any containers using the same redis image
shell: "{{container_cli}} rm -fv {{item}}"
with_items: "{{ redis_containers_to_destroy.stdout_lines }}"
- name: Remove previous redis images
Expand All @@ -356,21 +356,25 @@ outputs:
set_fact:
redis_image_latest: *redis_image_pcmklatest
redis_image_current: "{{redis_image_current_res.stdout}}"
- name: Prepare the switch to new redis container image name in pacemaker
block:
- name: Temporarily tag the current redis image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{redis_image_current}}"
container_image_latest: "{{redis_image_latest}}"
pull_image: false
when:
- redis_image_current != ''
- redis_image_current != redis_image_latest
- name: Temporarily tag the current redis image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{redis_image_current}}"
container_image_latest: "{{redis_image_latest}}"
pull_image: false
when:
- redis_image_current != ''
- redis_image_current != redis_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check redis cluster resource status
shell: pcs resource config redis-bundle
failed_when: false
changed_when: false
register: redis_pcs_res_result
- name: Set upgrade redis facts
set_fact:
Expand Down
31 changes: 18 additions & 13 deletions deployment/haproxy/haproxy-pacemaker-puppet.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -402,7 +402,8 @@ outputs:
until: output.rc == 0
when: haproxy_cert_mounted.rc == 6
- name: Haproxy fetch and retag container image for pacemaker
when: step|int == 2
when:
- step|int == 2
block: &haproxy_fetch_retag_container_tasks
- name: Get container haproxy image
set_fact:
Expand Down Expand Up @@ -452,21 +453,25 @@ outputs:
set_fact:
haproxy_image_latest: *haproxy_image_pcmklatest
haproxy_image_current: "{{haproxy_image_current_res.stdout}}"
- name: Prepare the switch to new haproxy container image name in pacemaker
block:
- name: Temporarily tag the current haproxy image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{haproxy_image_current}}"
container_image_latest: "{{haproxy_image_latest}}"
pull_image: false
when:
- haproxy_image_current != ''
- haproxy_image_current != haproxy_image_latest
- name: Temporarily tag the current haproxy image id with the upgraded image name
import_role:
name: tripleo-container-tag
vars:
container_image: "{{haproxy_image_current}}"
container_image_latest: "{{haproxy_image_latest}}"
pull_image: false
when:
- haproxy_image_current != ''
- haproxy_image_current != haproxy_image_latest
# During an OS Upgrade, the cluster may not exist so we use
# the shell module instead.
# TODO(odyssey4me):
# Fix the pacemaker_resource module to handle the exception
# for a non-existent cluster more gracefully.
- name: Check haproxy cluster resource status
shell: pcs resource config haproxy-bundle
failed_when: false
changed_when: false
register: haproxy_pcs_res_result
- name: Set upgrade haproxy facts
set_fact:
Expand Down
Loading

0 comments on commit dfd8b73

Please sign in to comment.