From 0ec759f96c98c50b84cb3e66619e2c868f641040 Mon Sep 17 00:00:00 2001
From: Pavel Bar
Date: Sun, 26 Jun 2022 21:36:53 +0300
Subject: [PATCH] Fix violations reported by "yamllint" (wrong indentation).

Fixing 141 indentation violations similar to:
yaml: wrong indentation: expected 8 but found 10 (yaml[indentation])

Signed-off-by: Pavel Bar
Bug-Url: https://bugzilla.redhat.com/2097332
---
 .../examples/dr_ovirt_setup.yml | 2 +-
 roles/disaster_recovery/examples/dr_play.yml | 2 +-
 .../tasks/clean/remove_disks.yml | 10 +-
 .../tasks/clean/remove_domain.yml | 18 +-
 .../tasks/clean/remove_domain_process.yml | 8 +-
 ...remove_invalid_filtered_master_domains.yml | 14 +-
 .../remove_valid_filtered_master_domains.yml | 26 +--
 .../tasks/clean/remove_vms.yml | 10 +-
 .../tasks/clean/shutdown_vm.yml | 14 +-
 .../tasks/clean/shutdown_vms.yml | 16 +-
 .../tasks/clean/update_ovf_store.yml | 14 +-
 .../disaster_recovery/tasks/clean_engine.yml | 64 +++---
 .../tasks/generate_mapping.yml | 2 +-
 roles/disaster_recovery/tasks/main.yml | 18 +-
 .../tasks/recover/add_domain.yml | 74 +++----
 .../tasks/recover/add_fcp_domain.yml | 28 +--
 .../tasks/recover/add_glusterfs_domain.yml | 28 +--
 .../tasks/recover/add_iscsi_domain.yml | 60 +++---
 .../tasks/recover/add_nfs_domain.yml | 24 +--
 .../tasks/recover/add_posixfs_domain.yml | 30 +--
 .../tasks/recover/print_info.yml | 4 +-
 .../tasks/recover/register_template.yml | 22 +-
 .../tasks/recover/register_templates.yml | 14 +-
 .../tasks/recover/register_vm.yml | 26 +--
 .../tasks/recover/register_vms.yml | 16 +-
 .../tasks/recover/run_vms.yml | 12 +-
 .../tasks/recover_engine.yml | 192 +++++++++---------
 .../tasks/run_unregistered_entities.yml | 22 +-
 .../tasks/unregister_entities.yml | 36 ++--
 29 files changed, 403 insertions(+), 403 deletions(-)

diff --git a/roles/disaster_recovery/examples/dr_ovirt_setup.yml b/roles/disaster_recovery/examples/dr_ovirt_setup.yml
index 9143d3e0d..102a63baf 100644
--- a/roles/disaster_recovery/examples/dr_ovirt_setup.yml
+++ b/roles/disaster_recovery/examples/dr_ovirt_setup.yml
@@ -8,4 +8,4 @@
   roles:
     - disaster_recovery
   collections:
-      - @NAMESPACE@.@NAME@
+    - @NAMESPACE@.@NAME@
diff --git a/roles/disaster_recovery/examples/dr_play.yml b/roles/disaster_recovery/examples/dr_play.yml
index 2847d93bc..68606295c 100644
--- a/roles/disaster_recovery/examples/dr_play.yml
+++ b/roles/disaster_recovery/examples/dr_play.yml
@@ -5,4 +5,4 @@
   roles:
     - disaster_recovery
   collections:
-      - @NAMESPACE@.@NAME@
+    - @NAMESPACE@.@NAME@
diff --git a/roles/disaster_recovery/tasks/clean/remove_disks.yml b/roles/disaster_recovery/tasks/clean/remove_disks.yml
index e6ee938a3..472811c46 100644
--- a/roles/disaster_recovery/tasks/clean/remove_disks.yml
+++ b/roles/disaster_recovery/tasks/clean/remove_disks.yml
@@ -1,10 +1,10 @@
 - block:
     - name: Remove disk
       ovirt_disk:
-          state: absent
-          id: "{{ disk.id }}"
-          auth: "{{ ovirt_auth }}"
+        state: absent
+        id: "{{ disk.id }}"
+        auth: "{{ ovirt_auth }}"
   ignore_errors: "{{ dr_ignore_error_clean }}"
   tags:
-      - fail_back
-      - clean_engine
+    - fail_back
+    - clean_engine
diff --git a/roles/disaster_recovery/tasks/clean/remove_domain.yml b/roles/disaster_recovery/tasks/clean/remove_domain.yml
index b18460e16..815204a7c 100644
--- a/roles/disaster_recovery/tasks/clean/remove_domain.yml
+++ b/roles/disaster_recovery/tasks/clean/remove_domain.yml
@@ -3,18 +3,18 @@
 # We should wait for some time and try again
 - name: Remove storage domain
   ovirt_storage_domain:
-      state: absent
-      id: "{{ sd.id }}"
-      name: "{{ sd.name }}"
-      auth: "{{ ovirt_auth }}"
-
host: "{{ host }}" - destroy: "{{ dr_force }}" - data_center: "{{ sp_uuid }}" + state: absent + id: "{{ sd.id }}" + name: "{{ sd.name }}" + auth: "{{ ovirt_auth }}" + host: "{{ host }}" + destroy: "{{ dr_force }}" + data_center: "{{ sp_uuid }}" register: result until: dr_force or result is not failed retries: "{{ dr_cleanup_retries_maintenance }}" delay: "{{ dr_cleanup_delay_maintenance }}" ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back - - clean_engine + - fail_back + - clean_engine diff --git a/roles/disaster_recovery/tasks/clean/remove_domain_process.yml b/roles/disaster_recovery/tasks/clean/remove_domain_process.yml index b153bb366..7df068b34 100644 --- a/roles/disaster_recovery/tasks/clean/remove_domain_process.yml +++ b/roles/disaster_recovery/tasks/clean/remove_domain_process.yml @@ -35,15 +35,15 @@ - name: Remove storage domain with no force include_tasks: remove_domain.yml vars: - host: "{{ host_info.ovirt_hosts[0].id }}" + host: "{{ host_info.ovirt_hosts[0].id }}" when: "host_info.ovirt_hosts is defined and host_info.ovirt_hosts|length > 0 and not dr_force" - name: Force remove storage domain include_tasks: remove_domain.yml vars: - host: "00000000-0000-0000-0000-000000000000" + host: "00000000-0000-0000-0000-000000000000" when: "dr_force" ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back - - clean_engine + - fail_back + - clean_engine diff --git a/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml b/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml index 2f00c835a..5d37fe74b 100644 --- a/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml +++ b/roles/disaster_recovery/tasks/clean/remove_invalid_filtered_master_domains.yml @@ -1,20 +1,20 @@ - block: - name: Fetch invalid storage domain for remove ovirt_storage_domain_info: - pattern: name={{ storage['dr_' + dr_source_map + '_name'] }} and {{ dr_inactive_domain_search }} - auth: "{{ ovirt_auth }}" + pattern: name={{ storage['dr_' + dr_source_map + '_name'] }} and {{ dr_inactive_domain_search }} + auth: "{{ ovirt_auth }}" register: storage_domain_info - name: Remove invalid storage domain include_tasks: remove_domain_process.yml vars: - sd: "{{ sd }}" + sd: "{{ sd }}" with_items: - - "{{ storage_domain_info.ovirt_storage_domains }}" + - "{{ storage_domain_info.ovirt_storage_domains }}" when: (not only_master and not sd.master) or (only_master and sd.master) loop_control: - loop_var: sd + loop_var: sd ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back - - clean_engine + - fail_back + - clean_engine diff --git a/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml b/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml index b5034f2cd..79a260231 100644 --- a/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml +++ b/roles/disaster_recovery/tasks/clean/remove_valid_filtered_master_domains.yml @@ -1,26 +1,26 @@ - block: - name: Fetch active/maintenance/detached storage domain for remove ovirt_storage_domain_info: - pattern: > - name={{ storage['dr_' + dr_source_map + '_name'] }} and - ( - datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }} and {{ dr_active_domain_search }} or - datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }} and {{ dr_maintenance_domain_search }} or - {{ dr_unattached_domain_search }} - ) - auth: "{{ ovirt_auth }}" + pattern: > + name={{ storage['dr_' + dr_source_map + '_name'] }} and + ( + 
datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }} and {{ dr_active_domain_search }} or + datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }} and {{ dr_maintenance_domain_search }} or + {{ dr_unattached_domain_search }} + ) + auth: "{{ ovirt_auth }}" register: storage_domain_info - name: Remove valid storage domain include_tasks: remove_domain_process.yml vars: - sd: "{{ sd }}" + sd: "{{ sd }}" with_items: - - "{{ storage_domain_info.ovirt_storage_domains }}" + - "{{ storage_domain_info.ovirt_storage_domains }}" when: (not only_master and not sd.master) or (only_master and sd.master) loop_control: - loop_var: sd + loop_var: sd ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back - - clean_engine + - fail_back + - clean_engine diff --git a/roles/disaster_recovery/tasks/clean/remove_vms.yml b/roles/disaster_recovery/tasks/clean/remove_vms.yml index 4b8edcc72..239cc93f8 100644 --- a/roles/disaster_recovery/tasks/clean/remove_vms.yml +++ b/roles/disaster_recovery/tasks/clean/remove_vms.yml @@ -1,10 +1,10 @@ - block: - name: Remove diskless VMs ovirt_vm: - state: absent - name: "{{ vm.name }}" - auth: "{{ ovirt_auth }}" + state: absent + name: "{{ vm.name }}" + auth: "{{ ovirt_auth }}" ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back - - clean_engine + - fail_back + - clean_engine diff --git a/roles/disaster_recovery/tasks/clean/shutdown_vm.yml b/roles/disaster_recovery/tasks/clean/shutdown_vm.yml index b076776cd..d6c5ab396 100644 --- a/roles/disaster_recovery/tasks/clean/shutdown_vm.yml +++ b/roles/disaster_recovery/tasks/clean/shutdown_vm.yml @@ -1,12 +1,12 @@ - block: - name: Shutdown VM ovirt_vm: - state: stopped - name: "{{ vms.name }}" - force: true - wait: true - auth: "{{ ovirt_auth }}" + state: stopped + name: "{{ vms.name }}" + force: true + wait: true + auth: "{{ ovirt_auth }}" ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back - - clean_engine + - fail_back + - clean_engine diff --git a/roles/disaster_recovery/tasks/clean/shutdown_vms.yml b/roles/disaster_recovery/tasks/clean/shutdown_vms.yml index d3a49aa3e..b0e0ac894 100644 --- a/roles/disaster_recovery/tasks/clean/shutdown_vms.yml +++ b/roles/disaster_recovery/tasks/clean/shutdown_vms.yml @@ -2,20 +2,20 @@ # Get all the running VMs related to a storage domain and shut them down - name: Fetch VMs in the storage domain ovirt_vm_info: - pattern: > - status != down and - storage.name={{ storage['dr_' + dr_source_map + '_name'] }} and - datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }} - auth: "{{ ovirt_auth }}" + pattern: > + status != down and + storage.name={{ storage['dr_' + dr_source_map + '_name'] }} and + datacenter={{ storage['dr_' + dr_source_map + '_dc_name'] }} + auth: "{{ ovirt_auth }}" register: vm_info # TODO: Add a wait until the VM is really down - name: Shutdown VMs include_tasks: shutdown_vm.yml vars: - vms: "{{ item }}" + vms: "{{ item }}" with_items: "{{ vm_info.ovirt_vms }}" ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back - - clean_engine + - fail_back + - clean_engine diff --git a/roles/disaster_recovery/tasks/clean/update_ovf_store.yml b/roles/disaster_recovery/tasks/clean/update_ovf_store.yml index 7c904aff7..897469b49 100644 --- a/roles/disaster_recovery/tasks/clean/update_ovf_store.yml +++ b/roles/disaster_recovery/tasks/clean/update_ovf_store.yml @@ -1,18 +1,18 @@ - block: - name: Fetch storage domain only if active ovirt_storage_domain_info: - pattern: status = active and storage.name={{ storage['dr_' + dr_source_map + 
'_name'] }} - auth: "{{ ovirt_auth }}" + pattern: status = active and storage.name={{ storage['dr_' + dr_source_map + '_name'] }} + auth: "{{ ovirt_auth }}" register: storage_domain_info - name: Update OVF store for active storage domain ovirt_storage_domain: - state: update_ovf_store - name: "{{ iscsi_storage['dr_' + dr_source_map + '_name'] }}" - auth: "{{ ovirt_auth }}" + state: update_ovf_store + name: "{{ iscsi_storage['dr_' + dr_source_map + '_name'] }}" + auth: "{{ ovirt_auth }}" with_items: - "{{ storage_domain_info.ovirt_storage_domains }}" ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back - - clean_engine + - fail_back + - clean_engine diff --git a/roles/disaster_recovery/tasks/clean_engine.yml b/roles/disaster_recovery/tasks/clean_engine.yml index c7f70e6b4..0da1d3943 100644 --- a/roles/disaster_recovery/tasks/clean_engine.yml +++ b/roles/disaster_recovery/tasks/clean_engine.yml @@ -1,24 +1,24 @@ - block: - name: Obtain SSO token ovirt_auth: - url: "{{ vars['dr_sites_' + dr_source_map + '_url'] }}" - username: "{{ vars['dr_sites_' + dr_source_map + '_username'] }}" - password: "{{ vars['dr_sites_' + dr_source_map + '_password'] }}" - ca_file: "{{ vars['dr_sites_' + dr_source_map + '_ca_file'] }}" + url: "{{ vars['dr_sites_' + dr_source_map + '_url'] }}" + username: "{{ vars['dr_sites_' + dr_source_map + '_username'] }}" + password: "{{ vars['dr_sites_' + dr_source_map + '_password'] }}" + ca_file: "{{ vars['dr_sites_' + dr_source_map + '_ca_file'] }}" - name: Shutdown running VMs include_tasks: clean/shutdown_vms.yml with_items: - - "{{ dr_import_storages }}" + - "{{ dr_import_storages }}" loop_control: - loop_var: storage + loop_var: storage - name: Update OVF_STORE disk for storage domains include_tasks: clean/update_ovf_store.yml with_items: - - "{{ dr_import_storages }}" + - "{{ dr_import_storages }}" loop_control: - loop_var: storage + loop_var: storage - name: Set force remove flag to false for non master domains set_fact: dr_force=False @@ -27,10 +27,10 @@ # Note: Export storage domain is not supported and should not be part of storage mapping - name: Setup queries for storage domains set_fact: - dr_active_domain_search='status = active and type != cinder' - dr_maintenance_domain_search='status = maintenance and type != cinder' - dr_unattached_domain_search='status = unattached and type != cinder and type != glance' - dr_inactive_domain_search='type != glance and type != cinder and status != active' + dr_active_domain_search='status = active and type != cinder' + dr_maintenance_domain_search='status = maintenance and type != cinder' + dr_unattached_domain_search='status = unattached and type != cinder and type != glance' + dr_inactive_domain_search='type != glance and type != cinder and status != active' - name: Set master storage domain filter set_fact: only_master=False @@ -38,9 +38,9 @@ - name: Remove non master storage domains with valid statuses include_tasks: clean/remove_valid_filtered_master_domains.yml with_items: - - "{{ dr_import_storages }}" + - "{{ dr_import_storages }}" loop_control: - loop_var: storage + loop_var: storage # We use inactive filter only at the end, since we are not sure if there were any storage domains # which became inactive on the process or if there were any at the beginning. 
@@ -50,9 +50,9 @@ - name: Remove non master storage domains with invalid statuses using force remove include_tasks: clean/remove_invalid_filtered_master_domains.yml with_items: - - "{{ dr_import_storages }}" + - "{{ dr_import_storages }}" loop_control: - loop_var: storage + loop_var: storage - name: Set master storage domain filter set_fact: only_master=True @@ -63,9 +63,9 @@ - name: Remove master storage domains with valid statuses include_tasks: clean/remove_valid_filtered_master_domains.yml with_items: - - "{{ dr_import_storages }}" + - "{{ dr_import_storages }}" loop_control: - loop_var: storage + loop_var: storage - name: Set force remove flag to true for master domain set_fact: dr_force=True @@ -73,44 +73,44 @@ - name: Remove master storage domains with invalid statuses using force remove include_tasks: clean/remove_invalid_filtered_master_domains.yml with_items: - - "{{ dr_import_storages }}" + - "{{ dr_import_storages }}" loop_control: - loop_var: storage + loop_var: storage - name: Fetch leftover storage domains ovirt_storage_domain_info: - pattern: type != glance - auth: "{{ ovirt_auth }}" + pattern: type != glance + auth: "{{ ovirt_auth }}" register: storage_domain_info # TODO: Document that behavior # Remove VMs only if there are no data storage domains left in the setup - name: Fetch leftover VMs in the setup ovirt_vm_info: - pattern: status = down - auth: "{{ ovirt_auth }}" + pattern: status = down + auth: "{{ ovirt_auth }}" register: vm_info when: dr_clean_orphaned_vms and storage_domain_info.ovirt_storage_domains | length == 0 - name: Remove vms if no storage domains left in setup include_tasks: clean/remove_vms.yml vars: - vm: "{{ item }}" + vm: "{{ item }}" with_items: "{{ vm_info.ovirt_vms }}" when: dr_clean_orphaned_vms and storage_domain_info.ovirt_storage_domains | length == 0 # Remove direct LUN disks - name: Fetch leftover direct LUN disks in the setup ovirt_disk_info: - pattern: disk_type = lun and number_of_vms =0 - auth: "{{ ovirt_auth }}" + pattern: disk_type = lun and number_of_vms =0 + auth: "{{ ovirt_auth }}" register: disk_info when: dr_clean_orphaned_disks and storage_domain_info.ovirt_storage_domains | length == 0 - name: Remove LUN disks if no storage domains left in setup include_tasks: clean/remove_disks.yml vars: - disk: "{{ item }}" + disk: "{{ item }}" with_items: "{{ disk_info.ovirt_disks }}" when: dr_clean_orphaned_disks and storage_domain_info.ovirt_storage_domains | length == 0 @@ -118,11 +118,11 @@ # Default value is set in role defaults ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back - - clean_engine + - fail_back + - clean_engine always: - name: Revoke the SSO token ovirt_auth: - state: absent - ovirt_auth: "{{ ovirt_auth }}" + state: absent + ovirt_auth: "{{ ovirt_auth }}" diff --git a/roles/disaster_recovery/tasks/generate_mapping.yml b/roles/disaster_recovery/tasks/generate_mapping.yml index 67be02505..adead20ce 100644 --- a/roles/disaster_recovery/tasks/generate_mapping.yml +++ b/roles/disaster_recovery/tasks/generate_mapping.yml @@ -3,4 +3,4 @@ command: python3 {{ role_path }}/files/generate_mapping.py -a "{{ site }}" -u "{{ username }}" -p "{{ password }}" -c "{{ ca }}" -f "{{ var_file }}" run_once: true tags: - - generate_mapping + - generate_mapping diff --git a/roles/disaster_recovery/tasks/main.yml b/roles/disaster_recovery/tasks/main.yml index ccd036382..a95f0321c 100644 --- a/roles/disaster_recovery/tasks/main.yml +++ b/roles/disaster_recovery/tasks/main.yml @@ -2,32 +2,32 @@ - name: Start to unregister entities 
include_tasks: unregister_entities.yml tags: - - fail_back + - fail_back - name: Clean engine setup include_tasks: clean_engine.yml tags: - - fail_back - - clean_engine + - fail_back + - clean_engine - name: Failback Replication Sync pause pause: - prompt: "[Failback Replication Sync] Please press ENTER once the destination storage domains are ready to be used for the destination setup" + prompt: "[Failback Replication Sync] Please press ENTER once the destination storage domains are ready to be used for the destination setup" tags: - - fail_back + - fail_back - name: Recover target engine include_tasks: recover_engine.yml tags: - - fail_over - - fail_back + - fail_over + - fail_back - name: Run the appropriate unregistered entities include_tasks: run_unregistered_entities.yml tags: - - fail_back + - fail_back - name: Genereate mapping var file include_tasks: generate_mapping.yml tags: - - generate_mapping + - generate_mapping diff --git a/roles/disaster_recovery/tasks/recover/add_domain.yml b/roles/disaster_recovery/tasks/recover/add_domain.yml index 31c2449ff..d10ca0bed 100644 --- a/roles/disaster_recovery/tasks/recover/add_domain.yml +++ b/roles/disaster_recovery/tasks/recover/add_domain.yml @@ -1,55 +1,55 @@ - block: - name: Fetch available hosts in data center ovirt_host_info: - pattern: "status=up and datacenter={{ storage['dr_' + dr_target_host + '_dc_name'] }}" - auth: "{{ ovirt_auth }}" + pattern: "status=up and datacenter={{ storage['dr_' + dr_target_host + '_dc_name'] }}" + auth: "{{ ovirt_auth }}" register: host_info - block: - - name: "Check for available hosts" - fail: msg="No hosts available" - when: host_info.ovirt_hosts.0 is undefined + - name: "Check for available hosts" + fail: msg="No hosts available" + when: host_info.ovirt_hosts.0 is undefined - block: - - name: Add storage domain if NFS - include_tasks: add_nfs_domain.yml - with_items: - - "{{ storage }}" - when: "storage.dr_domain_type == 'nfs'" - loop_control: + - name: Add storage domain if NFS + include_tasks: add_nfs_domain.yml + with_items: + - "{{ storage }}" + when: "storage.dr_domain_type == 'nfs'" + loop_control: loop_var: nfs_storage - - name: Add storage domain if Gluster - include_tasks: add_glusterfs_domain.yml - with_items: - - "{{ storage }}" - when: "storage.dr_domain_type == 'glusterfs'" - loop_control: + - name: Add storage domain if Gluster + include_tasks: add_glusterfs_domain.yml + with_items: + - "{{ storage }}" + when: "storage.dr_domain_type == 'glusterfs'" + loop_control: loop_var: gluster_storage - - name: Add storage domain if Posix - include_tasks: add_posixfs_domain.yml - with_items: - - "{{ storage }}" - when: "storage.dr_domain_type == 'posixfs'" - loop_control: + - name: Add storage domain if Posix + include_tasks: add_posixfs_domain.yml + with_items: + - "{{ storage }}" + when: "storage.dr_domain_type == 'posixfs'" + loop_control: loop_var: posix_storage - - name: Add storage domain is scsi - include_tasks: add_iscsi_domain.yml - with_items: - - "{{ storage }}" - when: "storage.dr_domain_type == 'iscsi'" - loop_control: + - name: Add storage domain is scsi + include_tasks: add_iscsi_domain.yml + with_items: + - "{{ storage }}" + when: "storage.dr_domain_type == 'iscsi'" + loop_control: loop_var: iscsi_storage - - name: Add storage domain if fcp - include_tasks: add_fcp_domain.yml - with_items: - - "{{ storage }}" - when: "storage.dr_domain_type == 'fcp'" - loop_control: + - name: Add storage domain if fcp + include_tasks: add_fcp_domain.yml + with_items: + - "{{ storage }}" + when: 
"storage.dr_domain_type == 'fcp'" + loop_control: loop_var: fcp_storage when: host_info.ovirt_hosts.0 is defined ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml b/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml index dc119204d..c58d52107 100644 --- a/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml +++ b/roles/disaster_recovery/tasks/recover/add_fcp_domain.yml @@ -1,18 +1,18 @@ - block: - name: Import FCP storage domain ovirt_storage_domain: - state: imported - id: "{{ fcp_storage['dr_domain_id'] }}" - name: "{{ fcp_storage['dr_' + dr_target_host + '_name']|default('') }}" - critical_space_action_blocker: "{{ fcp_storage['dr_critical_space_action_blocker'] }}" - warning_low_space: "{{ fcp_storage['dr_warning_low_space'] }}" - discard_after_delete: "{{ fcp_storage['dr_discard_after_delete'] }}" - wipe_after_delete: "{{ fcp_storage['dr_wipe_after_delete'] }}" - backup: "{{ fcp_storage['dr_backup'] }}" - host: "{{ host_info.ovirt_hosts[0].name }}" - auth: "{{ ovirt_auth }}" - data_center: "{{ fcp_storage['dr_' + dr_target_host + '_dc_name'] }}" - fcp: {} + state: imported + id: "{{ fcp_storage['dr_domain_id'] }}" + name: "{{ fcp_storage['dr_' + dr_target_host + '_name']|default('') }}" + critical_space_action_blocker: "{{ fcp_storage['dr_critical_space_action_blocker'] }}" + warning_low_space: "{{ fcp_storage['dr_warning_low_space'] }}" + discard_after_delete: "{{ fcp_storage['dr_discard_after_delete'] }}" + wipe_after_delete: "{{ fcp_storage['dr_wipe_after_delete'] }}" + backup: "{{ fcp_storage['dr_backup'] }}" + host: "{{ host_info.ovirt_hosts[0].name }}" + auth: "{{ ovirt_auth }}" + data_center: "{{ fcp_storage['dr_' + dr_target_host + '_dc_name'] }}" + fcp: {} register: result - name: Log append to succeed_storage_domains @@ -26,5 +26,5 @@ when: result is failed ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml b/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml index 6960348ba..bbe867af6 100644 --- a/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml +++ b/roles/disaster_recovery/tasks/recover/add_glusterfs_domain.yml @@ -1,18 +1,18 @@ - block: - name: Add Gluster storage domain ovirt_storage_domain: - name: "{{ gluster_storage['dr_' + dr_target_host + '_name'] }}" - critical_space_action_blocker: "{{ gluster_storage['dr_critical_space_action_blocker'] }}" - domain_function: "{{ gluster_storage['dr_storage_domain_type'] }}" - warning_low_space: "{{ gluster_storage['dr_warning_low_space'] }}" - wipe_after_delete: "{{ gluster_storage['dr_wipe_after_delete'] }}" - backup: "{{ gluster_storage['dr_backup'] }}" - host: "{{ host_info.ovirt_hosts[0].name }}" - data_center: "{{ gluster_storage['dr_' + dr_target_host + '_dc_name'] }}" - auth: "{{ ovirt_auth }}" - glusterfs: - path: "{{ gluster_storage['dr_' + dr_target_host + '_path'] }}" - address: "{{ gluster_storage['dr_' + dr_target_host + '_address'] }}" + name: "{{ gluster_storage['dr_' + dr_target_host + '_name'] }}" + critical_space_action_blocker: "{{ gluster_storage['dr_critical_space_action_blocker'] }}" + domain_function: "{{ gluster_storage['dr_storage_domain_type'] }}" + warning_low_space: "{{ gluster_storage['dr_warning_low_space'] }}" + wipe_after_delete: "{{ gluster_storage['dr_wipe_after_delete'] }}" + backup: "{{ 
gluster_storage['dr_backup'] }}" + host: "{{ host_info.ovirt_hosts[0].name }}" + data_center: "{{ gluster_storage['dr_' + dr_target_host + '_dc_name'] }}" + auth: "{{ ovirt_auth }}" + glusterfs: + path: "{{ gluster_storage['dr_' + dr_target_host + '_path'] }}" + address: "{{ gluster_storage['dr_' + dr_target_host + '_address'] }}" register: result - name: Log append to succeed_storage_domains @@ -26,5 +26,5 @@ when: result is failed ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml b/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml index a0b3bd779..cab9d3ddb 100644 --- a/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml +++ b/roles/disaster_recovery/tasks/recover/add_iscsi_domain.yml @@ -2,25 +2,25 @@ # TODO: Add support for connect to multiple targets with the same LUN. # Every connect should be done using a different ip - block: - - name: Login to iSCSI targets - ovirt_host: + - name: Login to iSCSI targets + ovirt_host: state: iscsilogin name: "{{ host_info.ovirt_hosts[0].name }}" auth: "{{ ovirt_auth }}" iscsi: - username: "{{ iscsi_storage['dr_' + dr_target_host + '_username']|default('') }}" - password: "{{ iscsi_storage['dr_' + dr_target_host + '_password']|default('') }}" - address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}" - target: "{{ dr_target }}" - # Make port to be optional - port: "{{ iscsi_storage['dr_' + dr_target_host + '_port']|default('3260'|int, true) }}" - with_items: - - "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}" - loop_control: - loop_var: dr_target + username: "{{ iscsi_storage['dr_' + dr_target_host + '_username']|default('') }}" + password: "{{ iscsi_storage['dr_' + dr_target_host + '_password']|default('') }}" + address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}" + target: "{{ dr_target }}" + # Make port to be optional + port: "{{ iscsi_storage['dr_' + dr_target_host + '_port']|default('3260'|int, true) }}" + with_items: + - "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}" + loop_control: + loop_var: dr_target - - name: Import iSCSI storage domain - ovirt_storage_domain: + - name: Import iSCSI storage domain + ovirt_storage_domain: state: imported id: "{{ iscsi_storage['dr_domain_id'] }}" name: "{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}" @@ -34,25 +34,25 @@ backup: "{{ iscsi_storage['dr_backup'] }}" # TODO: For import iSCSI there is no need for the iscsi parameters iscsi: - username: "{{ iscsi_storage['dr_' + dr_target_host + '_username']|default('') }}" - password: "{{ iscsi_storage['dr_' + dr_target_host + '_password']|default('') }}" - address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}" - # We use target since state imported in ovirt_storage_domain.py creates a storage domain - # which calls login, therfore we must have a target althout the targets were already connected before. - # Therefore passing the first target in the list as a transient target. 
- target: "{{ dr_target }}" - with_items: - - "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}" - loop_control: - loop_var: dr_target - - name: Log append to succeed_storage_domains - set_fact: - succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]" + username: "{{ iscsi_storage['dr_' + dr_target_host + '_username']|default('') }}" + password: "{{ iscsi_storage['dr_' + dr_target_host + '_password']|default('') }}" + address: "{{ iscsi_storage['dr_' + dr_target_host + '_address'] }}" + # We use target since state imported in ovirt_storage_domain.py creates a storage domain + # which calls login, therefore we must have a target although the targets were already connected before. + # Therefore passing the first target in the list as a transient target. + target: "{{ dr_target }}" + with_items: + - "{{ iscsi_storage['dr_' + dr_target_host + '_target'] }}" + loop_control: + loop_var: dr_target + - name: Log append to succeed_storage_domains + set_fact: + succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]" rescue: - name: Log append to failed_storage_domains set_fact: failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ iscsi_storage['dr_' + dr_target_host + '_name']|default('') }}\" ]" ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml b/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml index 8c4fa2aca..7732c8036 100644 --- a/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml +++ b/roles/disaster_recovery/tasks/recover/add_nfs_domain.yml @@ -1,6 +1,6 @@ - block: - - name: Add NFS storage domain - ovirt_storage_domain: + - name: Add NFS storage domain + ovirt_storage_domain: name: "{{ nfs_storage['dr_' + dr_target_host + '_name'] }}" domain_function: "{{ nfs_storage['dr_storage_domain_type'] }}" critical_space_action_blocker: "{{ nfs_storage['dr_critical_space_action_blocker'] }}" @@ -10,17 +10,17 @@ data_center: "{{ nfs_storage['dr_' + dr_target_host + '_dc_name'] }}" auth: "{{ ovirt_auth }}" nfs: - path: "{{ nfs_storage['dr_' + dr_target_host + '_path'] }}" - address: "{{ nfs_storage['dr_' + dr_target_host + '_address'] }}" - - name: Log append to successful storage domains - set_fact: - succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ nfs_storage['dr_' + dr_target_host + '_name'] }}\" ]" + path: "{{ nfs_storage['dr_' + dr_target_host + '_path'] }}" + address: "{{ nfs_storage['dr_' + dr_target_host + '_address'] }}" + - name: Log append to successful storage domains + set_fact: + succeed_storage_domains: "{{ succeed_storage_domains }} + [ \"{{ nfs_storage['dr_' + dr_target_host + '_name'] }}\" ]" rescue: - - name: Log append to failed storage domains - set_fact: - failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ nfs_storage['dr_' + dr_target_host + '_name'] }}\" ]" + - name: Log append to failed storage domains + set_fact: + failed_storage_domains: "{{ failed_storage_domains }} + [ \"{{ nfs_storage['dr_' + dr_target_host + '_name'] }}\" ]" ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml b/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml index e48c61ff5..5f6f69860 100644 --- 
a/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml +++ b/roles/disaster_recovery/tasks/recover/add_posixfs_domain.yml @@ -1,19 +1,19 @@ - block: - name: Add posix storage domain ovirt_storage_domain: - name: "{{ posix_storage['dr_' + dr_target_host + '_name'] }}" - critical_space_action_blocker: "{{ posix_storage['dr_critical_space_action_blocker'] }}" - domain_function: "{{ posix_storage['dr_storage_domain_type'] }}" - warning_low_space: "{{ posix_storage['dr_warning_low_space'] }}" - wipe_after_delete: "{{ posix_storage['dr_wipe_after_delete'] }}" - backup: "{{ posix_storage['dr_backup'] }}" - host: "{{ host_info.ovirt_hosts[0].name }}" - data_center: "{{ posix_storage['dr_' + dr_target_host + '_dc_name'] }}" - auth: "{{ ovirt_auth }}" - posixfs: - vfs_type: "{{ posix_storage['dr_' + dr_target_host + '_vfs_type'] }}" - path: "{{ posix_storage['dr_' + dr_target_host + '_path'] }}" - address: "{{ posix_storage['dr_' + dr_target_host + '_address'] }}" + name: "{{ posix_storage['dr_' + dr_target_host + '_name'] }}" + critical_space_action_blocker: "{{ posix_storage['dr_critical_space_action_blocker'] }}" + domain_function: "{{ posix_storage['dr_storage_domain_type'] }}" + warning_low_space: "{{ posix_storage['dr_warning_low_space'] }}" + wipe_after_delete: "{{ posix_storage['dr_wipe_after_delete'] }}" + backup: "{{ posix_storage['dr_backup'] }}" + host: "{{ host_info.ovirt_hosts[0].name }}" + data_center: "{{ posix_storage['dr_' + dr_target_host + '_dc_name'] }}" + auth: "{{ ovirt_auth }}" + posixfs: + vfs_type: "{{ posix_storage['dr_' + dr_target_host + '_vfs_type'] }}" + path: "{{ posix_storage['dr_' + dr_target_host + '_path'] }}" + address: "{{ posix_storage['dr_' + dr_target_host + '_address'] }}" register: result - name: Log append to succeed_storage_domains @@ -27,5 +27,5 @@ when: result is failed ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/print_info.yml b/roles/disaster_recovery/tasks/recover/print_info.yml index 1dd6f83f9..485c7bb21 100644 --- a/roles/disaster_recovery/tasks/recover/print_info.yml +++ b/roles/disaster_recovery/tasks/recover/print_info.yml @@ -12,5 +12,5 @@ - name: Print report file to stdout debug: msg="{{ content.stdout_lines | quote }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/register_template.yml b/roles/disaster_recovery/tasks/recover/register_template.yml index f4d719b4f..b974837a9 100644 --- a/roles/disaster_recovery/tasks/recover/register_template.yml +++ b/roles/disaster_recovery/tasks/recover/register_template.yml @@ -1,15 +1,15 @@ - block: - name: Register unregistered Template ovirt_template: - state: registered - storage_domain: "{{ storage.name }}" - id: "{{ unreg_template.id }}" - allow_partial_import: "{{ dr_partial_import }}" - auth: "{{ ovirt_auth }}" - cluster_mappings: "{{ dr_cluster_map }}" - domain_mappings: "{{ dr_domain_map }}" - vnic_profile_mappings: "{{ dr_network_map }}" - role_mappings: "{{ dr_role_map }}" + state: registered + storage_domain: "{{ storage.name }}" + id: "{{ unreg_template.id }}" + allow_partial_import: "{{ dr_partial_import }}" + auth: "{{ ovirt_auth }}" + cluster_mappings: "{{ dr_cluster_map }}" + domain_mappings: "{{ dr_domain_map }}" + vnic_profile_mappings: "{{ dr_network_map }}" + role_mappings: "{{ dr_role_map }}" register: template_register_result - name: Log append failed Template to issues failed_template_names @@ 
-23,5 +23,5 @@ when: template_register_result is succeeded ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/register_templates.yml b/roles/disaster_recovery/tasks/recover/register_templates.yml index ef9fde286..c7a12d6ee 100644 --- a/roles/disaster_recovery/tasks/recover/register_templates.yml +++ b/roles/disaster_recovery/tasks/recover/register_templates.yml @@ -1,10 +1,10 @@ - block: - name: Fetch unregistered Templates from storage domain ovirt_storage_template_info: - nested_attributes: "id" - unregistered: true - storage_domain: "{{ storage.name }}" - auth: "{{ ovirt_auth }}" + nested_attributes: "id" + unregistered: true + storage_domain: "{{ storage.name }}" + auth: "{{ ovirt_auth }}" register: storage_template_info - name: Register template @@ -14,8 +14,8 @@ with_items: "{{ storage_template_info.ovirt_storage_templates }}" # We use loop_control so storage.name will not be overridden by the nested loop. loop_control: - loop_var: unreg_template + loop_var: unreg_template ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/register_vm.yml b/roles/disaster_recovery/tasks/recover/register_vm.yml index a694c063c..0f0fb2e41 100644 --- a/roles/disaster_recovery/tasks/recover/register_vm.yml +++ b/roles/disaster_recovery/tasks/recover/register_vm.yml @@ -1,6 +1,6 @@ - block: - - name: Register VMs - ovirt_vm: + - name: Register VMs + ovirt_vm: state: registered storage_domain: "{{ storage.name }}" id: "{{ unreg_vm.id }}" @@ -14,18 +14,18 @@ vnic_profile_mappings: "{{ dr_network_map }}" lun_mappings: "{{ dr_lun_map }}" reassign_bad_macs: "{{ dr_reset_mac_pool }}" - register: vm_register_result + register: vm_register_result - - name: Log append failed VM to failed_vm_names - set_fact: - failed_vm_names: "{{ failed_vm_names }} + [ '{{ unreg_vm.name }}' ]" - when: vm_register_result is failed + - name: Log append failed VM to failed_vm_names + set_fact: + failed_vm_names: "{{ failed_vm_names }} + [ '{{ unreg_vm.name }}' ]" + when: vm_register_result is failed - - name: Log append succeed_vm_names - set_fact: - succeed_vm_names: "{{ succeed_vm_names }} + [ '{{ unreg_vm.name }}' ]" - when: vm_register_result is succeeded + - name: Log append succeed_vm_names + set_fact: + succeed_vm_names: "{{ succeed_vm_names }} + [ '{{ unreg_vm.name }}' ]" + when: vm_register_result is succeeded ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/register_vms.yml b/roles/disaster_recovery/tasks/recover/register_vms.yml index 23951237b..c46fdae23 100644 --- a/roles/disaster_recovery/tasks/recover/register_vms.yml +++ b/roles/disaster_recovery/tasks/recover/register_vms.yml @@ -1,15 +1,15 @@ - block: - name: Fetch unregistered VMs from storage domain ovirt_storage_vm_info: - nested_attributes: "id" - unregistered: true - storage_domain: "{{ storage.name }}" - auth: "{{ ovirt_auth }}" + nested_attributes: "id" + unregistered: true + storage_domain: "{{ storage.name }}" + auth: "{{ ovirt_auth }}" register: storage_vm_info - name: Set unregistered VMs set_fact: - unreg_vms: "{{ unreg_vms|default([]) + storage_vm_info.ovirt_storage_vms }}" + unreg_vms: "{{ unreg_vms|default([]) + storage_vm_info.ovirt_storage_vms }}" # TODO: We should filter out VMs which already exist in the setup (diskless 
VMs) - name: Register VM @@ -17,8 +17,8 @@ with_items: "{{ storage_vm_info.ovirt_storage_vms }}" # We use loop_control so storage.name will not be overridden by the nested loop. loop_control: - loop_var: unreg_vm + loop_var: unreg_vm ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover/run_vms.yml b/roles/disaster_recovery/tasks/recover/run_vms.yml index 766d7c615..96ade65e5 100644 --- a/roles/disaster_recovery/tasks/recover/run_vms.yml +++ b/roles/disaster_recovery/tasks/recover/run_vms.yml @@ -1,10 +1,10 @@ - block: - name: Run VMs ovirt_vm: - state: running - name: "{{ vms.name }}" - wait: false - auth: "{{ ovirt_auth }}" + state: running + name: "{{ vms.name }}" + wait: false + auth: "{{ ovirt_auth }}" register: result - name: Log append succeed_to_run_vms set_fact: @@ -17,5 +17,5 @@ when: result is failed ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back diff --git a/roles/disaster_recovery/tasks/recover_engine.yml b/roles/disaster_recovery/tasks/recover_engine.yml index 851cde040..7f4fb278c 100644 --- a/roles/disaster_recovery/tasks/recover_engine.yml +++ b/roles/disaster_recovery/tasks/recover_engine.yml @@ -1,34 +1,34 @@ - block: - name: Obtain SSO token ovirt_auth: - url: "{{ vars['dr_sites_' + dr_target_host + '_url'] }}" - username: "{{ vars['dr_sites_' + dr_target_host + '_username'] }}" - password: "{{ vars['dr_sites_' + dr_target_host + '_password'] }}" - ca_file: "{{ vars['dr_sites_' + dr_target_host + '_ca_file'] }}" + url: "{{ vars['dr_sites_' + dr_target_host + '_url'] }}" + username: "{{ vars['dr_sites_' + dr_target_host + '_username'] }}" + password: "{{ vars['dr_sites_' + dr_target_host + '_password'] }}" + ca_file: "{{ vars['dr_sites_' + dr_target_host + '_ca_file'] }}" ignore_errors: false - name: Delete previous report log file: - path: "/tmp/{{ dr_report_file }}" - state: absent + path: "/tmp/{{ dr_report_file }}" + state: absent ignore_errors: true - name: Create report file file: - path: "/tmp/{{ dr_report_file }}" - state: touch - mode: 0644 + path: "/tmp/{{ dr_report_file }}" + state: touch + mode: 0644 - name: Init entity status list set_fact: - failed_vm_names: [] - succeed_vm_names: [] - failed_template_names: [] - succeed_template_names: [] - failed_to_run_vms: [] - succeed_to_run_vms: [] - succeed_storage_domains: [] - failed_storage_domains: [] + failed_vm_names: [] + succeed_vm_names: [] + failed_template_names: [] + succeed_template_names: [] + failed_to_run_vms: [] + succeed_to_run_vms: [] + succeed_storage_domains: [] + failed_storage_domains: [] # TODO: We should add a validation task that will validate whether # all the hosts in the other site (primary or secondary) could not be connected @@ -43,121 +43,121 @@ - name: Add master storage domain to the setup include_tasks: recover/add_domain.yml vars: - storage: "{{ item }}" + storage: "{{ item }}" with_items: - - "{{ dr_import_storages }}" + - "{{ dr_import_storages }}" when: item['dr_' + dr_target_host + '_master_domain'] - name: Add non master storage domains to the setup include_tasks: recover/add_domain.yml vars: - storage: "{{ item }}" + storage: "{{ item }}" with_items: - - "{{ dr_import_storages }}" + - "{{ dr_import_storages }}" when: not item['dr_' + dr_target_host + '_master_domain'] # Get all the active storage domains in the setup to register # all the templates/VMs/Disks - name: Fetching active storage domains 
ovirt_storage_domain_info: - pattern: "status=active" - auth: "{{ ovirt_auth }}" + pattern: "status=active" + auth: "{{ ovirt_auth }}" register: storage_domain_info - name: Set initial Maps set_fact: - dr_cluster_map: "{{ [] }}" - dr_affinity_group_map: "{{ [] }}" - dr_affinity_label_map: "{{ [] }}" - dr_domain_map: "{{ [] }}" - dr_role_map: "{{ [] }}" - dr_lun_map: "{{ [] }}" - dr_network_map: "{{ [] }}" + dr_cluster_map: "{{ [] }}" + dr_affinity_group_map: "{{ [] }}" + dr_affinity_label_map: "{{ [] }}" + dr_domain_map: "{{ [] }}" + dr_role_map: "{{ [] }}" + dr_lun_map: "{{ [] }}" + dr_network_map: "{{ [] }}" - name: Set Cluster Map set_fact: - dr_cluster_map: "{{ dr_cluster_map + [ - { - 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true), - 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true) - } - ] }}" + dr_cluster_map: "{{ dr_cluster_map + [ + { + 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true), + 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true) + } + ] }}" with_items: "{{ dr_cluster_mappings }}" when: dr_cluster_mappings is not none - name: Set Affinity Group Map set_fact: - dr_affinity_group_map: "{{ dr_affinity_group_map + [ - { - 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true), - 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true) - } - ] }}" + dr_affinity_group_map: "{{ dr_affinity_group_map + [ + { + 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true), + 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true) + } + ] }}" with_items: "{{ dr_affinity_group_mappings }}" when: dr_affinity_group_mappings is not none - name: Set Network Map set_fact: - dr_network_map: "{{ dr_network_map + [ - { - 'source_network_name': item[dr_source_map + '_network_name'] | default('EMPTY_ELEMENT', true), - 'source_profile_name': item[dr_source_map + '_profile_name'] | default('EMPTY_ELEMENT', true), - 'target_network_dc': item[dr_target_host + '_network_dc'] | default('EMPTY_ELEMENT', true), - 'target_profile_id': item[dr_target_host + '_profile_id'] | default('00000000-0000-0000-0000-000000000000', true) - } - ] }}" + dr_network_map: "{{ dr_network_map + [ + { + 'source_network_name': item[dr_source_map + '_network_name'] | default('EMPTY_ELEMENT', true), + 'source_profile_name': item[dr_source_map + '_profile_name'] | default('EMPTY_ELEMENT', true), + 'target_network_dc': item[dr_target_host + '_network_dc'] | default('EMPTY_ELEMENT', true), + 'target_profile_id': item[dr_target_host + '_profile_id'] | default('00000000-0000-0000-0000-000000000000', true) + } + ] }}" with_items: "{{ dr_network_mappings }}" when: dr_network_mappings is not none - name: Set Affinity Label Map set_fact: - dr_affinity_label_map: "{{ dr_affinity_label_map + [ - { - 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true), - 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true) - } - ] }}" + dr_affinity_label_map: "{{ dr_affinity_label_map + [ + { + 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true), + 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true) + } + ] }}" with_items: "{{ dr_affinity_label_mappings }}" when: dr_affinity_label_mappings is not none - name: Set aaa extensions Map set_fact: - dr_domain_map: "{{ dr_domain_map + [ - { - 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true), - 
'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true) - } - ] }}" + dr_domain_map: "{{ dr_domain_map + [ + { + 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true), + 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true) + } + ] }}" with_items: "{{ dr_domain_mappings }}" when: dr_domain_mappings is not none - name: Set Role Map set_fact: - dr_role_map: "{{ dr_role_map + [ - { - 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true), - 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true) - } - ] }}" + dr_role_map: "{{ dr_role_map + [ + { + 'source_name': item[dr_source_map + '_name'] | default('EMPTY_ELEMENT', true), + 'dest_name': item[dr_target_host + '_name'] | default('EMPTY_ELEMENT', true) + } + ] }}" with_items: "{{ dr_role_mappings }}" when: dr_role_mappings is not none - name: Set Lun Map set_fact: - dr_lun_map: "{{ dr_lun_map + [ - { - 'source_logical_unit_id': item[dr_source_map + '_logical_unit_id'] | default('EMPTY_ELEMENT', true), - 'source_storage_type': item[dr_source_map + '_storage_type'] | default('EMPTY_ELEMENT', true), - 'dest_logical_unit_id': item[dr_target_host + '_logical_unit_id'] | default('EMPTY_ELEMENT', true), - 'dest_storage_type': item[dr_target_host + '_storage_type'] | default('EMPTY_ELEMENT', true), - 'dest_logical_unit_address': item[dr_target_host + '_logical_unit_address'] | default('EMPTY_ELEMENT', true), - 'dest_logical_unit_port': item[dr_target_host + '_logical_unit_port'] | default('3260'|int, true), - 'dest_logical_unit_portal': item[dr_target_host + '_logical_unit_portal'] | default('1', true), - 'dest_logical_unit_username': item[dr_target_host + '_logical_unit_username'] | default('', true), - 'dest_logical_unit_password': item[dr_target_host + '_logical_unit_password'] | default('', true), - 'dest_logical_unit_target': item[dr_target_host + '_logical_unit_target'] | default('[]', true) - } - ] }}" + dr_lun_map: "{{ dr_lun_map + [ + { + 'source_logical_unit_id': item[dr_source_map + '_logical_unit_id'] | default('EMPTY_ELEMENT', true), + 'source_storage_type': item[dr_source_map + '_storage_type'] | default('EMPTY_ELEMENT', true), + 'dest_logical_unit_id': item[dr_target_host + '_logical_unit_id'] | default('EMPTY_ELEMENT', true), + 'dest_storage_type': item[dr_target_host + '_storage_type'] | default('EMPTY_ELEMENT', true), + 'dest_logical_unit_address': item[dr_target_host + '_logical_unit_address'] | default('EMPTY_ELEMENT', true), + 'dest_logical_unit_port': item[dr_target_host + '_logical_unit_port'] | default('3260'|int, true), + 'dest_logical_unit_portal': item[dr_target_host + '_logical_unit_portal'] | default('1', true), + 'dest_logical_unit_username': item[dr_target_host + '_logical_unit_username'] | default('', true), + 'dest_logical_unit_password': item[dr_target_host + '_logical_unit_password'] | default('', true), + 'dest_logical_unit_target': item[dr_target_host + '_logical_unit_target'] | default('[]', true) + } + ] }}" with_items: "{{ dr_lun_mappings }}" when: dr_lun_mappings is not none @@ -168,24 +168,24 @@ - name: Register templates include_tasks: recover/register_templates.yml vars: - storage: "{{ item }}" + storage: "{{ item }}" with_items: - - "{{ storage_domain_info.ovirt_storage_domains }}" + - "{{ storage_domain_info.ovirt_storage_domains }}" # Register all the unregistered VMs after we registered # all the templates from the active storage domains fetched before. 
- name: Register VMs include_tasks: recover/register_vms.yml vars: - storage: "{{ item }}" + storage: "{{ item }}" with_items: - - "{{ storage_domain_info.ovirt_storage_domains }}" + - "{{ storage_domain_info.ovirt_storage_domains }}" # Run all the high availability VMs. - name: Run highly available VMs include_tasks: recover/run_vms.yml vars: - vms: "{{ item }}" + vms: "{{ item }}" with_items: "{{ unreg_vms }}" when: item.status == 'up' and item.high_availability.enabled | bool @@ -193,19 +193,19 @@ - name: Run the rest of the VMs include_tasks: recover/run_vms.yml vars: - vms: "{{ item }}" + vms: "{{ item }}" with_items: "{{ unreg_vms }}" when: item.status == 'up' and not item.high_availability.enabled | bool # Default value is set in role defaults ignore_errors: "{{ dr_ignore_error_recover }}" tags: - - fail_over - - fail_back + - fail_over + - fail_back always: - - name: Print operation summary - include_tasks: recover/print_info.yml - - name: Revoke the SSO token - ovirt_auth: - state: absent - ovirt_auth: "{{ ovirt_auth }}" + - name: Print operation summary + include_tasks: recover/print_info.yml + - name: Revoke the SSO token + ovirt_auth: + state: absent + ovirt_auth: "{{ ovirt_auth }}" diff --git a/roles/disaster_recovery/tasks/run_unregistered_entities.yml b/roles/disaster_recovery/tasks/run_unregistered_entities.yml index 31b373af7..384f65362 100644 --- a/roles/disaster_recovery/tasks/run_unregistered_entities.yml +++ b/roles/disaster_recovery/tasks/run_unregistered_entities.yml @@ -1,30 +1,30 @@ - block: - name: Obtain SSO token ovirt_auth: - url: "{{ vars['dr_sites_' + dr_target_host + '_url'] }}" - username: "{{ vars['dr_sites_' + dr_target_host + '_username'] }}" - password: "{{ vars['dr_sites_' + dr_target_host + '_password'] }}" - ca_file: "{{ vars['dr_sites_' + dr_target_host + '_ca_file'] }}" + url: "{{ vars['dr_sites_' + dr_target_host + '_url'] }}" + username: "{{ vars['dr_sites_' + dr_target_host + '_username'] }}" + password: "{{ vars['dr_sites_' + dr_target_host + '_password'] }}" + ca_file: "{{ vars['dr_sites_' + dr_target_host + '_ca_file'] }}" - name: Read file that contains running VMs from the previous setup set_fact: running_vms_fail_back="{{ lookup('file', dr_running_vms) }}" - name: Remove dr_running_vms file after being used file: - path: "{{ dr_running_vms }}" - state: absent + path: "{{ dr_running_vms }}" + state: absent - name: Run all the high availability VMs include_tasks: recover/run_vms.yml vars: - vms: "{{ item }}" + vms: "{{ item }}" with_items: "{{ running_vms_fail_back }}" when: item.high_availability.enabled | bool - name: Run all the entire running VMs include_tasks: recover/run_vms.yml vars: - vms: "{{ item }}" + vms: "{{ item }}" with_items: "{{ running_vms_fail_back }}" when: not item.high_availability.enabled | bool @@ -32,9 +32,9 @@ ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back + - fail_back always: - name: Revoke the SSO token ovirt_auth: - state: absent - ovirt_auth: "{{ ovirt_auth }}" + state: absent + ovirt_auth: "{{ ovirt_auth }}" diff --git a/roles/disaster_recovery/tasks/unregister_entities.yml b/roles/disaster_recovery/tasks/unregister_entities.yml index b3093cf1d..55f9e98de 100644 --- a/roles/disaster_recovery/tasks/unregister_entities.yml +++ b/roles/disaster_recovery/tasks/unregister_entities.yml @@ -1,21 +1,21 @@ - block: - name: Obtain SSO token ovirt_auth: - url: "{{ vars['dr_sites_' + dr_source_map + '_url'] }}" - username: "{{ vars['dr_sites_' + dr_source_map + '_username'] }}" - password: "{{ 
vars['dr_sites_' + dr_source_map + '_password'] }}" - ca_file: "{{ vars['dr_sites_' + dr_source_map + '_ca_file'] }}" + url: "{{ vars['dr_sites_' + dr_source_map + '_url'] }}" + username: "{{ vars['dr_sites_' + dr_source_map + '_username'] }}" + password: "{{ vars['dr_sites_' + dr_source_map + '_password'] }}" + ca_file: "{{ vars['dr_sites_' + dr_source_map + '_ca_file'] }}" # Get all the running VMs and shut them down - name: Fetch running VMs in the setup ovirt_vm_info: - pattern: status = up - auth: "{{ ovirt_auth }}" + pattern: status = up + auth: "{{ ovirt_auth }}" register: vm_info - name: Check whether file with running VMs info exists stat: - path: '{{ dr_running_vms }}' + path: '{{ dr_running_vms }}' register: stat_result - name: Fetch all data of running VMs from file, if exists. @@ -25,17 +25,17 @@ - name: Init list property for running_vms set_fact: - res_ovirt_vms="[]" + res_ovirt_vms="[]" - name: Map all running vms in fact set_fact: - res_ovirt_vms: "{{ res_ovirt_vms + [ - { - 'id': item.id, - 'name': item.name, - 'high_availability': item.high_availability - } - ] }}" + res_ovirt_vms: "{{ res_ovirt_vms + [ + { + 'id': item.id, + 'name': item.name, + 'high_availability': item.high_availability + } + ] }}" with_items: "{{ vm_info.ovirt_vms }}" when: item.id is defined @@ -52,9 +52,9 @@ ignore_errors: "{{ dr_ignore_error_clean }}" tags: - - fail_back + - fail_back always: - name: Revoke the SSO token ovirt_auth: - state: absent - ovirt_auth: "{{ ovirt_auth }}" + state: absent + ovirt_auth: "{{ ovirt_auth }}"
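
A minimal sketch of the kind of change applied throughout this patch, based on the remove_disks.yml hunk above; the "before" spacing is inferred from the yamllint message quoted in the commit ("expected 8 but found 10"), so treat it as illustrative rather than an exact copy of the old file:

# Before: module parameters indented 10 spaces, flagged by yamllint as
# "wrong indentation: expected 8 but found 10 (yaml[indentation])"
- block:
    - name: Remove disk
      ovirt_disk:
          state: absent
          id: "{{ disk.id }}"

# After: parameters indented 8 spaces (two spaces relative to the module key),
# which is what the yamllint indentation rule expects here
- block:
    - name: Remove disk
      ovirt_disk:
        state: absent
        id: "{{ disk.id }}"

To check the result locally, running yamllint or ansible-lint against the role (for example "yamllint roles/disaster_recovery" or "ansible-lint roles/disaster_recovery") should report any remaining yaml[indentation] violations; the exact invocation depends on the project's lint configuration.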