Diffstat (limited to 'playbooks')
33 files changed, 352 insertions, 89 deletions
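For context, the "Verify Requirements" play that this change moves into common/openshift-cluster/config.yml drives the openshift_health_check action from the openshift_health_checker role. A minimal standalone sketch of the same pattern follows; the play name and the two-check subset are illustrative, while the role name, action, host group, context value, and check names are taken from the diff below.

---
# Sketch only: exercises the openshift_health_check action used by the
# "Verify Requirements" play in this change, limited to two of its checks.
- name: Ad hoc install readiness checks
  hosts: oo_all_hosts
  roles:
  - openshift_health_checker
  vars:
  - r_openshift_health_checker_playbook_context: "install"
  post_tasks:
  - action: openshift_health_check
    args:
      checks:
      - disk_availability
      - memory_availability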
diff --git a/playbooks/adhoc/contiv/delete_contiv.yml b/playbooks/adhoc/contiv/delete_contiv.yml index 91948c72e..eec6c23a7 100644 --- a/playbooks/adhoc/contiv/delete_contiv.yml +++ b/playbooks/adhoc/contiv/delete_contiv.yml @@ -1,5 +1,5 @@ --- -- name: delete contiv +- name: Uninstall contiv hosts: all gather_facts: False tasks: diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 97d835eae..ddd2ecebd 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -103,7 +103,7 @@ - atomic-openshift-sdn-ovs - cockpit-bridge - cockpit-docker - - cockpit-shell + - cockpit-system - cockpit-ws - kubernetes-client - openshift @@ -317,6 +317,7 @@ - name: restart NetworkManager service: name=NetworkManager state=restarted + when: openshift_use_dnsmasq | default(true) | bool - hosts: masters become: yes @@ -346,7 +347,7 @@ - atomic-openshift-master - cockpit-bridge - cockpit-docker - - cockpit-shell + - cockpit-system - cockpit-ws - corosync - kubernetes-client diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-cfme/config.yml new file mode 100644 index 000000000..0e8e7a94d --- /dev/null +++ b/playbooks/byo/openshift-cfme/config.yml @@ -0,0 +1,8 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/evaluate_groups.yml + +- include: ../../common/openshift-cfme/config.yml diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..c8ed16859 --- /dev/null +++ b/playbooks/byo/openshift-cfme/uninstall.yml @@ -0,0 +1,6 @@ +--- +# - include: ../openshift-cluster/initialize_groups.yml +# tags: +# - always + +- include: ../../common/openshift-cfme/uninstall.yml diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index fd4a9eb26..acf5469bf 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -3,19 +3,6 @@ tags: - always -- name: Verify Requirements - hosts: OSEv3 - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: "install" - post_tasks: - - action: openshift_health_check - args: - checks: - - disk_availability - - memory_availability - - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml new file mode 100644 index 000000000..a9fc18958 --- /dev/null +++ b/playbooks/byo/openshift-cluster/service-catalog.yml @@ -0,0 +1,12 @@ +--- +# +# This playbook is a preview of upcoming changes for installing +# Hosted logging on. See inventory/byo/hosts.*.example for the +# currently supported method. 
+# +- include: initialize_groups.yml + +- include: ../../common/openshift-cluster/service_catalog.yml + vars: + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml index 8005a17a3..5bd5d64ab 100644 --- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml +++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml @@ -1,4 +1,6 @@ --- - include: ../initialize_groups.yml +- include: ../../../common/openshift-cluster/evaluate_groups.yml + - include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml new file mode 100644 index 000000000..143016159 --- /dev/null +++ b/playbooks/byo/openshift-etcd/migrate.yml @@ -0,0 +1,8 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-etcd/migrate.yml + tags: + - always diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml new file mode 100644 index 000000000..533a35d9e --- /dev/null +++ b/playbooks/common/openshift-cfme/config.yml @@ -0,0 +1,44 @@ +--- +# TODO: Make this work. The 'name' variable below is undefined +# presently because it's part of the cfme role. This play can't run +# until that's re-worked. +# +# - name: Pre-Pull manageiq-pods docker images +# hosts: nodes +# tasks: +# - name: Ensure the latest manageiq-pods docker image is pulling +# docker_image: +# name: "{{ openshift_cfme_container_image }}" +# # Fire-and-forget method, never timeout +# async: 99999999999 +# # F-a-f, never check on this. True 'background' task. +# poll: 0 + +- name: Configure Masters for CFME Bulk Image Imports + hosts: oo_masters_to_config + serial: 1 + tasks: + - name: Run master cfme tuning playbook + include_role: + name: openshift_cfme + tasks_from: tune_masters + +- name: Setup CFME + hosts: oo_first_master + vars: + r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}" + pre_tasks: + - name: Create a temporary place to evaluate the PV templates + command: mktemp -d /tmp/openshift-ansible-XXXXXXX + register: r_openshift_cfme_mktemp + changed_when: false + - name: Ensure the server template was read from disk + debug: + msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}" + + tasks: + - name: Run the CFME Setup Role + include_role: + name: openshift_cfme + vars: + template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}" diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-cfme/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-cfme/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-cfme/library
new file mode 120000
index 000000000..ba40d2f56
--- /dev/null
+++ b/playbooks/common/openshift-cfme/library
@@ -0,0 +1 @@
+../../../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-cfme/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-cfme/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..78b8e7668 --- /dev/null +++ b/playbooks/common/openshift-cfme/uninstall.yml @@ -0,0 +1,8 @@ +--- +- name: Uninstall CFME + hosts: masters + tasks: + - name: Run the CFME Uninstall Role Tasks + include_role: + name: openshift_cfme + tasks_from: uninstall diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml index 1bee460e8..c7766ff04 100644 --- a/playbooks/common/openshift-checks/health.yml +++ b/playbooks/common/openshift-checks/health.yml @@ -1,4 +1,9 @@ --- +# openshift_health_checker depends on openshift_version which now requires group eval. +- include: ../openshift-cluster/evaluate_groups.yml + tags: + - always + - name: Run OpenShift health checks hosts: OSEv3 roles: diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml index e01c6f38d..7ca9f7e8b 100644 --- a/playbooks/common/openshift-checks/pre-install.yml +++ b/playbooks/common/openshift-checks/pre-install.yml @@ -1,4 +1,9 @@ --- +# openshift_health_checker depends on openshift_version which now requires group eval. +- include: ../openshift-cluster/evaluate_groups.yml + tags: + - always + - hosts: OSEv3 name: run OpenShift pre-install checks roles: diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 1482b3a3f..7224ae712 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,4 +1,23 @@ --- +# TODO: refactor this into its own include +# and pass a variable for ctx +- name: Verify Requirements + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: "install" + post_tasks: + - action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - package_availability + - package_version + - docker_image_availability + - docker_storage + - include: initialize_oo_option_facts.yml tags: - always @@ -45,6 +64,12 @@ tags: - hosted +- include: service_catalog.yml + when: + - openshift_enable_service_catalog | default(false) | bool + tags: + - servicecatalog + - name: Re-enable excluder if it was previously enabled hosts: oo_masters_to_config:oo_nodes_to_config tags: diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index c28ce4c14..baca72c58 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -157,3 +157,12 @@ ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}" changed_when: no + + - name: Evaluate oo_etcd_to_migrate + add_host: + name: "{{ item }}" + groups: oo_etcd_to_migrate + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}" + changed_when: no diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml index c30889d64..51b196299 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml +++ 
b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml @@ -51,3 +51,13 @@ | oo_collect('openshift.common.hostname') | default(none, true) }}" openshift_certificates_redeploy: true + - role: lib_utils + post_tasks: + - yedit: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: servingInfo.namedCertificates + value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}" + when: + - ('named_certificates' in openshift.master) + - openshift.master.named_certificates | default([]) | length > 0 + - openshift_master_overwrite_named_certificates | default(false) | bool diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml new file mode 100644 index 000000000..c42e8781a --- /dev/null +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -0,0 +1,8 @@ +--- +- include: evaluate_groups.yml + +- name: Service Catalog + hosts: oo_first_master + roles: + - openshift_service_catalog + - ansible_service_broker diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index b7fd2c0c5..616ba04f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -3,12 +3,12 @@ hosts: oo_etcd_hosts_to_backup roles: - role: openshift_facts - - role: etcd_upgrade - r_etcd_upgrade_action: backup - r_etcd_backup_tag: etcd_backup_tag + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_backup_tag: etcd_backup_tag r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - r_etcd_upgrade_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - r_etcd_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - name: Gate on etcd backup hosts: localhost @@ -18,7 +18,7 @@ - set_fact: etcd_backup_completed: "{{ hostvars | oo_select_keys(groups.oo_etcd_hosts_to_backup) - | oo_collect('inventory_hostname', {'r_etcd_upgrade_backup_complete': true}) }}" + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" - set_fact: etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}" - fail: diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index 3e01883ae..64abc54e7 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -16,7 +16,8 @@ tasks: - include_role: name: etcd_common - tasks_from: etcdctl.yml + vars: + r_etcd_common_action: drop_etcdctl - name: Perform etcd upgrade include: ./upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml index 046535680..72de63070 100644 --- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -6,27 +6,32 @@ - lib_openshift tasks: - - name: Retrieve list of openshift nodes matching upgrade label - oc_obj: - state: list - kind: node - selector: "{{ openshift_upgrade_nodes_label }}" - register: nodes_to_upgrade - when: 
openshift_upgrade_nodes_label is defined + - when: openshift_upgrade_nodes_label is defined + block: + - name: Retrieve list of openshift nodes matching upgrade label + oc_obj: + state: list + kind: node + selector: "{{ openshift_upgrade_nodes_label }}" + register: nodes_to_upgrade - # We got a list of nodes with the label, now we need to match these with inventory hosts - # using their openshift.common.hostname fact. - - name: Map labelled nodes to inventory hosts - add_host: - name: "{{ item }}" - groups: temp_nodes_to_upgrade - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: " {{ groups['oo_nodes_to_config'] }}" - when: - - openshift_upgrade_nodes_label is defined - - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list - changed_when: false + - name: Fail if no nodes match openshift_upgrade_nodes_label + fail: + msg: "openshift_upgrade_nodes_label was specified but no nodes matched" + when: nodes_to_upgrade.results.results[0]['items'] | length == 0 + + # We got a list of nodes with the label, now we need to match these with inventory hosts + # using their openshift.common.hostname fact. + - name: Map labelled nodes to inventory hosts + add_host: + name: "{{ item }}" + groups: temp_nodes_to_upgrade + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: " {{ groups['oo_nodes_to_config'] }}" + when: + - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list + changed_when: false # Build up the oo_nodes_to_upgrade group, use the list filtered by label if # present, otherwise hit all nodes: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index b980909eb..6738ce11f 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -3,6 +3,16 @@ # Upgrade Masters ############################################################################### +# oc adm migrate storage should be run prior to etcd v3 upgrade +# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 +- name: Pre master upgrade - Upgrade job storage + hosts: oo_first_master + tasks: + - name: Upgrade job storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=jobs --confirm + # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. This causes problems for the etcd data dir fact detection # so we must first make sure this is set correctly before attempting the backup. 
@@ -133,6 +143,14 @@ - set_fact: master_update_complete: True +- name: Post master upgrade - Upgrade job storage + hosts: oo_first_master + tasks: + - name: Upgrade job storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=jobs --confirm + ############################################################################## # Gate on master update complete ############################################################################## @@ -278,6 +296,7 @@ - openshift_facts - docker - openshift_node_upgrade + - openshift_node_dnsmasq post_tasks: - name: Set node schedulability diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 91dbc2cd4..35a50cf4e 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -34,6 +34,7 @@ - openshift_facts - docker - openshift_node_upgrade + - openshift_node_dnsmasq - role: openshift_excluder r_openshift_excluder_action: enable r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. 
- skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml index e63b03e51..4e7c14e94 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -115,5 +115,3 @@ - include: ../upgrade_nodes.yml - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index 74c2964aa..45b664d06 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -119,5 +119,3 @@ master_config_hook: "v3_5/master_config_upgrade.yml" - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. 
- skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 5d41b84d0..5b9ac9e8f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -115,5 +115,3 @@ - include: ../upgrade_nodes.yml - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index a66fb51ff..a470c7595 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -119,5 +119,3 @@ master_config_hook: "v3_6/master_config_upgrade.yml" - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml new file mode 100644 index 000000000..c655449fa --- /dev/null +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -0,0 +1,120 @@ +--- +- include: ../openshift-cluster/evaluate_groups.yml + tags: + - always + +- name: Run pre-checks + hosts: oo_etcd_to_migrate + tags: + - always + roles: + - role: etcd_migrate + r_etcd_migrate_action: check + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ ansible_default_ipv4.address }}" + +- include: ../openshift-cluster/initialize_facts.yml + tags: + - always + +- name: Backup v2 data + hosts: oo_etcd_to_migrate + gather_facts: no + tags: + - always + roles: + - role: openshift_facts + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migration + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + +- name: Gate on etcd backup + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_migrate) + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}" + - fail: + msg: "Migration cannot continue. 
The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" + when: + - etcd_backup_failed | length > 0 + +- name: Prepare masters for etcd data migration + hosts: oo_masters_to_config + tasks: + - set_fact: + master_services: + - "{{ openshift.common.service_type + '-master' }}" + - set_fact: + master_services: + - "{{ openshift.common.service_type + '-master-controllers' }}" + - "{{ openshift.common.service_type + '-master-api' }}" + when: + - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool + - debug: + msg: "master service name: {{ master_services }}" + - name: Stop masters + service: + name: "{{ item }}" + state: stopped + with_items: "{{ master_services }}" + +- name: Migrate etcd data from v2 to v3 + hosts: oo_etcd_to_migrate + gather_facts: no + tags: + - always + roles: + - role: etcd_migrate + r_etcd_migrate_action: migrate + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ ansible_default_ipv4.address }}" + +- name: Gate on etcd migration + hosts: oo_masters_to_config + gather_facts: no + tasks: + - set_fact: + etcd_migration_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_migrate) + | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}" + - set_fact: + etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}" + +- name: Configure masters if etcd data migration is succesfull + hosts: oo_masters_to_config + roles: + - role: etcd_migrate + r_etcd_migrate_action: configure + when: etcd_migration_failed | length == 0 + tasks: + - debug: + msg: "Skipping master re-configuration since migration failed." + when: + - etcd_migration_failed | length > 0 + +- name: Start masters after etcd data migration + hosts: oo_masters_to_config + tasks: + - name: Start master services + service: + name: "{{ item }}" + state: started + register: service_status + # Sometimes the master-api, resp. master-controllers fails to start for the first time + until: service_status.state is defined and service_status.state == "started" + retries: 5 + delay: 10 + with_items: "{{ master_services[::-1] }}" + - fail: + msg: "Migration failed. 
The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}" + when: + - etcd_migration_failed | length > 0 diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml index 196c86f28..af1ef245a 100644 --- a/playbooks/common/openshift-etcd/restart.yml +++ b/playbooks/common/openshift-etcd/restart.yml @@ -5,5 +5,5 @@ tasks: - name: restart etcd service: - name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}" + name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" state: restarted diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index ddc4db8f8..7d3a371e3 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -69,7 +69,7 @@ ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" -- name: Determine if session secrets must be generated +- name: Inspect state of first master session secrets and config hosts: oo_first_master roles: - role: openshift_facts @@ -79,6 +79,24 @@ local_facts: session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}" session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}" + - name: Check for existing configuration + stat: + path: /etc/origin/master/master-config.yaml + register: master_config_stat + + - name: Set clean install fact + set_fact: + l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" + + - name: Determine if etcd3 storage is in use + command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q + register: etcd3_grep + failed_when: false + changed_when: false + + - name: Set etcd3 fact + set_fact: + l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" - name: Generate master session secrets hosts: oo_first_master @@ -122,6 +140,8 @@ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" + r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" + r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" - role: nuage_master when: openshift.common.use_nuage | bool - role: calico_master |