| author | Brenton Leanhardt <bleanhar@redhat.com> | 2016-02-17 13:05:12 -0500 |
|---|---|---|
| committer | Brenton Leanhardt <bleanhar@redhat.com> | 2016-03-03 09:05:42 -0500 |
| commit | 50b9eefd2b6266b29755e37090138096a2aebc31 (patch) | |
| tree | 5bb7d786e2a8d1c1d7144004ed60b6cb9c256a2a /playbooks | |
| parent | a8edeaea8e8c67f5f930b54ccda5b575340231f5 (diff) | |
First pass at the upgrade process
Diffstat (limited to 'playbooks')
18 files changed, 595 insertions(+), 11 deletions(-)
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md
new file mode 100644
index 000000000..0c31b2888
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md
@@ -0,0 +1,17 @@
+# v3.1 to v3.2 upgrade playbook
+
+## Overview
+This playbook currently performs the following steps.
+
+**TODO: update for current steps**
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Modify the subset of the configuration necessary
+ * Apply the latest cluster policies
+ * Update the default router if one exists
+ * Update the default registry if one exists
+ * Update image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
new file mode 100644
index 000000000..dfeeb9397
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -0,0 +1,18 @@
+---
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+  vars:
+    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+    g_master_hosts: "{{ groups.masters | default([]) }}"
+    g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+    g_node_hosts: "{{ groups.nodes | default([]) }}"
+    g_lb_hosts: "{{ groups.lb | default([]) }}"
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../openshift-master/restart.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
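Editor's note: a minimal sketch of how the BYO wrapper above is typically invoked; the inventory path and the deployment_type value are illustrative assumptions, not part of this commit.

    # Hypothetical invocation; adjust the inventory path and deployment_type
    # to match your environment (deployment_type is usually set in inventory).
    ansible-playbook -i ~/ansible-inventory \
        -e deployment_type=openshift-enterprise \
        openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml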
diff --git a/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh b/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
new file mode 100644
index 000000000..eb51ce6b2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+set -e
+
+SERVICE_TYPE=$1
+DEPLOYMENT_TYPE=$2
+VERSION="v${3}"
+
+add_image_version_to_sysconfig () {
+  unit_name=$2
+  sysconfig_file=/etc/sysconfig/${unit_name}
+
+  if ! grep IMAGE_VERSION ${sysconfig_file}; then
+    sed -i "/CONFIG_FILE/a IMAGE_VERSION=${1}" ${sysconfig_file}
+  else
+    sed -i "s/\(IMAGE_VERSION=\).*/\1${1}/" ${sysconfig_file}
+  fi
+}
+
+add_image_version_to_unit () {
+  deployment_type=$1
+  unit_file=$2
+
+  if ! grep IMAGE_VERSION $unit_file; then
+    image_namespace="openshift/"
+    if [ $deployment_type == "atomic-enterprise" ]; then
+      image_namespace="aep3/"
+    elif [ $deployment_type == "openshift-enterprise" ]; then
+      image_namespace="openshift3/"
+    fi
+
+    sed -i "s|\(${image_namespace}[a-zA-Z0-9]\+\)|\1:\${IMAGE_VERSION}|" $unit_file
+  fi
+}
+
+for unit_file in $(ls /etc/systemd/system/${SERVICE_TYPE}*.service | head -n1); do
+  unit_name=$(basename -s .service ${unit_file})
+  add_image_version_to_sysconfig $VERSION $unit_name
+  add_image_version_to_unit $DEPLOYMENT_TYPE $unit_file
+done
+
+if [ -e /etc/sysconfig/openvswitch ]; then
+  add_image_version_to_sysconfig $VERSION openvswitch
+else
+  # TODO: add this to config.yml
+  echo IMAGE_VERSION=${VERSION} > /etc/sysconfig/openvswitch
+fi
+if ! grep EnvironmentFile /etc/systemd/system/openvswitch.service > /dev/null; then
+  sed -i "/Service/a EnvironmentFile=/etc/sysconfig/openvswitch" /etc/systemd/system/openvswitch.service
+fi
+add_image_version_to_unit $DEPLOYMENT_TYPE /etc/systemd/system/openvswitch.service
+
+systemctl daemon-reload
diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
new file mode 100644
index 000000000..4095b9829
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Here we don't really care if this is a master, api, controller or node image.
+# We just need to know the version of one of them.
+unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1)
+installed_container_name=$(basename -s .service ${unit_file})
+installed=$(docker exec ${installed_container_name} openshift version | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+if [ ${1} == "origin" ]; then
+  image_name="openshift/origin"
+elif grep aep $unit_file > /dev/null; then
+  image_name="aep3/aep"
+elif grep ose $unit_file > /dev/null; then
+  image_name="openshift3/ose"
+fi
+
+docker pull ${image_name} 1>&2
+available=$(docker run --rm ${image_name} version | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
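Editor's note: a sketch of what ensure_system_units_have_version.sh does on a containerized enterprise master; the service type, deployment type, and version arguments here are illustrative assumptions.

    # Hypothetical invocation: positional args are SERVICE_TYPE, DEPLOYMENT_TYPE,
    # and the bare version (the script prefixes it with 'v').
    bash ensure_system_units_have_version.sh atomic-openshift openshift-enterprise 3.2.0.8
    # Assumed net effect: /etc/sysconfig/atomic-openshift-master gains the line
    #   IMAGE_VERSION=v3.2.0.8
    # and the unit file's openshift3/... image reference becomes
    #   openshift3/<image>:${IMAGE_VERSION}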
+ echo "Invalid option: -$OPTARG" >&2 + ;; + esac +done + +if [ "${containerized}" == "TRUE" ] ; then + docker exec atomic-openshift-master rpm -q atomic-openshift +else + installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ') + available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ') +fi + +echo "---" +echo "curr_version: ${installed}" +echo "avail_version: ${available}" diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh deleted file mode 100644 index 3a1a8ebb1..000000000 --- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ') - -yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ') - -echo "---" -echo "curr_version: ${yum_installed}" -echo "avail_version: ${yum_available}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index 0fb38f32e..6d7cefc8e 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -66,7 +66,7 @@ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" - name: Determine available versions - script: ../files/versions.sh {{ g_new_service_name }} openshift + script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift register: g_versions_result - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml index 12b9c84d3..864622a09 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml @@ -41,7 +41,7 @@ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" - name: Determine available versions - script: ../files/versions.sh {{ g_new_service_name }} + script: ../files/rpm_versions.sh {{ g_new_service_name }} register: g_versions_result - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml new file mode 100644 index 000000000..9cda5c9a4 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml @@ -0,0 +1,6 @@ +- name: Update system_units + script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_new_version }} + +- name: Ensure python-yaml present for config upgrade + action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" + when: not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml new file mode 100644 index 000000000..09372e3bd --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml @@ -0,0 +1,20 @@ +--- +- name: Upgrade Docker + hosts: oo_masters_to_config + vars: + openshift_version: "{{ openshift_pkg_version | 
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
new file mode 100644
index 000000000..09372e3bd
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
@@ -0,0 +1,20 @@
+---
+- name: Upgrade Docker
+  hosts: oo_masters_to_config
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+  - name: Check if Docker is installed
+    command: rpm -q docker
+    register: pkg_check
+    failed_when: pkg_check.rc > 1
+    changed_when: no
+
+  - name: Upgrade Docker
+    command: "{{ ansible_pkg_mgr }} update -y docker"
+    when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
+    register: docker_upgrade
+
+  - name: Restart Docker
+    service: name=docker state=restarted
+    when: docker_upgrade | changed
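Editor's note: the play above only updates Docker when the installed version is older than 1.9. A rough manual equivalent of that guard, assuming an RPM-based host:

    # Hypothetical manual check mirroring the play's version_compare guard.
    current=$(rpm -q --queryformat '%{VERSION}' docker)
    if [ "$(printf '%s\n' "$current" 1.9 | sort -V | head -n1)" != "1.9" ]; then
        # installed Docker is older than 1.9; update and restart it
        yum update -y docker && systemctl restart docker
    fi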
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
@@ -0,0 +1 @@
+../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
new file mode 100644
index 000000000..3fd97ac14
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
@@ -0,0 +1,57 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+  hosts: oo_first_master
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
+    registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+    router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+    oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+  roles:
+  # Create the new templates shipped in 3.2, existing templates are left
+  # unmodified. This prevents the subsequent role definition for
+  # openshift_examples from failing when trying to replace templates that do
+  # not already exist. We could have potentially done a replace --force to
+  # create and update in one step.
+  - openshift_examples
+  # Update the existing templates
+  - role: openshift_examples
+    openshift_examples_import_command: replace
+  pre_tasks:
+  - name: Collect all routers
+    command: >
+      {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+    register: all_routers
+    failed_when: false
+    changed_when: false
+
+  - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+    when: all_routers.rc == 0
+
+  - set_fact: haproxy_routers=[]
+    when: all_routers.rc != 0
+
+  - name: Update router image to current version
+    when: all_routers.rc == 0
+    command: >
+      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+      --api-version=v1
+    with_items: haproxy_routers
+
+  - name: Check for default registry
+    command: >
+      {{ oc_cmd }} get -n default dc/docker-registry
+    register: _default_registry
+    failed_when: false
+    changed_when: false
+
+  - name: Update registry image to current version
+    when: _default_registry.rc == 0
+    command: >
+      {{ oc_cmd }} patch dc/docker-registry -p
+      '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+      --api-version=v1
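Editor's note: a rough manual equivalent of the registry patch task above, run on the first master. The kubeconfig path assumes the default config_base, and the image tag is an illustrative assumption.

    # Hypothetical one-off patch mirroring the playbook task; the image tag is made up.
    oc --config=/etc/origin/master/admin.kubeconfig patch dc/docker-registry -p \
        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"openshift3/docker-registry:v3.2.0.8"}]}}}}' \
        --api-version=v1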
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
new file mode 100644
index 000000000..a1d3ac5cc
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -0,0 +1,220 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_facts
+
+- name: Evaluate additional groups for upgrade
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - name: Evaluate etcd_hosts_to_backup
+    add_host:
+      name: "{{ item }}"
+      groups: etcd_hosts_to_backup
+    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+  hosts: oo_first_master
+  vars:
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: >
+        This upgrade is only supported for origin, openshift-enterprise, and online
+        deployment types
+    when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+  - fail:
+      msg: >
+        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+        valid version for a {{ target_version }} upgrade
+    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
+
+- name: Verify upgrade can proceed
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  vars:
+    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+  roles:
+  - openshift_cli
+  tasks:
+  - name: Clean package cache
+    command: "{{ ansible_pkg_mgr }} clean all"
+
+  - set_fact:
+      g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+    when: not openshift.common.is_containerized | bool
+
+  - name: Determine available versions
+    script: ../files/rpm_versions.sh {{ g_new_service_name }}
+    register: g_versions_result
+    when: not openshift.common.is_containerized | bool
+
+  - name: Determine available versions
+    script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
+    register: g_versions_result
+    when: openshift.common.is_containerized | bool
+
+  - set_fact:
+      g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
+  - set_fact:
+      g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+    when: openshift_pkg_version is not defined
+
+  - set_fact:
+      g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
+    when: openshift_pkg_version is defined
+
+  - name: Update systemd units
+    script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_aos_versions.curr_version }}
+    when: openshift.common.is_containerized | bool
+
+  # TODO: Remove this, used for testing
+  #- pause:
+
+  - fail:
+      msg: This playbook requires Origin 1.1 or later
+    when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+  - fail:
+      msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+    when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+  # TODO: this may only make sense for RPM installs. We probably need another check for containerized installs.
+  - fail:
+      msg: Upgrade packages not found
+    when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+  - name: Determine available Docker
+    script: ../files/rpm_versions.sh docker
+    register: g_docker_version_result
+    when: not openshift.common.is_atomic | bool
+
+  - set_fact:
+      g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+
+  - fail:
+      msg: This playbook requires access to Docker 1.9 or later
+    when: not openshift.common.is_atomic | bool
+      and (g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<'))
+
+  # TODO: add check to upgrade ostree to get latest Docker
+
+  - set_fact:
+      pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+  hosts: localhost
+  connection: local
+  become: no
+  vars:
+    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+  tasks:
+  - set_fact:
+      pre_upgrade_completed: "{{ hostvars
+                                 | oo_select_keys(pre_upgrade_hosts)
+                                 | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+  - set_fact:
+      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+    when: pre_upgrade_failed | length > 0
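Editor's note: a rough shell equivalent of the "Upgrade packages not found" guard above, for an RPM-based enterprise host; the package name and target version are taken from the play, everything else is an illustrative sketch.

    # Hypothetical manual check mirroring the 'Upgrade packages not found' guard.
    avail=$(yum list available -q atomic-openshift 2>/dev/null | tail -n +2 | awk '{ print $2 }' | sort -rV | head -n1)
    target=3.1.1.900
    if [ "$(printf '%s\n' "${avail:-0}" "$target" | sort -V | head -n1)" != "$target" ]; then
        # available version sorts below the target, so the upgrade cannot proceed
        echo "Upgrade packages not found (available: ${avail:-none})" >&2
    fi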
###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+  hosts: etcd_hosts_to_backup
+  vars:
+    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+  roles:
+  - openshift_facts
+  tasks:
+  # Ensure we persist the etcd role for this host in openshift_facts
+  - openshift_facts:
+      role: etcd
+      local_facts: {}
+    when: "'etcd' not in openshift"
+
+  - stat: path=/var/lib/openshift
+    register: var_lib_openshift
+
+  - stat: path=/var/lib/origin
+    register: var_lib_origin
+
+  - name: Create origin symlink if necessary
+    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+  # TODO: replace shell module with command and update later checks
+  # We assume to be using the data dir for all backups.
+  - name: Check available disk space for etcd backup
+    shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+    register: avail_disk
+
+  # TODO: replace shell module with command and update later checks
+  - name: Check current embedded etcd disk usage
+    shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+    register: etcd_disk_usage
+    when: embedded_etcd | bool
+
+  - name: Abort if insufficient disk space for etcd backup
+    fail:
+      msg: >
+        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+        {{ avail_disk.stdout }} Kb available.
+    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+  - name: Install etcd (for etcdctl)
+    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+
+  - name: Generate etcd backup
+    command: >
+      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+  - set_fact:
+      etcd_backup_complete: True
+
+  - name: Display location of etcd backup
+    debug:
+      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      etcd_backup_completed: "{{ hostvars
+                                 | oo_select_keys(groups.etcd_hosts_to_backup)
+                                 | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+  - set_fact:
+      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+    when: etcd_backup_failed | length > 0
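Editor's note: a manual sketch of the backup the play performs for embedded etcd; the data-dir path is an assumption based on the default Origin config_base, not taken from this commit.

    # Hypothetical manual etcd backup; paths assume embedded etcd under
    # /var/lib/origin (openshift.common.data_dir on a default Origin install).
    timestamp=$(date +%Y%m%d%H%M%S)
    etcdctl backup --data-dir=/var/lib/origin/openshift.local.etcd \
        --backup-dir=/var/lib/origin/etcd-backup-${timestamp}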
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
new file mode 100644
index 000000000..0d6fa871b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
@@ -0,0 +1,6 @@
+- name: Upgrade packages
+  command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-{{ component }}{{ openshift_version }}"
+
+- name: Ensure python-yaml present for config upgrade
+  action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+  when: not openshift.common.is_atomic | bool
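Editor's note: on an enterprise master, the templated "Upgrade packages" command above expands to roughly the following; the version suffix is an illustrative assumption (it comes from openshift_pkg_version when set).

    # Hypothetical expansion of the 'Upgrade packages' task on an RPM install:
    # service_type=atomic-openshift, component=master, openshift_version=-3.2.0.8
    yum update -y atomic-openshift-master-3.2.0.8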
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
new file mode 100644
index 000000000..3ef9a207a
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -0,0 +1,147 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+- include: docker_upgrade.yml
+  when: not openshift.common.is_atomic | bool
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master container
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_cli
+  tasks:
+  - include: rpm_upgrade.yml component=master
+    when: not openshift.common.is_containerized | bool
+
+  - include: containerized_upgrade.yml
+    when: openshift.common.is_containerized | bool
+
+#  - name: Upgrade master configuration
+#    openshift_upgrade_config:
+#      from_version: '3.1'
+#      to_version: '3.2'
+#      role: master
+#      config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+  hosts: oo_masters_to_config
+  tasks:
+  - set_fact:
+      master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      master_update_completed: "{{ hostvars
+                                   | oo_select_keys(groups.oo_masters_to_config)
+                                   | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+  - set_fact:
+      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+    when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - include: rpm_upgrade.yml
+    vars:
+      component: "node"
+      openshift_version: "{{ openshift_pkg_version | default('') }}"
+    when: not openshift.common.is_containerized | bool
+
+  - include: containerized_upgrade.yml
+    when: openshift.common.is_containerized | bool
+
+  - name: Restart node service
+    service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+  - set_fact:
+      node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      node_update_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_nodes_to_config)
+                                 | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+  - set_fact:
+      node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+    when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
+  hosts: oo_masters_to_config
+  vars:
+    origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+    ent_reconcile_bindings: true
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  tasks:
+  - name: Reconcile Cluster Roles
+    command: >
+      {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-roles --confirm
+    run_once: true
+
+  - name: Reconcile Cluster Role Bindings
+    command: >
+      {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-role-bindings
+      --exclude-groups=system:authenticated
+      --exclude-groups=system:authenticated:oauth
+      --exclude-groups=system:unauthenticated
+      --exclude-users=system:anonymous
+      --additive-only=true --confirm
+    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+    run_once: true
+
+  - name: Reconcile Security Context Constraints
+    command: >
+      {{ openshift.common.admin_binary }} policy reconcile-sccs --confirm
+    run_once: true
+
+  - set_fact:
+      reconcile_complete: True
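Editor's note: the reconcile tasks above reduce to these admin-binary invocations on a master. The binary name (oadm) and kubeconfig path are assumptions based on a default enterprise install; the flags are taken verbatim from the play.

    # Hypothetical manual equivalents of the reconcile tasks.
    oadm --config=/etc/origin/master/admin.kubeconfig policy reconcile-cluster-roles --confirm
    oadm --config=/etc/origin/master/admin.kubeconfig policy reconcile-cluster-role-bindings \
        --exclude-groups=system:authenticated \
        --exclude-groups=system:authenticated:oauth \
        --exclude-groups=system:unauthenticated \
        --exclude-users=system:anonymous \
        --additive-only=true --confirm
    oadm policy reconcile-sccs --confirm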
##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      reconcile_completed: "{{ hostvars
+                               | oo_select_keys(groups.oo_masters_to_config)
+                               | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+  - set_fact:
+      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+    when: reconcile_failed | length > 0
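Editor's note: the header comment in upgrade.yml says the master restart playbook should run afterwards (the BYO wrapper already chains it). A few hedged sanity checks once everything completes; the service name assumes an enterprise RPM install.

    # Hypothetical post-upgrade sanity checks.
    openshift version                            # confirm the new binary version
    oc get nodes                                 # all nodes should report Ready
    systemctl status atomic-openshift-master     # master service back up after restart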