From 31707737a5473bb5cf6545b6675b192a64a913c7 Mon Sep 17 00:00:00 2001
From: Devan Goodwin
Date: Fri, 15 Jul 2016 15:50:31 -0300
Subject: Introduce 1.3/3.3 upgrade path.

Refactored the 3.2 upgrade common files out to a path that does not
indicate they are strictly for 3.2. The 3.3 upgrade then becomes a
relatively small copy of the byo entry point, all calling the same code
as the 3.2 upgrade. Thus far there are no known 3.3-specific upgrade
tasks. In the future we will likely want to allow hooks out to
version-specific pre/upgrade/post tasks.

Also fixes a bug where the handlers were not restarting
nodes/openvswitch containers during upgrades, due to a change in
Ansible 2+.
---
 .../common/openshift-cluster/enable_dnsmasq.yml    |   4 +-
 .../upgrades/atomic-openshift-master.j2            |   1 +
 .../upgrades/containerized_node_upgrade.yml        |  11 +
 .../openshift-cluster/upgrades/docker-cluster      |   1 +
 .../openshift-cluster/upgrades/master_docker       |   1 +
 .../openshift-cluster/upgrades/native-cluster      |   1 +
 .../upgrades/openshift.docker.node.dep.service     |   1 +
 .../upgrades/openshift.docker.node.service         |   1 +
 .../upgrades/openvswitch.docker.service            |   1 +
 .../upgrades/openvswitch.sysconfig.j2              |   1 +
 .../common/openshift-cluster/upgrades/post.yml     |  58 ++++
 .../common/openshift-cluster/upgrades/pre.yml      | 315 +++++++++++++++++++
 .../openshift-cluster/upgrades/rpm_upgrade.yml     |  10 +
 .../common/openshift-cluster/upgrades/upgrade.yml  | 169 ++++++++++
 .../v3_1_to_v3_2/atomic-openshift-master.j2        |   1 -
 .../v3_1_to_v3_2/containerized_node_upgrade.yml    |  11 -
 .../openshift-cluster/upgrades/v3_1_to_v3_2/docker |   1 -
 .../upgrades/v3_1_to_v3_2/docker-cluster           |   1 -
 .../upgrades/v3_1_to_v3_2/filter_plugins           |   1 -
 .../upgrades/v3_1_to_v3_2/library                  |   1 -
 .../upgrades/v3_1_to_v3_2/lookup_plugins           |   1 -
 .../upgrades/v3_1_to_v3_2/native-cluster           |   1 -
 .../v3_1_to_v3_2/openshift.docker.node.dep.service |   1 -
 .../v3_1_to_v3_2/openshift.docker.node.service     |   1 -
 .../v3_1_to_v3_2/openvswitch.docker.service        |   1 -
 .../upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2 |   1 -
 .../upgrades/v3_1_to_v3_2/post.yml                 |  59 ----
 .../upgrades/v3_1_to_v3_2/pre.yml                  | 343 ---------------------
 .../openshift-cluster/upgrades/v3_1_to_v3_2/roles  |   1 -
 .../upgrades/v3_1_to_v3_2/rpm_upgrade.yml          |  10 -
 .../upgrades/v3_1_to_v3_2/upgrade.yml              | 167 ----------
 31 files changed, 574 insertions(+), 603 deletions(-)
 create mode 120000 playbooks/common/openshift-cluster/upgrades/atomic-openshift-master.j2
 create mode 100644 playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
 create mode 120000 playbooks/common/openshift-cluster/upgrades/docker-cluster
 create mode 120000 playbooks/common/openshift-cluster/upgrades/master_docker
 create mode 120000 playbooks/common/openshift-cluster/upgrades/native-cluster
 create mode 120000 playbooks/common/openshift-cluster/upgrades/openshift.docker.node.dep.service
 create mode 120000 playbooks/common/openshift-cluster/upgrades/openshift.docker.node.service
 create mode 120000 playbooks/common/openshift-cluster/upgrades/openvswitch.docker.service
 create mode 120000 playbooks/common/openshift-cluster/upgrades/openvswitch.sysconfig.j2
 create mode 100644 playbooks/common/openshift-cluster/upgrades/post.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/pre.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/upgrade.yml
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2
 delete mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_node_upgrade.yml
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.dep.service
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.service
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.docker.service
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2
 delete mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
 delete mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
 delete mode 120000 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
 delete mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
 delete mode 100644 playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
(limited to 'playbooks/common/openshift-cluster')
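The handler fix called out in the message comes down to Ansible 2 treating `include` as dynamic by default: handlers pulled in through a dynamic include can no longer be matched by name from `notify`, so the node/openvswitch restart handlers silently never fired. Marking the include static, as the first hunk below does, restores by-name notification. A minimal sketch of the pattern (the task body and handler name are illustrative, not from this patch):

    handlers:
    - include: ../../../roles/openshift_node/handlers/main.yml
      static: yes
    tasks:
    - name: Rewrite node config
      template:
        src: node.yaml.j2            # illustrative template
        dest: /etc/origin/node/node-config.yaml
      notify: restart node           # resolvable only if the include is static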
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index f2bcc872f..4cfe8617e 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -8,11 +8,12 @@
     post_tasks:
     - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1"
       when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool
-
+
 - name: Reconfigure masters to listen on our new dns_port
   hosts: oo_masters_to_config
   handlers:
   - include: ../../../roles/openshift_master/handlers/main.yml
+    static: yes
   vars:
     os_firewall_allow:
     - service: skydns tcp
@@ -43,6 +44,7 @@
   hosts: oo_nodes_to_config
   handlers:
   - include: ../../../roles/openshift_node/handlers/main.yml
+    static: yes
   pre_tasks:
   - openshift_facts:
       role: "{{ item.role }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/atomic-openshift-master.j2 b/playbooks/common/openshift-cluster/upgrades/atomic-openshift-master.j2
new file mode 120000
index 000000000..2441f8887
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/atomic-openshift-master.j2
@@ -0,0 +1 @@
+../../../../roles/openshift_master/templates/atomic-openshift-master.j2
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
new file mode 100644
index 000000000..32a3636aa
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
@@ -0,0 +1,11 @@
+- include_vars: ../../../../roles/openshift_node/vars/main.yml
+
+- name: Update systemd units
+  include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
+
+- name: Verifying the correct version was configured
+  shell: grep {{ verify_upgrade_version }} {{ item }}
+  with_items:
+  - /etc/sysconfig/openvswitch
+  - /etc/sysconfig/{{ openshift.common.service_type }}*
+  when: verify_upgrade_version is defined
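The `openshift_version={{ openshift_image_tag }}` suffix on the include above is Ansible's inline key=value parameter syntax; it is equivalent to passing the variable through `vars`, the form this same patch uses elsewhere. A minimal equivalent sketch:

    - name: Update systemd units
      include: ../../../../roles/openshift_node/tasks/systemd_units.yml
      vars:
        openshift_version: "{{ openshift_image_tag }}"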
diff --git a/playbooks/common/openshift-cluster/upgrades/docker-cluster b/playbooks/common/openshift-cluster/upgrades/docker-cluster
new file mode 120000
index 000000000..055ad09fc
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker-cluster
@@ -0,0 +1 @@
+../../../../roles/openshift_master/templates/docker-cluster
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker
new file mode 120000
index 000000000..6aeca2842
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/master_docker
@@ -0,0 +1 @@
+../../../../roles/openshift_master/templates/master_docker
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/native-cluster b/playbooks/common/openshift-cluster/upgrades/native-cluster
new file mode 120000
index 000000000..4af88e666
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/native-cluster
@@ -0,0 +1 @@
+../../../../roles/openshift_master/templates/native-cluster
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.dep.service b/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.dep.service
new file mode 120000
index 000000000..add8b7fa9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.dep.service
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openshift.docker.node.dep.service
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.service b/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.service
new file mode 120000
index 000000000..ed181633d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openshift.docker.node.service
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openshift.docker.node.service
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/openvswitch.docker.service b/playbooks/common/openshift-cluster/upgrades/openvswitch.docker.service
new file mode 120000
index 000000000..c21e895f2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openvswitch.docker.service
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openvswitch.docker.service
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/openvswitch.sysconfig.j2 b/playbooks/common/openshift-cluster/upgrades/openvswitch.sysconfig.j2
new file mode 120000
index 000000000..ead6904c4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openvswitch.sysconfig.j2
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openvswitch.sysconfig.j2
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post.yml b/playbooks/common/openshift-cluster/upgrades/post.yml
new file mode 100644
index 000000000..bd97d0b34
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/post.yml
@@ -0,0 +1,58 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+  hosts: oo_first_master
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
+    registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
+    router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
+    oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+  roles:
+  - openshift_manageiq
+  # Create the new templates shipped in 3.2, existing templates are left
+  # unmodified. This prevents the subsequent role definition for
+  # openshift_examples from failing when trying to replace templates that do
+  # not already exist. We could have potentially done a replace --force to
+  # create and update in one step.
+  - openshift_examples
+  # Update the existing templates
+  - role: openshift_examples
+    registry_url: "{{ openshift.master.registry_url }}"
+    openshift_examples_import_command: replace
+  pre_tasks:
+  - name: Collect all routers
+    command: >
+      {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+    register: all_routers
+    failed_when: false
+    changed_when: false
+
+  - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+    when: all_routers.rc == 0
+
+  - set_fact: haproxy_routers=[]
+    when: all_routers.rc != 0
+
+  - name: Update router image to current version
+    when: all_routers.rc == 0
+    command: >
+      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
+      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+      --api-version=v1
+    with_items: haproxy_routers
+
+  - name: Check for default registry
+    command: >
+      {{ oc_cmd }} get -n default dc/docker-registry
+    register: _default_registry
+    failed_when: false
+    changed_when: false
+
+  - name: Update registry image to current version
+    when: _default_registry.rc == 0
+    command: >
+      {{ oc_cmd }} patch dc/docker-registry -n default -p
+      '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+      --api-version=v1
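The image variables in this play rely on the master's registry_url fact containing literal `${component}` and `${version}` placeholders, which are swapped out with Jinja's `replace` filter. A small sketch of the substitution, assuming a hypothetical registry_url value:

    - debug:
        msg: "{{ 'openshift3/ose-${component}:${version}'
          | replace('${component}', 'haproxy-router')
          | replace('${version}', 'v3.3.0.32') }}"
      # prints: openshift3/ose-haproxy-router:v3.3.0.32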
diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
new file mode 100644
index 000000000..b5fbc4af6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre.yml
@@ -0,0 +1,315 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+
+- include: ../initialize_facts.yml
+
+- name: Update repos and initialize facts on all hosts
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                 | union(groups['oo_masters_to_config'])
+                                                 | union(groups['oo_etcd_to_config'] | default([])))
+                                             | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                             }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- name: Evaluate additional groups for upgrade
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - name: Evaluate etcd_hosts_to_backup
+    add_host:
+      name: "{{ item }}"
+      groups: etcd_hosts_to_backup
+    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed on first master
+  hosts: oo_first_master
+  vars:
+    g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: >
+        This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
+        deployment types
+    when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
+
+  - fail:
+      msg: >
+        This upgrade does not support Pacemaker:
+        https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
+    when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
+
+  # Error out in situations where the user has older versions specified in their
+  # inventory in any of the openshift_release, openshift_image_tag, and
+  # openshift_pkg_version variables. These must be removed or updated to proceed
+  # with upgrade.
+  # TODO: Should we block if you're *over* the next major release version as well?
+  - fail:
+      msg: >
+        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+        valid version for a {{ openshift_upgrade_target }} upgrade
+    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+  - fail:
+      msg: >
+        openshift_image_tag is {{ openshift_image_tag }} which is not a
+        valid version for a {{ openshift_upgrade_target }} upgrade
+    when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+  - set_fact:
+      openshift_release: "{{ openshift_release[1:] }}"
+    when: openshift_release is defined and openshift_release[0] == 'v'
+
+  - fail:
+      msg: >
+        openshift_release is {{ openshift_release }} which is not a
+        valid release for a {{ openshift_upgrade_target }} upgrade
+    when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
+
+- include: ../../../common/openshift-cluster/initialize_openshift_version.yml
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+    # Docker role (a dependency) should be told not to do anything to installed version
+    # of docker, we handle this separately during upgrade. (the inventory may have a
+    # docker_version defined, we don't want to actually do it until later)
+    docker_protect_installed_version: True
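The version guards above peel decorations off the inventory values before comparing: a leading dash on openshift_pkg_version, a leading "v" on openshift_image_tag. A runnable sketch of the same comparisons with hypothetical values:

    - hosts: localhost
      gather_facts: no
      vars:
        openshift_upgrade_target: '3.3'
        openshift_pkg_version: '-3.2.1.9'
        openshift_image_tag: 'v3.2.1.9'
      tasks:
      # both of these print True: 3.2.1.9 is older than the 3.3 target
      - debug:
          msg: "pkg too old: {{ openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target, '<') }}"
      - debug:
          msg: "tag too old: {{ openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target, '<') }}"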
+- name: Verify master processes
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+  - name: Ensure Master is running
+    service:
+      name: "{{ openshift.common.service_type }}-master"
+      state: started
+      enabled: yes
+    when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+  - name: Ensure HA Master is running
+    service:
+      name: "{{ openshift.common.service_type }}-master-api"
+      state: started
+      enabled: yes
+    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+  - name: Ensure HA Master is running
+    service:
+      name: "{{ openshift.common.service_type }}-master-controllers"
+      state: started
+      enabled: yes
+    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+- name: Verify node processes
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
+  - openshift_docker_facts
+  tasks:
+  - name: Ensure Node is running
+    service:
+      name: "{{ openshift.common.service_type }}-node"
+      state: started
+      enabled: yes
+    when: openshift.common.is_containerized | bool
+
+- name: Verify upgrade targets
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  vars:
+    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+  pre_tasks:
+  - fail:
+      msg: Verify OpenShift is already installed
+    when: openshift.common.version is not defined
+
+  - fail:
+      msg: Verify the correct version was found
+    when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+
+  - name: Clean package cache
+    command: "{{ ansible_pkg_mgr }} clean all"
+    when: not openshift.common.is_atomic | bool
+
+  - set_fact:
+      g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+    when: not openshift.common.is_containerized | bool
+
+  - name: Verify containers are available for upgrade
+    command: >
+      docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+    register: pull_result
+    changed_when: "'Downloaded newer image' in pull_result.stdout"
+    when: openshift.common.is_containerized | bool
+
+  - name: Check latest available OpenShift RPM version
+    command: >
+      {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+    failed_when: false
+    changed_when: false
+    register: avail_openshift_version
+    when: not openshift.common.is_containerized | bool
+
+  - name: Verify OpenShift RPMs are available for upgrade
+    fail:
+      msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
+    when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+
+  - fail:
+      msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+    when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
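Pre-pulling the CLI image above does double duty: it warms the Docker cache and proves the requested tag actually exists in the registry, with `changed_when` keyed off the pull output rather than the return code. A condensed sketch of that idiom (the image name here is illustrative):

    - name: Verify a container image is available for upgrade
      command: docker pull openshift3/ose:{{ openshift_image_tag }}
      register: pull_result
      # only report "changed" when a new layer was actually fetched
      changed_when: "'Downloaded newer image' in pull_result.stdout"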
+- name: Verify docker upgrade targets
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+  tasks:
+  - name: Determine available Docker
+    script: ../files/rpm_versions.sh docker
+    register: g_docker_version_result
+    when: not openshift.common.is_atomic | bool
+
+  - name: Determine available Docker
+    shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+    register: g_atomic_docker_version_result
+    when: openshift.common.is_atomic | bool
+
+  - set_fact:
+      g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+    when: not openshift.common.is_atomic | bool
+
+  - set_fact:
+      g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+    when: openshift.common.is_atomic | bool
+
+  - fail:
+      msg: This playbook requires access to Docker 1.10 or later
+    when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
+
+  # TODO: add check to upgrade ostree to get latest Docker
+
+  - set_fact:
+      pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+  hosts: localhost
+  connection: local
+  become: no
+  vars:
+    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+  tasks:
+  - set_fact:
+      pre_upgrade_completed: "{{ hostvars
+                               | oo_select_keys(pre_upgrade_hosts)
+                               | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+  - set_fact:
+      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+    when: pre_upgrade_failed | length > 0
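This gate is a pattern the upgrade repeats after every phase: each host records a completion fact, then a localhost play collects the hostnames where the fact is true (via the repo's oo_select_keys/oo_collect filter plugins) and fails if the set difference against the full group is non-empty. Condensed, with a hypothetical group and fact name:

    - set_fact:
        step_complete: True    # last task of the per-host play

    - name: Gate on step
      hosts: localhost
      tasks:
      - set_fact:
          step_done: "{{ hostvars | oo_select_keys(groups.some_group)
            | oo_collect('inventory_hostname', {'step_complete': true}) }}"
      - fail:
          msg: "Did not complete: {{ groups.some_group | difference(step_done) | join(',') }}"
        when: groups.some_group | difference(step_done) | length > 0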
+###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+  hosts: etcd_hosts_to_backup
+  vars:
+    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+  roles:
+  - openshift_facts
+  tasks:
+  # Ensure we persist the etcd role for this host in openshift_facts
+  - openshift_facts:
+      role: etcd
+      local_facts: {}
+    when: "'etcd' not in openshift"
+
+  - stat: path=/var/lib/openshift
+    register: var_lib_openshift
+
+  - stat: path=/var/lib/origin
+    register: var_lib_origin
+
+  - name: Create origin symlink if necessary
+    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+  # TODO: replace shell module with command and update later checks
+  # We assume to be using the data dir for all backups.
+  - name: Check available disk space for etcd backup
+    shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+    register: avail_disk
+
+  # TODO: replace shell module with command and update later checks
+  - name: Check current embedded etcd disk usage
+    shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+    register: etcd_disk_usage
+    when: embedded_etcd | bool
+
+  - name: Abort if insufficient disk space for etcd backup
+    fail:
+      msg: >
+        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+        {{ avail_disk.stdout }} Kb available.
+    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+  - name: Install etcd (for etcdctl)
+    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+    when: not openshift.common.is_atomic | bool
+
+  - name: Generate etcd backup
+    command: >
+      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+  - set_fact:
+      etcd_backup_complete: True
+
+  - name: Display location of etcd backup
+    debug:
+      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      etcd_backup_completed: "{{ hostvars
+                               | oo_select_keys(groups.etcd_hosts_to_backup)
+                               | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+  - set_fact:
+      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+    when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
new file mode 100644
index 000000000..f5e4d807e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -0,0 +1,10 @@
+# We verified latest rpm available is suitable, so just yum update.
+- name: Upgrade packages
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+
+- name: Ensure python-yaml present for config upgrade
+  action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+  when: not openshift.common.is_atomic | bool
+
+- name: Restart node service
+  service: name="{{ openshift.common.service_type }}-node" state=restarted
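Note that rpm_upgrade.yml concatenates openshift_pkg_version directly onto the package name, so the variable must carry its leading dash. With service_type atomic-openshift, component node, and openshift_pkg_version '-3.3.0.32' (hypothetical values on a yum-based host), the action line renders to:

    - name: Upgrade packages
      yum: name=atomic-openshift-node-3.3.0.32 state=present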
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
new file mode 100644
index 000000000..e7638982c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
@@ -0,0 +1,169 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master
+  hosts: oo_masters_to_config
+  handlers:
+  - include: ../../../../roles/openshift_master/handlers/main.yml
+    static: yes
+  roles:
+  - openshift_facts
+  tasks:
+  - include: rpm_upgrade.yml component=master
+    when: not openshift.common.is_containerized | bool
+
+  - include_vars: ../../../../roles/openshift_master/vars/main.yml
+
+  - name: Update systemd units
+    include: ../../../../roles/openshift_master/tasks/systemd_units.yml
+
+# - name: Upgrade master configuration
+#   openshift_upgrade_config:
+#     from_version: '3.1'
+#     to_version: '3.2'
+#     role: master
+#     config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+  hosts: oo_masters_to_config
+  tasks:
+  - set_fact:
+      master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      master_update_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_masters_to_config)
+                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+  - set_fact:
+      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+    when: master_update_failed | length > 0
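The node play that follows walks masters, etcd hosts, and nodes one host at a time: `serial: 1` limits each pass to a single host, and `any_errors_fatal` aborts the whole run at the first failure so a broken upgrade never marches past one node. A stripped-down illustration of those two directives:

    - name: Roll through hosts one at a time
      hosts: oo_nodes_to_config
      serial: 1               # one host per pass
      any_errors_fatal: true  # stop the entire play on first failure
      tasks:
      - command: /bin/true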
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+
+# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service)
+- name: Perform upgrades that may require node evacuation
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
+  serial: 1
+  any_errors_fatal: true
+  roles:
+  - openshift_facts
+  handlers:
+  - include: ../../../../roles/openshift_node/handlers/main.yml
+    static: yes
+  tasks:
+  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+  - name: Mark unschedulable if host is a node
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - name: Evacuate Node for Kubelet upgrade
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  # Only check if docker upgrade is required if docker_upgrade is not
+  # already set to False.
+  - include: docker/upgrade_check.yml
+    when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+  - include: docker/upgrade.yml
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+  - include: rpm_upgrade.yml
+    vars:
+      component: "node"
+      openshift_version: "{{ openshift_pkg_version | default('') }}"
+    when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
+
+  - include: containerized_node_upgrade.yml
+    when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
+
+  - name: Set node schedulability
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+
+
+###############################################################################
+# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+###############################################################################
+
+- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
+  hosts: oo_masters_to_config
+  roles:
+  - { role: openshift_cli }
+  vars:
+    origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
+    ent_reconcile_bindings: true
+    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+  tasks:
+  - name: Verifying the correct commandline tools are available
+    shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
+    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
+
+  - name: Reconcile Cluster Roles
+    command: >
+      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-roles --additive-only=true --confirm
+    run_once: true
+
+  - name: Reconcile Cluster Role Bindings
+    command: >
+      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-role-bindings
+      --exclude-groups=system:authenticated
+      --exclude-groups=system:authenticated:oauth
+      --exclude-groups=system:unauthenticated
+      --exclude-users=system:anonymous
+      --additive-only=true --confirm
+    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+    run_once: true
+
+  - name: Reconcile Security Context Constraints
+    command: >
+      {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
+    run_once: true
+
+  - set_fact:
+      reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      reconcile_completed: "{{ hostvars
+                             | oo_select_keys(groups.oo_masters_to_config)
+                             | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+  - set_fact:
+      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+    when: reconcile_failed | length > 0
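With the common files no longer version-specific, a 3.3 entry point only needs to pin the target versions and include them in order. A hypothetical sketch of such a byo wrapper (the path and values are illustrative, not part of this patch):

    # playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml (hypothetical)
    - include: ../../../../common/openshift-cluster/upgrades/pre.yml
      vars:
        openshift_upgrade_target: '3.3'
        openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
    - include: ../../../../common/openshift-cluster/upgrades/upgrade.yml
    - include: ../../../../common/openshift-cluster/upgrades/post.yml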
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2
deleted file mode 120000
index cf20e8959..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_master/templates/atomic-openshift-master.j2
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_node_upgrade.yml
deleted file mode 100644
index 60ea84f8e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-- include_vars: ../../../../../roles/openshift_node/vars/main.yml
-
-- name: Update systemd units
-  include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-
-- name: Verifying the correct version was configured
-  shell: grep {{ verify_upgrade_version }} {{ item }}
-  with_items:
-  - /etc/sysconfig/openvswitch
-  - /etc/sysconfig/{{ openshift.common.service_type }}*
-  when: verify_upgrade_version is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
deleted file mode 120000
index 5a3dd12b3..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_master/templates/docker
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
deleted file mode 120000
index 3ee319365..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_master/templates/docker-cluster
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
deleted file mode 120000
index 27ddaa18b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
deleted file mode 120000
index 53bed9684..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
+++ /dev/null
@@ -1 +0,0 @@
-../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
deleted file mode 120000
index cf407f69b..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
deleted file mode 120000
index f44f8eb4f..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_master/templates/native-cluster
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.dep.service b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.dep.service
deleted file mode 120000
index b384a3f4d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.dep.service
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_node/templates/openshift.docker.node.dep.service
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.service b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.service
deleted file mode 120000
index a2f140144..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.service
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_node/templates/openshift.docker.node.service
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.docker.service b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.docker.service
deleted file mode 120000
index 61946ff91..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.docker.service
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_node/templates/openvswitch.docker.service
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2 b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2
deleted file mode 120000
index 3adc56e4e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles/openshift_node/templates/openvswitch.sysconfig.j2
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
deleted file mode 100644
index ccf9514f1..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
-- name: Upgrade default router and default registry
-  hosts: oo_first_master
-  vars:
-    openshift_deployment_type: "{{ deployment_type }}"
-    registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
-    router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
-    oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  roles:
-  - openshift_manageiq
-  # Create the new templates shipped in 3.2, existing templates are left
-  # unmodified. This prevents the subsequent role definition for
-  # openshift_examples from failing when trying to replace templates that do
-  # not already exist. We could have potentially done a replace --force to
-  # create and update in one step.
-  - openshift_examples
-  # Update the existing templates
-  - role: openshift_examples
-    registry_url: "{{ openshift.master.registry_url }}"
-    openshift_examples_import_command: replace
-  pre_tasks:
-  - name: Collect all routers
-    command: >
-      {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
-    register: all_routers
-    failed_when: false
-    changed_when: false
-
-  - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
-    when: all_routers.rc == 0
-
-  - set_fact: haproxy_routers=[]
-    when: all_routers.rc != 0
-
-  - name: Update router image to current version
-    when: all_routers.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
-      --api-version=v1
-    with_items: haproxy_routers
-
-  - name: Check for default registry
-    command: >
-      {{ oc_cmd }} get -n default dc/docker-registry
-    register: _default_registry
-    failed_when: false
-    changed_when: false
-
-  - name: Update registry image to current version
-    when: _default_registry.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/docker-registry -n default -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
-      --api-version=v1
-
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
deleted file mode 100644
index a32123952..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+++ /dev/null
@@ -1,343 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-
-- include: ../../initialize_facts.yml
-
-- name: Update repos and initialize facts on all hosts
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                 | union(groups['oo_masters_to_config'])
-                                                 | union(groups['oo_etcd_to_config'] | default([])))
-                                             | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                             }}"
-    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
-            openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- name: Evaluate additional groups for upgrade
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - name: Evaluate etcd_hosts_to_backup
-    add_host:
-      name: "{{ item }}"
-      groups: etcd_hosts_to_backup
-    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed on first master
-  hosts: oo_first_master
-  vars:
-    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-    g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
-  gather_facts: no
-  tasks:
-  - fail:
-      msg: >
-        This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
-        deployment types
-    when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
-
-  - fail:
-      msg: >
-        This upgrade does not support Pacemaker:
-        https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
-    when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
-
-  # Error out in situations where the user has older versions specified in their
-  # inventory in any of the openshift_release, openshift_image_tag, and
-  # openshift_pkg_version variables. These must be removed or updated to proceed
-  # with upgrade.
-  # TODO: Should we block if you're *over* the next major release version as well?
-  - fail:
-      msg: >
-        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
-        valid version for a {{ target_version }} upgrade
-    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-
-  - fail:
-      msg: >
-        openshift_image_tag is {{ openshift_image_tag }} which is not a
-        valid version for a {{ target_version }} upgrade
-    when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<')
-
-  - set_fact:
-      openshift_release: "{{ openshift_release[1:] }}"
-    when: openshift_release is defined and openshift_release[0] == 'v'
-
-  - fail:
-      msg: >
-        openshift_release is {{ openshift_release }} which is not a
-        valid release for a {{ target_version }} upgrade
-    when: openshift_release is defined and not openshift_release | version_compare(target_version ,'=')
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
-  vars:
-    # Request openshift_release 3.2 and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-    openshift_protect_installed_version: False
-    # Docker role (a dependency) should be told not to do anything to installed version
-    # of docker, we handle this separately during upgrade. (the inventory may have a
-    # docker_version defined, we don't want to actually do it until later)
-    docker_protect_installed_version: True
-- name: Verify master processes
-  hosts: oo_masters_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
-  - name: Ensure Master is running
-    service:
-      name: "{{ openshift.common.service_type }}-master"
-      state: started
-      enabled: yes
-    when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
-
-  - name: Ensure HA Master is running
-    service:
-      name: "{{ openshift.common.service_type }}-master-api"
-      state: started
-      enabled: yes
-    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-
-  - name: Ensure HA Master is running
-    service:
-      name: "{{ openshift.common.service_type }}-master-controllers"
-      state: started
-      enabled: yes
-    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-
-- name: Verify node processes
-  hosts: oo_nodes_to_config
-  roles:
-  - openshift_facts
-  - openshift_docker_facts
-  tasks:
-  - name: Ensure Node is running
-    service:
-      name: "{{ openshift.common.service_type }}-node"
-      state: started
-      enabled: yes
-    when: openshift.common.is_containerized | bool
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  vars:
-    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
-  pre_tasks:
-  - fail:
-      msg: Verify OpenShift is already installed
-    when: openshift.common.version is not defined
-
-  - fail:
-      msg: Verify the correct version was found
-    when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
-
-  - name: Clean package cache
-    command: "{{ ansible_pkg_mgr }} clean all"
-    when: not openshift.common.is_atomic | bool
-
-  - set_fact:
-      g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
-    when: not openshift.common.is_containerized | bool
-
-  - name: Verify containers are available for upgrade
-    command: >
-      docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
-    register: pull_result
-    changed_when: "'Downloaded newer image' in pull_result.stdout"
-    when: openshift.common.is_containerized | bool
-
-  - name: Check latest available OpenShift RPM version
-    command: >
-      {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
-    failed_when: false
-    changed_when: false
-    register: avail_openshift_version
-    when: not openshift.common.is_containerized | bool
-
-  - name: Verify OpenShift 3.2 RPMs are available for upgrade
-    fail:
-      msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but 3.2 or greater is required"
-    when: deployment_type != 'origin' and not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
-
-  - name: Verify Origin 1.2 RPMs are available for upgrade
-    fail:
-      msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but 1.2 or greater is required"
-    when: deployment_type == 'origin' and not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
-  # TODO: Are these two grep checks necessary anymore?
-  # Note: the version number is hardcoded here in hopes of catching potential
-  # bugs in how g_aos_versions.curr_version is set
-  - name: Verifying the correct version is installed for upgrade
-    shell: grep 3.1.1.6 {{ item }}
-    with_items:
-    - /etc/sysconfig/openvswitch
-    - /etc/sysconfig/{{ openshift.common.service_type }}*
-    when: verify_upgrade_version is defined
-
-  - name: Verifying the image version is used in the systemd unit
-    shell: grep IMAGE_VERSION {{ item }}
-    with_items:
-    - /etc/systemd/system/openvswitch.service
-    - /etc/systemd/system/{{ openshift.common.service_type }}*.service
-    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
-
-  - fail:
-      msg: This upgrade playbook must be run on Origin 1.1 or later
-    when: deployment_type == 'origin' and openshift.common.version | version_compare('1.1','<')
-
-  - fail:
-      msg: This upgrade playbook must be run on OpenShift Enterprise 3.1 or later
-    when: deployment_type == 'atomic-openshift' and openshift.common.version | version_compare('3.1','<')
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
-  tasks:
-  - name: Determine available Docker
-    script: ../files/rpm_versions.sh docker
-    register: g_docker_version_result
-    when: not openshift.common.is_atomic | bool
-
-  - name: Determine available Docker
-    shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
-    register: g_atomic_docker_version_result
-    when: openshift.common.is_atomic | bool
-
-  - set_fact:
-      g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
-    when: not openshift.common.is_atomic | bool
-
-  - set_fact:
-      g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
-    when: openshift.common.is_atomic | bool
-
-  - fail:
-      msg: This playbook requires access to Docker 1.10 or later
-    when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
-
-  # TODO: add check to upgrade ostree to get latest Docker
-
-  - set_fact:
-      pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
-  hosts: localhost
-  connection: local
-  become: no
-  vars:
-    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
-  tasks:
-  - set_fact:
-      pre_upgrade_completed: "{{ hostvars
-                               | oo_select_keys(pre_upgrade_hosts)
-                               | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
-  - set_fact:
-      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
-    when: pre_upgrade_failed | length > 0
-###############################################################################
-# Backup etcd
-###############################################################################
-- name: Backup etcd
-  hosts: etcd_hosts_to_backup
-  vars:
-    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
-    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-  roles:
-  - openshift_facts
-  tasks:
-  # Ensure we persist the etcd role for this host in openshift_facts
-  - openshift_facts:
-      role: etcd
-      local_facts: {}
-    when: "'etcd' not in openshift"
-
-  - stat: path=/var/lib/openshift
-    register: var_lib_openshift
-
-  - stat: path=/var/lib/origin
-    register: var_lib_origin
-
-  - name: Create origin symlink if necessary
-    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
-    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
-  # TODO: replace shell module with command and update later checks
-  # We assume to be using the data dir for all backups.
-  - name: Check available disk space for etcd backup
-    shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
-    register: avail_disk
-
-  # TODO: replace shell module with command and update later checks
-  - name: Check current embedded etcd disk usage
-    shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
-    register: etcd_disk_usage
-    when: embedded_etcd | bool
-
-  - name: Abort if insufficient disk space for etcd backup
-    fail:
-      msg: >
-        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
-        {{ avail_disk.stdout }} Kb available.
-    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
-  - name: Install etcd (for etcdctl)
-    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
-    when: not openshift.common.is_atomic | bool
-
-  - name: Generate etcd backup
-    command: >
-      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
-      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
-  - set_fact:
-      etcd_backup_complete: True
-
-  - name: Display location of etcd backup
-    debug:
-      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-##############################################################################
-# Gate on etcd backup
-##############################################################################
-- name: Gate on etcd backup
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      etcd_backup_completed: "{{ hostvars
-                               | oo_select_keys(groups.etcd_hosts_to_backup)
-                               | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
-  - set_fact:
-      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
-    when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
deleted file mode 100644
index f5e4d807e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# We verified latest rpm available is suitable, so just yum update.
-- name: Upgrade packages
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
-
-- name: Ensure python-yaml present for config upgrade
-  action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
-  when: not openshift.common.is_atomic | bool
-
-- name: Restart node service
-  service: name="{{ openshift.common.service_type }}-node" state=restarted
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
deleted file mode 100644
index 59cedc839..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ /dev/null
@@ -1,167 +0,0 @@
----
-###############################################################################
-# The restart playbook should be run after this playbook completes.
-###############################################################################
-
-###############################################################################
-# Upgrade Masters
-###############################################################################
-- name: Upgrade master
-  hosts: oo_masters_to_config
-  handlers:
-  - include: ../../../../../roles/openshift_master/handlers/main.yml
-  roles:
-  - openshift_facts
-  tasks:
-  - include: rpm_upgrade.yml component=master
-    when: not openshift.common.is_containerized | bool
-
-  - include_vars: ../../../../../roles/openshift_master/vars/main.yml
-
-  - name: Update systemd units
-    include: ../../../../../roles/openshift_master/tasks/systemd_units.yml
-
-# - name: Upgrade master configuration
-#   openshift_upgrade_config:
-#     from_version: '3.1'
-#     to_version: '3.2'
-#     role: master
-#     config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-
-- name: Set master update status to complete
-  hosts: oo_masters_to_config
-  tasks:
-  - set_fact:
-      master_update_complete: True
-
-##############################################################################
-# Gate on master update complete
-##############################################################################
-- name: Gate on master update
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      master_update_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_masters_to_config)
-                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
-  - set_fact:
-      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
-    when: master_update_failed | length > 0
The following masters did not finish updating: {{ master_update_failed | join(',') }}" - when: master_update_failed | length > 0 - -############################################################################### -# Upgrade Nodes -############################################################################### - -# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service) -- name: Perform upgrades that may require node evacuation - hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config - serial: 1 - any_errors_fatal: true - roles: - - openshift_facts - handlers: - - include: ../../../../../roles/openshift_node/handlers/main.yml - tasks: - # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node - # or docker actually needs an upgrade before proceeding. Perhaps best to save this until - # we merge upgrade functionality into the base roles and a normal config.yml playbook run. - - name: Mark unschedulable if host is a node - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false - delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config - - - name: Evacuate Node for Kubelet upgrade - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force - delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config - - # Only check if docker upgrade is required if docker_upgrade is not - # already set to False. - - include: ../docker/upgrade_check.yml - when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool - - - include: ../docker/upgrade.yml - when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool - - - include: rpm_upgrade.yml - vars: - component: "node" - openshift_version: "{{ openshift_pkg_version | default('') }}" - when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool - - - include: containerized_node_upgrade.yml - when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool - - - name: Set node schedulability - command: > - {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true - delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool - - -############################################################################### -# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints -############################################################################### - -- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints - hosts: oo_masters_to_config - roles: - - { role: openshift_cli } - vars: - origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}" - ent_reconcile_bindings: true - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - tasks: - - name: Verifying the correct commandline tools are available - shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} - when: openshift.common.is_containerized | bool and verify_upgrade_version is defined - - - name: Reconcile Cluster Roles - command: > - {{ 
openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-roles --additive-only=true --confirm - run_once: true - - - name: Reconcile Cluster Role Bindings - command: > - {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-role-bindings - --exclude-groups=system:authenticated - --exclude-groups=system:authenticated:oauth - --exclude-groups=system:unauthenticated - --exclude-users=system:anonymous - --additive-only=true --confirm - when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool - run_once: true - - - name: Reconcile Security Context Constraints - command: > - {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true - run_once: true - - - set_fact: - reconcile_complete: True - -############################################################################## -# Gate on reconcile -############################################################################## -- name: Gate on reconcile - hosts: localhost - connection: local - become: no - tasks: - - set_fact: - reconcile_completed: "{{ hostvars - | oo_select_keys(groups.oo_masters_to_config) - | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" - - set_fact: - reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" - - fail: - msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" - when: reconcile_failed | length > 0 -- cgit v1.2.3 From cf84520dc0d9e5a3b8b88ca0b8d8e1720080f4da Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Thu, 28 Jul 2016 14:25:05 -0300 Subject: Upgrade configs for protobuf support. 
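
These hooks move the master and node client connections to protobuf
serialization (with JSON kept as a fallback accept type) and set explicit
burst/ops rate limits. For orientation only, and derived purely from the
modify_yaml tasks below with all surrounding configuration omitted, the
masterClients stanza of master-config.yaml should come out roughly as:

    masterClients:
      externalKubernetesClientConnectionOverrides:
        acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
        contentType: application/vnd.kubernetes.protobuf
        burst: 400
        ops: 200
      openshiftLoopbackClientConnectionOverrides:
        acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
        contentType: application/vnd.kubernetes.protobuf
        burst: 600
        ops: 300

node-config.yaml gains an analogous masterClientConnectionOverrides block
(burst 40, ops 20). The hooks are only included when master_config_hook /
node_config_hook are defined, so the common upgrade path stays version
agnostic.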
---
 .../common/openshift-cluster/upgrades/upgrade.yml  |  5 +++
 .../upgrades/v3_3/master_config_upgrade.yml        | 40 ++++++++++++++++++++++
 .../upgrades/v3_3/node_config_upgrade.yml          | 21 ++++++++++++
 3 files changed, 66 insertions(+)
 create mode 100644 playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
 create mode 100644 playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
index e7638982c..2e7635021 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
@@ -17,6 +17,9 @@
   - include: rpm_upgrade.yml component=master
     when: not openshift.common.is_containerized | bool
 
+  - include: "{{ master_config_hook }}"
+    when: master_config_hook is defined
+
   - include_vars: ../../../../roles/openshift_master/vars/main.yml
 
   - name: Update systemd units
@@ -90,6 +93,8 @@
   - include: docker/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
 
+  - include: "{{ node_config_hook }}"
+    when: node_config_hook is defined
 
   - include: rpm_upgrade.yml
     vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
new file mode 100644
index 000000000..638ef23a8
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -0,0 +1,40 @@
+---
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes'
+    yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType'
+    yaml_value: 'application/vnd.kubernetes.protobuf'
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst'
+    yaml_value: 400
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.ops'
+    yaml_value: 200
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes'
+    yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType'
+    yaml_value: 'application/vnd.kubernetes.protobuf'
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst'
+    yaml_value: 600
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.ops'
+    yaml_value: 300
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
new file mode 100644
index 000000000..1297938bc
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
@@ -0,0 +1,21 @@
+---
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
+    yaml_key: 'masterClientConnectionOverrides.acceptContentTypes'
+    yaml_value: 'application/vnd.kubernetes.protobuf,application/json'
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
+    yaml_key: 'masterClientConnectionOverrides.contentType'
+    yaml_value: 'application/vnd.kubernetes.protobuf'
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
+    yaml_key: 'masterClientConnectionOverrides.burst'
+    yaml_value: 40
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
+    yaml_key: 'masterClientConnectionOverrides.ops'
+    yaml_value: 20
+
-- cgit v1.2.3


From b3d04f1a54c0109ce38be103ddc7c83f1992c10e Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Sat, 6 Aug 2016 10:41:11 -0400
Subject: Migrate ca.crt to ca-bundle.crt

---
 .../common/openshift-cluster/upgrades/upgrade.yml | 25 ++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
index 2e7635021..dee086cf5 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
@@ -32,6 +32,31 @@
 #     role: master
 #     config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
 
+  - name: Check for ca-bundle.crt
+    stat:
+      path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
+    register: ca_bundle_stat
+    failed_when: false
+
+  - name: Check for ca.crt
+    stat:
+      path: "{{ openshift.common.config_base }}/master/ca.crt"
+    register: ca_crt_stat
+    failed_when: false
+
+  - name: Migrate ca.crt to ca-bundle.crt
+    command: mv ca.crt ca-bundle.crt
+    args:
+      chdir: "{{ openshift.common.config_base }}/master"
+    when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
+
+  - name: Link ca.crt to ca-bundle.crt
+    file:
+      src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
+      path: "{{ openshift.common.config_base }}/master/ca.crt"
+      state: link
+    when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
+
 - name: Set master update status to complete
   hosts: oo_masters_to_config
   tasks:
-- cgit v1.2.3
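
The migration above only fires when ca.crt is a regular file and
ca-bundle.crt does not yet exist, so re-running the upgrade leaves an
already-migrated master untouched, and anything still pointing at ca.crt
keeps working through the symlink. A hypothetical spot-check play, not part
of this patch, could confirm the result; it assumes the same
oo_masters_to_config group and openshift.common.config_base fact used
above, and that config_base itself is not a symlink:

    - name: Verify ca.crt now points at ca-bundle.crt
      hosts: oo_masters_to_config
      tasks:
      # stat reports islnk/lnk_source for symlinks; lnk_source is the
      # resolved target path, which here should be the new bundle file.
      - stat:
          path: "{{ openshift.common.config_base }}/master/ca.crt"
        register: ca_crt
      - assert:
          that:
          - ca_crt.stat.islnk
          - ca_crt.stat.lnk_source == openshift.common.config_base ~ '/master/ca-bundle.crt'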