| author | Scott Dodson <sdodson@redhat.com> | 2017-12-12 09:15:52 -0500 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2017-12-12 09:15:52 -0500 |
| commit | 01ae634a0bf89fe840006f9139f8ffd20e09afc8 (patch) | |
| tree | d53a7c03e4855885e124559018fff32adeaaf4ea /playbooks/common/openshift-cluster | |
| parent | 0b6fa3dcd40c2dd1f42fcceb52f82c2a28ed96e2 (diff) | |
| parent | 35c1abb6050f2cd1f31396edd42618a2998bd546 (diff) | |
Merge pull request #6335 from kwoodson/node_groups_refactor
Node group management update.
Diffstat (limited to 'playbooks/common/openshift-cluster')
-rw-r--r-- | playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml | 35 |
1 file changed, 21 insertions, 14 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 47410dff3..4fc897a57 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -11,25 +11,19 @@
       msg: "Ensure that new scale groups were provisioned before proceeding to update."
     when:
     - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+    - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
+    - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes

 - name: initialize upgrade bits
   import_playbook: init.yml

-- name: Drain and upgrade nodes
+- name: unschedule nodes
   hosts: oo_sg_current_nodes
-  # This var must be set with -e on invocation, as it is not a per-host inventory var
-  # and is evaluated early. Values such as "20%" can also be used.
-  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
-  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
-  pre_tasks:
+  tasks:
   - name: Load lib_openshift modules
-    include_role:
+    import_role:
       name: ../roles/lib_openshift

-  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
-  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
-  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
   - name: Mark node unschedulable
     oc_adm_manage_node:
       node: "{{ openshift.node.nodename | lower }}"
@@ -40,14 +34,27 @@
     register: node_unschedulable
     until: node_unschedulable|succeeded

+- name: Drain nodes
+  hosts: oo_sg_current_nodes
+  # This var must be set with -e on invocation, as it is not a per-host inventory var
+  # and is evaluated early. Values such as "20%" can also be used.
+  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+  tasks:
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not l_upgrade_nodes_drain_result | failed
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+    delay: 5
+    failed_when:
+    - l_upgrade_nodes_drain_result | failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) == '0'

 # Alright, let's clean up!
 - name: clean up the old scale group
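The refactored plays are driven by three extra vars: `openshift_upgrade_nodes_serial`, `openshift_upgrade_nodes_max_fail_percentage`, and the newly introduced `openshift_upgrade_nodes_drain_timeout`. Below is a minimal sketch of how these might be supplied on invocation; the vars-file name is hypothetical and only the variable names come from the diff above.

```yaml
# drain_tuning.yml -- hypothetical extra-vars file, for illustration only.
# Passed with something like: ansible-playbook -e @drain_tuning.yml <upgrade playbook>
# Per the comment carried into the new "Drain nodes" play, the serial value must be
# supplied via -e (it is not a per-host inventory var and is evaluated early).

# Drain/upgrade 20% of the current scale-group nodes at a time.
openshift_upgrade_nodes_serial: "20%"

# Abort the run if more than 10% of the hosts in a batch fail.
openshift_upgrade_nodes_max_fail_percentage: 10

# New in this change: appended to the drain command as --timeout=<value>s.
# The playbook defaults this to 0; a non-zero value caps how long each drain may take.
openshift_upgrade_nodes_drain_timeout: 600
```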