From 82f4e4eaeaaf3059013e9ea23d87dcf89fd8455e Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Tue, 21 Jun 2016 15:01:01 -0300 Subject: Refactor 3.2 upgrade to avoid killing nodes without evac. We now handle the two pieces of upgrade that require a node evac in the same play. (docker, and node itself) --- .../upgrades/docker/docker_upgrade.yml | 88 +--------------------- .../upgrades/docker/files/nuke_images.sh | 23 ------ 2 files changed, 3 insertions(+), 108 deletions(-) delete mode 100644 playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh (limited to 'playbooks/byo/openshift-cluster/upgrades') diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml index 9434d8e15..96c9fb15d 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -4,6 +4,7 @@ roles: - openshift_facts tasks: + - set_fact: repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery' }}" @@ -11,42 +12,7 @@ msg: Cannot upgrade Docker on Atomic hosts when: openshift.common.is_atomic | bool - - name: Determine available Docker version - script: ../../../../common/openshift-cluster/upgrades/files/rpm_versions.sh docker - register: g_docker_version_result - - - name: Check if Docker is installed - command: rpm -q docker - register: pkg_check - failed_when: pkg_check.rc > 1 - changed_when: no - - - name: Get current version of Docker - command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" - register: curr_docker_version - changed_when: false - - - name: Get latest available version of Docker - command: > - {{ repoquery_cmd }} --qf '%{version}' "docker" - register: avail_docker_version - failed_when: false - changed_when: false - - - fail: - msg: This playbook requires access to Docker 1.10 or later - # Disable the 1.10 requirement if the 
user set a specific Docker version - when: avail_docker_version.stdout | version_compare('1.10','<') and docker_version is not defined - - - name: Flag for upgrade if Docker version does not equal latest - set_fact: - docker_upgrade: true - when: docker_version is not defined and pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(avail_docker_version.stdout,'<') - - - name: Flag for upgrade if Docker version does not equal requested version - set_fact: - docker_upgrade: true - when: docker_version is defined and pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<') + - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade_check.yml - - # If a node fails, halt everything, the admin will need to clean up and we @@ -69,55 +35,7 @@ delegate_to: "{{ groups.oo_first_master.0 }}" when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config - - name: Stop containerized services - service: name={{ item }} state=stopped - with_items: - - "{{ openshift.common.service_type }}-master" - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-node" - - etcd_container - - openvswitch - failed_when: false - when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool - - - name: Remove all containers and images - script: files/nuke_images.sh docker - register: nuke_images_result - when: docker_upgrade is defined and docker_upgrade | bool - - # TODO: should we use the docker role to actually do the upgrade? 
- - name: Upgrade to specified Docker version - action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version }} state=present" - register: docker_upgrade_result - when: docker_upgrade is defined and docker_upgrade | bool and docker_version is defined - - - name: Upgrade to latest Docker version - action: "{{ ansible_pkg_mgr }} name=docker state=latest" - register: docker_upgrade_result - when: docker_upgrade is defined and docker_upgrade | bool and docker_version is not defined - - - name: Restart containerized services - service: name={{ item }} state=started - with_items: - - etcd_container - - openvswitch - - "{{ openshift.common.service_type }}-master" - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-node" - failed_when: false - when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool - - - name: Wait for master API to come back online - become: no - local_action: - module: wait_for - host="{{ inventory_hostname }}" - state=started - delay=10 - port="{{ openshift.master.api_port }}" - when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_masters_to_config + - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml - - name: Set node schedulability command: > diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh deleted file mode 100644 index 9a5ee2276..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -# Stop any running containers -running_container_count=`docker ps -q | wc -l` -if test $running_container_count -gt 0 -then - docker stop $(docker ps -q) -fi - -# Delete all containers -container_count=`docker ps -a -q | wc -l` -if test $container_count -gt 0 -then - docker rm -f 
-v $(docker ps -a -q) -fi - -# Delete all images (forcefully) -image_count=`docker images -q | wc -l` -if test $image_count -gt 0 -then - # Taken from: https://gist.github.com/brianclements/f72b2de8e307c7b56689#gistcomment-1443144 - docker rmi $(docker images | grep "$2/\|/$2 \| $2 \|$2 \|$2-\|$2_" | awk '{print $1 ":" $2}') 2>/dev/null || echo "No images matching \"$2\" left to purge." -fi -- cgit v1.2.3