path: root/playbooks/adhoc/upgrades
author     Brenton Leanhardt <bleanhar@redhat.com>    2015-11-11 10:48:32 -0500
committer  Brenton Leanhardt <bleanhar@redhat.com>    2015-11-11 10:48:32 -0500
commit     962d6bbfeca2998a850cf976f9177080c0b9e594 (patch)
tree       84d6eeaf16dfe8ce89d33adfdea58e9634d9407d /playbooks/adhoc/upgrades
parent     aa57786422cbf164edc1d1a9041b73606d39bd4d (diff)
parent     4c1b0dd4ab8f3a5d4fcfa4ba1501ed374793e77a (diff)
Merge pull request #839 from detiber/refactorUpgrade
Refactor upgrade
Diffstat (limited to 'playbooks/adhoc/upgrades')
-rw-r--r--  playbooks/adhoc/upgrades/README.md                             |  21
-rw-r--r--  playbooks/adhoc/upgrades/files/pre-upgrade-check               | 188
-rw-r--r--  playbooks/adhoc/upgrades/files/versions.sh                     |  10
l---------  playbooks/adhoc/upgrades/filter_plugins                        |   1
-rwxr-xr-x  playbooks/adhoc/upgrades/library/openshift_upgrade_config.py   | 154
l---------  playbooks/adhoc/upgrades/lookup_plugins                        |   1
l---------  playbooks/adhoc/upgrades/roles                                 |   1
-rw-r--r--  playbooks/adhoc/upgrades/upgrade.yml                           | 407
8 files changed, 0 insertions, 783 deletions
diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/adhoc/upgrades/README.md
deleted file mode 100644
index 6de8a970f..000000000
--- a/playbooks/adhoc/upgrades/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# [NOTE]
-This playbook will re-run installation steps, overwriting any local
-modifications. You should ensure that your inventory has been updated with any
-modifications you've made after your initial installation. If you find any items
-that cannot be configured via ansible, please open an issue at
-https://github.com/openshift/openshift-ansible
-
-# Overview
-This playbook is available as a technical preview. It currently performs the
-following steps.
-
- * Upgrades and restarts master services
- * Upgrades and restarts node services
- * Applies the latest configuration by re-running the installation playbook
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-# Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml
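The plays in upgrade.yml below address hosts through the masters, nodes, OSEv3 and (for external etcd) etcd inventory groups. A minimal sketch of an inventory with that layout, assuming a single master; the hostnames and the deployment_type value are placeholders, and any other OSEv3 variables from the original install still apply:

    [OSEv3:children]
    masters
    nodes
    etcd

    [OSEv3:vars]
    deployment_type=origin

    [masters]
    master1.example.com

    [etcd]
    master1.example.com

    [nodes]
    node1.example.com
    node2.example.com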
diff --git a/playbooks/adhoc/upgrades/files/pre-upgrade-check b/playbooks/adhoc/upgrades/files/pre-upgrade-check
deleted file mode 100644
index ed4ab6d1b..000000000
--- a/playbooks/adhoc/upgrades/files/pre-upgrade-check
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/usr/bin/env python
-"""
-Pre-upgrade checks that must be run on a master before proceeding with upgrade.
-"""
-# This is a script not a python module:
-# pylint: disable=invalid-name
-
-# NOTE: This script should not require any python libs other than what is
-# in the standard library.
-
-__license__ = "ASL 2.0"
-
-import json
-import os
-import subprocess
-import re
-
-# The maximum length of container.ports.name
-ALLOWED_LENGTH = 15
-# The valid structure of container.ports.name
-ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$')
-AT_LEAST_ONE_LETTER = re.compile('[a-z]')
-# look at OC_PATH for the full path. Defaults to 'oc'
-OC_PATH = os.getenv('OC_PATH', 'oc')
-
-
-def validate(value):
-    """
-    validate verifies that value matches required conventions
-
-    Rules of container.ports.name validation:
-
-    * must be less than 16 chars
-    * at least one letter
-    * only a-z0-9-
-    * hyphens can not be leading or trailing or next to each other
-
-    :Parameters:
-       - `value`: Value to validate
-    """
-    if len(value) > ALLOWED_LENGTH:
-        return False
-
-    if '--' in value:
-        return False
-
-    # We search since it can be anywhere
-    if not AT_LEAST_ONE_LETTER.search(value):
-        return False
-
-    # We match because it must start at the beginning
-    if not ALLOWED_CHARS.match(value):
-        return False
-    return True
-
-
-def list_items(kind):
-    """
-    list_items returns a list of items from the api
-
-    :Parameters:
-       - `kind`: Kind of item to access
-    """
-    response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind])
-    items = json.loads(response)
-    return items.get("items", [])
-
-
-def get(obj, *paths):
-    """
-    Gets an object
-
-    :Parameters:
-       - `obj`: A dictionary structure
-       - `paths`: All other non-keyword arguments
-    """
-    ret_obj = obj
-    for path in paths:
-        if ret_obj.get(path, None) is None:
-            return []
-        ret_obj = ret_obj[path]
-    return ret_obj
-
-
-# pylint: disable=too-many-arguments
-def pretty_print_errors(namespace, kind, item_name, container_name, port_name, valid):
-    """
-    Prints out results in a human-friendly way.
-
-    :Parameters:
-       - `namespace`: Namespace of the resource
-       - `kind`: Kind of the resource
-       - `item_name`: Name of the resource
-       - `container_name`: Name of the container. May be "" when kind=Service.
-       - `port_name`: Name of the port
-       - `valid`: True if the port is valid
-    """
-    if not valid:
-        if len(container_name) > 0:
-            print('%s/%s -n %s (Container="%s" Port="%s")' % (
-                kind, item_name, namespace, container_name, port_name))
-        else:
-            print('%s/%s -n %s (Port="%s")' % (
-                kind, item_name, namespace, port_name))
-
-
-def print_validation_header():
-    """
-    Prints the error header. Should run on the first error to avoid
-    overwhelming the user.
-    """
-    print """\
-At least one port name does not validate. Valid port names:
-
- * must be less than 16 chars
- * have at least one letter
- * only a-z0-9-
- * do not start or end with -
- * dashes may not be next to each other ('--')
-"""
-
-
-def main():
-    """
-    main is the main entry point to this script
-    """
-    try:
-        # the comma at the end suppresses the newline
-        print "Checking for oc ...",
-        subprocess.check_output([OC_PATH, 'whoami'])
-        print "found"
-    except:
-        print(
-            'Unable to run "%s whoami"\n'
-            'Please ensure OpenShift is running, and "oc" is on your system '
-            'path.\n'
-            'You can override the path with the OC_PATH environment variable.'
-            % OC_PATH)
-        raise SystemExit(1)
-
-    # Where the magic happens
-    first_error = True
-    for kind, path in [
-            ('replicationcontrollers', ("spec", "template", "spec", "containers")),
-            ('pods', ("spec", "containers")),
-            ('deploymentconfigs', ("spec", "template", "spec", "containers"))]:
-        for item in list_items(kind):
-            namespace = item["metadata"]["namespace"]
-            item_name = item["metadata"]["name"]
-            for container in get(item, *path):
-                container_name = container["name"]
-                for port in get(container, "ports"):
-                    port_name = port.get("name", None)
-                    if not port_name:
-                        # Unnamed ports are OK
-                        continue
-                    valid = validate(port_name)
-                    if not valid and first_error:
-                        first_error = False
-                        print_validation_header()
-                    pretty_print_errors(
-                        namespace, kind, item_name,
-                        container_name, port_name, valid)
-
-    # Services follow a different flow
-    for item in list_items('services'):
-        namespace = item["metadata"]["namespace"]
-        item_name = item["metadata"]["name"]
-        for port in get(item, "spec", "ports"):
-            port_name = port.get("targetPort", None)
-            if isinstance(port_name, int) or port_name is None:
-                # Integer only or unnamed ports are OK
-                continue
-            valid = validate(port_name)
-            if not valid and first_error:
-                first_error = False
-                print_validation_header()
-            pretty_print_errors(
-                namespace, "services", item_name, "", port_name, valid)
-
-    # If we had at least 1 error then exit with 1
-    if not first_error:
-        raise SystemExit(1)
-
-
-if __name__ == '__main__':
-    main()
-
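For reference, the port-name rules enforced by validate() above can be exercised on their own; a minimal sketch (the regexes and length limit are copied from the script, the sample names are made up):

    import re

    ALLOWED_LENGTH = 15
    ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$')
    AT_LEAST_ONE_LETTER = re.compile('[a-z]')

    def is_valid(name):
        # Same checks as validate(): length, no '--', at least one letter, allowed charset
        return (len(name) <= ALLOWED_LENGTH and '--' not in name
                and AT_LEAST_ONE_LETTER.search(name) is not None
                and ALLOWED_CHARS.match(name) is not None)

    for name in ['web', 'https-8443', '8080', '-web', 'web--tls']:
        print '%-12s %s' % (name, is_valid(name))

Here 'web' and 'https-8443' pass, while '8080' (no letter), '-web' (leading hyphen) and 'web--tls' (adjacent hyphens) are the kind of names the script reports.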
diff --git a/playbooks/adhoc/upgrades/files/versions.sh b/playbooks/adhoc/upgrades/files/versions.sh
deleted file mode 100644
index f90719cab..000000000
--- a/playbooks/adhoc/upgrades/files/versions.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-yum_installed=$(yum list installed "$@" 2>&1 | tail -n +2 | grep -v 'Installed Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
-
-yum_available=$(yum list available "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
-
-
-echo "---"
-echo "curr_version: ${yum_installed}"
-echo "avail_version: ${yum_available}"
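A hypothetical run, to show the shape of the YAML this produces (upgrade.yml below reads it with from_yaml; the package names and version strings here are examples only):

    $ ./versions.sh atomic-openshift openshift
    ---
    curr_version: 3.0.2.0-1.el7ose
    avail_version: 3.1.0.4-1.el7aos

upgrade.yml then derives g_new_version from avail_version when an update exists (otherwise curr_version), splitting on the first '-' to drop the release suffix, e.g. 3.1.0.4 in this sketch.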
diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins
deleted file mode 120000
index b0b7a3414..000000000
--- a/playbooks/adhoc/upgrades/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py b/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
deleted file mode 100755
index a6721bb92..000000000
--- a/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-"""Ansible module for modifying OpenShift configs during an upgrade"""
-
-import os
-import yaml
-
-DOCUMENTATION = '''
----
-module: openshift_upgrade_config
-short_description: OpenShift Upgrade Config
-author: Jason DeTiberus
-requirements: [ ]
-'''
-EXAMPLES = '''
-'''
-
-def modify_api_levels(level_list, remove, ensure, msg_prepend='',
-                      msg_append=''):
-    """ modify_api_levels """
-    changed = False
-    changes = []
-
-    if not isinstance(remove, list):
-        remove = []
-
-    if not isinstance(ensure, list):
-        ensure = []
-
-    if not isinstance(level_list, list):
-        new_list = []
-        changed = True
-        changes.append("%s created missing %s" % (msg_prepend, msg_append))
-    else:
-        new_list = level_list
-        for level in remove:
-            if level in new_list:
-                new_list.remove(level)
-                changed = True
-                changes.append("%s removed %s %s" % (msg_prepend, level, msg_append))
-
-        for level in ensure:
-            if level not in new_list:
-                new_list.append(level)
-                changed = True
-                changes.append("%s added %s %s" % (msg_prepend, level, msg_append))
-
-    return {'new_list': new_list, 'changed': changed, 'changes': changes}
-
-
-def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
-    """Main upgrade method for 3.0 to 3.1."""
-    changes = []
-
-    # Facts do not get transferred to the hosts where custom modules run,
-    # need to make some assumptions here.
-    master_config = os.path.join(config_base, 'master/master-config.yaml')
-
-    master_cfg_file = open(master_config, 'r')
-    config = yaml.safe_load(master_cfg_file.read())
-    master_cfg_file.close()
-
-
-    # Remove unsupported api versions and ensure supported api versions from
-    # master config
-    unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
-    supported_levels = ['v1']
-
-    result = modify_api_levels(config.get('apiLevels'), unsupported_levels,
-                               supported_levels, 'master-config.yaml:', 'from apiLevels')
-    if result['changed']:
-        config['apiLevels'] = result['new_list']
-        changes.append(result['changes'])
-
-    if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']:
-        config['kubernetesMasterConfig'].pop('apiLevels')
-        changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')
-
-    # Add proxyClientInfo to master-config
-    if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
-        config['kubernetesMasterConfig']['proxyClientInfo'] = {
-            'certFile': 'master.proxy-client.crt',
-            'keyFile': 'master.proxy-client.key'
-        }
-        changes.append("master-config.yaml: added proxyClientInfo")
-
-    if len(changes) > 0:
-        if backup:
-            # TODO: Check success:
-            ansible_module.backup_local(master_config)
-
-        # Write the modified config:
-        out_file = open(master_config, 'w')
-        out_file.write(yaml.safe_dump(config, default_flow_style=False))
-        out_file.close()
-
-    return changes
-
-
-def upgrade_master(ansible_module, config_base, from_version, to_version, backup):
-    """Upgrade entry point."""
-    if from_version == '3.0':
-        if to_version == '3.1':
-            return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)
-
-
-def main():
-    """ main """
-    # disabling pylint errors for global-variable-undefined and invalid-name
-    # for 'global module' usage, since it is required to use ansible_facts
-    # pylint: disable=global-variable-undefined, invalid-name,
-    # redefined-outer-name
-    global module
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            config_base=dict(required=True),
-            from_version=dict(required=True, choices=['3.0']),
-            to_version=dict(required=True, choices=['3.1']),
-            role=dict(required=True, choices=['master']),
-            backup=dict(required=False, default=True, type='bool')
-        ),
-        supports_check_mode=True,
-    )
-
-    from_version = module.params['from_version']
-    to_version = module.params['to_version']
-    role = module.params['role']
-    backup = module.params['backup']
-    config_base = module.params['config_base']
-
-    try:
-        changes = []
-        if role == 'master':
-            changes = upgrade_master(module, config_base, from_version,
-                                     to_version, backup)
-
-        changed = len(changes) > 0
-        return module.exit_json(changed=changed, changes=changes)
-
-    # ignore broad-except error to avoid stack trace to ansible user
-    # pylint: disable=broad-except
-    except Exception, e:
-        return module.fail_json(msg=str(e))
-
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-# import module snippets
-from ansible.module_utils.basic import *
-
-if __name__ == '__main__':
-    main()
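For reference, a minimal task invoking this module, mirroring the "Upgrade master configuration" task in upgrade.yml further down; the config_base value here is a placeholder for whatever openshift_facts reports on the host:

    - name: Upgrade master configuration
      openshift_upgrade_config:
        config_base: /etc/openshift
        from_version: '3.0'
        to_version: '3.1'
        role: master
        backup: true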
diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins
deleted file mode 120000
index 73cafffe5..000000000
--- a/playbooks/adhoc/upgrades/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/adhoc/upgrades/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
deleted file mode 100644
index 324f5fba3..000000000
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ /dev/null
@@ -1,407 +0,0 @@
----
-- name: Load master facts
-  hosts: masters
-  roles:
-  - openshift_facts
-
-- name: Verify upgrade can proceed
-  hosts: masters[0]
-  vars:
-    openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
-  gather_facts: no
-  tasks:
-  # Pacemaker is currently the only supported upgrade path for multiple masters
-  - fail:
-      msg: "openshift_master_cluster_method must be set to 'pacemaker'"
-    when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method != "pacemaker"))
-
-- name: Run pre-upgrade checks on first master
-  hosts: masters[0]
-  tasks:
-  # If this script errors out ansible will show the default stdout/stderr
-  # which contains details for the user:
-  - script: files/pre-upgrade-check
-
-- name: Evaluate etcd_hosts
-  hosts: localhost
-  tasks:
-  - name: Evaluate etcd hosts
-    add_host:
-      name: "{{ groups.masters.0 }}"
-      groups: etcd_hosts
-    when: hostvars[groups.masters.0].openshift.master.embedded_etcd | bool
-  - name: Evaluate etcd hosts
-    add_host:
-      name: "{{ item }}"
-      groups: etcd_hosts
-    with_items: groups.etcd
-    when: not hostvars[groups.masters.0].openshift.master.embedded_etcd | bool
-
-- name: Backup etcd
-  hosts: etcd_hosts
-  vars:
-    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
-    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-  roles:
-  - openshift_facts
-  tasks:
-
-  - stat: path=/var/lib/openshift
-    register: var_lib_openshift
-
-  - stat: path=/var/lib/origin
-    register: var_lib_origin
-
-  - name: Create origin symlink if necessary
-    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
-    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
-  - name: Check available disk space for etcd backup
-    # We assume to be using the data dir for all backups.
-    shell: >
-      df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
-    register: avail_disk
-
-  - name: Check current embedded etcd disk usage
-    shell: >
-      du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
-    register: etcd_disk_usage
-    when: embedded_etcd | bool
-
-  - name: Abort if insufficient disk space for etcd backup
-    fail:
-      msg: >
-        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
-        {{ avail_disk.stdout }} Kb available.
-    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
-  - name: Install etcd (for etcdctl)
-    yum:
-      pkg: etcd
-      state: latest
-
-  - name: Generate etcd backup
-    command: >
-      etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }}
-      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
-  - name: Display location of etcd backup
-    debug:
-      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-- name: Update deployment type
-  hosts: OSEv3
-  roles:
-  - openshift_facts
-  post_tasks:
-  - openshift_facts:
-      role: common
-      local_facts:
-        deployment_type: "{{ deployment_type }}"
-
-
-- name: Perform upgrade version checking
-  hosts: masters[0]
-  tasks:
-  - name: Clean yum cache
-    command: yum clean all
-
-  - name: Determine available versions
-    script: files/versions.sh {{ openshift.common.service_type }} openshift
-    register: g_versions_result
-
-  - set_fact:
-      g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
-
-  - set_fact:
-      g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
-
-  - fail:
-      msg: This playbook requires Origin 1.0.6 or later
-    when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')
-
-  # TODO: This should be specific to the 3.1 upgrade playbook (coming in future refactor), otherwise we are blocking 3.0.1 to 3.0.2 here.
-  - fail:
-      msg: Atomic OpenShift 3.1 packages not found
-    when: deployment_type in ['openshift-enterprise', 'atomic-openshift'] and g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<'))
-  # Deployment type 'enterprise' is no longer valid if we're upgrading to 3.1 or beyond.
-  # (still valid for 3.0.x to 3.0.y however) Using the global deployment_type here as
-  # we're checking what was requested by the upgrade, not the current type on the system.
-  - fail:
-      msg: "Deployment type enterprise not supported for upgrade"
-    when: deployment_type == "enterprise" and g_aos_versions.curr_version | version_compare('3.1', '>=')
-
-
-- name: Upgrade masters
-  hosts: masters
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  tasks:
-  - name: Upgrade to latest available kernel
-    yum:
-      pkg: kernel
-      state: latest
-
-  - name: Upgrade master packages
-    command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
-
-  - name: Ensure python-yaml present for config upgrade
-    yum:
-      pkg: PyYAML
-      state: installed
-
-  - name: Upgrade master configuration
-    openshift_upgrade_config:
-      from_version: '3.0'
-      to_version: '3.1'
-      role: master
-      config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-    when: deployment_type in ['openshift-enterprise', 'atomic-enterprise'] and g_aos_versions.curr_version | version_compare('3.1', '>=')
-
-  - set_fact:
-      master_certs_missing: True
-      master_cert_subdir: master-{{ openshift.common.hostname }}
-      master_cert_config_dir: "{{ openshift.common.config_base }}/master"
-
-- name: Create temp directory for syncing certs
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - name: Create local temp directory for syncing certs
-    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
-    register: g_master_mktemp
-    changed_when: False
-
-- name: Generate missing master certificates
-  hosts: masters[0]
-  vars:
-    master_hostnames: "{{ hostvars
-                          | oo_select_keys(groups.masters)
-                          | oo_collect('openshift.common.all_hostnames')
-                          | oo_flatten | unique }}"
-    master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
-    masters_needing_certs: "{{ hostvars
-                               | oo_select_keys(groups.masters)
-                               | difference([groups.masters.0]) }}"
-    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
-    openshift_deployment_type: "{{ deployment_type }}"
-  roles:
-  - openshift_master_certificates
-  post_tasks:
-  - name: Remove generated etcd client certs when using external etcd
-    file:
-      path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
-      state: absent
-    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
-    with_nested:
-    - masters_needing_certs
-    - - master.etcd-client.crt
-      - master.etcd-client.key
-
-  - name: Create a tarball of the master certs
-    command: >
-      tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
-        -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
-    with_items: masters_needing_certs
-
-  - name: Retrieve the master cert tarball from the master
-    fetch:
-      src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
-      dest: "{{ sync_tmpdir }}/"
-      flat: yes
-      fail_on_missing: yes
-      validate_checksum: yes
-    with_items: masters_needing_certs
-
-- name: Sync certs and restart masters post configuration change
-  hosts: masters
-  vars:
-    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
-    openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
-  tasks:
-  - name: Unarchive the tarball on the master
-    unarchive:
-      src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
-      dest: "{{ master_cert_config_dir }}"
-    when: inventory_hostname != groups.masters.0
-
-  - name: Restart master services
-    service: name="{{ openshift.common.service_type}}-master" state=restarted
-    when: not openshift_master_ha | bool
-
-- name: Destroy cluster
-  hosts: masters[0]
-  vars:
-    openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
-    openshift_deployment_type: "{{ deployment_type }}"
-  pre_tasks:
-  - name: Check for configured cluster
-    stat:
-      path: /etc/corosync/corosync.conf
-    register: corosync_conf
-    when: openshift_master_ha | bool
-  - name: Destroy cluster
-    command: pcs cluster destroy --all
-    when: openshift_master_ha | bool and corosync_conf.stat.exists == true
-
-- name: Start pcsd on masters
-  hosts: masters
-  vars:
-    openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
-  tasks:
-  - name: Start pcsd
-    service: name=pcsd enabled=yes state=started
-    when: openshift_master_ha | bool
-
-- name: Re-create cluster
-  hosts: masters[0]
-  vars:
-    openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
-    openshift_deployment_type: "{{ deployment_type }}"
-    omc_cluster_hosts: "{{ groups.masters | join(' ') }}"
-  roles:
-  - role: openshift_master_cluster
-    when: openshift_master_ha | bool
-
-- name: Delete temporary directory on localhost
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - file: name={{ g_master_mktemp.stdout }} state=absent
-    changed_when: False
-
-
-- name: Upgrade nodes
-  hosts: nodes
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  roles:
-  - openshift_facts
-  tasks:
-  - name: Upgrade node packages
-    command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
-  - name: Restart node services
-    service: name="{{ openshift.common.service_type }}-node" state=restarted
-
-- name: Update cluster policy and policy bindings
-  hosts: masters[0]
-  vars:
-    origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
-    ent_reconcile_bindings: "{{ deployment_type in ['openshift-enterprise', 'atomic-enterprise'] and g_new_version | version_compare('3.0.2','>') }}"
-  tasks:
-  - name: oadm policy reconcile-cluster-roles --confirm
-    command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      policy reconcile-cluster-roles --confirm
-
-  - name: oadm policy reconcile-cluster-role-bindings --confirm
-    command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      policy reconcile-cluster-role-bindings
-      --exclude-groups=system:authenticated
-      --exclude-groups=system:unauthenticated
-      --exclude-users=system:anonymous
-      --additive-only=true --confirm
-    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
-
-
-- name: Restart masters post reconcile
-  hosts: masters
-  vars:
-    openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
-  tasks:
-  - name: Restart master services
-    service: name="{{ openshift.common.service_type}}-master" state=restarted
-    when: not openshift_master_ha | bool
-
-- name: Restart cluster post reconcile
-  hosts: masters[0]
-  vars:
-    openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
-  tasks:
-  - name: Restart master cluster
-    command: pcs resource restart master
-    when: openshift_master_ha | bool
-  - name: Wait for the clustered master service to be available
-    wait_for:
-      host: "{{ openshift_master_cluster_vip }}"
-      port: 8443
-      state: started
-      timeout: 180
-      delay: 90
-    when: openshift_master_ha | bool
-
-- name: Upgrade default router and registry
-  hosts: masters[0]
-  vars:
-  - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
-  - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
-  - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  tasks:
-  - name: Check for default router
-    command: >
-      {{ oc_cmd }} get -n default dc/router
-    register: _default_router
-    failed_when: false
-    changed_when: false
-  - name: Check for allowHostNetwork and allowHostPorts
-    when: _default_router.rc == 0
-    shell: >
-      {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
-    register: _scc
-  - name: Grant allowHostNetwork and allowHostPorts
-    when:
-    - _default_router.rc == 0
-    - "'false' in _scc.stdout"
-    command: >
-      {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
-  - name: Update deployment config to 1.0.4/3.0.1 spec
-    when: _default_router.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/router -p
-      '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
-  - name: Switch to hostNetwork=true
-    when: _default_router.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
-  - name: Update router image to current version
-    when: _default_router.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/router -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
-
-  - name: Check for default registry
-    command: >
-      {{ oc_cmd }} get -n default dc/docker-registry
-    register: _default_registry
-    failed_when: false
-    changed_when: false
-  - name: Update registry image to current version
-    when: _default_registry.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/docker-registry -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
-
-- name: Update image streams and templates
-  hosts: masters[0]
-  vars:
-    openshift_examples_import_command: "update"
-    openshift_deployment_type: "{{ deployment_type }}"
-  roles:
-  - openshift_examples
-
-- name: Ensure master services enabled
-  hosts: masters
-  vars:
-    openshift_master_ha: "{{ groups['masters'] | length > 1 }}"
-  tasks:
-  - name: Enable master services
-    service: name="{{ openshift.common.service_type}}-master" state=started enabled=yes
-    when: not openshift_master_ha | bool
-
-- name: Ensure node services enabled
-  hosts: nodes
-  tasks:
-  - name: Restart node services
-    service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
-