Diffstat (limited to 'playbooks/byo'): 71 files changed, 601 insertions, 1329 deletions
diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-cfme/config.yml
new file mode 100644
index 000000000..0e8e7a94d
--- /dev/null
+++ b/playbooks/byo/openshift-cfme/config.yml
@@ -0,0 +1,8 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- include: ../../common/openshift-cfme/config.yml
diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml
new file mode 100644
index 000000000..c8ed16859
--- /dev/null
+++ b/playbooks/byo/openshift-cfme/uninstall.yml
@@ -0,0 +1,6 @@
+---
+# - include: ../openshift-cluster/initialize_groups.yml
+#   tags:
+#   - always
+
+- include: ../../common/openshift-cfme/uninstall.yml
diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/byo/openshift-checks/README.md
new file mode 100644
index 000000000..b26e7d7ed
--- /dev/null
+++ b/playbooks/byo/openshift-checks/README.md
@@ -0,0 +1,104 @@
+# OpenShift health checks
+
+This directory contains Ansible playbooks for detecting potential problems prior
+to an install, as well as health checks to run on existing OpenShift clusters.
+
+Ansible's default operation mode is to fail fast, on the first error. However,
+when performing checks, it is useful to gather as much information about
+problems as possible in a single run.
+
+Thus, the playbooks run a battery of checks against the inventory hosts and
+gather intermediate errors, giving a more complete diagnostic of the state of
+each host. If any check fails, the playbook run will be marked as failed.
+
+To facilitate understanding the problems that were encountered, a custom
+callback plugin summarizes execution errors at the end of a playbook run.
+
+## Available playbooks
+
+1. Pre-install playbook ([pre-install.yml](pre-install.yml)) - verifies system
+   requirements and looks for common problems that can prevent a successful
+   installation of a production cluster.
+
+2. Diagnostic playbook ([health.yml](health.yml)) - checks an existing cluster
+   for known signs of problems.
+
+3. Certificate expiry playbooks ([certificate_expiry](certificate_expiry)) -
+   check that certificates in use are valid and not expiring soon.
+
+4. Adhoc playbook ([adhoc.yml](adhoc.yml)) - use it to run adhoc checks or to
+   list existing checks.
+   See the [next section](#the-adhoc-playbook) for a usage example.
+
+## Running
+
+With a [recent installation of Ansible](../../../README.md#setup), run the playbook
+against your inventory file. Here is the step-by-step:
+
+1. If you haven't done it yet, clone this repository:
+
+   ```console
+   $ git clone https://github.com/openshift/openshift-ansible
+   $ cd openshift-ansible
+   ```
+
+2. Install the [dependencies](../../../README.md#setup)
+
+3. Run the appropriate playbook:
+
+   ```console
+   $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml
+   ```
+
+   or
+
+   ```console
+   $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/health.yml
+   ```
+
+   or
+
+   ```console
+   $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v
+   ```
+
+### The adhoc playbook
+
+The adhoc playbook gives flexibility to run any check or a custom group of
+checks. What will be run is determined by the `openshift_checks` variable,
+which, among other ways supported by Ansible, can be set on the command line
+using the `-e` flag.
+
+For example, to run the `docker_storage` check:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage
+```
+
+To run more checks, use a comma-separated list of check names:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability
+```
+
+To run an entire class of checks, use the name of a check group tag, prefixed by `@`. This will run all checks tagged `preflight`:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight
+```
+
+It is valid to specify multiple check tags and individual check names together
+in a comma-separated list.
+
+To list all of the available checks and tags, run the adhoc playbook without
+setting the `openshift_checks` variable:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml
+```
+
+## Running in a container
+
+This repository is built into a Docker image including Ansible so that it can
+be run anywhere Docker is available, without the need to manually install dependencies.
+Instructions for doing so may be found [in the README](../../../README_CONTAINER_IMAGE.md).
diff --git a/playbooks/byo/openshift-checks/adhoc.yml b/playbooks/byo/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..226bed732
--- /dev/null
+++ b/playbooks/byo/openshift-checks/adhoc.yml
@@ -0,0 +1,27 @@
+---
+# NOTE: ideally this would be just part of a single play in
+# common/openshift-checks/adhoc.yml that lists the existing checks when
+# openshift_checks is not set or runs the requested checks. However, to actually
+# run the checks we need the included dependencies to run first and that
+# takes time. To speed up listing checks, we use this separate play that runs
+# before the include of dependencies to save time and improve the UX.
+- name: OpenShift health checks
+  # NOTE: though the openshift_checks variable could potentially be defined on
+  # individual hosts while not defined for localhost, we do not support that
+  # usage. Running this play only on localhost speeds up execution.
+ hosts: localhost + connection: local + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: adhoc + pre_tasks: + - name: List known health checks + action: openshift_health_check + when: openshift_checks is undefined or not openshift_checks + +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-checks/adhoc.yml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/default.yaml b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml new file mode 100644 index 000000000..630135cae --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml @@ -0,0 +1,10 @@ +--- +# Default behavior, you will need to ensure you run ansible with the +# -v option to see report results: + +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + roles: + - role: openshift_certificate_expiry diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml new file mode 100644 index 000000000..378d1f154 --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml @@ -0,0 +1,40 @@ +# This example generates HTML and JSON reports and +# +# Copies of the generated HTML and JSON reports are uploaded to the masters, +# which is particularly useful when this playbook is run from a container. +# +# All certificates (healthy or not) are included in the results +# +# Optional environment variables to alter the behaviour of the playbook: +# CERT_EXPIRY_WARN_DAYS: Length of the warning window in days (45) +# COPY_TO_PATH: path to copy reports to in the masters (/etc/origin/certificate_expiration_report) +--- +- name: Generate certificate expiration reports + hosts: nodes:masters:etcd + gather_facts: no + vars: + openshift_certificate_expiry_save_json_results: yes + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_show_all: yes + openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}" + roles: + - role: openshift_certificate_expiry + +- name: Upload reports to master + hosts: masters + gather_facts: no + vars: + destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}" + tasks: + - name: Ensure that the target directory exists + file: + path: "{{ destination_path }}" + state: directory + - name: Copy the reports + copy: + dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}" + src: "/tmp/{{ item }}" + with_items: + - "cert-expiry-report.html" + - "cert-expiry-report.json" diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml new file mode 100644 index 000000000..ae41c7c14 --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml @@ -0,0 +1,18 @@ +--- +# This example playbook is great if you're just wanting to try the +# role out. 
+# +# This example enables HTML and JSON reports +# +# All certificates (healthy or not) are included in the results + +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_save_json_results: yes + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_show_all: yes + roles: + - role: openshift_certificate_expiry diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml new file mode 100644 index 000000000..d80cb6ff4 --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml @@ -0,0 +1,12 @@ +--- +# Generate HTML and JSON artifacts in their default paths: + +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_save_json_results: yes + roles: + - role: openshift_certificate_expiry diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml new file mode 100644 index 000000000..2189455b7 --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml @@ -0,0 +1,16 @@ +--- +# Generate timestamped HTML and JSON reports in /var/lib/certcheck + +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_save_json_results: yes + openshift_certificate_expiry_show_all: yes + timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}" + openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html" + openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json" + roles: + - role: openshift_certificate_expiry diff --git a/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml new file mode 100644 index 000000000..87a0f3be4 --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml @@ -0,0 +1,13 @@ +--- +# Change the expiration warning window to 1500 days (good for testing +# the module out) and save the results as a JSON file: + +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_warning_days: 1500 + openshift_certificate_expiry_save_json_results: yes + roles: + - role: openshift_certificate_expiry diff --git a/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml new file mode 100644 index 000000000..960457c4b --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml @@ -0,0 +1,12 @@ +--- +# Change the expiration warning window to 1500 days (good for testing +# the module out): + +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_warning_days: 1500 + roles: + - role: openshift_certificate_expiry diff --git a/playbooks/byo/openshift-checks/certificate_expiry/roles 
b/playbooks/byo/openshift-checks/certificate_expiry/roles new file mode 120000 index 000000000..4bdbcbad3 --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/roles @@ -0,0 +1 @@ +../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml new file mode 100644 index 000000000..96a71e4dc --- /dev/null +++ b/playbooks/byo/openshift-checks/health.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-checks/health.yml diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml new file mode 100644 index 000000000..dd93df0bb --- /dev/null +++ b/playbooks/byo/openshift-checks/pre-install.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-checks/pre-install.yml diff --git a/playbooks/byo/openshift-preflight/roles b/playbooks/byo/openshift-checks/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/byo/openshift-preflight/roles +++ b/playbooks/byo/openshift-checks/roles diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml index 268a65415..e807ac004 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -1,6 +1,8 @@ --- g_etcd_hosts: "{{ groups.etcd | default([]) }}" +g_new_etcd_hosts: "{{ groups.new_etcd | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" g_master_hosts: "{{ groups.masters | default([]) }}" @@ -15,7 +17,10 @@ g_nfs_hosts: "{{ groups.nfs | default([]) }}" g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}" +g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}" + g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) - | union(g_lb_hosts) | union(g_nfs_hosts) + | union(g_new_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts) | union(g_new_node_hosts)| union(g_new_master_hosts) + | union(g_glusterfs_hosts) | union(g_glusterfs_registry_hosts) | default([]) }}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index acf5469bf..60fa44c5b 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -9,6 +9,4 @@ - include: ../../common/openshift-cluster/config.yml vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml index 2785dcc3b..2a725510a 100644 --- a/playbooks/byo/openshift-cluster/initialize_groups.yml +++ b/playbooks/byo/openshift-cluster/initialize_groups.yml @@ -8,17 +8,3 @@ - always tasks: - include_vars: cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: no - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: cluster_hosts.yml diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml index 76f165c6d..a523bb47f 100644 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ b/playbooks/byo/openshift-cluster/openshift-logging.yml @@ -5,8 +5,11 
@@ # currently supported method. # - include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always - include: ../../common/openshift-cluster/openshift_logging.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-cluster/openshift-metrics.yml b/playbooks/byo/openshift-cluster/openshift-metrics.yml index 5ad3a1a01..1135c8c11 100644 --- a/playbooks/byo/openshift-cluster/openshift-metrics.yml +++ b/playbooks/byo/openshift-cluster/openshift-metrics.yml @@ -1,4 +1,10 @@ --- - include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always - include: ../../common/openshift-cluster/openshift_metrics.yml diff --git a/playbooks/byo/openshift-cluster/openshift-prometheus.yml b/playbooks/byo/openshift-cluster/openshift-prometheus.yml new file mode 100644 index 000000000..15917078d --- /dev/null +++ b/playbooks/byo/openshift-cluster/openshift-prometheus.yml @@ -0,0 +1,4 @@ +--- +- include: initialize_groups.yml + +- include: ../../common/openshift-cluster/openshift_prometheus.yml diff --git a/playbooks/byo/openshift-cluster/openshift-provisioners.yml b/playbooks/byo/openshift-cluster/openshift-provisioners.yml new file mode 100644 index 000000000..8e80f158b --- /dev/null +++ b/playbooks/byo/openshift-cluster/openshift-provisioners.yml @@ -0,0 +1,6 @@ +--- +- include: initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-cluster/openshift_provisioners.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index 012ce69ec..a3894e243 100644 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -20,5 +20,7 @@ - include: ../../common/openshift-node/restart.yml - include: ../../common/openshift-cluster/redeploy-certificates/router.yml + when: openshift_hosted_manage_router | default(true) | bool - include: ../../common/openshift-cluster/redeploy-certificates/registry.yml + when: openshift_hosted_manage_registry | default(true) | bool diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml new file mode 100644 index 000000000..29f821eda --- /dev/null +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml @@ -0,0 +1,10 @@ +--- +- include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml index 3b33e0d6f..6e11a111b 100644 --- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml +++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml @@ -7,4 +7,4 @@ tags: - always -- include: ../../common/openshift-cluster/redeploy-certificates/ca.yml +- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml new file mode 100644 index 000000000..40a7606e7 --- /dev/null +++ b/playbooks/byo/openshift-cluster/service-catalog.yml @@ -0,0 +1,15 @@ +--- +# +# This 
playbook is a preview of upcoming changes for installing +# Hosted logging on. See inventory/byo/hosts.*.example for the +# currently supported method. +# +- include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-cluster/service_catalog.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml index 8005a17a3..5bd5d64ab 100644 --- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml +++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml @@ -1,4 +1,6 @@ --- - include: ../initialize_groups.yml +- include: ../../../common/openshift-cluster/evaluate_groups.yml + - include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index 690b663f4..697a18c4d 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -4,106 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index fca2c04f3..4d284c279 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -13,101 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index d171ac3cd..180a2821f 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -6,103 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the 
normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/byo/openshift-cluster/upgrades/v3_4/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml index 217163802..8cce91b3f 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -4,104 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index d21c195bf..8e5d0f5f9 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -13,101 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml index 7bb66611c..d5329b858 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -6,101 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the 
normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles b/playbooks/byo/openshift-cluster/upgrades/v3_5/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml index f0900e04e..f44d55ad2 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -4,110 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. -# So it is necassary to run the play after running disable_excluder.yml. -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index e8d834a04..2377713fa 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -13,105 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - 
vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml index c2a4debc8..5b3f6ab06 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -6,101 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | 
default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles b/playbooks/byo/openshift-cluster/upgrades/v3_6/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml index 763e79e01..40120b3e8 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -4,110 +4,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -# Note: During upgrade the openshift excluder is not unexcluded inside the initialize_openshift_version.yml play. -# So it is necassary to run the play after running disable_excluder.yml. -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. 
- -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 7a1377be2..408a4c631 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -13,105 +13,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/validator.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 065746493..b5f42b804 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -6,101 +6,4 @@ # - include: ../../initialize_groups.yml -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade - -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting 
openshift_image_tag and openshift_pkg_version if - defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md new file mode 100644 index 000000000..4bf53be81 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md @@ -0,0 +1,20 @@ +# v3.7 Major and Minor Upgrade Playbook + +## Overview +This playbook currently performs the following steps.
+ + * Upgrade and restart master services + * Unschedule node + * Upgrade and restart docker + * Upgrade and restart node services + * Modifies the subset of the configuration necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage + +``` +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml +``` diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml new file mode 100644 index 000000000..e41c29682 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -0,0 +1,7 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../../initialize_groups.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml new file mode 100644 index 000000000..21e0fd815 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -0,0 +1,16 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../../initialize_groups.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml new file mode 100644 index 000000000..0e09d996e --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -0,0 +1,9 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../../initialize_groups.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml index dd3f47a4d..1342bd60c 100644 --- a/playbooks/byo/openshift-etcd/config.yml +++ b/playbooks/byo/openshift-etcd/config.yml @@ -1,14 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml - tags: - - always - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-etcd/config.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml new file mode 100644 index 000000000..2dec2bef6 --- /dev/null +++ b/playbooks/byo/openshift-etcd/migrate.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-etcd/migrate.yml diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml index d43533641..034bba4b4 100644 --- a/playbooks/byo/openshift-etcd/restart.yml +++ b/playbooks/byo/openshift-etcd/restart.yml @@ -1,10 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml - tags: - - always - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-etcd/restart.yml diff --git a/playbooks/byo/openshift-etcd/scaleup.yml b/playbooks/byo/openshift-etcd/scaleup.yml new file mode 100644 index 000000000..a2a5856a9 --- /dev/null +++ b/playbooks/byo/openshift-etcd/scaleup.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-etcd/scaleup.yml diff --git a/playbooks/byo/openshift-glusterfs/README.md b/playbooks/byo/openshift-glusterfs/README.md new file mode 100644 index 000000000..f62aea229 --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/README.md @@ -0,0 +1,98 @@ +# OpenShift GlusterFS Playbooks + +These playbooks are intended to enable the use of GlusterFS volumes by pods in +OpenShift. While they try to provide a sane set of defaults they do cover a +variety of scenarios and configurations, so read carefully. :) + +## Playbook: config.yml + +This is the main playbook that integrates GlusterFS into a new or existing +OpenShift cluster. It will also, if specified, configure a hosted Docker +registry with GlusterFS backend storage. + +This playbook requires the `glusterfs` group to exist in the Ansible inventory +file. The hosts in this group are the nodes of the GlusterFS cluster. + + * If this is a newly configured cluster each host must have a + `glusterfs_devices` variable defined, each of which must be a list of block + storage devices intended for use only by the GlusterFS cluster. If this is + also an external GlusterFS cluster, you must specify + `openshift_storage_glusterfs_is_native=False`. If the cluster is to be + managed by an external heketi service you must also specify + `openshift_storage_glusterfs_heketi_is_native=False` and + `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi + service. 
All these variables are specified in `[OSEv3:vars]`. + * If this is an existing cluster you do not need to specify a list of block + devices but you must specify the following variables in `[OSEv3:vars]`: + * `openshift_storage_glusterfs_is_missing=False` + * `openshift_storage_glusterfs_heketi_is_missing=False` + +By default, pods for a native GlusterFS cluster will be created in the +`default` namespace. To change this, specify +`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`. + +To configure the deployment of a Docker registry with GlusterFS backend +storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in +`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the +registry, specify a `glusterfs_registry` group that is populated, as the +`glusterfs` group is, with the nodes for the separate cluster. If no +`glusterfs_registry` group is specified, the cluster defined by the `glusterfs` +group will be used. + +To swap an existing hosted registry's backend storage for a GlusterFS volume, +specify `openshift_hosted_registry_storage_glusterfs_swap=True`. To +additionally copy any existing contents from an existing hosted registry, +specify `openshift_hosted_registry_storage_glusterfs_swapcopy=True`. + +**NOTE:** For each namespace that is to have access to GlusterFS volumes, an +Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding +Service resource must be created. If dynamic provisioning using StorageClasses +is configured, these resources are created automatically in the namespaces that +require them. This playbook also takes care of creating these resources in the +namespaces used for deployment. + +An example of a minimal inventory file: +``` +[OSEv3:children] +masters +nodes +glusterfs + +[OSEv3:vars] +ansible_ssh_user=root +deployment_type=origin + +[masters] +master + +[nodes] +node0 +node1 +node2 + +[glusterfs] +node0 glusterfs_devices='[ "/dev/sdb" ]' +node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]' +node2 glusterfs_devices='[ "/dev/sdd" ]' +``` + +## Playbook: registry.yml + +This playbook is intended for admins who want to deploy a hosted Docker +registry with GlusterFS backend storage on an existing OpenShift cluster. It +has all the same requirements and behaviors as `config.yml`. + +## Role: openshift_storage_glusterfs + +The bulk of the work is done by the `openshift_storage_glusterfs` role. This +role can handle the deployment of GlusterFS (if it is to be hosted on the +OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone), +and (if specified) integration as backend storage for a hosted Docker registry. + +See the documentation in the role's directory for further details. + +## Role: openshift_hosted + +The `openshift_hosted` role recognizes `glusterfs` as a possible storage +backend for a hosted docker registry. It will also, if configured, handle the +swap of an existing registry's backend storage to a GlusterFS volume.
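As a worked illustration of the registry-related variables documented in the README above, the fragment below is a minimal sketch of an `[OSEv3:vars]` section that runs the GlusterFS pods in a non-default namespace and swaps an existing hosted registry onto GlusterFS-backed storage, copying its current contents across. The namespace value is a hypothetical example; the variable names are the ones described above.

```
[OSEv3:vars]
# run GlusterFS pods in a dedicated namespace instead of "default" (example value)
openshift_storage_glusterfs_namespace=glusterfs
# back the hosted Docker registry with GlusterFS storage
openshift_hosted_registry_storage_kind=glusterfs
# swap the registry's existing backend storage for a GlusterFS volume...
openshift_hosted_registry_storage_glusterfs_swap=True
# ...and copy the existing registry contents onto the new volume
openshift_hosted_registry_storage_glusterfs_swapcopy=True
```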
diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml new file mode 100644 index 000000000..3f11f3991 --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/config.yml @@ -0,0 +1,10 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-glusterfs/config.yml diff --git a/playbooks/byo/openshift-glusterfs/filter_plugins b/playbooks/byo/openshift-glusterfs/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/byo/openshift-glusterfs/lookup_plugins b/playbooks/byo/openshift-glusterfs/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml new file mode 100644 index 000000000..6ee6febdb --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/registry.yml @@ -0,0 +1,10 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-glusterfs/registry.yml diff --git a/playbooks/byo/openshift-glusterfs/roles b/playbooks/byo/openshift-glusterfs/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-loadbalancer/config.yml b/playbooks/byo/openshift-loadbalancer/config.yml new file mode 100644 index 000000000..32c828f97 --- /dev/null +++ b/playbooks/byo/openshift-loadbalancer/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-loadbalancer/config.yml diff --git a/playbooks/byo/openshift-master/additional_config.yml b/playbooks/byo/openshift-master/additional_config.yml new file mode 100644 index 000000000..b3d7b5731 --- /dev/null +++ b/playbooks/byo/openshift-master/additional_config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-master/additional_config.yml diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml new file mode 100644 index 000000000..98be0c448 --- /dev/null +++ b/playbooks/byo/openshift-master/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-master/config.yml diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index 7988863f3..8950efd00 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -1,10 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml - tags: - - always - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-master/restart.yml diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index 8aa07a664..a09edd55a 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -1,7 +1,20 @@ --- - include: ../openshift-cluster/initialize_groups.yml +- name: Ensure there are new_masters or new_nodes + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - fail: + msg: > + Detected no new_masters or no new_nodes in inventory. Please + add hosts to the new_masters and new_nodes host groups to add + masters. 
+ when: + - (g_new_master_hosts | default([]) | length == 0) and (g_new_node_hosts | default([]) | length == 0) + +- include: ../../common/openshift-cluster/std_include.yml + - include: ../../common/openshift-master/scaleup.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-nfs/config.yml b/playbooks/byo/openshift-nfs/config.yml new file mode 100644 index 000000000..93b24411e --- /dev/null +++ b/playbooks/byo/openshift-nfs/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-nfs/config.yml diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml new file mode 100644 index 000000000..839dc36ff --- /dev/null +++ b/playbooks/byo/openshift-node/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-node/config.yml diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml index 92665d71d..ccf9e82da 100644 --- a/playbooks/byo/openshift-node/restart.yml +++ b/playbooks/byo/openshift-node/restart.yml @@ -1,10 +1,6 @@ --- - include: ../openshift-cluster/initialize_groups.yml - tags: - - always - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-node/restart.yml diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index c6965fd6f..e0c36fb69 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -1,9 +1,19 @@ --- - include: ../openshift-cluster/initialize_groups.yml -- include: ../../common/openshift-node/scaleup.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_master_etcd_hosts: "{{ groups.etcd | default([]) }}" - openshift_master_etcd_port: 2379 +- name: Ensure there are new_nodes + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - fail: + msg: > + Detected no new_nodes in inventory. Please add hosts to the + new_nodes host group to add nodes. + when: + - g_new_node_hosts | default([]) | length == 0 + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-node/config.yml diff --git a/playbooks/byo/openshift-preflight/README.md b/playbooks/byo/openshift-preflight/README.md deleted file mode 100644 index b50292eac..000000000 --- a/playbooks/byo/openshift-preflight/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# OpenShift preflight checks - -Here we provide an Ansible playbook for detecting potential roadblocks prior to -an install or upgrade. - -Ansible's default operation mode is to fail fast, on the first error. However, -when performing checks, it is useful to gather as much information about -problems as possible in a single run. - -The `check.yml` playbook runs a battery of checks against the inventory hosts -and tells Ansible to ignore intermediate errors, thus giving a more complete -diagnostic of the state of each host. Still, if any check failed, the playbook -run will be marked as having failed. 
- -To facilitate understanding the problems that were encountered, we provide a -custom callback plugin to summarize execution errors at the end of a playbook -run. - ---- - -*Note that currently the `check.yml` playbook is only useful for RPM-based -installations. Containerized installs are excluded from checks for now, but -might be included in the future if there is demand for that.* - ---- - -## Running - -With an installation of Ansible 2.2 or greater, run the playbook directly -against your inventory file. Here is the step-by-step: - -1. If you haven't done it yet, clone this repository: - - ```console - $ git clone https://github.com/openshift/openshift-ansible - $ cd openshift-ansible - ``` - -2. Run the playbook: - - ```console - $ ansible-playbook -i <inventory file> playbooks/byo/openshift-preflight/check.yml - ``` diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml index c5f05d0f0..2e53452a6 100644 --- a/playbooks/byo/openshift-preflight/check.yml +++ b/playbooks/byo/openshift-preflight/check.yml @@ -1,12 +1,3 @@ --- -- hosts: OSEv3 - name: run OpenShift health checks - roles: - - openshift_health_checker - post_tasks: - # NOTE: we need to use the old "action: name" syntax until - # https://github.com/ansible/ansible/issues/20513 is fixed. - - action: openshift_health_check - args: - checks: - - '@preflight' +# location is moved; this file remains so existing instructions keep working +- include: ../openshift-checks/pre-install.yml diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index 3b10323d6..a8c1c3a88 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -8,6 +8,7 @@ - always - name: Gather Cluster facts + # Temporarily reverting to OSEv3 until group standardization is complete hosts: OSEv3 roles: - openshift_facts diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 777743def..1b14ff32e 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -3,12 +3,9 @@ tags: - always -- include: ../common/openshift-cluster/std_include.yml - tags: - - always - - name: Subscribe hosts, update repos and update OS packages - hosts: l_oo_all_hosts + # Temporarily reverting to OSEv3 until group standardization is complete + hosts: OSEv3 roles: - role: rhel_subscribe when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml deleted file mode 100644 index 76246e7b0..000000000 --- a/playbooks/byo/vagrant.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: rhel_subscribe.yml - -- include: config.yml |
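The reworked master and node scaleup playbooks above now fail early unless the inventory defines hosts in the `new_masters` or `new_nodes` groups. As a hedged illustration (hostnames are hypothetical, and the usual `[OSEv3:children]`, `[masters]`, and `[nodes]` sections are assumed to already exist), adding nodes might look like:

```
[OSEv3:children]
masters
nodes
new_nodes

[new_nodes]
node3.example.com
node4.example.com
```

With such a group in place, `ansible-playbook -i <inventory file> playbooks/byo/openshift-node/scaleup.yml` should get past the guard play (which checks `g_new_node_hosts`) and run the common node configuration against the new hosts.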