Diffstat (limited to 'playbooks/common')
-rw-r--r--  playbooks/common/README.md | 7
-rw-r--r--  playbooks/common/openshift-checks/adhoc.yml | 12
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 11
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/cockpit-ui.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 58
-rw-r--r--  playbooks/common/openshift-cluster/create_persistent_volumes.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 38
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 149
-rw-r--r--  playbooks/common/openshift-cluster/initialize_oo_option_facts.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_repos.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 30
-rw-r--r--  playbooks/common/openshift-cluster/openshift_default_storage_class.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 96
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_registry.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted_router.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml | 33
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml | 134
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml | 74
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml | 63
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/registry.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/router.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/sanity_checks.yml | 51
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 32
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml | 32
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/restart.yml) | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml) | 9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 1
l---------  playbooks/common/openshift-cluster/upgrades/master_docker | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml (renamed from playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml) | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 56
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml | 35
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 7
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 19
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 11
l---------  playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml | 16
l---------  playbooks/common/openshift-cluster/upgrades/v3_7/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 130
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 115
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml | 15
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 23
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 151
-rw-r--r--  playbooks/common/openshift-etcd/restart.yml | 18
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 76
-rw-r--r--  playbooks/common/openshift-etcd/service.yml | 23
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml | 59
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 24
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml | 23
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 48
-rw-r--r--  playbooks/common/openshift-master/config.yml | 145
-rw-r--r--  playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js | 2
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 2
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml | 1
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 6
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 37
-rw-r--r--  playbooks/common/openshift-master/service.yml | 23
-rw-r--r--  playbooks/common/openshift-master/set_network_facts.yml | 28
-rw-r--r--  playbooks/common/openshift-master/tasks/wire_aggregator.yml | 215
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 24
-rw-r--r--  playbooks/common/openshift-nfs/service.yml | 21
-rw-r--r--  playbooks/common/openshift-node/config.yml | 85
-rw-r--r--  playbooks/common/openshift-node/network_manager.yml | 2
-rw-r--r--  playbooks/common/openshift-node/restart.yml | 6
-rw-r--r--  playbooks/common/openshift-node/scaleup.yml | 50
-rw-r--r--  playbooks/common/openshift-node/service.yml | 26
104 files changed, 2071 insertions, 884 deletions
diff --git a/playbooks/common/README.md b/playbooks/common/README.md
index 0b5e26989..968bd99cb 100644
--- a/playbooks/common/README.md
+++ b/playbooks/common/README.md
@@ -1,9 +1,8 @@
# Common playbooks
This directory has a generic set of playbooks that are included by playbooks in
-[`byo`](../byo), as well as other playbooks related to the
-[`bin/cluster`](../../bin) tool.
+[`byo`](../byo).
Note: playbooks in this directory use generic group names that do not line up
-with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks,
-requiring an explicit remapping of groups.
+with the groups used by the `byo` playbooks, requiring an explicit remapping of
+groups.
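
The remapping described above is done with add_host, exactly as in the evaluate_groups.yml changes further down; a minimal sketch, assuming a byo-style inventory group named 'masters':

- name: Map byo groups onto the generic group names
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Remap the byo 'masters' group onto oo_masters_to_config
      add_host:
        name: "{{ item }}"
        groups: oo_masters_to_config
      with_items: "{{ groups['masters'] | default([]) }}"
      changed_when: no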
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..dfcef8435
--- /dev/null
+++ b/playbooks/common/openshift-checks/adhoc.yml
@@ -0,0 +1,12 @@
+---
+- name: OpenShift health checks
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: adhoc
+ post_tasks:
+ - name: Run health checks
+ action: openshift_health_check
+ args:
+ checks: '{{ openshift_checks | default([]) }}'
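
The openshift_checks variable selects which checks the adhoc play runs; a sketch of an extra-vars file (passed with `ansible-playbook ... -e @vars.yml`), where the individual check names are illustrative and '@preflight' is the group syntax used by pre-install.yml below:

# vars.yml -- check names here are examples, not an exhaustive list
openshift_checks:
  - '@preflight'          # a named check group
  - docker_storage
  - memory_availability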
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index c7766ff04..21ea785ef 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,16 +1,11 @@
---
-# openshift_health_checker depends on openshift_version which now requires group eval.
-- include: ../openshift-cluster/evaluate_groups.yml
- tags:
- - always
-
- name: Run OpenShift health checks
- hosts: OSEv3
+ hosts: oo_all_hosts
roles:
- openshift_health_checker
vars:
- - r_openshift_health_checker_playbook_context: "health"
+ - r_openshift_health_checker_playbook_context: health
post_tasks:
- - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513
+ - action: openshift_health_check
args:
checks: ['@health']
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index 7ca9f7e8b..88e6f9120 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,16 +1,11 @@
---
-# openshift_health_checker depends on openshift_version which now requires group eval.
-- include: ../openshift-cluster/evaluate_groups.yml
- tags:
- - always
-
-- hosts: OSEv3
- name: run OpenShift pre-install checks
+- name: run OpenShift pre-install checks
+ hosts: oo_all_hosts
roles:
- openshift_health_checker
vars:
- - r_openshift_health_checker_playbook_context: "pre-install"
+ - r_openshift_health_checker_playbook_context: pre-install
post_tasks:
- - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513
+ - action: openshift_health_check
args:
checks: ['@preflight']
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
deleted file mode 100644
index c0ea93d2c..000000000
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Additional master configuration
- hosts: oo_first_master
- vars:
- cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
- etcd_urls: "{{ openshift.master.etcd_urls }}"
- openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
- omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
- roles:
- - role: openshift_master_cluster
- when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- - role: openshift_examples
- registry_url: "{{ openshift.master.registry_url }}"
- when: openshift.common.install_examples | bool
- - role: openshift_hosted_templates
- registry_url: "{{ openshift.master.registry_url }}"
- - role: openshift_manageiq
- when: openshift.common.use_manageiq | bool
- - role: cockpit
- when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
- (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
- - role: flannel_register
- when: openshift.common.use_flannel | bool
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
new file mode 100644
index 000000000..5ddafdb07
--- /dev/null
+++ b/playbooks/common/openshift-cluster/cockpit-ui.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - cockpit-ui
+ hosts: oo_first_master
+ roles:
+ - role: cockpit-ui
+ when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 7224ae712..bf6f4e7cd 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -6,7 +6,7 @@
roles:
- openshift_health_checker
vars:
- - r_openshift_health_checker_playbook_context: "install"
+ - r_openshift_health_checker_playbook_context: install
post_tasks:
- action: openshift_health_check
args:
@@ -22,60 +22,38 @@
tags:
- always
-- name: Disable excluders
- hosts: oo_masters_to_config:oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
- include: ../openshift-etcd/config.yml
- tags:
- - etcd
- include: ../openshift-nfs/config.yml
- tags:
- - nfs
+ when: groups.oo_nfs_to_config | default([]) | count > 0
- include: ../openshift-loadbalancer/config.yml
- tags:
- - loadbalancer
+ when: groups.oo_lb_to_config | default([]) | count > 0
- include: ../openshift-master/config.yml
- tags:
- - master
-- include: additional_config.yml
- tags:
- - master
+- include: ../openshift-master/additional_config.yml
- include: ../openshift-node/config.yml
- tags:
- - node
- include: ../openshift-glusterfs/config.yml
- tags:
- - glusterfs
+ when: groups.oo_glusterfs_to_config | default([]) | count > 0
- include: openshift_hosted.yml
- tags:
- - hosted
+
+- include: openshift_metrics.yml
+ when: openshift_metrics_install_metrics | default(false) | bool
+
+- include: openshift_logging.yml
+ when: openshift_logging_install_logging | default(false) | bool
- include: service_catalog.yml
- when:
- - openshift_enable_service_catalog | default(false) | bool
- tags:
- - servicecatalog
+ when: openshift_enable_service_catalog | default(false) | bool
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config:oo_nodes_to_config
- tags:
- - always
+- name: Print deprecated variable warning message if necessary
+ hosts: oo_first_master
gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ tasks:
+ - debug: msg="{{__deprecation_message}}"
+ when:
+ - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
new file mode 100644
index 000000000..8a60a30b8
--- /dev/null
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -0,0 +1,9 @@
+---
+- name: Create Hosted Resources - persistent volumes
+ hosts: oo_first_master
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 5425f448f..be14b06f0 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -27,9 +27,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: master
local_facts:
dns_port: '8053'
@@ -37,7 +34,7 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: dnsConfig.bindAddress
yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
- notify: restart master
+ notify: restart master api
- meta: flush_handlers
- name: Configure nodes for dnsmasq
@@ -50,9 +47,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: node
local_facts:
dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index baca72c58..e55b2f964 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -5,20 +5,20 @@
become: no
gather_facts: no
tasks:
- - name: Evaluate groups - g_etcd_hosts required
+ - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required
fail:
- msg: This playbook requires g_etcd_hosts to be set
- when: g_etcd_hosts is not defined
+ msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set
+ when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined
- name: Evaluate groups - g_master_hosts or g_new_master_hosts required
fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: g_master_hosts is not defined or g_new_master_hosts is not defined
+ when: g_master_hosts is not defined and g_new_master_hosts is not defined
- name: Evaluate groups - g_node_hosts or g_new_node_hosts required
fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: g_node_hosts is not defined or g_new_node_hosts is not defined
+ when: g_node_hosts is not defined and g_new_node_hosts is not defined
- name: Evaluate groups - g_lb_hosts required
fail:
@@ -33,13 +33,26 @@
- name: Evaluate groups - g_nfs_hosts is single host
fail:
msg: The nfs group must be limited to one host
- when: (groups[g_nfs_hosts] | default([])) | length > 1
+ when: g_nfs_hosts | default([]) | length > 1
- name: Evaluate groups - g_glusterfs_hosts required
fail:
msg: This playbook requires g_glusterfs_hosts to be set
when: g_glusterfs_hosts is not defined
+ - name: Evaluate groups - Fail if no etcd hosts group is defined
+ fail:
+ msg: >
+ Running etcd as an embedded service is no longer supported. If this is a
+ new install please define an 'etcd' group with either one or three
+ hosts. These hosts may be the same hosts as your masters. If this is an
+ upgrade you may set openshift_master_unsupported_embedded_etcd=true
+ until a migration playbook becomes available.
+ when:
+ - g_etcd_hosts | default([]) | length not in [3,1]
+ - not openshift_master_unsupported_embedded_etcd | default(False)
+ - not openshift_node_bootstrap | default(False)
+
- name: Evaluate oo_all_hosts
add_host:
name: "{{ item }}"
@@ -67,6 +80,15 @@
when: g_master_hosts|length > 0
changed_when: no
+ - name: Evaluate oo_new_etcd_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_new_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_new_etcd_hosts | default([]) }}"
+ changed_when: no
+
- name: Evaluate oo_masters_to_config
add_host:
name: "{{ item }}"
@@ -108,7 +130,7 @@
add_host:
name: "{{ item }}"
groups: oo_etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
changed_when: False
- name: Evaluate oo_nodes_to_config
@@ -164,5 +186,5 @@
groups: oo_etcd_to_migrate
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
changed_when: no
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 9cebecd68..be2f8b5f4 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -6,12 +6,151 @@
- name: Initialize host facts
hosts: oo_all_hosts
- roles:
- - openshift_facts
tasks:
- - openshift_facts:
+ - name: load openshift_facts module
+ include_role:
+ name: openshift_facts
+
+ # TODO: Should this role be refactored into health_checks??
+ - name: Run openshift_sanitize_inventory to set variables
+ include_role:
+ name: openshift_sanitize_inventory
+
+ - name: Detecting Operating System from ostree_booted
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
+ # Locally setup containerized facts for now
+ - name: initialize_facts set fact l_is_atomic
+ set_fact:
+ l_is_atomic: "{{ ostree_booted.stat.exists }}"
+
+ - name: initialize_facts set fact for containerized and l_is_*_system_container
+ set_fact:
+ l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+ l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
+ - name: initialize_facts set facts for l_any_system_container
+ set_fact:
+ l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
+
+ - name: initialize_facts set fact for l_etcd_runtime
+ set_fact:
+ l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ - name: Validate python version - ans_dist is fedora and python is v3
+ fail:
+ msg: |
+ openshift-ansible requires Python 3 for {{ ansible_distribution }};
+ For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
+ when:
+ - ansible_distribution == 'Fedora'
+ - ansible_python['version']['major'] != 3
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ - name: Validate python version - ans_dist not Fedora and python must be v2
+ fail:
+ msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
+ when:
+ - ansible_distribution != 'Fedora'
+ - ansible_python['version']['major'] != 2
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ # Fail as early as possible if Atomic and old version of Docker
+ - when:
+ - l_is_atomic | bool
+ block:
+
+ # See https://access.redhat.com/articles/2317361
+ # and https://github.com/ansible/ansible/issues/15892
+ # NOTE: the "'s can not be removed at this level else the docker command will fail
+ # NOTE: When ansible >2.2.1.x is used this can be updated per
+ # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
+ - name: Determine Atomic Host Docker Version
+ shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+ register: l_atomic_docker_version
+
+ - name: assert atomic host docker version is 1.12 or later
+ assert:
+ that:
+ - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+ msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
+
+ - when:
+ - not l_is_atomic | bool
+ block:
+ - name: Ensure openshift-ansible installer package deps are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iproute
+ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
+ - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+ - yum-utils
+
+ - name: Ensure various deps for running system containers are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - atomic
+ - ostree
+ - runc
+ when:
+ - l_any_system_container | bool
+
+ - name: Default system_images_registry to an enterprise registry
+ set_fact:
+ system_images_registry: "registry.access.redhat.com"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "openshift-enterprise"
+
+ - name: Default system_images_registry to community registry
+ set_fact:
+ system_images_registry: "docker.io"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "origin"
+
+ - name: Gather Cluster facts and set is_containerized if needed
+ openshift_facts:
role: common
local_facts:
+ deployment_type: "{{ openshift_deployment_type }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ cli_image: "{{ osm_image | default(None) }}"
hostname: "{{ openshift_hostname | default(None) }}"
- - set_fact:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ is_containerized: "{{ l_is_containerized | default(None) }}"
+ is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
+ is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
+ is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
+ is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+ etcd_runtime: "{{ l_etcd_runtime }}"
+ system_images_registry: "{{ system_images_registry }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+ no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+
+ - name: initialize_facts set_fact repoquery command
+ set_fact:
+ repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+ - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
+ set_fact:
+ openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
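
The CURLY indirection in the Docker version task above exists because the Go template braces would otherwise be interpreted by Jinja; on the newer Ansible that the NOTE anticipates, a raw block can replace the shell wrapper. A sketch, not the playbook's actual code:

- name: Determine Atomic Host Docker Version (raw-block variant)
  command: docker version --format {% raw %}'{{json .Server.Version}}'{% endraw %}
  register: l_atomic_docker_version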
diff --git a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
index ac3c702a0..dab17aaa9 100644
--- a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
@@ -5,15 +5,6 @@
- always
tasks:
- set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- - set_fact:
openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
when: openshift_docker_options is not defined
- set_fact:
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
new file mode 100644
index 000000000..a7114fc80
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
@@ -0,0 +1,8 @@
+---
+- name: Setup yum repositories for all hosts
+ hosts: oo_all_hosts
+ gather_facts: no
+ tasks:
+ - name: initialize openshift repos
+ include_role:
+ name: openshift_repos
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index f4e52869e..1b186f181 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,24 +1,13 @@
---
-# NOTE: requires openshift_facts be run
-- name: Verify compatible yum/subscription-manager combination
- hosts: oo_all_hosts
- gather_facts: no
+- name: Set version_install_base_package true on masters and nodes
+ hosts: oo_masters_to_config:oo_nodes_to_config
tasks:
- # See:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
- # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
- # https://github.com/openshift/openshift-ansible/issues/1138
- # Consider the repoquery module for this work
- - name: Check for bad combinations of yum and subscription-manager
- command: >
- {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
- register: yum_ver_test
- changed_when: false
- when: not openshift.common.is_atomic | bool
- - fail:
- msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
- when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
+ - name: Set version_install_base_package true
+ set_fact:
+ version_install_base_package: True
+ when: version_install_base_package is not defined
+# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
roles:
@@ -31,5 +20,10 @@
hosts: oo_all_hosts:!oo_first_master
vars:
openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+ pre_tasks:
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
+ when: openshift_pkg_version is not defined
+ - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
roles:
- openshift_version
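
The leading dash in `openshift_pkg_version: -{{ openshift_version }}` turns the fact into a suffix that can be appended straight onto a package name when pinning RPMs; a sketch of how such a pin is typically consumed (assuming openshift.common.service_type resolves to e.g. 'origin' or 'atomic-openshift'):

- name: Install the pinned base package (illustrative use of openshift_pkg_version)
  package:
    name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') }}"
    state: present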
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
new file mode 100644
index 000000000..4b4f19690
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - openshift_default_storage_class
+ hosts: oo_first_master
+ roles:
+ - role: openshift_default_storage_class
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 8d94b6509..b9eb380d3 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,72 +1,36 @@
---
-- name: Create persistent volumes
- hosts: oo_first_master
- tags:
- - hosted
- vars:
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
+- name: Hosted Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Hosted install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_hosted: "In Progress"
+ aggregate: false
-- name: Create Hosted Resources
- hosts: oo_first_master
- tags:
- - hosted
- pre_tasks:
- - set_fact:
- openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
- when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- - set_fact:
- logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}"
- logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
- logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
- roles:
- - role: openshift_hosted
- - role: openshift_metrics
- when: openshift_hosted_metrics_deploy | default(false) | bool
- - role: openshift_logging
- when: openshift_hosted_logging_deploy | default(false) | bool
- openshift_hosted_logging_hostname: "{{ logging_hostname }}"
- openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
- openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
- openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
- openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
- openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_hosted_loggingops_storage_kind | default(none) =='dynamic' else '' }}"
+- include: create_persistent_volumes.yml
- - role: cockpit-ui
- when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
- - role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
+- include: openshift_default_storage_class.yml
-- name: Update master-config for publicLoggingURL
- hosts: oo_masters_to_config:!oo_first_master
- tags:
- - hosted
- pre_tasks:
- - set_fact:
- openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- - set_fact:
- openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- tasks:
+- include: openshift_hosted_create_projects.yml
+
+- include: openshift_hosted_router.yml
- - block:
- - include_role:
- name: openshift_logging
- tasks_from: update_master_config
- when: openshift_hosted_logging_deploy | default(false) | bool
+- include: openshift_hosted_registry.yml
- - block:
- - include_role:
- name: openshift_metrics
- tasks_from: update_master_config
- when: openshift_hosted_metrics_deploy | default(false) | bool
+- include: cockpit-ui.yml
+
+- include: openshift_prometheus.yml
+
+- name: Hosted Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Hosted install 'Complete'
+ set_stats:
+ data:
+ installer_phase_hosted: "Complete"
+ aggregate: false
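
The checkpoint plays added here use set_stats to record a phase marker in the run's custom stats; with show_custom_stats enabled in ansible.cfg (or ANSIBLE_SHOW_CUSTOM_STATS=true), the markers are printed in the run summary. A minimal sketch of the same pattern for a hypothetical phase:

- name: Example Install Checkpoint Start
  hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Set example phase 'In Progress'
      set_stats:
        data:
          installer_phase_example: "In Progress"  # key name is illustrative
        aggregate: false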
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
new file mode 100644
index 000000000..d5ca5185c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
@@ -0,0 +1,7 @@
+---
+- name: Create Hosted Resources - openshift projects
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_hosted
+ tasks_from: create_projects.yml
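
include_role with tasks_from runs a single task file from a role instead of its main.yml, which is how these hosted playbooks reuse slices of openshift_hosted; a generic sketch with illustrative names:

- hosts: oo_first_master
  tasks:
    - name: Run one task file from a role
      include_role:
        name: some_role              # role and file names are illustrative
        tasks_from: one_task_file.yml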
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
new file mode 100644
index 000000000..2a91a827c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
@@ -0,0 +1,13 @@
+---
+- name: Create Hosted Resources - registry
+ hosts: oo_first_master
+ tasks:
+ - set_fact:
+ openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ - include_role:
+ name: openshift_hosted
+ tasks_from: registry.yml
+ when:
+ - openshift_hosted_manage_registry | default(True) | bool
+ - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_router.yml b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
new file mode 100644
index 000000000..bcb5a34a4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
@@ -0,0 +1,13 @@
+---
+- name: Create Hosted Resources - router
+ hosts: oo_first_master
+ tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ - include_role:
+ name: openshift_hosted
+ tasks_from: router.yml
+ when:
+ - openshift_hosted_manage_router | default(True) | bool
+ - openshift_hosted_router_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 57580406c..69f50fbcd 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,5 +1,14 @@
---
-- include: evaluate_groups.yml
+- name: Logging Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Logging install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_logging: "In Progress"
+ aggregate: false
- name: OpenShift Aggregated Logging
hosts: oo_first_master
@@ -13,4 +22,14 @@
- include_role:
name: openshift_logging
tasks_from: update_master_config
- when: openshift_logging_install_logging | default(false) | bool
+
+- name: Logging Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Logging install 'Complete'
+ set_stats:
+ data:
+ installer_phase_logging: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index bcff4a1a1..e369dcd86 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,7 +1,36 @@
---
-- include: evaluate_groups.yml
+- name: Metrics Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Metrics install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_metrics: "In Progress"
+ aggregate: false
- name: OpenShift Metrics
hosts: oo_first_master
roles:
- - openshift_metrics
+ - role: openshift_metrics
+
+- name: OpenShift Metrics
+ hosts: oo_masters:!oo_first_master
+ serial: 1
+ tasks:
+ - name: Setup the non-first masters configs
+ include_role:
+ name: openshift_metrics
+ tasks_from: update_master_config.yaml
+
+- name: Metrics Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Metrics install 'Complete'
+ set_stats:
+ data:
+ installer_phase_metrics: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
new file mode 100644
index 000000000..ed89d3bde
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - openshift_prometheus
+ hosts: oo_first_master
+ roles:
+ - role: openshift_prometheus
+ when: openshift_hosted_prometheus_deploy | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
new file mode 100644
index 000000000..4a9fbf7eb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
@@ -0,0 +1,12 @@
+---
+- name: Check cert expirys
+ hosts: "{{ g_check_expiry_hosts }}"
+ vars:
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
index 6964e8567..3da22bce6 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -13,34 +13,28 @@
- name: Backup existing etcd CA certificate directories
hosts: oo_etcd_to_config
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
tasks:
- - name: Determine if CA certificate directory exists
- stat:
- path: "{{ etcd_ca_dir }}"
- register: etcd_ca_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_ca_dir }}
- args:
- warn: no
- when: etcd_ca_certs_dir_stat.stat.exists | bool
- - name: Remove CA certificate directory
- file:
- path: "{{ etcd_ca_dir }}"
- state: absent
- when: etcd_ca_certs_dir_stat.stat.exists | bool
+ - include_role:
+ name: etcd
+ tasks_from: backup_ca_certificates
+ - include_role:
+ name: etcd
+ tasks_from: remove_ca_certificates
- name: Generate new etcd CA
hosts: oo_first_etcd
roles:
- - role: openshift_etcd_ca
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ - role: openshift_etcd_facts
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: ca
+ vars:
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ when:
+ - etcd_ca_setup | default(True) | bool
- name: Create temp directory for syncing certs
hosts: localhost
@@ -55,52 +49,14 @@
- name: Distribute etcd CA to etcd hosts
hosts: oo_etcd_to_config
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
tasks:
- - name: Create a tarball of the etcd ca certs
- command: >
- tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
- -C {{ etcd_ca_dir }} .
- args:
- creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- warn: no
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Retrieve etcd ca cert tarball
- fetch:
- src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Ensure ca directory exists
- file:
- path: "{{ etcd_ca_dir }}"
- state: directory
- - name: Unarchive etcd ca cert tarballs
- unarchive:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- - name: Read current etcd CA
- slurp:
- src: "{{ etcd_conf_dir }}/ca.crt"
- register: g_current_etcd_ca_output
- - name: Read new etcd CA
- slurp:
- src: "{{ etcd_ca_dir }}/ca.crt"
- register: g_new_etcd_ca_output
- - copy:
- content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
- dest: "{{ item }}/ca.crt"
- with_items:
- - "{{ etcd_conf_dir }}"
- - "{{ etcd_ca_dir }}"
+ - include_role:
+ name: etcd
+ tasks_from: distribute_ca
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- include: ../../openshift-etcd/restart.yml
# Do not restart etcd when etcd certificates were previously expired.
@@ -111,17 +67,13 @@
- name: Retrieve etcd CA certificate
hosts: oo_first_etcd
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
tasks:
- - name: Retrieve etcd CA certificate
- fetch:
- src: "{{ etcd_conf_dir }}/ca.crt"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
+ - include_role:
+ name: etcd
+ tasks_from: retrieve_ca_certificates
+ vars:
+ etcd_sync_cert_dir: hostvars['localhost'].g_etcd_mktemp.stdout
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Distribute etcd CA to masters
hosts: oo_masters_to_config
@@ -146,13 +98,19 @@
changed_when: false
- include: ../../openshift-master/restart.yml
- # Do not restart masters when master certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # Do not restart masters when master or etcd certificates were previously expired.
+ when:
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
index 6b5c805e6..48a5a13ac 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
@@ -2,51 +2,34 @@
- name: Backup and remove generated etcd certificates
hosts: oo_first_etcd
any_errors_fatal: true
- roles:
- - role: etcd_common
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- post_tasks:
- - name: Determine if generated etcd certificates exist
- stat:
- path: "{{ etcd_conf_dir }}/generated_certs"
- register: etcd_generated_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-generated-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_conf_dir }}/generated_certs
- args:
- warn: no
- when: etcd_generated_certs_dir_stat.stat.exists | bool
- - name: Remove generated etcd certificates
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - "{{ etcd_conf_dir }}/generated_certs"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_generated_certificates
+ - include_role:
+ name: etcd
+ tasks_from: remove_generated_certificates
- name: Backup and removed deployed etcd certificates
hosts: oo_etcd_to_config
any_errors_fatal: true
- roles:
- - role: etcd_common
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_server_certificates
+ vars:
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- post_tasks:
- - name: Backup etcd certificates
- command: >
- tar -czvf /etc/etcd/etcd-server-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_conf_dir }}/ca.crt
- {{ etcd_conf_dir }}/server.crt
- {{ etcd_conf_dir }}/server.key
- {{ etcd_conf_dir }}/peer.crt
- {{ etcd_conf_dir }}/peer.key
- args:
- warn: no
- name: Redeploy etcd certificates
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - role: openshift_etcd_server_certificates
+ - role: openshift_etcd_facts
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: server_certificates
+ vars:
etcd_certificates_redeploy: true
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
@@ -58,13 +41,14 @@
hosts: oo_masters_to_config
any_errors_fatal: true
roles:
- - role: openshift_etcd_client_certificates
- etcd_certificates_redeploy: true
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_certificates_redeploy: true
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 089ae6bbc..b54acae6c 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -7,7 +7,7 @@
when: not openshift.common.version_gte_3_2_or_1_2 | bool
- name: Check cert expirys
- hosts: oo_nodes_to_config:oo_masters_to_config
+ hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
vars:
openshift_certificate_expiry_show_all: yes
roles:
@@ -209,16 +209,22 @@
with_items: "{{ client_users }}"
- include: ../../openshift-master/restart.yml
- # Do not restart masters when master certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # Do not restart masters when master or etcd certificates were previously expired.
+ when:
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -268,13 +274,28 @@
changed_when: false
- include: ../../openshift-node/restart.yml
- # Do not restart nodes when node certificates were previously expired.
- when: ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
- and
- ('expired' not in hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('check_results.check_results.ocp_certs')
- | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
+ # Do not restart nodes when node, master or etcd certificates were previously expired.
+ when:
+ # nodes
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
+ # masters
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ - ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+ # etcd
+ - ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
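
The rewrite from a single `and`-joined expression to a `when:` list works because Ansible implicitly ANDs the items of a `when:` list; a minimal sketch of the equivalence:

# The list form behaves exactly like `a and b`:
- debug:
    msg: both conditions held
  when:
    - a | default(false) | bool
    - b | default(false) | bool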
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index 8c8062585..afd5463b2 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -66,6 +66,7 @@
--signer-cert={{ openshift.common.config_base }}/master/ca.crt
--signer-key={{ openshift.common.config_base }}/master/ca.key
--signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
+ --config={{ mktemp.stdout }}/admin.kubeconfig
--hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index 9f14f2d69..748bbbf91 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -116,8 +116,9 @@
tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
--type=kubernetes.io/tls
+ --config={{ mktemp.stdout }}/admin.kubeconfig
--confirm
- -o json | {{ openshift.common.client_binary }} replace -f -
+ -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f -
- name: Remove temporary router certificate and key files
file:
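
Both the secret creation above and the piped `oc replace` now pass an explicit `--config={{ mktemp.stdout }}/admin.kubeconfig` instead of relying on whatever kubeconfig the connecting user happens to have. A hedged sketch of the same pattern, assuming a temporary directory registered as `mktemp` as in this playbook:

- name: Read a resource with an explicit kubeconfig (sketch)
  command: >
    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
    get service docker-registry -n default -o json
  register: registry_svc_json
  changed_when: false
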
diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml
new file mode 100644
index 000000000..26716a92d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/sanity_checks.yml
@@ -0,0 +1,51 @@
+---
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ tasks:
+ - fail:
+    msg: Flannel cannot be used with the OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use Flannel
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+    msg: Nuage SDN cannot be used with the OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use Nuage
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+    msg: Nuage SDN cannot be used with Flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+    msg: Contiv cannot be used with the OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use Contiv
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+    msg: Contiv cannot be used with Flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+    msg: Contiv cannot be used with Nuage
+ when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+    msg: Calico cannot be used with the OpenShift SDN; set openshift_use_openshift_sdn=false if you want to use Calico
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
+
+ - fail:
+    msg: The Calico playbook does not yet integrate with the Flannel playbook in OpenShift. Set either openshift_use_calico or openshift_use_flannel, but not both.
+ when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+    msg: Calico cannot be used with Nuage in OpenShift. Set either openshift_use_calico or openshift_use_nuage, but not both.
+ when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+    msg: Calico cannot be used with Contiv in OpenShift. Set either openshift_use_calico or openshift_use_contiv, but not both.
+ when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: openshift_hostname must be 63 characters or less
+ when: openshift_hostname is defined and openshift_hostname | length > 63
+
+ - fail:
+ msg: openshift_public_hostname must be 63 characters or less
+ when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
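
These checks enforce that at most one SDN plugin is enabled at a time and that hostnames fit the 63-character DNS label limit. To use Flannel, for instance, an inventory must disable the default OpenShift SDN explicitly; a hypothetical group_vars snippet that satisfies every check above:

# group_vars/all.yml (illustrative)
openshift_use_openshift_sdn: false
openshift_use_flannel: true
openshift_use_nuage: false
openshift_use_calico: false
openshift_use_contiv: false
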
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
index 6c12875fe..95a8f601c 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -1,21 +1,31 @@
---
-- include: evaluate_groups.yml
-
-- name: Update Master configs
- hosts: oo_masters
- serial: 1
+- name: Service Catalog Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
tasks:
- - block:
- - include_role:
- name: openshift_service_catalog
- tasks_from: wire_aggregator
- vars:
- first_master: "{{ groups.oo_first_master[0] }}"
+ - name: Set Service Catalog install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_servicecatalog: "In Progress"
+ aggregate: false
- name: Service Catalog
hosts: oo_first_master
roles:
- openshift_service_catalog
- ansible_service_broker
+ - template_service_broker
vars:
first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Service Catalog Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Service Catalog install 'Complete'
+ set_stats:
+ data:
+ installer_phase_servicecatalog: "Complete"
+ aggregate: false
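
Each installer phase now brackets its plays with `set_stats` markers: the phase is set to "In Progress" before the work runs and to "Complete" afterwards, and `aggregate: false` replaces the previous value instead of merging with it, so a run that dies mid-phase is still reported as "In Progress". The installer_checkpoint callback (wired up in std_include.yml below) reads these stats to print a phase summary. A minimal sketch for a hypothetical phase:

- name: Example Phase Checkpoint Start (hypothetical phase name)
  hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Set example phase 'In Progress'
      set_stats:
        data:
          installer_phase_example: "In Progress"
        aggregate: false
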
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 6ed31a644..090ad6445 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,4 +1,17 @@
---
+- name: Initialization Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ roles:
+ - installer_checkpoint
+ tasks:
+ - name: Set install initialization 'In Progress'
+ set_stats:
+ data:
+ installer_phase_initialize: "In Progress"
+ aggregate: false
+
- include: evaluate_groups.yml
tags:
- always
@@ -7,10 +20,29 @@
tags:
- always
+- include: sanity_checks.yml
+ tags:
+ - always
+
- include: validate_hostnames.yml
tags:
- node
+- include: initialize_openshift_repos.yml
+ tags:
+ - always
+
- include: initialize_openshift_version.yml
tags:
- always
+
+- name: Initialization Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set install initialization 'Complete'
+ set_stats:
+ data:
+ installer_phase_initialize: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..eb118365a 100644
--- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate etcd instance name(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: etcd_names_output
with_sequence: count={{ num_etcd }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..783f70f50 100644
--- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate master instance name(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: master_names_output
with_sequence: count={{ num_masters }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..c103e40a9 100644
--- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
@@ -5,7 +5,7 @@
- name: Generate node instance name(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
register: node_names_output
with_sequence: count={{ number_nodes }}
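
All three launch-fact tasks build instance names the same way: a cluster id (now `openshift_cluster_id`, defaulting to 'default', in place of the removed `cluster_id`), the host type, and a random five-hex-digit suffix. `1048576 | random` draws an integer below 16^5, and `'%05x' | format(...)` zero-pads it to five hex digits. A standalone sketch of the idiom:

- name: Illustrate the random-suffix naming (values illustrative)
  set_fact:
    example_name: "{{ openshift_cluster_id | default('default') }}-node-{{ '%05x' | format(1048576 | random) }}"

- debug:
    var: example_name  # e.g. "default-node-0a3f1"
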
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
deleted file mode 100644
index be956fca5..000000000
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: evaluate_groups.yml
-
-- name: Subscribe hosts, update repos and update OS packages
- hosts: oo_hosts_to_update
- roles:
- # Explicitly calling openshift_facts because it appears that when
- # rhel_subscribe is skipped that the openshift_facts dependency for
- # openshift_repos is also skipped (this is the case at least for Ansible
- # 2.0.2)
- - openshift_facts
- - role: rhel_subscribe
- when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
- - openshift_repos
- - os_update_latest
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
deleted file mode 100644
index 9f7961614..000000000
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
- skip_node_svc_handlers: True
-
-- name: Update systemd units
- include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 07db071ce..98953f72e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -4,7 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_nodes_to_upgrade.yml
@@ -52,11 +51,15 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+ register: l_docker_upgrade_drain_result
+ until: not l_docker_upgrade_drain_result | failed
+ retries: 60
+ delay: 60
- - include: upgrade.yml
+ - include: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
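
The drain command above now registers its result and retries for up to an hour (60 retries with a 60-second delay), since draining can fail transiently while pods terminate; `until: not <result> | failed` re-runs the command until it succeeds or the retries are exhausted. The generic shape of the idiom, with a placeholder command:

- name: Retry a transiently failing command (sketch; the command is a stand-in)
  command: /usr/bin/true
  register: l_example_result
  until: not l_example_result | failed
  retries: 60
  delay: 60
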
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 1b418920f..83f16ac0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -1,6 +1,10 @@
---
- name: Restart docker
service: name=docker state=restarted
+ register: l_docker_restart_docker_in_upgrade_result
+ until: not l_docker_restart_docker_in_upgrade_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -11,7 +15,6 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -24,4 +27,5 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 17f8fc6e9..808cc562c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,7 +4,6 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -32,7 +31,13 @@
- debug: var=docker_image_count.stdout
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-- service: name=docker state=stopped
+- service:
+ name: docker
+ state: stopped
+ register: l_pb_docker_upgrade_stop_result
+ until: not l_pb_docker_upgrade_stop_result | failed
+ retries: 3
+ delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index b2a2eac9a..52345a9ba 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -18,12 +18,16 @@
- name: Get current version of Docker
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Get latest available version of Docker
command: >
{{ repoquery_cmd }} --qf '%{version}' "docker"
register: avail_docker_version
+ retries: 4
+ until: avail_docker_version | succeeded
# Don't expect docker rpm to be available on hosts that don't already have it installed:
when: pkg_check.rc == 0
failed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 616ba04f8..d086cad00 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -2,13 +2,16 @@
- name: Backup etcd
hosts: oo_etcd_hosts_to_backup
roles:
- - role: openshift_facts
- - role: etcd_common
- r_etcd_common_action: backup
- r_etcd_common_backup_tag: etcd_backup_tag
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ - role: openshift_etcd_facts
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
hosts: localhost
@@ -20,7 +23,7 @@
| oo_select_keys(groups.oo_etcd_hosts_to_backup)
| oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) | list }}"
- fail:
msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when: etcd_backup_failed | length > 0
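
The backup logic moves from the etcd_common role, driven by an `r_etcd_common_action` variable, to `include_role` with `tasks_from`, which runs exactly one task file from the consolidated etcd role; the `| list` added to the difference filter forces a concrete list so the later `length` test behaves consistently. A hedged sketch of the include_role pattern, with an illustrative tag value:

- name: Run a single task file from a role (sketch)
  include_role:
    name: etcd
    tasks_from: backup
  vars:
    r_etcd_common_backup_tag: "pre-upgrade-"  # illustrative value
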
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index 64abc54e7..5b8ba3bb2 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -15,10 +15,15 @@
hosts: oo_etcd_hosts_to_upgrade
tasks:
- include_role:
- name: etcd_common
- vars:
- r_etcd_common_action: drop_etcdctl
+ name: etcd
+ tasks_from: drop_etcdctl
- name: Perform etcd upgrade
include: ./upgrade.yml
when: openshift_etcd_upgrade | default(true) | bool
+
+- name: Backup etcd
+ include: backup.yml
+ vars:
+ etcd_backup_tag: "post-3.0-"
+ when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 39e82498d..d71c96cd7 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -98,13 +98,11 @@
serial: 1
tasks:
- include_role:
- name: etcd_upgrade
+ name: etcd
+ tasks_from: upgrade_image
+ vars:
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- ansible_distribution == 'Fedora'
- not openshift.common.is_containerized | bool
-
-- name: Backup etcd
- include: backup.yml
- vars:
- etcd_backup_tag: "post-3.0-"
- when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
index 831ca8f57..e5e895775 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
@@ -5,13 +5,14 @@
- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
- roles:
- - role: etcd_upgrade
- r_etcd_upgrade_action: upgrade
- r_etcd_upgrade_mechanism: image
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_peer: "{{ openshift.common.hostname }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: upgrade_image
+ vars:
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
- openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
index 2e79451e0..a2a26bad4 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
@@ -5,13 +5,14 @@
- name: Upgrade to {{ etcd_upgrade_version }}
hosts: oo_etcd_hosts_to_upgrade
serial: 1
- roles:
- - role: etcd_upgrade
- r_etcd_upgrade_action: upgrade
- r_etcd_upgrade_mechanism: rpm
- r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
- r_etcd_common_etcd_runtime: "host"
- etcd_peer: "{{ openshift.common.hostname }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: upgrade_rpm
+ vars:
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
when:
- etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
- ansible_distribution == 'RedHat'
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 0f421928b..c98065cf4 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -4,7 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_oo_option_facts.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker
deleted file mode 120000
index 6aeca2842..000000000
--- a/playbooks/common/openshift-cluster/upgrades/master_docker
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles/openshift_master/templates/master_docker \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index d9ddf3860..07e521a89 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -90,10 +90,12 @@
# openshift_examples from failing when trying to replace templates that do
# not already exist. We could have potentially done a replace --force to
# create and update in one step.
- - openshift_examples
+ - role: openshift_examples
+ when: openshift_install_examples | default(true,true) | bool
- openshift_hosted_templates
# Update the existing templates
- role: openshift_examples
+ when: openshift_install_examples | default(true,true) | bool
registry_url: "{{ openshift.master.registry_url }}"
openshift_examples_import_command: replace
- role: openshift_hosted_templates
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
index 9d8b73cff..6d8503879 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -1,8 +1,10 @@
---
# Only check whether a Docker upgrade is required if docker_upgrade is not
# already set to False.
-- include: ../docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+- include: ../../docker/upgrade_check.yml
+ when:
+ - docker_upgrade is not defined or (docker_upgrade | bool)
+ - not (openshift.common.is_atomic | bool)
# Additional checks for Atomic hosts:
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 06eb5f936..45022cd61 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -9,23 +9,16 @@
local_facts:
ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- - name: Ensure Master is running
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
-
- name: Ensure HA Master is running
service:
name: "{{ openshift.common.service_type }}-master-api"
state: started
enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+ when: openshift.common.is_containerized | bool
- name: Ensure HA Master is running
service:
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+ when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
new file mode 100644
index 000000000..f75ae3b15
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
@@ -0,0 +1,22 @@
+---
+- name: Verify all masters have the etcd3 storage backend set
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - lib_utils
+ tasks:
+ - name: Read master storage backend setting
+ yedit:
+ state: list
+ src: /etc/origin/master/master-config.yaml
+ key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ register: _storage_backend
+
+ - fail:
+ msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+ when:
+      # assuming master-config.yaml is properly configured, i.e. the value is a list
+ - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+ - debug:
+ msg: "Storage backend is set to etcd3"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
new file mode 100644
index 000000000..ad6325ca0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -0,0 +1,14 @@
+---
+- name: Verify Host Requirements
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: upgrade
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - docker_image_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
index 9a959a959..3c0017891 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
@@ -5,9 +5,9 @@
tasks:
- fail:
msg: >
- This upgrade is only supported for origin, openshift-enterprise, and online
+ This upgrade is only supported for origin and openshift-enterprise
deployment types
- when: deployment_type not in ['origin','openshift-enterprise', 'online']
+ when: deployment_type not in ['origin','openshift-enterprise']
# Error out in situations where the user has older versions specified in their
# inventory in any of the openshift_release, openshift_image_tag, and
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
deleted file mode 100644
index 354af3cde..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Verify node processes
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- - openshift_docker_facts
- tasks:
- - name: Ensure Node is running
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 9b4a8e413..142ce5f3d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -27,13 +27,17 @@
- name: Set fact avail_openshift_version
set_fact:
- avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
+ avail_openshift_version: "{{ repoquery_out.results.versions.available_versions_full.0 }}"
+ - name: Set openshift_pkg_version when not specified
+ set_fact:
+ openshift_pkg_version: "-{{ repoquery_out.results.versions.available_versions_full.0 }}"
+ when: openshift_pkg_version | default('') == ''
- name: Verify OpenShift RPMs are available for upgrade
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<')
+ - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index 164baca81..8cc46ab68 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -8,7 +8,6 @@
# TODO: If the sdn package isn't already installed this will install it, we
# should fix that
-
- name: Upgrade master packages
package: name={{ master_pkgs | join(',') }} state=present
vars:
@@ -16,7 +15,7 @@
- "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
- "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- PyYAML
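
Because `openshift_pkg_version` carries a leading dash (it is set to `-<version>` in verify_upgrade_targets.yml above), it concatenates directly onto the RPM names; this hunk also fixes the missing space inside the `sdn-ovs` Jinja expression. A sketch of how the names compose, under assumed values:

# Assuming openshift.common.service_type == "origin" and
# openshift_pkg_version == "-3.6.0" (illustrative values):
- debug:
    msg: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
  # renders as "origin-master-3.6.0"
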
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 227fbf60a..da47491c1 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -12,6 +12,12 @@
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
+ register: l_pb_upgrade_control_plane_pre_upgrade_storage
+ when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+ - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
@@ -85,7 +91,7 @@
- include_vars: ../../../../roles/openshift_master/vars/main.yml
- - name: Update systemd units
+ - name: Remove any legacy systemd units and update systemd units
include: ../../../../roles/openshift_master/tasks/systemd_units.yml
- name: Check for ca-bundle.crt
@@ -140,16 +146,21 @@
- include: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
- - set_fact:
- master_update_complete: True
-
-- name: Post master upgrade - Upgrade clusterpolicies storage
- hosts: oo_first_master
- tasks:
- - name: Upgrade clusterpolicies storage
+ - name: Post master upgrade - Upgrade clusterpolicies storage
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=clusterpolicies --confirm
+ register: l_pb_upgrade_control_plane_post_upgrade_storage
+ when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+ - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
+ run_once: true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+ - set_fact:
+ master_update_complete: True
##############################################################################
# Gate on master update complete
@@ -164,7 +175,7 @@
| oo_select_keys(groups.oo_masters_to_config)
| oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
- set_fact:
- master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}"
- fail:
msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
when: master_update_failed | length > 0
@@ -178,8 +189,6 @@
roles:
- { role: openshift_cli }
vars:
- origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
- ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
# Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
# restart.
@@ -190,6 +199,7 @@
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
+ when: not openshift.common.version_gte_3_7 | bool
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -204,7 +214,7 @@
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ when: not openshift.common.version_gte_3_7 | bool
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -219,22 +229,28 @@
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
- when: openshift.common.version_gte_3_4_or_1_4 | bool
+ when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool)
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name
+ {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
register: reconcile_scc_result
changed_when:
- reconcile_scc_result.stdout != ''
- reconcile_scc_result.rc == 0
run_once: true
- - name: Upgrade job storage
+ - name: Migrate storage post policy reconciliation
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
migrate storage --include=* --confirm
run_once: true
+ register: l_pb_upgrade_control_plane_post_upgrade_storage
+ when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+ - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
- set_fact:
reconcile_complete: True
@@ -252,7 +268,7 @@
| oo_select_keys(groups.oo_masters_to_config)
| oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
- set_fact:
- reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}"
- fail:
msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
when: reconcile_failed | length > 0
@@ -264,7 +280,7 @@
roles:
- openshift_facts
tasks:
- - include: docker/upgrade.yml
+ - include: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
@@ -294,8 +310,12 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_control_plane_drain_result
+ until: not l_upgrade_control_plane_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
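
The storage-migration tasks in this file share one pattern: register the command result, gate the task on an `_enabled` toggle, and compute failure from an ANDed `failed_when` list so a non-zero exit only aborts the upgrade when the matching `_fatal` toggle is true. A generic sketch with hypothetical variable names:

- name: Run a step that is only fatal when configured (sketch)
  command: /usr/bin/false  # stand-in for a 'migrate storage' call
  register: l_example_migration
  when: example_migration_enabled | default(true) | bool
  failed_when:
    - example_migration_enabled | default(true) | bool
    - l_example_migration.rc != 0
    - example_migration_fatal | default(false) | bool
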
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 1d1e440d4..c93a5d89c 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -26,8 +26,12 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not l_upgrade_nodes_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
index 83d2cec81..8558bf3e9 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -74,18 +74,21 @@
- block:
- debug:
msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
- openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+ - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
- set_fact:
openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
- openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+ - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
- set_fact:
openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
- when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
- openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != default_predicates_no_region
+ - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
when: openshift_master_scheduler_predicates | default(none) is none
@@ -131,18 +134,21 @@
- block:
- debug:
msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
- openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+ - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
- set_fact:
openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
- openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+ - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
- set_fact:
openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
- when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
- openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != default_priorities_no_zone
+ - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
when: openshift_master_scheduler_priorities | default(none) is none
@@ -162,5 +168,6 @@
content: "{{ scheduler_config | to_nice_json }}"
dest: "{{ openshift_master_scheduler_conf }}"
backup: true
- when: "{{ openshift_upgrade_scheduler_predicates is defined or
- openshift_upgrade_scheduler_priorities is defined }}"
+ when: >
+ openshift_upgrade_scheduler_predicates is defined or
+ openshift_upgrade_scheduler_priorities is defined
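
These scheduler hunks, like the no-proxy hunks repeated below, convert `when: "{{ ... }}"` into bare condition lists: `when` already takes an expression, so wrapping it in Jinja braces is redundant and deprecated, and list entries are implicitly ANDed. The shape of the conversion:

# Before (deprecated):  when: "{{ condition_a and condition_b }}"
# After (list entries are ANDed):
- set_fact:
    example_fact: true
  when:
    - condition_a | default(false) | bool
    - condition_b | default(false) | bool
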
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
index f1245aa2e..a241ef039 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -39,8 +39,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -88,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index b693ab55c..54c85f0fb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -47,8 +47,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -92,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 4fd029107..cee4e9087 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -40,8 +40,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -89,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
index 965e39482..ae217ba2e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -39,8 +39,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -88,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 7830f462c..d7cb38d03 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -47,8 +47,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -92,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index 4364ff8e3..8531e6045 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -40,8 +40,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -89,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 4e7c14e94..30e719d8f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -39,13 +39,18 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -70,10 +75,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
@@ -88,7 +89,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 45b664d06..5fee56615 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -47,8 +47,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -92,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index 036d3fcf5..e29d0f8e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -40,8 +40,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -89,7 +90,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 5b9ac9e8f..920dc2ffc 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -39,13 +39,22 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
- pre_upgrade
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
- include: ../disable_master_excluders.yml
tags:
- pre_upgrade
@@ -70,10 +79,6 @@
# docker is configured and running.
skip_docker_role: True
-- include: ../pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
- include: ../../../openshift-master/validate_restart.yml
tags:
- pre_upgrade
@@ -88,7 +93,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index a470c7595..7c72564b6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -47,8 +47,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -74,6 +75,10 @@
# docker is configured and running.
skip_docker_role: True
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- include: ../pre/verify_control_plane_running.yml
tags:
- pre_upgrade
@@ -92,7 +97,7 @@
- name: Verify docker upgrade targets
hosts: oo_masters_to_config:oo_etcd_to_config
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 25eceaf90..6c1c7c921 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -40,8 +40,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- include: ../pre/verify_inventory_vars.yml
tags:
@@ -67,6 +68,10 @@
# docker is configured and running.
skip_docker_role: True
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
- name: Verify masters are already upgraded
hosts: oo_masters_to_config
tags:
@@ -89,7 +94,7 @@
- name: Verify docker upgrade targets
hosts: oo_nodes_to_upgrade
tasks:
- - include: ../pre/verify_docker_upgrade_targets.yml
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
new file mode 120000
index 000000000..7de3c1dd7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
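
The two trailing edits pass an empty yaml_value, which — assuming the repo's modify_yaml module treats a missing value as null, as its use here suggests — clears the deprecated per-section admission settings so only the top-level admissionConfig.pluginConfig remains. Roughly, a master-config.yaml fragment would go from (values illustrative):

admissionConfig:
  pluginOrderOverride: [SomePlugin]
kubernetesMasterConfig:
  admissionConfig:
    pluginConfig: {}

to:

admissionConfig:
  pluginConfig: {}              # copied from openshift.master.admission_plugin_config
  pluginOrderOverride: null
kubernetesMasterConfig:
  admissionConfig: null
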
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/roles b/playbooks/common/openshift-cluster/upgrades/v3_7/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/roles
@@ -0,0 +1 @@
+../../../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
new file mode 100644
index 000000000..87621dc85
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -0,0 +1,122 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
new file mode 100644
index 000000000..6cd3bd3e5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -0,0 +1,130 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters, and Docker only on standalone etcd hosts
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_etcd3_backend.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
new file mode 100644
index 000000000..e5e04e643
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -0,0 +1,115 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
new file mode 100644
index 000000000..f76fc68d1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -0,0 +1,23 @@
+---
+###############################################################################
+# Pre-upgrade checks for known data problems. If this playbook fails, you should
+# contact support; if you do not have a support contract, contact users@lists.openshift.com
+###############################################################################
+- name: Verify 3.7 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
+
+ - name: Confirm OpenShift authorization objects are in sync
+ command: >
+ {{ openshift.common.client_binary }} adm migrate authorization
+ when: not openshift.common.version_gte_3_7 | bool
+ changed_when: false
+ register: l_oc_result
+ until: l_oc_result.rc == 0
+ retries: 4
+ delay: 15
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 33fc5630f..be2e6a15a 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -1,17 +1,22 @@
---
-- name: Gather and set facts for node hosts
+- name: Validate node hostnames
hosts: oo_nodes_to_config
- roles:
- - openshift_facts
tasks:
- - shell:
+ - name: Query DNS for IP address of {{ openshift.common.hostname }}
+ shell:
getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
register: lookupip
changed_when: false
failed_when: false
- name: Warn user about bad openshift_hostname values
pause:
- prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+ prompt:
+ The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
+ doesn't resolve to an IP address owned by this host. Please set the
+ openshift_hostname variable to a hostname that, when resolved on the host
+ in question, yields an IP address matching an interface on this host.
+ Otherwise this host will fail liveness checks for pods utilizing
+ hostPorts. Press ENTER to continue or CTRL-C to abort.
seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
when:
- lookupip.stdout != '127.0.0.1'
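
The play above warns but does not fail. Where a hard failure is preferred, the registered lookup can be compared against the host's known addresses via the standard ansible_all_ipv4_addresses fact; a sketch, not part of this change:

- name: Fail when the hostname does not resolve to a local address
  fail:
    msg: "{{ openshift.common.hostname }} resolves to {{ lookupip.stdout }}, which is not configured on this host."
  when:
    - lookupip.stdout != '127.0.0.1'
    - lookupip.stdout not in ansible_all_ipv4_addresses
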
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 2cb6197d1..2cae231b4 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -1,11 +1,34 @@
---
+- name: etcd Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set etcd install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_etcd: "In Progress"
+ aggregate: false
+
- name: Configure etcd
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
+ - role: os_firewall
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
+
+- name: etcd Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set etcd install 'Complete'
+ set_stats:
+ data:
+ installer_phase_etcd: "Complete"
+ aggregate: false
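
These checkpoint plays bracket each component with set_stats so a callback plugin can report per-phase status at the end of the run. Any component can adopt the same convention; a sketch with a hypothetical phase key:

- name: Example Install Checkpoint Start
  hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Set example install 'In Progress'
      set_stats:
        data:
          installer_phase_example: "In Progress"   # hypothetical phase key
        aggregate: false
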
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index c655449fa..2456ad3a8 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -1,35 +1,44 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
- tags:
- - always
-
- name: Run pre-checks
hosts: oo_etcd_to_migrate
- tags:
- - always
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: check
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.pre_check
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
-- include: ../openshift-cluster/initialize_facts.yml
- tags:
- - always
+# TODO: This will be different for release-3.6 branch
+- name: Prepare masters for etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master-controllers' }}"
+ - "{{ openshift.common.service_type + '-master-api' }}"
+ - debug:
+ msg: "master service name: {{ master_services }}"
+ - name: Stop masters
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items: "{{ master_services }}"
- name: Backup v2 data
hosts: oo_etcd_to_migrate
gather_facts: no
- tags:
- - always
roles:
- role: openshift_facts
- - role: etcd_common
- r_etcd_common_action: backup
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_backup_tag: pre-migration
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migration
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
hosts: localhost
@@ -41,42 +50,62 @@
| oo_select_keys(groups.oo_etcd_to_migrate)
| oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) | list }}"
- fail:
msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when:
- etcd_backup_failed | length > 0
-- name: Prepare masters for etcd data migration
- hosts: oo_masters_to_config
- tasks:
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type + '-master' }}"
+- name: Stop etcd
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ pre_tasks:
- set_fact:
- master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
- when:
- - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
- - debug:
- msg: "master service name: {{ master_services }}"
- - name: Stop masters
+ l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+ - name: Stop etcd members
service:
- name: "{{ item }}"
+ name: "{{ l_etcd_service }}"
state: stopped
- with_items: "{{ master_services }}"
-- name: Migrate etcd data from v2 to v3
- hosts: oo_etcd_to_migrate
+- name: Migrate data on first etcd
+ hosts: oo_etcd_to_migrate[0]
gather_facts: no
- tags:
- - always
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: migrate
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcd_peer: "{{ ansible_default_ipv4.address }}"
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+
+- name: Clean data stores on remaining etcd hosts
+ hosts: oo_etcd_to_migrate[1:]
+ gather_facts: no
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: clean_data
+ vars:
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+ - name: Add etcd hosts
+ delegate_to: localhost
+ add_host:
+ name: "{{ item }}"
+ groups: oo_new_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_etcd_to_migrate[1:] | default([]) }}"
+ changed_when: no
+ - name: Set success
+ set_fact:
+ r_etcd_migrate_success: true
+
+- include: ./scaleup.yml
- name: Gate on etcd migration
hosts: oo_masters_to_config
@@ -87,23 +116,31 @@
| oo_select_keys(groups.oo_etcd_to_migrate)
| oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
- set_fact:
- etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}"
+ etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) | list }}"
+
+- name: Add TTLs on the first master
+ hosts: oo_first_master[0]
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.add_ttls
+ vars:
+ etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+ when: etcd_migration_failed | length == 0
- name: Configure masters if etcd data migration is successful
hosts: oo_masters_to_config
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: configure
- when: etcd_migration_failed | length == 0
tasks:
+ - include_role:
+ name: etcd
+ tasks_from: migrate.configure_master
+ when: etcd_migration_failed | length == 0
- debug:
msg: "Skipping master re-configuration since migration failed."
when:
- etcd_migration_failed | length > 0
-
-- name: Start masters after etcd data migration
- hosts: oo_masters_to_config
- tasks:
- name: Start master services
service:
name: "{{ item }}"
diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml
index af1ef245a..5eaea5ae8 100644
--- a/playbooks/common/openshift-etcd/restart.yml
+++ b/playbooks/common/openshift-etcd/restart.yml
@@ -7,3 +7,21 @@
service:
name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
state: restarted
+ when:
+ - not g_etcd_certificates_expired | default(false) | bool
+
+- name: Restart etcd
+ hosts: oo_etcd_to_config
+ tasks:
+ - name: stop etcd
+ service:
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
+ state: stopped
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
+ - name: start etcd
+ service:
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
+ state: started
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
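
The split stop/start (rather than a single restarted state) appears intended to guarantee etcd comes down completely before starting against redeployed certificates. If the two conditional plays ever need collapsing, the service states can be derived from the flag (sketch):

- name: Restart etcd, forcing stop/start when certificates were replaced
  service:
    name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
    state: "{{ item }}"
  with_items: "{{ ['stopped', 'started'] if g_etcd_certificates_expired | default(false) | bool else ['restarted'] }}"
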
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
new file mode 100644
index 000000000..4f83264d0
--- /dev/null
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -0,0 +1,76 @@
+---
+- name: Gather facts
+ hosts: oo_etcd_to_config:oo_new_etcd_to_config
+ roles:
+ - openshift_etcd_facts
+ post_tasks:
+ - set_fact:
+ etcd_hostname: "{{ etcd_hostname }}"
+ etcd_ip: "{{ etcd_ip }}"
+
+- name: Configure etcd
+ hosts: oo_new_etcd_to_config
+ serial: 1
+ any_errors_fatal: true
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ pre_tasks:
+ - name: Add new etcd members to cluster
+ command: >
+ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
+ --key-file {{ etcd_peer_key_file }}
+ --ca-file {{ etcd_peer_ca_file }}
+ -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
+ member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
+ delegate_to: "{{ etcd_ca_host }}"
+ failed_when:
+ - etcd_add_check.rc == 1
+ - ("peerURL exists" not in etcd_add_check.stderr)
+ register: etcd_add_check
+ retries: 3
+ delay: 10
+ until: etcd_add_check.rc == 0
+ roles:
+ - role: os_firewall
+ when: etcd_add_check.rc == 0
+ - role: openshift_etcd
+ when: etcd_add_check.rc == 0
+ etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config) | default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_initial_cluster_state: "existing"
+ initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
+ etcd_ca_setup: False
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ - role: nickhammond.logrotate
+ when: etcd_add_check.rc == 0
+ post_tasks:
+ - name: Verify cluster is stable
+ command: >
+ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
+ --key-file {{ etcd_peer_key_file }}
+ --ca-file {{ etcd_peer_ca_file }}
+ -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
+ cluster-health
+ register: scaleup_health
+ retries: 3
+ delay: 30
+ until: scaleup_health.rc == 0
+
+- name: Update master etcd client urls
+ hosts: oo_masters_to_config
+ serial: 1
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config']))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
+ roles:
+ - role: openshift_master_facts
+ post_tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: update_etcd_client_urls
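
In the scaleup play, initial_etcd_cluster is parsed out of the `etcdctl member add` output by indexing stdout_lines[3], which is fragile if etcdctl changes its output layout. A position-independent alternative (sketch):

- set_fact:
    initial_etcd_cluster: "{{ etcd_add_check.stdout_lines
        | select('match', '^ETCD_INITIAL_CLUSTER=') | first
        | regex_replace('ETCD_INITIAL_CLUSTER=', '')
        | regex_replace('\"', '') }}"
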
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
deleted file mode 100644
index ced4bddc5..000000000
--- a/playbooks/common/openshift-etcd/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_etcd
- add_host:
- name: "{{ item }}"
- groups: g_service_etcd
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change etcd state on etcd instance(s)
- hosts: g_service_etcd
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=etcd state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 1efdfb336..516618de2 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,23 +1,48 @@
---
-- name: Open firewall ports for GlusterFS
- hosts: oo_glusterfs_to_config
- vars:
- os_firewall_allow:
- - service: glusterfs_sshd
- port: "2222/tcp"
- - service: glusterfs_daemon
- port: "24007/tcp"
- - service: glusterfs_management
- port: "24008/tcp"
- - service: glusterfs_bricks
- port: "49152-49251/tcp"
- roles:
- - role: os_firewall
+- name: GlusterFS Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set GlusterFS install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_glusterfs: "In Progress"
+ aggregate: false
+
+- name: Open firewall ports for GlusterFS nodes
+ hosts: glusterfs
+ tasks:
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: firewall.yml
+ when:
+ - openshift_storage_glusterfs_is_native | default(True) | bool
+
+- name: Open firewall ports for GlusterFS registry nodes
+ hosts: glusterfs_registry
+ tasks:
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: firewall.yml
when:
- - openshift_storage_glusterfs_is_native | default(True)
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
- name: Configure GlusterFS
hosts: oo_first_master
- roles:
- - role: openshift_storage_glusterfs
+ tasks:
+ - name: setup glusterfs
+ include_role:
+ name: openshift_storage_glusterfs
when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- name: GlusterFS Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set GlusterFS install 'Complete'
+ set_stats:
+ data:
+ installer_phase_glusterfs: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index c414913bf..ecbb092bc 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -1,4 +1,15 @@
---
+- name: Load Balancer Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set load balancer install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_loadbalancer: "In Progress"
+ aggregate: false
+
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
@@ -12,5 +23,18 @@
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_backends | default([]) }}"
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
+ - role: os_firewall
- role: openshift_loadbalancer
+
+- name: Load Balancer Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set load balancer install 'Complete'
+ set_stats:
+ data:
+ installer_phase_loadbalancer: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
deleted file mode 100644
index d3762c961..000000000
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_lb
- add_host:
- name: "{{ item }}"
- groups: g_service_lb
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on lb instance(s)
- hosts: g_service_lb
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
new file mode 100644
index 000000000..ee76e2ed7
--- /dev/null
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -0,0 +1,48 @@
+---
+- name: Master Additional Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Master Additional install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_master_additional: "In Progress"
+ aggregate: false
+
+- name: Additional master configuration
+ hosts: oo_first_master
+ vars:
+ cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+ etcd_urls: "{{ openshift.master.etcd_urls }}"
+ openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
+ omc_cluster_hosts: "{{ groups.oo_masters | join(' ') }}"
+ roles:
+ - role: openshift_master_cluster
+ when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
+ - role: openshift_examples
+ when: openshift_install_examples | default(true, true) | bool
+ registry_url: "{{ openshift.master.registry_url }}"
+ - role: openshift_hosted_templates
+ registry_url: "{{ openshift.master.registry_url }}"
+ - role: openshift_manageiq
+ when: openshift_use_manageiq | default(false) | bool
+ - role: cockpit
+ when:
+ - openshift.common.is_atomic
+ - deployment_type == 'openshift-enterprise'
+ - osm_use_cockpit is undefined or osm_use_cockpit | bool
+ - openshift.common.deployment_subtype != 'registry'
+ - role: flannel_register
+ when: openshift_use_flannel | default(false) | bool
+
+- name: Master Additional Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Master Additional install 'Complete'
+ set_stats:
+ data:
+ installer_phase_master_additional: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 7d3a371e3..38257b803 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,10 +1,42 @@
---
+- name: Master Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Master install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_master: "In Progress"
+ aggregate: false
+
+- name: Disable excluders
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
vars:
t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
pre_tasks:
+ # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336
+ #
+ # When scaling up a cluster upgraded from OCP <= 3.5, ensure that
+ # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing
+ # masters, or absent if such is the case.
+ - name: Detect if this host is a new master in a scale up
+ set_fact:
+ g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}"
+
+ - name: Scaleup Detection
+ debug:
+ var: g_openshift_master_is_scaleup
+
- name: Check for RPM generated config marker file .config_managed
stat:
path: /etc/origin/.config_managed
@@ -14,7 +46,9 @@
file:
path: "/etc/origin/{{ item }}"
state: absent
- when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+ when:
+ - rpmgenerated_config.stat.exists == true
+ - deployment_type == 'openshift-enterprise'
with_items:
- master
- node
@@ -69,7 +103,7 @@
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-- name: Inspect state of first master session secrets and config
+- name: Inspect state of first master config settings
hosts: oo_first_master
roles:
- role: openshift_facts
@@ -98,6 +132,42 @@
set_fact:
l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
+ - name: Check if atomic-openshift-master sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master
+ register: l_aom_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master
+ register: l_default_registry_defined
+ when: l_aom_exists.stat.exists | bool
+
+ - name: Check if atomic-openshift-master-api sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master-api
+ register: l_aom_api_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api
+ register: l_default_registry_defined_api
+ when: l_aom_api_exists.stat.exists | bool
+
+ - name: Check if atomic-openshift-master-controllers sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master-controllers
+ register: l_aom_controllers_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers
+ register: l_default_registry_defined_controllers
+ when: l_aom_controllers_exists.stat.exists | bool
+
+ - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value
+ set_fact:
+ l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}"
+ l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}"
+ l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}"
+
- name: Generate master session secrets
hosts: oo_first_master
vars:
@@ -122,32 +192,77 @@
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- roles:
- - role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.ip') | default([]) | join(',')
+ }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_master_facts
+ - role: openshift_hosted_facts
+ - role: openshift_master_certificates
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ when: groups.oo_etcd_to_config | default([]) | length != 0
+ - role: openshift_clock
+ - role: openshift_cloud_provider
+ - role: openshift_builddefaults
+ - role: openshift_buildoverrides
+ - role: nickhammond.logrotate
+ - role: contiv
+ contiv_role: netmaster
+ when: openshift_use_contiv | default(False) | bool
+ - role: openshift_master
+ openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
+ openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
+ openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
+ openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
+ openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
+ - role: nuage_ca
+ when: openshift_use_nuage | default(false) | bool
+ - role: nuage_common
+ when: openshift_use_nuage | default(false) | bool
- role: nuage_master
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: calico_master
- when: openshift.common.use_calico | bool
-
+ when: openshift_use_calico | default(false) | bool
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+
+- name: Configure API Aggregation on masters
+ hosts: oo_masters
+ serial: 1
+ tasks:
+ - include: tasks/wire_aggregator.yml
+
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
+- name: Master Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Master install 'Complete'
+ set_stats:
+ data:
+ installer_phase_master: "Complete"
+ aggregate: false
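
Note the `| default('')` on each registered awk result above: when the corresponding sysconfig file does not exist, the command task is skipped, and a skipped task's registered variable carries no stdout key, so the default keeps the later set_fact from failing. The same guard applies to any conditionally registered command (names hypothetical):

- set_fact:
    l_some_value: "{{ l_some_check.stdout | default('') }}"   # l_some_check may have been skipped
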
diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..d0a9f11dc
--- /dev/null
+++ b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js
@@ -0,0 +1,2 @@
+// Empty file so that the master-config can still point to a file that exists.
+// This file will be replaced by the template service broker role, if enabled.
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 6fec346c3..4d73b8124 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -7,7 +7,7 @@
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
handlers:
- - include: roles/openshift_master/handlers/main.yml
+ - include: ../../../roles/openshift_master/handlers/main.yml
static: yes
roles:
- openshift_facts
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index 67ba0aa2e..a5dbe0590 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -37,3 +37,4 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 508b5a3ac..4f8b758fd 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -1,9 +1,4 @@
---
-- name: Restart master
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: restarted
- when: not openshift_master_ha | bool
- name: Restart master API
service:
name: "{{ openshift.common.service_type }}-master-api"
@@ -15,6 +10,7 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: openshift_master_ha | bool
- name: Restart master controllers
service:
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index bc61ee9bb..8c366e038 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -1,11 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
- name: Update master count
hosts: oo_masters:!oo_masters_to_config
serial: 1
@@ -50,38 +43,10 @@
delay: 1
changed_when: false
-- name: Configure docker hosts
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
-- name: Disable excluders
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+- include: ../openshift-master/set_network_facts.yml
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
- include: ../openshift-node/config.yml
-
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
deleted file mode 100644
index 48a2731aa..000000000
--- a/playbooks/common/openshift-master/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- groups: g_service_masters
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on master instance(s)
- hosts: g_service_masters
- connection: ssh
- gather_facts: no
- tasks:
- - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml
new file mode 100644
index 000000000..2ad805858
--- /dev/null
+++ b/playbooks/common/openshift-master/set_network_facts.yml
@@ -0,0 +1,28 @@
+---
+- name: Read first master's config
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - stat:
+ path: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_stat
+ - slurp:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ register: g_master_config_slurp
+
+- name: Set network facts for masters
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - block:
+ - set_fact:
+ osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}"
+ when: osm_cluster_network_cidr is not defined
+ - set_fact:
+ osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}"
+ when: osm_host_subnet_length is not defined
+ - set_fact:
+ openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}"
+ when: openshift_portal_net is not defined
+ when:
+ - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool
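
set_network_facts.yml reads the first master's on-disk config with slurp, then decodes it inline with b64decode | from_yaml. The same one-liner can pull any other key when a fact is not already set; the fact name and config key below are illustrative:

- set_fact:
    osm_network_plugin: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content
        | b64decode | from_yaml).networkConfig.networkPluginName }}"
  when: osm_network_plugin is not defined
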
diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
new file mode 100644
index 000000000..560eea785
--- /dev/null
+++ b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
@@ -0,0 +1,215 @@
+---
+- name: Make temp cert dir
+ command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
+ register: certtemp
+ changed_when: False
+
+- name: Check for First Master Aggregator Signer cert
+ stat:
+ path: /etc/origin/master/front-proxy-ca.crt
+ register: first_proxy_ca_crt
+ changed_when: false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- name: Check for First Master Aggregator Signer key
+ stat:
+ path: /etc/origin/master/front-proxy-ca.key
+ register: first_proxy_ca_key
+ changed_when: false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+# TODO: this currently has a bug where hostnames are required
+- name: Creating First Master Aggregator signer certs
+ command: >
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
+ --cert=/etc/origin/master/front-proxy-ca.crt
+ --key=/etc/origin/master/front-proxy-ca.key
+ --serial=/etc/origin/master/ca.serial.txt
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when:
+ - not first_proxy_ca_crt.stat.exists
+ - not first_proxy_ca_key.stat.exists
+
+- name: Check for Aggregator Signer cert
+ stat:
+ path: /etc/origin/master/front-proxy-ca.crt
+ register: proxy_ca_crt
+ changed_when: false
+
+- name: Check for Aggregator Signer key
+ stat:
+ path: /etc/origin/master/front-proxy-ca.key
+ register: proxy_ca_key
+ changed_when: false
+
+- name: Copy Aggregator Signer certs from first master
+ fetch:
+ src: "/etc/origin/master/{{ item }}"
+ dest: "{{ certtemp.stdout }}/{{ item }}"
+ flat: yes
+ with_items:
+ - front-proxy-ca.crt
+ - front-proxy-ca.key
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when:
+ - not proxy_ca_key.stat.exists
+ - not proxy_ca_crt.stat.exists
+
+- name: Copy Aggregator Signer certs to host
+ copy:
+ src: "{{ certtemp.stdout }}/{{ item }}"
+ dest: "/etc/origin/master/{{ item }}"
+ with_items:
+ - front-proxy-ca.crt
+ - front-proxy-ca.key
+ when:
+ - not proxy_ca_key.stat.exists
+ - not proxy_ca_crt.stat.exists
+
+# oc_adm_ca_server_cert:
+# cert: /etc/origin/master/front-proxy-ca.crt
+# key: /etc/origin/master/front-proxy-ca.key
+
+- name: Check for first master api-client config
+ stat:
+ path: /etc/origin/master/aggregator-front-proxy.kubeconfig
+ register: first_front_proxy_kubeconfig
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ run_once: true
+
+# create-api-client-config generates a ca.crt file which will
+# overwrite the OpenShift CA certificate. Generate the aggregator
+# kubeconfig in a temporary directory and then copy files into the
+# master config dir to avoid overwriting ca.crt.
+- block:
+ - name: Create first master api-client config for Aggregator
+ command: >
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority=/etc/origin/master/front-proxy-ca.crt
+ --signer-cert=/etc/origin/master/front-proxy-ca.crt
+ --signer-key=/etc/origin/master/front-proxy-ca.key
+ --user aggregator-front-proxy
+ --client-dir={{ certtemp.stdout }}
+ --signer-serial=/etc/origin/master/ca.serial.txt
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ run_once: true
+ - name: Copy first master api-client config for Aggregator
+ copy:
+ src: "{{ certtemp.stdout }}/{{ item }}"
+ dest: "/etc/origin/master/"
+ remote_src: true
+ with_items:
+ - aggregator-front-proxy.crt
+ - aggregator-front-proxy.key
+ - aggregator-front-proxy.kubeconfig
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ run_once: true
+ when:
+ - not first_front_proxy_kubeconfig.stat.exists
+
+- name: Check for api-client config
+ stat:
+ path: /etc/origin/master/aggregator-front-proxy.kubeconfig
+ register: front_proxy_kubeconfig
+
+- name: Copy api-client config from first master
+ fetch:
+ src: "/etc/origin/master/{{ item }}"
+ dest: "{{ certtemp.stdout }}/{{ item }}"
+ flat: yes
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ with_items:
+ - aggregator-front-proxy.crt
+ - aggregator-front-proxy.key
+ - aggregator-front-proxy.kubeconfig
+ when:
+ - not front_proxy_kubeconfig.stat.exists
+
+- name: Copy api-client config to host
+ copy:
+ src: "{{ certtemp.stdout }}/{{ item }}"
+ dest: "/etc/origin/master/{{ item }}"
+ with_items:
+ - aggregator-front-proxy.crt
+ - aggregator-front-proxy.key
+ - aggregator-front-proxy.kubeconfig
+ when:
+ - not front_proxy_kubeconfig.stat.exists
+
+- name: copy tech preview extension file for service console UI
+ copy:
+ src: openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+- name: Update master config
+ yedit:
+ state: present
+ src: /etc/origin/master/master-config.yaml
+ edits:
+ - key: aggregatorConfig.proxyClientInfo.certFile
+ value: aggregator-front-proxy.crt
+ - key: aggregatorConfig.proxyClientInfo.keyFile
+ value: aggregator-front-proxy.key
+ - key: authConfig.requestHeader.clientCA
+ value: front-proxy-ca.crt
+ - key: authConfig.requestHeader.clientCommonNames
+ value: [aggregator-front-proxy]
+ - key: authConfig.requestHeader.usernameHeaders
+ value: [X-Remote-User]
+ - key: authConfig.requestHeader.groupHeaders
+ value: [X-Remote-Group]
+ - key: authConfig.requestHeader.extraHeaderPrefixes
+ value: [X-Remote-Extra-]
+ - key: assetConfig.extensionScripts
+ value: [/etc/origin/master/openshift-ansible-catalog-console.js]
+ - key: kubernetesMasterConfig.apiServerArguments.runtime-config
+ value: [apis/settings.k8s.io/v1alpha1=true]
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.kind
+ value: DefaultAdmissionConfig
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion
+ value: v1
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.disable
+ value: false
+ register: yedit_output
+
+# Restart master API and controllers; the enclosing play runs with serial: 1,
+# so masters are restarted one at a time.
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.cluster_method == 'native'
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+ when:
+ - yedit_output.changed
+
+- name: Delete temp directory
+ file:
+ name: "{{ certtemp.stdout }}"
+ state: absent
+ changed_when: False
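
The curl-based readiness probe above sidesteps the uri module's python-httplib2 dependency noted in the task comment. On newer Ansible releases where that dependency is gone, a uri-based equivalent might look like this (sketch; certificate validation is disabled only to keep the example short):

- name: Verify API Server (uri-based sketch)
  uri:
    url: "{{ openshift.master.api_url }}/healthz/ready"
    validate_certs: no   # in real use, point at the CA bundle instead
    return_content: yes
  register: api_available_output
  until: api_available_output.content == 'ok'
  retries: 120
  delay: 1
  changed_when: false
  when: yedit_output.changed
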
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index 000e46e80..66303d6f7 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,6 +1,28 @@
---
+- name: NFS Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set NFS install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_nfs: "In Progress"
+ aggregate: false
+
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: openshift_facts
+ - role: os_firewall
- role: openshift_storage_nfs
+
+- name: NFS Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set NFS install 'Complete'
+ set_stats:
+ data:
+ installer_phase_nfs: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
deleted file mode 100644
index b1e35e4b1..000000000
--- a/playbooks/common/openshift-nfs/service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Populate g_service_nfs host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nfs
- add_host:
- name: "{{ item }}"
- groups: g_service_nfs
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on nfs instance(s)
- hosts: g_service_nfs
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index acebabc91..15693e633 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,24 +1,22 @@
---
-- name: Gather and set facts for node hosts
+- name: Node Install Checkpoint Start
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Node install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_node: "In Progress"
+ aggregate: false
+
+- name: Disable excluders
hosts: oo_nodes_to_config
- vars:
- t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
- pre_tasks:
- - set_fact:
- openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}"
- when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != ""
+ gather_facts: no
roles:
- - openshift_facts
- tasks:
- # Since the master is generating the node certificates before they are
- # configured, we need to make sure to set the node properties beforehand if
- # we do not want the defaults
- - openshift_facts:
- role: node
- local_facts:
- labels: "{{ openshift_node_labels | default(None) }}"
- annotations: "{{ openshift_node_annotations | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Evaluate node groups
hosts: localhost
@@ -32,7 +30,11 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
- when: hostvars[item].openshift is defined and hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+ when:
+ - hostvars[item].openshift is defined
+ - hostvars[item].openshift.common is defined
+ - hostvars[item].openshift.common.is_containerized | bool
+ - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
changed_when: False
- name: Configure containerized nodes
@@ -47,9 +49,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
roles:
+ - role: os_firewall
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -64,9 +66,8 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
+ - role: os_firewall
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -75,24 +76,48 @@
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
roles:
- - role: flannel
- etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ - role: openshift_facts
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_cert_prefix: flannel.etcd-
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
- when: openshift.common.use_flannel | bool
+ - role: flannel
+ etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ when: openshift_use_flannel | default(false) | bool
- role: calico
- when: openshift.common.use_calico | bool
+ when: openshift_use_calico | default(false) | bool
- role: nuage_node
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: contiv
contiv_role: netplugin
- when: openshift.common.use_contiv | bool
+ when: openshift_use_contiv | default(false) | bool
- role: nickhammond.logrotate
- role: openshift_manage_node
openshift_master_host: "{{ groups.oo_first_master.0 }}"
+ when: not openshift_node_bootstrap | default(False)
tasks:
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
+- name: Node Install Checkpoint End
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Set Node install 'Complete'
+ set_stats:
+ data:
+ installer_phase_node: "Complete"
+ aggregate: false
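
Two idioms recur throughout the rewritten node config. First, the SDN roles now key off plain inventory booleans ("openshift_use_flannel | default(false) | bool") instead of the precomputed openshift.common.use_* facts, so a role stays off unless the inventory enables it explicitly. Second, long single-line conjunctions become when: lists, which Ansible ANDs together, one condition per line. A compact sketch of both, with "nodes" as a placeholder group name:

- name: Conditional roles and list-form conditions (sketch)
  hosts: nodes
  roles:
    - role: flannel
      # off unless the inventory sets openshift_use_flannel=true
      when: openshift_use_flannel | default(false) | bool
  tasks:
    - debug:
        msg: "containerized host"
      when:
        - openshift is defined                      # every line must hold;
        - openshift.common.is_containerized | bool  # the list is AND-ed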
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
index 0014a5dbd..b3a7399dc 100644
--- a/playbooks/common/openshift-node/network_manager.yml
+++ b/playbooks/common/openshift-node/network_manager.yml
@@ -1,4 +1,6 @@
---
+- include: ../openshift-cluster/evaluate_groups.yml
+
- name: Install and configure NetworkManager
hosts: oo_all_hosts
become: yes
diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml
index 01cf948e0..c3beb59b7 100644
--- a/playbooks/common/openshift-node/restart.yml
+++ b/playbooks/common/openshift-node/restart.yml
@@ -11,6 +11,10 @@
service:
name: docker
state: restarted
+ register: l_docker_restart_docker_in_node_result
+ until: not l_docker_restart_docker_in_node_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -23,7 +27,6 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -36,6 +39,7 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
- name: restart node
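
Two resilience guards are added here: the docker restart is retried instead of failing the run on a transient error, and the wait for the master API gets an explicit ceiling (wait_for otherwise gives up after its 300-second default). Sketched in isolation, with a literal port standing in for openshift.master.api_port:

- name: Restart docker, tolerating transient failures
  service:
    name: docker
    state: restarted
  register: l_restart_result
  until: not l_restart_result | failed   # newer Ansible: l_restart_result is not failed
  retries: 3
  delay: 30

- name: Wait for the master API to accept connections
  wait_for:
    state: started
    delay: 10
    port: 8443      # placeholder for openshift.master.api_port
    timeout: 600    # the module default would be 300 seconds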
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
deleted file mode 100644
index 40da8990d..000000000
--- a/playbooks/common/openshift-node/scaleup.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
-- name: Gather and set facts for first master
- hosts: oo_first_master
- vars:
- openshift_master_count: "{{ groups.oo_masters | length }}"
- pre_tasks:
- - set_fact:
- openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
- when: openshift_master_default_subdomain is not defined
- roles:
- - openshift_master_facts
-
-- name: Configure docker hosts
- hosts: oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
-- name: Disable excluders
- hosts: oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: disable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- include: ../openshift-node/config.yml
-
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_nodes_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
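
The deleted scaleup entry point wrapped config.yml in the same excluder bracketing that config.yml now carries itself. The bracket matters because the excluder packages pin OpenShift packages via yum excludes, so they have to be disabled before any play that installs or upgrades packages and re-enabled afterwards. Reduced to its shape:

- name: Disable excluders before package work
  hosts: oo_nodes_to_config
  gather_facts: no
  roles:
    - role: openshift_excluder
      r_openshift_excluder_action: disable
      r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"

# ... plays that install or upgrade packages run here ...

- name: Re-enable excluder if it was previously enabled
  hosts: oo_nodes_to_config
  gather_facts: no
  roles:
    - role: openshift_excluder
      r_openshift_excluder_action: enable
      r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"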
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
deleted file mode 100644
index 130a5416f..000000000
--- a/playbooks/common/openshift-node/service.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on node instance(s)
- hosts: g_service_nodes
- connection: ssh
- gather_facts: no
- tasks:
- - name: Change state on node instance(s)
- service:
- name: "{{ service_type }}-node"
- state: "{{ new_cluster_state }}"