diff options
Diffstat (limited to 'playbooks/common')
151 files changed, 323 insertions, 4558 deletions
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml deleted file mode 100644 index d0deaeb65..000000000 --- a/playbooks/common/openshift-checks/adhoc.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: OpenShift Health Checks - hosts: oo_all_hosts - - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: adhoc - post_tasks: - - name: Run health checks (adhoc) - action: openshift_health_check - args: - checks: '{{ openshift_checks | default([]) }}' diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml deleted file mode 100644 index d0921b9d3..000000000 --- a/playbooks/common/openshift-checks/health.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: OpenShift Health Checks - hosts: oo_all_hosts - - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: health - post_tasks: - - name: Run health checks (@health) - action: openshift_health_check - args: - checks: ['@health'] diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml deleted file mode 100644 index 6701a2e15..000000000 --- a/playbooks/common/openshift-checks/install.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -- name: Health Check Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Health Check 'In Progress' - set_stats: - data: - installer_phase_health: "In Progress" - aggregate: false - -- name: OpenShift Health Checks - hosts: oo_all_hosts - any_errors_fatal: true - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: install - post_tasks: - - name: Run health checks (install) - EL - when: ansible_distribution != "Fedora" - action: openshift_health_check - args: - checks: - - disk_availability - - memory_availability - - package_availability - - package_version - - docker_image_availability - - docker_storage - - - name: Run health checks (install) - Fedora - when: ansible_distribution == "Fedora" - action: openshift_health_check - args: - checks: - - docker_image_availability - -- name: Health Check Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Health Check 'Complete' - set_stats: - data: - installer_phase_health: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml deleted file mode 100644 index 32449d4e4..000000000 --- a/playbooks/common/openshift-checks/pre-install.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: OpenShift Health Checks - hosts: oo_all_hosts - - roles: - - openshift_health_checker - vars: - - r_openshift_health_checker_playbook_context: pre-install - post_tasks: - - name: Run health checks (@preflight) - action: openshift_health_check - args: - checks: ['@preflight'] diff --git a/playbooks/common/openshift-checks/roles b/playbooks/common/openshift-checks/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/common/openshift-checks/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml deleted file mode 100644 index 5ddafdb07..000000000 --- a/playbooks/common/openshift-cluster/cockpit-ui.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Create Hosted Resources - cockpit-ui - hosts: oo_first_master - roles: - - role: cockpit-ui - when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool) diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 3b4d6f9a6..a8ca5e686 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,35 +1,38 @@ --- -- include: ../openshift-checks/install.yml +- include: ../../openshift-checks/private/install.yml -- include: ../openshift-etcd/config.yml +- include: ../../openshift-etcd/private/config.yml -- include: ../openshift-nfs/config.yml +- include: ../../openshift-nfs/private/config.yml when: groups.oo_nfs_to_config | default([]) | count > 0 -- include: ../openshift-loadbalancer/config.yml +- include: ../../openshift-loadbalancer/private/config.yml when: groups.oo_lb_to_config | default([]) | count > 0 -- include: ../openshift-master/config.yml +- include: ../../openshift-master/private/config.yml -- include: ../openshift-master/additional_config.yml +- include: ../../openshift-master/private/additional_config.yml -- include: ../openshift-node/config.yml +- include: ../../openshift-node/private/config.yml -- include: ../openshift-glusterfs/config.yml +- include: ../../openshift-glusterfs/private/config.yml when: groups.oo_glusterfs_to_config | default([]) | count > 0 -- include: openshift_hosted.yml +- include: ../../openshift-hosted/private/config.yml -- include: openshift_metrics.yml +- include: ../../openshift-metrics/private/config.yml when: openshift_metrics_install_metrics | default(false) | bool -- include: openshift_logging.yml +- include: ../../openshift-logging/private/config.yml when: openshift_logging_install_logging | default(false) | bool -- include: service_catalog.yml +- include: ../../openshift-prometheus/private/config.yml + when: openshift_hosted_prometheus_deploy | default(false) | bool + +- include: ../../openshift-service-catalog/private/config.yml when: openshift_enable_service_catalog | default(true) | bool -- include: ../openshift-management/config.yml +- include: ../../openshift-management/private/config.yml when: openshift_management_install_management | default(false) | bool - name: Print deprecated variable warning message if necessary diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml deleted file mode 100644 index ec6f2c52c..000000000 --- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- name: Create persistent volumes - hosts: oo_first_master - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" - tasks: - - debug: var=persistent_volumes - - debug: var=persistent_volume_claims - -- name: Create Hosted Resources - persistent volumes - hosts: oo_first_master - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | 
oo_persistent_volumes(groups) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" - roles: - - role: openshift_persistent_volumes - when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml deleted file mode 100644 index be14b06f0..000000000 --- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -- include: evaluate_groups.yml - -- name: Load openshift_facts - hosts: oo_masters_to_config:oo_nodes_to_config - roles: - - openshift_facts - post_tasks: - - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1" - when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool - -- name: Reconfigure masters to listen on our new dns_port - hosts: oo_masters_to_config - handlers: - - include: ../../../roles/openshift_master/handlers/main.yml - static: yes - vars: - os_firewall_allow: - - service: skydns tcp - port: "{{ openshift.master.dns_port }}/tcp" - - service: skydns udp - port: "{{ openshift.master.dns_port }}/udp" - roles: - - os_firewall - tasks: - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: master - local_facts: - dns_port: '8053' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: dnsConfig.bindAddress - yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}" - notify: restart master api - - meta: flush_handlers - -- name: Configure nodes for dnsmasq - hosts: oo_nodes_to_config - handlers: - - include: ../../../roles/openshift_node/handlers/main.yml - static: yes - pre_tasks: - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: node - local_facts: - dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" - roles: - - openshift_node_dnsmasq - post_tasks: - - modify_yaml: - dest: "{{ openshift.common.config_base }}/node/node-config.yaml" - yaml_key: dnsIP - yaml_value: "{{ openshift.node.dns_ip }}" - notify: restart node diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml deleted file mode 100644 index 78b552279..000000000 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ /dev/null @@ -1,190 +0,0 @@ ---- -- name: Populate config host groups - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required - fail: - msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set - when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined - - - name: Evaluate groups - g_master_hosts or g_new_master_hosts required - fail: - msg: This playbook requires g_master_hosts or g_new_master_hosts to be set - when: g_master_hosts is not defined and g_new_master_hosts is not defined - - - name: Evaluate groups - g_node_hosts or g_new_node_hosts required - fail: - msg: This playbook requires g_node_hosts or g_new_node_hosts to be set - when: g_node_hosts is not defined and g_new_node_hosts is not defined - - - name: Evaluate groups - g_lb_hosts required - fail: - msg: This playbook requires g_lb_hosts to be set - when: g_lb_hosts is not defined - - - name: Evaluate groups - g_nfs_hosts required - fail: - msg: This playbook requires 
g_nfs_hosts to be set - when: g_nfs_hosts is not defined - - - name: Evaluate groups - g_nfs_hosts is single host - fail: - msg: The nfs group must be limited to one host - when: g_nfs_hosts | default([]) | length > 1 - - - name: Evaluate groups - g_glusterfs_hosts required - fail: - msg: This playbook requires g_glusterfs_hosts to be set - when: g_glusterfs_hosts is not defined - - - name: Evaluate groups - Fail if no etcd hosts group is defined - fail: - msg: > - Running etcd as an embedded service is no longer supported. If this is a - new install please define an 'etcd' group with either one or three - hosts. These hosts may be the same hosts as your masters. If this is an - upgrade you may set openshift_master_unsupported_embedded_etcd=true - until a migration playbook becomes available. - when: - - g_etcd_hosts | default([]) | length not in [3,1] - - not openshift_master_unsupported_embedded_etcd | default(False) - - not (openshift_node_bootstrap | default(False)) - - - name: Evaluate oo_all_hosts - add_host: - name: "{{ item }}" - groups: oo_all_hosts - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: no - - - name: Evaluate oo_masters - add_host: - name: "{{ item }}" - groups: oo_masters - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}" - changed_when: no - - - name: Evaluate oo_first_master - add_host: - name: "{{ g_master_hosts[0] }}" - groups: oo_first_master - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - when: g_master_hosts|length > 0 - changed_when: no - - - name: Evaluate oo_new_etcd_to_config - add_host: - name: "{{ item }}" - groups: oo_new_etcd_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_new_etcd_hosts | default([]) }}" - changed_when: no - - - name: Evaluate oo_masters_to_config - add_host: - name: "{{ item }}" - groups: oo_masters_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}" - changed_when: no - - - name: Evaluate oo_etcd_to_config - add_host: - name: "{{ item }}" - groups: oo_etcd_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_etcd_hosts | default([]) }}" - changed_when: no - - - name: Evaluate oo_first_etcd - add_host: - name: "{{ g_etcd_hosts[0] }}" - groups: oo_first_etcd - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - when: g_etcd_hosts|length > 0 - changed_when: no - - # We use two groups one for hosts we're upgrading which doesn't include embedded etcd - # The other for backing up which includes the embedded etcd host, there's no need to - # upgrade embedded etcd that just happens when the master is updated. 
- - name: Evaluate oo_etcd_hosts_to_upgrade - add_host: - name: "{{ item }}" - groups: oo_etcd_hosts_to_upgrade - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" - changed_when: False - - - name: Evaluate oo_etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: oo_etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}" - changed_when: False - - - name: Evaluate oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" - changed_when: no - - # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is - - name: Add master to oo_nodes_to_config - add_host: - name: "{{ item }}" - groups: oo_nodes_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_master_hosts | default([]) }}" - when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool - changed_when: no - - - name: Evaluate oo_lb_to_config - add_host: - name: "{{ item }}" - groups: oo_lb_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_lb_hosts | default([]) }}" - changed_when: no - - - name: Evaluate oo_nfs_to_config - add_host: - name: "{{ item }}" - groups: oo_nfs_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_nfs_hosts | default([]) }}" - changed_when: no - - - name: Evaluate oo_glusterfs_to_config - add_host: - name: "{{ item }}" - groups: oo_glusterfs_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}" - changed_when: no - - - name: Evaluate oo_etcd_to_migrate - add_host: - name: "{{ item }}" - groups: oo_etcd_to_migrate - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}" - changed_when: no diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml deleted file mode 100644 index 91223d368..000000000 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ /dev/null @@ -1,169 +0,0 @@ ---- -- name: Ensure that all non-node hosts are accessible - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config - any_errors_fatal: true - tasks: - -- name: Initialize host facts - hosts: oo_all_hosts - tasks: - - name: load openshift_facts module - include_role: - name: openshift_facts - static: yes - - # TODO: Should this role be refactored into health_checks?? 
- - name: Run openshift_sanitize_inventory to set variables - include_role: - name: openshift_sanitize_inventory - - - name: Detecting Operating System from ostree_booted - stat: - path: /run/ostree-booted - register: ostree_booted - - # Locally setup containerized facts for now - - name: initialize_facts set fact l_is_atomic - set_fact: - l_is_atomic: "{{ ostree_booted.stat.exists }}" - - - name: initialize_facts set fact for containerized and l_is_*_system_container - set_fact: - l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}" - l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" - l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" - l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" - l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" - - - name: initialize_facts set facts for l_any_system_container - set_fact: - l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}" - - - name: initialize_facts set fact for l_etcd_runtime - set_fact: - l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}" - - # TODO: Should this be moved into health checks?? - # Seems as though any check that happens with a corresponding fail should move into health_checks - - name: Validate python version - ans_dist is fedora and python is v3 - fail: - msg: | - openshift-ansible requires Python 3 for {{ ansible_distribution }}; - For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html - when: - - ansible_distribution == 'Fedora' - - ansible_python['version']['major'] != 3 - - # TODO: Should this be moved into health checks?? - # Seems as though any check that happens with a corresponding fail should move into health_checks - - name: Validate python version - ans_dist not Fedora and python must be v2 - fail: - msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}" - when: - - ansible_distribution != 'Fedora' - - ansible_python['version']['major'] != 2 - - # TODO: Should this be moved into health checks?? - # Seems as though any check that happens with a corresponding fail should move into health_checks - # Fail as early as possible if Atomic and old version of Docker - - when: - - l_is_atomic | bool - block: - - # See https://access.redhat.com/articles/2317361 - # and https://github.com/ansible/ansible/issues/15892 - # NOTE: the "'s can not be removed at this level else the docker command will fail - # NOTE: When ansible >2.2.1.x is used this can be updated per - # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121 - - name: Determine Atomic Host Docker Version - shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"' - register: l_atomic_docker_version - - - name: assert atomic host docker version is 1.12 or later - assert: - that: - - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=') - msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host. 
- - - when: - - not l_is_atomic | bool - block: - - name: Ensure openshift-ansible installer package deps are installed - package: - name: "{{ item }}" - state: present - with_items: - - iproute - - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}" - - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" - - yum-utils - - - name: Ensure various deps for running system containers are installed - package: - name: "{{ item }}" - state: present - with_items: - - atomic - - ostree - - runc - when: - - l_any_system_container | bool - - - name: Default system_images_registry to a enterprise registry - set_fact: - system_images_registry: "registry.access.redhat.com" - when: - - system_images_registry is not defined - - openshift_deployment_type == "openshift-enterprise" - - - name: Default system_images_registry to community registry - set_fact: - system_images_registry: "docker.io" - when: - - system_images_registry is not defined - - openshift_deployment_type == "origin" - - - name: Gather Cluster facts and set is_containerized if needed - openshift_facts: - role: common - local_facts: - deployment_type: "{{ openshift_deployment_type }}" - deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}" - cli_image: "{{ osm_image | default(None) }}" - hostname: "{{ openshift_hostname | default(None) }}" - ip: "{{ openshift_ip | default(None) }}" - is_containerized: "{{ l_is_containerized | default(None) }}" - is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}" - is_node_system_container: "{{ l_is_node_system_container | default(false) }}" - is_master_system_container: "{{ l_is_master_system_container | default(false) }}" - is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}" - etcd_runtime: "{{ l_etcd_runtime }}" - system_images_registry: "{{ system_images_registry }}" - public_hostname: "{{ openshift_public_hostname | default(None) }}" - public_ip: "{{ openshift_public_ip | default(None) }}" - portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" - http_proxy: "{{ openshift_http_proxy | default(None) }}" - https_proxy: "{{ openshift_https_proxy | default(None) }}" - no_proxy: "{{ openshift_no_proxy | default(None) }}" - generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}" - - - name: Set fact of no_proxy_internal_hostnames - openshift_facts: - role: common - local_facts: - no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - - - name: initialize_facts set_fact repoquery command - set_fact: - repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" - - - name: initialize_facts set_fact on openshift_docker_hosted_registry_network - set_fact: - openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml deleted file mode 100644 index a7114fc80..000000000 --- 
a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Setup yum repositories for all hosts - hosts: oo_all_hosts - gather_facts: no - tasks: - - name: initialize openshift repos - include_role: - name: openshift_repos diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml deleted file mode 100644 index 37a5284d5..000000000 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -# NOTE: requires openshift_facts be run -- name: Determine openshift_version to configure on first master - hosts: oo_first_master - roles: - - openshift_version - -# NOTE: We set this even on etcd hosts as they may also later run as masters, -# and we don't want to install wrong version of docker and have to downgrade -# later. -- name: Set openshift_version for etcd, node, and master hosts - hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master - vars: - openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}" - pre_tasks: - - set_fact: - openshift_pkg_version: -{{ openshift_version }} - when: openshift_pkg_version is not defined - - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}" - roles: - - openshift_version diff --git a/playbooks/common/openshift-cluster/install_docker_gc.yml b/playbooks/common/openshift-cluster/install_docker_gc.yml deleted file mode 100644 index 1e3dfee07..000000000 --- a/playbooks/common/openshift-cluster/install_docker_gc.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: Install docker gc - hosts: oo_first_master - gather_facts: false - tasks: - - include_role: - name: openshift_docker_gc diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml deleted file mode 100644 index 62fe0dd60..000000000 --- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: Create Hosted Resources - openshift_default_storage_class - hosts: oo_first_master - roles: - - role: openshift_default_storage_class - when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack') diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml deleted file mode 100644 index 281ccce2e..000000000 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- name: Hosted Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Hosted install 'In Progress' - set_stats: - data: - installer_phase_hosted: "In Progress" - aggregate: false - -- include: create_persistent_volumes.yml - -- include: openshift_default_storage_class.yml - -- include: openshift_hosted_create_projects.yml - -- include: openshift_hosted_router.yml - -- include: openshift_hosted_registry.yml - -- include: cockpit-ui.yml - -- include: openshift_prometheus.yml - when: openshift_hosted_prometheus_deploy | default(False) | bool - -- include: install_docker_gc.yml - when: - - openshift_use_crio | default(False) | bool - - openshift_crio_enable_docker_gc | default(False) | bool - -- name: Hosted Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: 
Set Hosted install 'Complete' - set_stats: - data: - installer_phase_hosted: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml deleted file mode 100644 index d5ca5185c..000000000 --- a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- name: Create Hosted Resources - openshift projects - hosts: oo_first_master - tasks: - - include_role: - name: openshift_hosted - tasks_from: create_projects.yml diff --git a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml deleted file mode 100644 index 2a91a827c..000000000 --- a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: Create Hosted Resources - registry - hosts: oo_first_master - tasks: - - set_fact: - openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" - when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" - - include_role: - name: openshift_hosted - tasks_from: registry.yml - when: - - openshift_hosted_manage_registry | default(True) | bool - - openshift_hosted_registry_registryurl is defined diff --git a/playbooks/common/openshift-cluster/openshift_hosted_router.yml b/playbooks/common/openshift-cluster/openshift_hosted_router.yml deleted file mode 100644 index bcb5a34a4..000000000 --- a/playbooks/common/openshift-cluster/openshift_hosted_router.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: Create Hosted Resources - router - hosts: oo_first_master - tasks: - - set_fact: - openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" - when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" - - include_role: - name: openshift_hosted - tasks_from: router.yml - when: - - openshift_hosted_manage_router | default(True) | bool - - openshift_hosted_router_registryurl is defined diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml deleted file mode 100644 index 529a4c939..000000000 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- name: Logging Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Logging install 'In Progress' - set_stats: - data: - installer_phase_logging: "In Progress" - aggregate: false - -- name: OpenShift Aggregated Logging - hosts: oo_first_master - roles: - - openshift_logging - -- name: Update Master configs - hosts: oo_masters:!oo_first_master - tasks: - - block: - - include_role: - name: openshift_logging - tasks_from: update_master_config - -- name: Logging Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Logging install 'Complete' - set_stats: - data: - installer_phase_logging: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml deleted file mode 100644 index 9c0bd489b..000000000 --- a/playbooks/common/openshift-cluster/openshift_metrics.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Metrics Install 
Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Metrics install 'In Progress' - set_stats: - data: - installer_phase_metrics: "In Progress" - aggregate: false - -- name: OpenShift Metrics - hosts: oo_first_master - roles: - - role: openshift_metrics - -- name: OpenShift Metrics - hosts: oo_masters:!oo_first_master - serial: 1 - tasks: - - name: Setup the non-first masters configs - include_role: - name: openshift_metrics - tasks_from: update_master_config.yaml - -- name: Metrics Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Metrics install 'Complete' - set_stats: - data: - installer_phase_metrics: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml deleted file mode 100644 index a73b294a5..000000000 --- a/playbooks/common/openshift-cluster/openshift_prometheus.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Prometheus Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Prometheus install 'In Progress' - set_stats: - data: - installer_phase_prometheus: "In Progress" - aggregate: false - -- name: Create Hosted Resources - openshift_prometheus - hosts: oo_first_master - roles: - - role: openshift_prometheus - -- name: Prometheus Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Prometheus install 'Complete' - set_stats: - data: - installer_phase_prometheus: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-cluster/openshift_provisioners.yml b/playbooks/common/openshift-cluster/openshift_provisioners.yml deleted file mode 100644 index b1ca6f606..000000000 --- a/playbooks/common/openshift-cluster/openshift_provisioners.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -- name: OpenShift Provisioners - hosts: oo_first_master - roles: - - openshift_provisioners diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml deleted file mode 100644 index 4a9fbf7eb..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Check cert expirys - hosts: "{{ g_check_expiry_hosts }}" - vars: - openshift_certificate_expiry_show_all: yes - roles: - # Sets 'check_results' per host which contains health status for - # etcd, master and node certificates. We will use 'check_results' - # to determine if any certificates were expired prior to running - # this playbook. Service restarts will be skipped if any - # certificates were previously expired. 
- - role: openshift_certificate_expiry diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml deleted file mode 100644 index d738c8207..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Backup and remove generated etcd certificates - hosts: oo_first_etcd - any_errors_fatal: true - tasks: - - include_role: - name: etcd - tasks_from: backup_generated_certificates - - include_role: - name: etcd - tasks_from: remove_generated_certificates - -- name: Backup deployed etcd certificates - hosts: oo_etcd_to_config - any_errors_fatal: true - tasks: - - include_role: - name: etcd - tasks_from: backup_server_certificates diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml deleted file mode 100644 index 044875d1c..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml +++ /dev/null @@ -1,103 +0,0 @@ ---- -- name: Check cert expirys - hosts: oo_etcd_to_config:oo_masters_to_config - vars: - openshift_certificate_expiry_show_all: yes - roles: - # Sets 'check_results' per host which contains health status for - # etcd, master and node certificates. We will use 'check_results' - # to determine if any certificates were expired prior to running - # this playbook. Service restarts will be skipped if any - # certificates were previously expired. - - role: openshift_certificate_expiry - -- name: Backup existing etcd CA certificate directories - hosts: oo_etcd_to_config - tasks: - - include_role: - name: etcd - tasks_from: backup_ca_certificates - - include_role: - name: etcd - tasks_from: remove_ca_certificates - -- include: ../../openshift-etcd/ca.yml - -- name: Create temp directory for syncing certs - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - register: g_etcd_mktemp - changed_when: false - -- name: Distribute etcd CA to etcd hosts - hosts: oo_etcd_to_config - tasks: - - include_role: - name: etcd - tasks_from: distribute_ca - vars: - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - -- include: ../../openshift-etcd/restart.yml - # Do not restart etcd when etcd certificates were previously expired. 
- when: ('expired' not in (hostvars - | oo_select_keys(groups['etcd']) - | oo_collect('check_results.check_results.etcd') - | oo_collect('health'))) - -- name: Retrieve etcd CA certificate - hosts: oo_first_etcd - tasks: - - include_role: - name: etcd - tasks_from: retrieve_ca_certificates - vars: - etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - -- name: Distribute etcd CA to masters - hosts: oo_masters_to_config - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - tasks: - - name: Deploy etcd CA - copy: - src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" - dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" - when: groups.oo_etcd_to_config | default([]) | length > 0 - -- name: Delete temporary directory on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - file: - name: "{{ g_etcd_mktemp.stdout }}" - state: absent - changed_when: false - -- include: ../../openshift-master/restart.yml - # Do not restart masters when master or etcd certificates were previously expired. - when: - # masters - - ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) - - ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) - # etcd - - ('expired' not in (hostvars - | oo_select_keys(groups['etcd']) - | oo_collect('check_results.check_results.etcd') - | oo_collect('health'))) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins deleted file mode 120000 index b1213dedb..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/library b/playbooks/common/openshift-cluster/redeploy-certificates/library deleted file mode 120000 index 9a53f009d..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/library +++ /dev/null @@ -1 +0,0 @@ -../../../../library
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins b/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins deleted file mode 120000 index aff753026..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml deleted file mode 100644 index 4dbc041b0..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- name: Backup and remove master cerftificates - hosts: oo_masters_to_config - any_errors_fatal: true - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}" - pre_tasks: - - stat: - path: "{{ openshift.common.config_base }}/generated-configs" - register: openshift_generated_configs_dir_stat - - name: Backup generated certificate and config directories - command: > - tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz - {{ openshift.common.config_base }}/generated-configs - {{ openshift.common.config_base }}/master - when: openshift_generated_configs_dir_stat.stat.exists - delegate_to: "{{ openshift_ca_host }}" - run_once: true - - name: Remove generated certificate directories - file: - path: "{{ item }}" - state: absent - with_items: - - "{{ openshift.common.config_base }}/generated-configs" - - name: Remove generated certificates - file: - path: "{{ openshift.common.config_base }}/master/{{ item }}" - state: absent - with_items: - - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}" - - "etcd.server.crt" - - "etcd.server.key" - - "master.server.crt" - - "master.server.key" - - "openshift-master.crt" - - "openshift-master.key" - - "openshift-master.kubeconfig" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml deleted file mode 100644 index 2ad84b3b9..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Ensure node directory is absent from generated configs - hosts: oo_first_master - tasks: - # The generated configs directory (/etc/origin/generated-configs) is - # backed up during redeployment of the control plane certificates. - # We need to ensure that the generated config directory for - # individual nodes has been deleted before continuing, so verify - # that it is missing here. - - name: Ensure node directories and tarballs are absent from generated configs - shell: > - rm -rf {{ openshift.common.config_base }}/generated-configs/node-* - args: - warn: no - -- name: Redeploy node certificates - hosts: oo_nodes_to_config - pre_tasks: - - name: Remove CA certificate - file: - path: "{{ item }}" - state: absent - with_items: - - "{{ openshift.common.config_base }}/node/ca.crt" diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml deleted file mode 100644 index 2068ed199..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml +++ /dev/null @@ -1,303 +0,0 @@ ---- -- name: Verify OpenShift version is greater than or equal to 1.2 or 3.2 - hosts: oo_first_master - tasks: - - fail: - msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles." 
- when: not openshift.common.version_gte_3_2_or_1_2 | bool - -- name: Check cert expirys - hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config - vars: - openshift_certificate_expiry_show_all: yes - roles: - # Sets 'check_results' per host which contains health status for - # etcd, master and node certificates. We will use 'check_results' - # to determine if any certificates were expired prior to running - # this playbook. Service restarts will be skipped if any - # certificates were previously expired. - - role: openshift_certificate_expiry - -# Update master config when ca-bundle not referenced. Services will be -# restarted below after new CA certificate has been distributed. -- name: Ensure ca-bundle.crt is referenced in master configuration - hosts: oo_masters_to_config - tasks: - - slurp: - src: "{{ openshift.common.config_base }}/master/master-config.yaml" - register: g_master_config_output - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: kubeletClientInfo.ca - yaml_value: ca-bundle.crt - when: (g_master_config_output.content|b64decode|from_yaml).kubeletClientInfo.ca != 'ca-bundle.crt' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: serviceAccountConfig.masterCA - yaml_value: ca-bundle.crt - when: (g_master_config_output.content|b64decode|from_yaml).serviceAccountConfig.masterCA != 'ca-bundle.crt' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: oauthConfig.masterCA - yaml_value: ca-bundle.crt - when: (g_master_config_output.content|b64decode|from_yaml).oauthConfig.masterCA != 'ca-bundle.crt' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: servingInfo.clientCA - yaml_value: ca.crt - when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: etcdClientInfo.ca - yaml_value: ca-bundle.crt - when: - - groups.oo_etcd_to_config | default([]) | length == 0 - - (g_master_config_output.content|b64decode|from_yaml).etcdClientInfo.ca != 'ca-bundle.crt' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: etcdConfig.peerServingInfo.clientCA - yaml_value: ca-bundle.crt - when: - - groups.oo_etcd_to_config | default([]) | length == 0 - - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.peerServingInfo.clientCA != 'ca-bundle.crt' - - modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: etcdConfig.servingInfo.clientCA - yaml_value: ca-bundle.crt - when: - - groups.oo_etcd_to_config | default([]) | length == 0 - - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt' - -- name: Copy current OpenShift CA to legacy directory - hosts: oo_masters_to_config - pre_tasks: - - name: Create legacy-ca directory - file: - path: "{{ openshift.common.config_base }}/master/legacy-ca" - state: directory - mode: 0700 - owner: root - group: root - - command: mktemp -u XXXXXX - register: g_legacy_ca_mktemp - changed_when: false - # Copy CA certificate, key, serial and bundle to legacy-ca with a - # prefix generated by mktemp, ie. XXXXXX-ca.crt. - # - # The following roles will pick up all CA certificates matching - # /.*-ca.crt/ in the legacy-ca directory and ensure they are present - # in the OpenShift CA bundle. 
- # - openshift_ca - # - openshift_master_certificates - # - openshift_node_certificates - - name: Copy current OpenShift CA to legacy directory - copy: - src: "{{ openshift.common.config_base }}/master/{{ item }}" - dest: "{{ openshift.common.config_base }}/master/legacy-ca/{{ g_legacy_ca_mktemp.stdout }}-{{ item }}" - remote_src: true - # It is possible that redeploying failed and files may be missing. - # Ignore errors in this case. Files should have been copied to - # legacy-ca directory in previous run. - ignore_errors: true - with_items: - - "ca.crt" - - "ca.key" - - "ca.serial.txt" - - "ca-bundle.crt" - -- name: Create temporary directory for creating new CA certificate - hosts: oo_first_master - tasks: - - name: Create temporary directory for creating new CA certificate - command: > - mktemp -d /tmp/openshift-ansible-XXXXXXX - register: g_new_openshift_ca_mktemp - changed_when: false - -- name: Create OpenShift CA - hosts: oo_first_master - vars: - # Set openshift_ca_config_dir to a temporary directory where CA - # will be created. We'll replace the existing CA with the CA - # created in the temporary directory. - openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}" - roles: - - role: openshift_master_facts - - role: openshift_named_certificates - - role: openshift_ca - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - -- name: Create temp directory for syncing certs - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - register: g_master_mktemp - changed_when: false - -- name: Retrieve OpenShift CA - hosts: oo_first_master - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - tasks: - - name: Retrieve CA certificate, key, bundle and serial - fetch: - src: "{{ hostvars[openshift_ca_host].g_new_openshift_ca_mktemp.stdout }}/{{ item }}" - dest: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/" - flat: yes - fail_on_missing: yes - validate_checksum: yes - with_items: - - ca.crt - - ca.key - - ca-bundle.crt - - ca.serial.txt - delegate_to: "{{ openshift_ca_host }}" - run_once: true - changed_when: false - -- name: Distribute OpenShift CA to masters - hosts: oo_masters_to_config - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - tasks: - - name: Deploy CA certificate, key, bundle and serial - copy: - src: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/{{ item }}" - dest: "{{ openshift.common.config_base }}/master/" - with_items: - - ca.crt - - ca.key - - ca-bundle.crt - - ca.serial.txt - - name: Update master client kubeconfig CA data - kubeclient_ca: - client_path: "{{ openshift.common.config_base }}/master/openshift-master.kubeconfig" - ca_path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - - name: Update admin client kubeconfig CA data - kubeclient_ca: - client_path: "{{ openshift.common.config_base }}/master/admin.kubeconfig" - ca_path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - - name: Lookup default group for ansible_ssh_user - command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}" - changed_when: false - register: _ansible_ssh_user_gid - - set_fact: - client_users: "{{ [ansible_ssh_user, 'root'] | unique }}" - - name: Create the client config dir(s) - file: - path: "~{{ item }}/.kube" - state: directory - mode: 0700 - owner: "{{ item }}" - group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" - 
with_items: "{{ client_users }}" - - name: Copy the admin client config(s) - copy: - src: "{{ openshift.common.config_base }}/master/admin.kubeconfig" - dest: "~{{ item }}/.kube/config" - remote_src: yes - with_items: "{{ client_users }}" - - name: Update the permissions on the admin client config(s) - file: - path: "~{{ item }}/.kube/config" - state: file - mode: 0700 - owner: "{{ item }}" - group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" - with_items: "{{ client_users }}" - -- include: ../../openshift-master/restart.yml - # Do not restart masters when master or etcd certificates were previously expired. - when: - # masters - - ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) - - ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) - # etcd - - ('expired' not in (hostvars - | oo_select_keys(groups['etcd']) - | oo_collect('check_results.check_results.etcd') - | oo_collect('health'))) - -- name: Distribute OpenShift CA certificate to nodes - hosts: oo_nodes_to_config - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - tasks: - - copy: - src: "{{ hostvars['localhost'].g_master_mktemp.stdout }}/ca-bundle.crt" - dest: "{{ openshift.common.config_base }}/node/ca.crt" - - name: Copy OpenShift CA to system CA trust - copy: - src: "{{ item.cert }}" - dest: "/etc/pki/ca-trust/source/anchors/{{ item.id }}-{{ item.cert | basename }}" - remote_src: yes - with_items: - - id: openshift - cert: "{{ openshift.common.config_base }}/node/ca.crt" - notify: - - update ca trust - - name: Update node client kubeconfig CA data - kubeclient_ca: - client_path: "{{ openshift.common.config_base }}/node/system:node:{{ openshift.common.hostname }}.kubeconfig" - ca_path: "{{ openshift.common.config_base }}/node/ca.crt" - handlers: - # Normally this handler would restart docker after updating ca - # trust. We'll do that when we restart nodes to avoid restarting - # docker on all nodes in parallel. - - name: update ca trust - command: update-ca-trust - -- name: Delete temporary directory on CA host - hosts: oo_first_master - tasks: - - file: - path: "{{ g_new_openshift_ca_mktemp.stdout }}" - state: absent - -- name: Delete temporary directory on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - file: - name: "{{ g_master_mktemp.stdout }}" - state: absent - changed_when: false - -- include: ../../openshift-node/restart.yml - # Do not restart nodes when node, master or etcd certificates were previously expired. 
- when: - # nodes - - ('expired' not in hostvars - | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"})) - - ('expired' not in hostvars - | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"})) - # masters - - ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) - - ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) - # etcd - - ('expired' not in (hostvars - | oo_select_keys(groups['etcd']) - | oo_collect('check_results.check_results.etcd') - | oo_collect('health'))) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml deleted file mode 100644 index afd5463b2..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ /dev/null @@ -1,102 +0,0 @@ ---- -- name: Update registry certificates - hosts: oo_first_master - vars: - roles: - - lib_openshift - tasks: - - name: Create temp directory for kubeconfig - command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp - changed_when: false - - - name: Copy admin client config(s) - command: > - cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig - changed_when: false - - - name: Determine if docker-registry exists - command: > - {{ openshift.common.client_binary }} get dc/docker-registry -o json - --config={{ mktemp.stdout }}/admin.kubeconfig - -n default - register: l_docker_registry_dc - failed_when: false - changed_when: false - - - set_fact: - docker_registry_env_vars: "{{ ((l_docker_registry_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env'] - | oo_collect('name')) - | default([]) }}" - docker_registry_secrets: "{{ ((l_docker_registry_dc.stdout | from_json)['spec']['template']['spec']['volumes'] - | oo_collect('secret') - | oo_collect('secretName')) - | default([]) }}" - changed_when: false - when: l_docker_registry_dc.rc == 0 - - # Replace dc/docker-registry environment variable certificate data if set. - - name: Update docker-registry environment variables - shell: > - {{ openshift.common.client_binary }} env dc/docker-registry - OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)" - OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-registry.crt)" - OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-registry.key)" - --config={{ mktemp.stdout }}/admin.kubeconfig - -n default - when: l_docker_registry_dc.rc == 0 and 'OPENSHIFT_CA_DATA' in docker_registry_env_vars and 'OPENSHIFT_CERT_DATA' in docker_registry_env_vars and 'OPENSHIFT_KEY_DATA' in docker_registry_env_vars - - # Replace dc/docker-registry certificate secret contents if set. 
- - block: - - name: Retrieve registry service IP - oc_service: - namespace: default - name: docker-registry - state: list - register: docker_registry_service_ip - changed_when: false - - - set_fact: - docker_registry_route_hostname: "{{ 'docker-registry-default.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true)) }}" - changed_when: false - - - name: Generate registry certificate - command: > - {{ openshift.common.client_binary }} adm ca create-server-cert - --signer-cert={{ openshift.common.config_base }}/master/ca.crt - --signer-key={{ openshift.common.config_base }}/master/ca.key - --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt - --config={{ mktemp.stdout }}/admin.kubeconfig - --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" - --cert={{ openshift.common.config_base }}/master/registry.crt - --key={{ openshift.common.config_base }}/master/registry.key - {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %} - --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }} - {% endif %} - - - name: Update registry certificates secret - oc_secret: - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" - name: registry-certificates - namespace: default - state: present - files: - - name: registry.crt - path: "{{ openshift.common.config_base }}/master/registry.crt" - - name: registry.key - path: "{{ openshift.common.config_base }}/master/registry.key" - run_once: true - when: l_docker_registry_dc.rc == 0 and 'registry-certificates' in docker_registry_secrets and 'REGISTRY_HTTP_TLS_CERTIFICATE' in docker_registry_env_vars and 'REGISTRY_HTTP_TLS_KEY' in docker_registry_env_vars - - - name: Redeploy docker registry - command: > - {{ openshift.common.client_binary }} deploy dc/docker-registry - --latest - --config={{ mktemp.stdout }}/admin.kubeconfig - -n default - - - name: Delete temp directory - file: - name: "{{ mktemp.stdout }}" - state: absent - changed_when: False diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/roles b/playbooks/common/openshift-cluster/redeploy-certificates/roles deleted file mode 120000 index 4bdbcbad3..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml deleted file mode 100644 index 2116c745c..000000000 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ /dev/null @@ -1,141 +0,0 @@ ---- -- name: Update router certificates - hosts: oo_first_master - vars: - roles: - - lib_openshift - tasks: - - name: Create temp directory for kubeconfig - command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: router_cert_redeploy_tempdir - changed_when: false - - - name: Copy admin client config(s) - command: > - cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig - changed_when: false - - - name: Determine if router exists - command: > - {{ openshift.common.client_binary }} get dc/router -o json - --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig - -n default - register: l_router_dc - failed_when: false - changed_when: false - - - name: Determine if router service exists - command: > - {{ openshift.common.client_binary }} get svc/router -o json - --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig - -n default - register: l_router_svc - failed_when: false - changed_when: false - - - name: Collect router environment variables and secrets - set_fact: - router_env_vars: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env'] - | oo_collect('name')) - | default([]) }}" - router_secrets: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['volumes'] - | oo_collect('secret') - | oo_collect('secretName')) - | default([]) }}" - changed_when: false - when: l_router_dc.rc == 0 - - - name: Collect router service annotations - set_fact: - router_service_annotations: "{{ (l_router_svc.stdout | from_json)['metadata']['annotations'] if 'annotations' in (l_router_svc.stdout | from_json)['metadata'] else [] }}" - when: l_router_svc.rc == 0 - - - name: Update router environment variables - shell: > - {{ openshift.common.client_binary }} env dc/router - OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)" - OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)" - OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)" - --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig - -n default - when: - - l_router_dc.rc == 0 - - ('OPENSHIFT_CA_DATA' in router_env_vars) - - ('OPENSHIFT_CERT_DATA' in router_env_vars) - - ('OPENSHIFT_KEY_DATA' in router_env_vars) - - # When the router service contains service signer annotations we - # will delete the existing certificate secret and allow OpenShift to - # replace the secret. 
- - block: - - name: Delete existing router certificate secret - oc_secret: - kubeconfig: "{{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig" - name: router-certs - namespace: default - state: absent - run_once: true - - - name: Remove router service annotations - command: > - {{ openshift.common.client_binary }} annotate service/router - service.alpha.openshift.io/serving-cert-secret-name- - service.alpha.openshift.io/serving-cert-signed-by- - --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig - -n default - - - name: Add serving-cert-secret annotation to router service - command: > - {{ openshift.common.client_binary }} annotate service/router - service.alpha.openshift.io/serving-cert-secret-name=router-certs - --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig - -n default - when: - - l_router_dc.rc == 0 - - l_router_svc.rc == 0 - - ('router-certs' in router_secrets) - - openshift_hosted_router_certificate is undefined - - ('service.alpha.openshift.io/serving-cert-secret-name') in router_service_annotations - - ('service.alpha.openshift.io/serving-cert-signed-by') in router_service_annotations - - # When there are no annotations on the router service we will allow - # the openshift_hosted role to either create a new wildcard - # certificate (since we deleted the original) or reapply a custom - # openshift_hosted_router_certificate. - - file: - path: "{{ item }}" - state: absent - with_items: - - /etc/origin/master/openshift-router.crt - - /etc/origin/master/openshift-router.key - when: - - l_router_dc.rc == 0 - - l_router_svc.rc == 0 - - ('router-certs' in router_secrets) - - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations - - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations - - - include_role: - name: openshift_hosted - tasks_from: main - vars: - openshift_hosted_manage_registry: false - when: - - l_router_dc.rc == 0 - - l_router_svc.rc == 0 - - ('router-certs' in router_secrets) - - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations - - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations - - - name: Redeploy router - command: > - {{ openshift.common.client_binary }} deploy dc/router - --latest - --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig - -n default - - - name: Delete temp directory - file: - name: "{{ router_cert_redeploy_tempdir.stdout }}" - state: absent - changed_when: False diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml deleted file mode 100644 index 26716a92d..000000000 --- a/playbooks/common/openshift-cluster/sanity_checks.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -- name: Verify Requirements - hosts: oo_all_hosts - tasks: - - fail: - msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool - - - fail: - msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool - - - fail: - msg: Nuage sdn can not be used with flannel - when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool - - - fail: - msg: Contiv can not be used with 
openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: Contiv can not be used with flannel - when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: Contiv can not be used with nuage - when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool - - - fail: - msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both. - when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool - - - fail: - msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both - when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool - - - fail: - msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both - when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: openshift_hostname must be 63 characters or less - when: openshift_hostname is defined and openshift_hostname | length > 63 - - - fail: - msg: openshift_public_hostname must be 63 characters or less - when: openshift_public_hostname is defined and openshift_public_hostname | length > 63 diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml deleted file mode 100644 index bd964b2ce..000000000 --- a/playbooks/common/openshift-cluster/service_catalog.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: Service Catalog Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Service Catalog install 'In Progress' - set_stats: - data: - installer_phase_servicecatalog: "In Progress" - aggregate: false - -- name: Service Catalog - hosts: oo_first_master - roles: - - openshift_service_catalog - - ansible_service_broker - - template_service_broker - vars: - first_master: "{{ groups.oo_first_master[0] }}" - -- name: Service Catalog Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Service Catalog install 'Complete' - set_stats: - data: - installer_phase_servicecatalog: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml deleted file mode 100644 index 45b34c8bd..000000000 --- a/playbooks/common/openshift-cluster/std_include.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: Initialization Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - roles: - - installer_checkpoint - tasks: - - name: Set install initialization 'In Progress' - set_stats: - data: - installer_phase_initialize: "In Progress" - aggregate: false - -- include: evaluate_groups.yml - tags: - - always - -- include: initialize_facts.yml - tags: - - always - -- include: sanity_checks.yml - tags: - - always - -- include: validate_hostnames.yml - tags: - - node - -- include: initialize_openshift_repos.yml - tags: - - always - 
-- include: initialize_openshift_version.yml - tags: - - always - -- name: Initialization Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set install initialization 'Complete' - set_stats: - data: - installer_phase_initialize: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml deleted file mode 100644 index eb118365a..000000000 --- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- set_fact: k8s_type="etcd" - -- name: Generate etcd instance names(s) - set_fact: - scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" - register: etcd_names_output - with_sequence: count={{ num_etcd }} - -- set_fact: - etcd_names: "{{ etcd_names_output.results | default([]) - | oo_collect('ansible_facts') - | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml deleted file mode 100644 index 783f70f50..000000000 --- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- set_fact: k8s_type="master" - -- name: Generate master instance names(s) - set_fact: - scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" - register: master_names_output - with_sequence: count={{ num_masters }} - -- set_fact: - master_names: "{{ master_names_output.results | default([]) - | oo_collect('ansible_facts') - | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml deleted file mode 100644 index c103e40a9..000000000 --- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- set_fact: k8s_type=node -- set_fact: sub_host_type="{{ type }}" -- set_fact: number_nodes="{{ count }}" - -- name: Generate node instance names(s) - set_fact: - scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" - register: node_names_output - with_sequence: count={{ number_nodes }} - -- set_fact: - node_names: "{{ node_names_output.results | default([]) - | oo_collect('ansible_facts') - | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 98953f72e..6d4ddf011 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -1,5 +1,5 @@ --- -- include: ../../evaluate_groups.yml +- include: ../../../../init/evaluate_groups.yml vars: # Do not allow adding hosts during upgrade. 
g_new_master_hosts: [] diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml index 83f16ac0d..3b779becb 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml @@ -6,10 +6,6 @@ retries: 3 delay: 30 -- name: Update docker facts - openshift_facts: - role: docker - - name: Restart containerized services service: name={{ item }} state=started with_items: diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml index 808cc562c..83be290e6 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml @@ -41,6 +41,8 @@ - name: Upgrade Docker package: name=docker{{ '-' + docker_version }} state=present + register: result + until: result | success - include: restart.yml when: not skip_docker_restart | default(False) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml index 52345a9ba..2e3a7ae8b 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml @@ -16,7 +16,7 @@ changed_when: no - name: Get current version of Docker - command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" + command: "{{ repoquery_installed }} --qf '%{version}' docker" register: curr_docker_version retries: 4 until: curr_docker_version | succeeded diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index d086cad00..531175c85 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -9,7 +9,6 @@ tasks_from: backup vars: r_etcd_common_backup_tag: "{{ etcd_backup_tag }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml index d71c96cd7..c5ff4133c 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml @@ -2,60 +2,9 @@ - name: Determine etcd version hosts: oo_etcd_hosts_to_upgrade tasks: - - block: - - name: Record RPM based etcd version - command: rpm -qa --qf '%{version}' etcd\* - args: - warn: no - register: etcd_rpm_version - failed_when: false - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - debug: - msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected" - when: - - not openshift.common.is_containerized | bool - - - block: - - name: Record containerized etcd version (docker) - command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version_docker - failed_when: false - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - when: - - not 
openshift.common.is_etcd_system_container | bool - - # Given a register variables is set even if the whwen condition - # is false, we need to set etcd_container_version separately - - set_fact: - etcd_container_version: "{{ etcd_container_version_docker.stdout }}" - when: - - not openshift.common.is_etcd_system_container | bool - - - name: Record containerized etcd version (runc) - command: runc exec etcd rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version_runc - failed_when: false - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - when: - - openshift.common.is_etcd_system_container | bool - - # Given a register variables is set even if the whwen condition - # is false, we need to set etcd_container_version separately - - set_fact: - etcd_container_version: "{{ etcd_container_version_runc.stdout }}" - when: - - openshift.common.is_etcd_system_container | bool - - - debug: - msg: "Etcd containerized version {{ etcd_container_version }} detected" - when: - - openshift.common.is_containerized | bool + - include_role: + name: etcd + tasks_from: version_detect.yml - include: upgrade_rpm_members.yml vars: @@ -93,6 +42,14 @@ vars: etcd_upgrade_version: '3.1.3' +- include: upgrade_rpm_members.yml + vars: + etcd_upgrade_version: '3.2' + +- include: upgrade_image_members.yml + vars: + etcd_upgrade_version: '3.2.7' + - name: Upgrade fedora to latest hosts: oo_etcd_hosts_to_upgrade serial: 1 @@ -101,7 +58,6 @@ name: etcd tasks_from: upgrade_image vars: - r_etcd_common_etcd_runtime: "host" etcd_peer: "{{ openshift.common.hostname }}" when: - ansible_distribution == 'Fedora' diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml index e5e895775..6fca42bd0 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml @@ -11,7 +11,6 @@ tasks_from: upgrade_image vars: r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" etcd_peer: "{{ openshift.common.hostname }}" when: - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<') diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml index a2a26bad4..51e8786b3 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml @@ -11,7 +11,6 @@ tasks_from: upgrade_rpm vars: r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" - r_etcd_common_etcd_runtime: "host" etcd_peer: "{{ openshift.common.hostname }}" when: - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<') diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index 2826951e6..9981d905b 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -1,15 +1,20 @@ --- -- include: ../evaluate_groups.yml +- include: ../../../init/evaluate_groups.yml vars: # Do not allow adding hosts during upgrade. 
g_new_master_hosts: [] g_new_node_hosts: [] -- include: ../initialize_facts.yml +- include: ../../../init/facts.yml - name: Ensure firewall is not switched during upgrade hosts: oo_all_hosts + vars: + openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}" tasks: + - name: set currently installed version + set_fact: + openshift_currently_installed_version: "{{ openshift_master_installed_version }}" - name: Check if iptables is running command: systemctl status iptables changed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 122066955..c458184c9 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -14,8 +14,9 @@ pre_tasks: - name: Load lib_openshift modules - include_role: + import_role: name: lib_openshift + - name: Collect all routers oc_obj: state: list @@ -85,17 +86,19 @@ roles: - openshift_manageiq + - role: openshift_project_request_template + when: openshift_project_request_template_manage # Create the new templates shipped in 3.2, existing templates are left # unmodified. This prevents the subsequent role definition for # openshift_examples from failing when trying to replace templates that do # not already exist. We could have potentially done a replace --force to # create and update in one step. - role: openshift_examples - when: openshift_install_examples | default(true,true) | bool + when: openshift_install_examples | default(true) | bool - openshift_hosted_templates # Update the existing templates - role: openshift_examples - when: openshift_install_examples | default(true,true) | bool + when: openshift_install_examples | default(true) | bool registry_url: "{{ openshift.master.registry_url }}" openshift_examples_import_command: replace - role: openshift_hosted_templates @@ -117,7 +120,6 @@ - name: grep pluginOrderOverride command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml register: grep_plugin_order_override - when: openshift.common.version_gte_3_3_or_1_3 | bool changed_when: false failed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 13fa37b09..84b740227 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -6,13 +6,13 @@ - name: Update oreg_auth docker login credentials if necessary include_role: - name: docker + name: container_runtime tasks_from: registry_auth.yml when: oreg_auth_user is defined - name: Verify containers are available for upgrade command: > - docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} + docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }} register: pull_result changed_when: "'Downloaded newer image' in pull_result.stdout" when: openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml deleted file mode 100644 index 8cc46ab68..000000000 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -# When we update package "a-${version}" and a requires b >= ${version} if we -# don't specify the version of b 
yum will choose the latest version of b -# available and the whole set of dependencies end up at the latest version. -# Since the package module, unlike the yum module, doesn't flatten a list -# of packages into one transaction we need to do that explicitly. The ansible -# core team tells us not to rely on yum module transaction flattening anyway. - -# TODO: If the sdn package isn't already installed this will install it, we -# should fix that -- name: Upgrade master packages - package: name={{ master_pkgs | join(',') }} state=present - vars: - master_pkgs: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - PyYAML - when: - - component == "master" - - not openshift.common.is_atomic | bool - -- name: Upgrade node packages - package: name={{ node_pkgs | join(',') }} state=present - vars: - node_pkgs: - - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - PyYAML - when: - - component == "node" - - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index a5e2f7940..503d75ba0 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -3,22 +3,6 @@ # Upgrade Masters ############################################################################### -# oc adm migrate storage should be run prior to etcd v3 upgrade -# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 -- name: Pre master upgrade - Upgrade all storage - hosts: oo_first_master - tasks: - - name: Upgrade all storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=* --confirm - register: l_pb_upgrade_control_plane_pre_upgrade_storage - when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - failed_when: - - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 - - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool - # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. This causes problems for the etcd data dir fact detection # so we must first make sure this is set correctly before attempting the backup. 
@@ -48,6 +32,22 @@ - include: create_service_signer_cert.yml +# oc adm migrate storage should be run prior to etcd v3 upgrade +# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 +- name: Pre master upgrade - Upgrade all storage + hosts: oo_first_master + tasks: + - name: Upgrade all storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=* --confirm + register: l_pb_upgrade_control_plane_pre_upgrade_storage + when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + failed_when: + - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 + - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool + # Set openshift_master_facts separately. In order to reconcile # admission_config's, we currently must run openshift_master_facts and # then run openshift_facts. @@ -63,13 +63,9 @@ vars: openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 - handlers: - - include: ../../../../roles/openshift_master/handlers/main.yml - static: yes - roles: - - openshift_facts - - lib_utils - post_tasks: + tasks: + - include_role: + name: openshift_facts # Run the pre-upgrade hook if defined: - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}" @@ -78,55 +74,9 @@ - include: "{{ openshift_master_upgrade_pre_hook }}" when: openshift_master_upgrade_pre_hook is defined - - include: rpm_upgrade.yml component=master - when: not openshift.common.is_containerized | bool - - - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml - - - include: upgrade_scheduler.yml - - - include: "{{ master_config_hook }}" - when: master_config_hook is defined - - - include_vars: ../../../../roles/openshift_master/vars/main.yml - - - name: Update journald config - include: ../../../../roles/openshift_master/tasks/journald.yml - - - name: Remove any legacy systemd units and update systemd units - include: ../../../../roles/openshift_master/tasks/systemd_units.yml - - - name: Check for ca-bundle.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - register: ca_bundle_stat - failed_when: false - - - name: Check for ca.crt - stat: - path: "{{ openshift.common.config_base }}/master/ca.crt" - register: ca_crt_stat - failed_when: false - - - name: Migrate ca.crt to ca-bundle.crt - command: mv ca.crt ca-bundle.crt - args: - chdir: "{{ openshift.common.config_base }}/master" - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - - - name: Link ca.crt to ca-bundle.crt - file: - src: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - path: "{{ openshift.common.config_base }}/master/ca.crt" - state: link - when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists - - - name: Update oreg value - yedit: - src: "{{ openshift.common.config_base }}/master/master-config.yaml" - key: 'imageConfig.format' - value: "{{ oreg_url | default(oreg_url_master) }}" - when: oreg_url is defined or oreg_url_master is defined + - include_role: + name: openshift_master + tasks_from: upgrade.yml # Run the upgrade hook prior to restarting services/system if defined: - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" @@ -135,10 +85,10 @@ - include: "{{ openshift_master_upgrade_hook }}" when: openshift_master_upgrade_hook is defined - - include: ../../openshift-master/restart_hosts.yml 
+ - include: ../../../openshift-master/private/tasks/restart_hosts.yml when: openshift.common.rolling_restart_mode == 'system' - - include: ../../openshift-master/restart_services.yml + - include: ../../../openshift-master/private/tasks/restart_services.yml when: openshift.common.rolling_restart_mode == 'services' # Run the post-upgrade hook if defined: @@ -153,7 +103,9 @@ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=clusterpolicies --confirm register: l_pb_upgrade_control_plane_post_upgrade_storage - when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool + when: + - openshift_upgrade_post_storage_migration_enabled | default(true) | bool + - openshift_version | version_compare('3.7','<') failed_when: - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 @@ -191,10 +143,6 @@ roles: - { role: openshift_cli } vars: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. - skip_docker_role: True __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml" tasks: - name: Reconcile Cluster Roles @@ -234,7 +182,6 @@ - reconcile_jenkins_role_binding_result.rc == 0 when: - openshift_version | version_compare('3.7','<') - - openshift_version | version_compare('3.4','>=') - when: openshift_upgrade_target | version_compare('3.7','<') block: @@ -340,7 +287,7 @@ pre_tasks: - name: Load lib_openshift modules - include_role: + import_role: name: lib_openshift # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node @@ -366,13 +313,13 @@ delay: 60 roles: - - lib_openshift - openshift_facts - - docker - - openshift_node_dnsmasq - - openshift_node_upgrade - post_tasks: + - include_role: + name: openshift_node + tasks_from: upgrade.yml + vars: + openshift_node_upgrade_in_progress: True - name: Set node schedulability oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index c93a5d89c..75ffd3fe9 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -8,7 +8,7 @@ pre_tasks: - name: Load lib_openshift modules - include_role: + import_role: name: lib_openshift # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node @@ -34,16 +34,18 @@ delay: 60 roles: - - lib_openshift - openshift_facts - - docker - - openshift_node_dnsmasq - - openshift_node_upgrade - - role: openshift_excluder - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - post_tasks: + - include_role: + name: openshift_node + tasks_from: upgrade.yml + vars: + openshift_node_upgrade_in_progress: True + - include_role: + name: openshift_excluder + vars: + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Set node schedulability oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml new file mode 
100644 index 000000000..d9ce3a7e3 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml @@ -0,0 +1,59 @@ +--- +- name: create new scale group + hosts: localhost + tasks: + - name: build upgrade scale groups + include_role: + name: openshift_aws + tasks_from: upgrade_node_group.yml + + - fail: + msg: "Ensure that new scale groups were provisioned before proceeding to update." + when: + - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0" + +- name: initialize upgrade bits + include: init.yml + +- name: Drain and upgrade nodes + hosts: oo_sg_current_nodes + # This var must be set with -e on invocation, as it is not a per-host inventory var + # and is evaluated early. Values such as "20%" can also be used. + serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" + max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}" + + pre_tasks: + - name: Load lib_openshift modules + include_role: + name: ../roles/lib_openshift + + # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node + # or docker actually needs an upgrade before proceeding. Perhaps best to save this until + # we merge upgrade functionality into the base roles and a normal config.yml playbook run. + - name: Mark node unschedulable + oc_adm_manage_node: + node: "{{ openshift.node.nodename | lower }}" + schedulable: False + delegate_to: "{{ groups.oo_first_master.0 }}" + retries: 10 + delay: 5 + register: node_unschedulable + until: node_unschedulable|succeeded + + - name: Drain Node for Kubelet upgrade + command: > + {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_nodes_drain_result + until: not l_upgrade_nodes_drain_result | failed + retries: 60 + delay: 60 + +# Alright, let's clean up! 
+- name: clean up the old scale group + hosts: localhost + tasks: + - name: clean up scale group + include_role: + name: openshift_aws + tasks_from: remove_scale_group.yml diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml deleted file mode 100644 index 8558bf3e9..000000000 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ /dev/null @@ -1,173 +0,0 @@ ---- -# Upgrade predicates -- vars: - prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" - prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}" - default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}" - # older_predicates are the set of predicates that have previously been - # hard-coded into openshift_facts - older_predicates: - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: MaxEBSVolumeCount - - name: MaxGCEPDVolumeCount - - name: Region - argument: - serviceAffinity: - labels: - - region - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: Region - argument: - serviceAffinity: - labels: - - region - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: Region - argument: - serviceAffinity: - labels: - - region - # older_predicates_no_region are the set of predicates that have previously - # been hard-coded into openshift_facts, with the Region predicate removed - older_predicates_no_region: - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - name: MaxEBSVolumeCount - - name: MaxGCEPDVolumeCount - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - - name: NoVolumeZoneConflict - - - name: MatchNodeSelector - - name: PodFitsResources - - name: PodFitsPorts - - name: NoDiskConflict - block: - - # Handle case where openshift_master_predicates is defined - - block: - - debug: - msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}" - when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] - - - debug: - msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}" - when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates - when: openshift_master_scheduler_predicates | default(none) is not none - - # Handle cases where openshift_master_predicates is not defined - - block: - - debug: - msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}" - when: - - 
openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates - - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] - - - set_fact: - openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}" - when: - - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates - - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] - - - set_fact: - openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}" - when: - - openshift_master_scheduler_current_predicates != default_predicates_no_region - - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] - - when: openshift_master_scheduler_predicates | default(none) is none - - -# Upgrade priorities -- vars: - prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}" - prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}" - default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}" - # older_priorities are the set of priorities that have previously been - # hard-coded into openshift_facts - older_priorities: - - - name: LeastRequestedPriority - weight: 1 - - name: SelectorSpreadPriority - weight: 1 - - name: Zone - weight: 2 - argument: - serviceAntiAffinity: - label: zone - # older_priorities_no_region are the set of priorities that have previously - # been hard-coded into openshift_facts, with the Zone priority removed - older_priorities_no_zone: - - - name: LeastRequestedPriority - weight: 1 - - name: SelectorSpreadPriority - weight: 1 - block: - - # Handle case where openshift_master_priorities is defined - - block: - - debug: - msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}" - when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] - - - debug: - msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}" - when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities - when: openshift_master_scheduler_priorities | default(none) is not none - - # Handle cases where openshift_master_priorities is not defined - - block: - - debug: - msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}" - when: - - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities - - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] - - - set_fact: - openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}" - when: - - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities - - openshift_master_scheduler_current_priorities in 
older_priorities + [prev_priorities] - - - set_fact: - openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}" - when: - - openshift_master_scheduler_current_priorities != default_priorities_no_zone - - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] - - when: openshift_master_scheduler_priorities | default(none) is none - - -# Update scheduler -- vars: - scheduler_config: - kind: Policy - apiVersion: v1 - predicates: "{{ openshift_upgrade_scheduler_predicates - | default(openshift_master_scheduler_current_predicates) }}" - priorities: "{{ openshift_upgrade_scheduler_priorities - | default(openshift_master_scheduler_current_priorities) }}" - block: - - name: Update scheduler config - copy: - content: "{{ scheduler_config | to_nice_json }}" - dest: "{{ openshift_master_scheduler_conf }}" - backup: true - when: > - openshift_upgrade_scheduler_predicates is defined or - openshift_upgrade_scheduler_priorities is defined diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml deleted file mode 100644 index 5e7a66171..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.burst' - yaml_value: 400 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps' - yaml_value: 200 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.burst' - yaml_value: 600 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps' - yaml_value: 300 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' - yaml_value: service-signer.crt - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' - yaml_value: service-signer.key - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ 
openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml deleted file mode 100644 index 89b524f14..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.acceptContentTypes' - yaml_value: 'application/vnd.kubernetes.protobuf,application/json' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.contentType' - yaml_value: 'application/vnd.kubernetes.protobuf' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.burst' - yaml_value: 40 - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/node/node-config.yaml" - yaml_key: 'masterClientConnectionOverrides.qps' - yaml_value: 20 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml deleted file mode 100644 index a241ef039..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml +++ /dev/null @@ -1,118 +0,0 @@ ---- -# -# Full Control Plane + Nodes Upgrade -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade - -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml deleted file mode 100644 index 54c85f0fb..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ /dev/null @@ -1,118 +0,0 @@ ---- -# -# Control Plane Upgrade Playbook -# -# Upgrades masters and Docker (only on standalone etcd hosts) -# -# This upgrade does not include: -# - node service running on masters -# - docker running on masters -# - node service running on dedicated nodes -# -# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_master_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_masters_to_config - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml deleted file mode 100644 index cee4e9087..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ /dev/null @@ -1,113 +0,0 @@ ---- -# -# Node Upgrade Playbook -# -# Upgrades nodes only, but requires the control plane to have already been upgraded. -# -- include: ../init.yml - tags: - - pre_upgrade - -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - -- include: ../pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../disable_node_excluders.yml - tags: - - pre_upgrade - -- include: ../../initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. 
At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- name: Verify upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- name: Verify docker upgrade targets - hosts: oo_nodes_to_upgrade - tasks: - - include: ../pre/tasks/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../cleanup_unused_images.yml - -- include: ../upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml deleted file mode 100644 index 52458e03c..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' - yaml_value: service-signer.crt - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' - yaml_value: service-signer.key diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml deleted file mode 100644 index 52458e03c..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' - yaml_value: service-signer.crt - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' - yaml_value: service-signer.key diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml deleted file mode 100644 index ae63c9ca9..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -############################################################################### -# Pre-upgrade checks for known data problems. If this playbook fails, you should -# contact support. If you're not supported, contact users@lists.openshift.com -# -# oc_objectvalidator provides these two checks -# 1 - SDN Data issues, never seen in the wild but known possible due to code audits -# https://github.com/openshift/origin/issues/12697 -# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934 -# -############################################################################### -- name: Verify 3.5 specific upgrade checks - hosts: oo_first_master - roles: - - { role: lib_openshift } - tasks: - - name: Check for invalid namespaces and SDN errors - oc_objectvalidator: - - # What's all this PetSet business about? - # - # 'PetSets' were ALPHA resources in Kube <= 3.4. In >= 3.5 they are - # no longer supported. The BETA resource 'StatefulSets' replaces - # them. We can't migrate clients' PetSets to - # StatefulSets. Additionally, Red Hat has never officially supported - # these resource types. Sorry users, but if you were using - # unsupported resources from the Kube documentation then we can't - # help you at this time. - # - # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229 - - name: Check if legacy PetSets exist - oc_obj: - state: list - all_namespaces: true - kind: petsets - register: l_do_petsets_exist - - - name: Fail on unsupported resource migration 'PetSets' - fail: - msg: > - PetSet objects were detected in your cluster. These are an - Alpha feature in upstream Kubernetes 1.4 and are not supported - by Red Hat. In Kubernetes 1.5, they are replaced by the Beta - feature StatefulSets. Red Hat currently does not offer support - for either PetSets or StatefulSets. - - Automatically migrating PetSets to StatefulSets in OpenShift - Container Platform (OCP) 3.5 is not supported. See the - Kubernetes "Upgrading from PetSets to StatefulSets" - documentation for additional information: - - https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/ - - PetSets MUST be removed before upgrading to OCP 3.5. Red Hat - strongly recommends reading the above referenced documentation - in its entirety before taking any destructive actions. 
- - If you want to simply remove all PetSets without manually - migrating to StatefulSets, run this command as a user with - cluster-admin privileges: - - $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false - when: - # Search did not fail, valid resource type found - - l_do_petsets_exist.results.returncode == 0 - # Items do exist in the search results - - l_do_petsets_exist.results.results.0['items'] | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index dd109cfa9..5f9c56867 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -63,7 +63,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -73,13 +73,7 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 8ab68002d..1aac3d014 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -67,7 +67,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -77,13 +77,7 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index ba6fcc3f8..306b76422 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -56,7 +56,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
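For context on the skip_docker_role deletions in these hunks: the flag only takes effect because consuming plays guard the docker role on it; the load balancer play further down in this diff uses exactly the expression "not skip_docker_role | default(False) | bool". A minimal sketch of that gating pattern, assuming a play that applies the openshift_docker role (illustrative, not part of this diff):

---
- name: Example play that honors skip_docker_role (sketch)
  hosts: oo_masters_to_config
  vars:
    # Upgrade playbooks set this so the docker role becomes a no-op early in the upgrade.
    skip_docker_role: True
  roles:
    - role: openshift_docker
      when: not (skip_docker_role | default(False) | bool)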
- skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index f4862e321..6d4949542 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -67,7 +67,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -77,13 +77,7 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index b905d6d86..0a592896b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -71,7 +71,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -81,13 +81,7 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index bc080f9a3..b381d606a 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -56,7 +56,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -66,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
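The include swap repeated across these hunks, from initialize_openshift_version.yml to init/version.yml, keeps the same calling contract: pin openshift_release to the upgrade target and disable the guard that normally stops the version role from moving past the installed version. A consolidated sketch of that contract, with the implied semantics spelled out in comments (paths as used in this diff):

- include: ../../../../init/version.yml
  vars:
    # Request the upgrade target; the openshift_version role narrows this to a
    # concrete version, still respecting openshift_image_tag and openshift_pkg_version.
    openshift_release: "{{ openshift_upgrade_target }}"
    # Without this override the role refuses to move past the installed version.
    openshift_protect_installed_version: False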
- skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml index 8e4f99c91..74d0cd8ad 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -11,13 +11,15 @@ tasks: - name: Check for invalid namespaces and SDN errors oc_objectvalidator: - + # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO - name: Confirm OpenShift authorization objects are in sync command: > {{ openshift.common.client_binary }} adm migrate authorization - when: openshift_version | version_compare('3.7','<') + when: + - openshift_currently_installed_version | version_compare('3.7','<') + - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool changed_when: false register: l_oc_result until: l_oc_result.rc == 0 - retries: 4 + retries: 2 delay: 15 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins index 7de3c1dd7..7de3c1dd7 120000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/filter_plugins +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml index 1d4d1919c..1d4d1919c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/master_config_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/roles b/playbooks/common/openshift-cluster/upgrades/v3_8/roles index 415645be6..415645be6 120000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index ae217ba2e..e7d7756d1 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -12,8 +12,8 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade @@ -21,6 +21,10 @@ tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos and initialize facts on all hosts hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config tags: @@ -47,6 +51,14 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -55,7 +67,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -65,17 +77,7 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. 
At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade @@ -93,6 +95,10 @@ tags: - pre_upgrade +- include: validator.yml + tags: + - pre_upgrade + - include: ../pre/gate_checks.yml tags: - pre_upgrade @@ -109,7 +115,21 @@ - include: ../upgrade_control_plane.yml vars: - master_config_hook: "v3_4/master_config_upgrade.yml" + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index d7cb38d03..be362e3ff 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -21,14 +21,18 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade - include: ../initialize_nodes_to_upgrade.yml tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos on control plane hosts hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tags: @@ -55,11 +59,19 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -69,17 +81,7 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
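The "Cycle all controller services" plays added in these upgrade files stop every controllers service before starting any of them; a rolling restart would briefly mix old and new leader-election modes across masters. One way to sanity-check the outcome afterwards, assuming the 3.x controllers record their election on an endpoints object named openshift-master-controllers in kube-system (an assumption, not stated in this diff):

- name: Inspect the controllers leader election record (illustrative)
  hosts: oo_first_master
  gather_facts: no
  tasks:
    - name: Read the assumed election endpoints object
      command: >
        {{ openshift.common.client_binary }} get endpoints
        openshift-master-controllers -n kube-system -o yaml
      register: l_election_record
      changed_when: false
    - debug:
        var: l_election_record.stdout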
- skip_docker_role: True - -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade @@ -97,6 +99,10 @@ tags: - pre_upgrade +- include: validator.yml + tags: + - pre_upgrade + - include: ../pre/gate_checks.yml tags: - pre_upgrade @@ -113,6 +119,20 @@ - include: ../upgrade_control_plane.yml vars: - master_config_hook: "v3_4/master_config_upgrade.yml" + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index 8531e6045..6e68116b0 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -14,8 +14,8 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_target: '3.8' + openshift_upgrade_min: '3.7' # Pre-upgrade - include: ../initialize_nodes_to_upgrade.yml @@ -48,11 +48,15 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../disable_node_excluders.yml tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -62,12 +66,6 @@ openshift_release: "{{ openshift_upgrade_target }}" openshift_protect_installed_version: False - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - - name: Verify masters are already upgraded hosts: oo_masters_to_config tags: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml new file mode 100644 index 000000000..d8540abfb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/validator.yml @@ -0,0 +1,7 @@ +--- +- name: Verify 3.8 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - debug: msg="noop" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins new file mode 120000 index 000000000..7de3c1dd7 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml index db0c8f886..1d4d1919c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml @@ -1,6 +1,11 @@ --- - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.election.lockName' + yaml_value: 'openshift-master-controllers' + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' yaml_value: service-signer.crt diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/roles b/playbooks/common/openshift-cluster/upgrades/v3_9/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml index bda245fe1..94c16cae0 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml @@ -12,8 +12,8 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.9' + openshift_upgrade_min: '3.7' # Pre-upgrade @@ -21,6 +21,10 @@ tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos and initialize facts on all hosts hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config tags: @@ -47,6 +51,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade @@ -59,7 +67,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -75,7 +83,7 @@ # docker is configured and running. skip_docker_role: True -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade @@ -113,7 +121,21 @@ - include: ../upgrade_control_plane.yml vars: - master_config_hook: "v3_5/master_config_upgrade.yml" + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml index 6cdea7b84..2045f6379 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -21,14 +21,18 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.9' + openshift_upgrade_min: '3.7' # Pre-upgrade - include: ../initialize_nodes_to_upgrade.yml tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos on control plane hosts hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tags: @@ -55,6 +59,10 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../pre/verify_control_plane_running.yml tags: - pre_upgrade @@ -63,7 +71,7 @@ tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: @@ -79,7 +87,7 @@ # docker is configured and running. 
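The v3_9 master config hook earlier in this diff writes controllerConfig.election.lockName: openshift-master-controllers into master-config.yaml via modify_yaml. A quick post-hook check that the key actually landed, assuming the default config location under openshift.common.config_base (illustrative, not part of this diff):

- name: Confirm the election lockName was written (illustrative)
  hosts: oo_masters_to_config
  gather_facts: no
  tasks:
    - name: Count occurrences of the lock name in master-config.yaml
      command: grep -c openshift-master-controllers {{ openshift.common.config_base }}/master/master-config.yaml
      register: l_lock_check
      changed_when: false
      # grep exits 1 on zero matches; judge by the count instead of the rc.
      failed_when: l_lock_check.stdout | int < 1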
skip_docker_role: True -- include: ../../../openshift-master/validate_restart.yml +- include: ../../../../openshift-master/private/validate_restart.yml tags: - pre_upgrade @@ -117,6 +125,20 @@ - include: ../upgrade_control_plane.yml vars: - master_config_hook: "v3_5/master_config_upgrade.yml" + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started - include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml index e29d0f8e6..6134f8653 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml @@ -14,8 +14,8 @@ - pre_upgrade tasks: - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_target: '3.9' + openshift_upgrade_min: '3.7' # Pre-upgrade - include: ../initialize_nodes_to_upgrade.yml @@ -48,11 +48,15 @@ tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../disable_node_excluders.yml tags: - pre_upgrade -- include: ../../initialize_openshift_version.yml +- include: ../../../../init/version.yml tags: - pre_upgrade vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml new file mode 100644 index 000000000..4bd2d87b1 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml @@ -0,0 +1,7 @@ +--- +- name: Verify 3.9 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - debug: msg="noop" diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml deleted file mode 100644 index be2e6a15a..000000000 --- a/playbooks/common/openshift-cluster/validate_hostnames.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Validate node hostnames - hosts: oo_nodes_to_config - tasks: - - name: Query DNS for IP address of {{ openshift.common.hostname }} - shell: - getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }' - register: lookupip - changed_when: false - failed_when: false - - name: Warn user about bad openshift_hostname values - pause: - prompt: - The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }} - doesn't resolve to an IP address owned by this host. Please set - openshift_hostname variable to a hostname that when resolved on the host - in question resolves to an IP address matching an interface on this - host. This host will fail liveness checks for pods utilizing hostPorts, - press ENTER to continue or CTRL-C to abort. 
- seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}" - when: - - lookupip.stdout != '127.0.0.1' - - lookupip.stdout not in ansible_all_ipv4_addresses diff --git a/playbooks/common/openshift-etcd/ca.yml b/playbooks/common/openshift-etcd/ca.yml deleted file mode 100644 index ac5543be9..000000000 --- a/playbooks/common/openshift-etcd/ca.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Generate new etcd CA - hosts: oo_first_etcd - roles: - - role: openshift_etcd_facts - tasks: - - include_role: - name: etcd - tasks_from: ca - vars: - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - when: - - etcd_ca_setup | default(True) | bool diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml deleted file mode 100644 index eb6b94f33..000000000 --- a/playbooks/common/openshift-etcd/certificates.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: server_certificates.yml - -- include: master_etcd_certificates.yml diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml deleted file mode 100644 index 48d46bbb0..000000000 --- a/playbooks/common/openshift-etcd/config.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -- name: etcd Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set etcd install 'In Progress' - set_stats: - data: - installer_phase_etcd: "In Progress" - aggregate: false - -- include: ca.yml - -- include: certificates.yml - -- name: Configure etcd - hosts: oo_etcd_to_config - any_errors_fatal: true - roles: - - role: os_firewall - - role: openshift_etcd - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - - role: nickhammond.logrotate - -- name: etcd Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set etcd install 'Complete' - set_stats: - data: - installer_phase_etcd: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml deleted file mode 100644 index b16b78c4f..000000000 --- a/playbooks/common/openshift-etcd/embedded2external.yml +++ /dev/null @@ -1,172 +0,0 @@ ---- -- name: Pre-migrate checks - hosts: localhost - tasks: - # Check there is only one etcd host - - assert: - that: groups.oo_etcd_to_config | default([]) | length == 1 - msg: "[etcd] group must contain only one host" - # Check there is only one master - - assert: - that: groups.oo_masters_to_config | default([]) | length == 1 - msg: "[master] group must contain only one host" - -# 1. stop a master -- name: Prepare masters for etcd data migration - hosts: oo_first_master - roles: - - role: openshift_facts - tasks: - - name: Check the master API is ready - include_role: - name: openshift_master - tasks_from: check_master_api_is_ready - - set_fact: - master_service: "{{ openshift.common.service_type + '-master' }}" - embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - - debug: - msg: "master service name: {{ master_service }}" - - name: Stop master - service: - name: "{{ master_service }}" - state: stopped - # 2. 
backup embedded etcd - # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285 - - include_role: - name: etcd - tasks_from: backup - vars: - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - r_etcd_common_backup_tag: pre-migrate - r_etcd_common_embedded_etcd: "{{ true }}" - r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}" - - - include_role: - name: etcd - tasks_from: backup.archive - vars: - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - r_etcd_common_backup_tag: pre-migrate - r_etcd_common_embedded_etcd: "{{ true }}" - r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}" - -# 3. deploy certificates (for etcd and master) -- include: ca.yml - -- include: server_certificates.yml - -- name: Backup etcd client certificates for master host - hosts: oo_first_master - tasks: - - include_role: - name: etcd - tasks_from: backup_master_etcd_certificates - -- name: Redeploy master etcd certificates - include: master_etcd_certificates.yml - vars: - etcd_certificates_redeploy: "{{ true }}" - -# 4. deploy external etcd -- include: ../openshift-etcd/config.yml - -# 5. stop external etcd -- name: Cleanse etcd - hosts: oo_etcd_to_config[0] - gather_facts: no - pre_tasks: - - include_role: - name: etcd - tasks_from: disable_etcd - vars: - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - - include_role: - name: etcd - tasks_from: clean_data - vars: - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - -# 6. copy the embedded etcd backup to the external host -# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory -- name: Copy embedded etcd backup to the external host - hosts: localhost - tasks: - - name: Create local temp directory for syncing etcd backup - local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX - register: g_etcd_client_mktemp - changed_when: False - become: no - - - include_role: - name: etcd - tasks_from: backup.fetch - vars: - r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}" - etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}" - r_etcd_common_backup_tag: pre-migrate - r_etcd_common_embedded_etcd: "{{ true }}" - r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" - delegate_to: "{{ groups.oo_first_master[0] }}" - - - include_role: - name: etcd - tasks_from: backup.copy - vars: - r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}" - etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}" - r_etcd_common_backup_tag: pre-migrate - r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" - delegate_to: "{{ groups.oo_etcd_to_config[0] }}" - - - debug: - msg: "etcd_backup_dest_directory: {{ g_etcd_client_mktemp.stdout }}" - - - name: Delete temporary directory - local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent - changed_when: False - become: no - -# 7. 
force new cluster from the backup -- name: Force new etcd cluster - hosts: oo_etcd_to_config[0] - tasks: - - include_role: - name: etcd - tasks_from: backup.unarchive - vars: - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - r_etcd_common_backup_tag: pre-migrate - r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" - - - include_role: - name: etcd - tasks_from: backup.force_new_cluster - vars: - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - r_etcd_common_backup_tag: pre-migrate - r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" - etcd_peer: "{{ openshift.common.ip }}" - etcd_url_scheme: "https" - etcd_peer_url_scheme: "https" - -# 8. re-configure master to use the external etcd -- name: Configure master to use external etcd - hosts: oo_first_master - tasks: - - include_role: - name: openshift_master - tasks_from: configure_external_etcd - vars: - etcd_peer_url_scheme: "https" - etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}" - etcd_peer_port: 2379 - - # 9. start the master - - name: Start master - service: - name: "{{ master_service }}" - state: started - register: service_status - until: service_status.state is defined and service_status.state == "started" - retries: 5 - delay: 10 diff --git a/playbooks/common/openshift-etcd/filter_plugins b/playbooks/common/openshift-etcd/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/common/openshift-etcd/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-etcd/lookup_plugins b/playbooks/common/openshift-etcd/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/common/openshift-etcd/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/common/openshift-etcd/master_etcd_certificates.yml deleted file mode 100644 index 0a25aac57..000000000 --- a/playbooks/common/openshift-etcd/master_etcd_certificates.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Create etcd client certificates for master hosts - hosts: oo_masters_to_config - any_errors_fatal: true - roles: - - role: openshift_etcd_facts - - role: openshift_etcd_client_certificates - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" - etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" - etcd_cert_prefix: "master.etcd-" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml deleted file mode 100644 index 31362f2f6..000000000 --- a/playbooks/common/openshift-etcd/migrate.yml +++ /dev/null @@ -1,169 +0,0 @@ ---- -- name: Check if the master has embedded etcd - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - fail: - msg: "Migration of an embedded etcd is not supported. Please, migrate the embedded etcd into an external etcd first." - when: - - groups.oo_etcd_to_config | default([]) | length == 0 - -- name: Run pre-checks - hosts: oo_etcd_to_migrate - tasks: - - include_role: - name: etcd - tasks_from: migrate.pre_check - vars: - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - etcd_peer: "{{ ansible_default_ipv4.address }}" - -# TODO: This will be different for release-3.6 branch -- name: Prepare masters for etcd data migration - hosts: oo_masters_to_config - tasks: - - set_fact: - master_services: - - "{{ openshift.common.service_type + '-master-controllers' }}" - - "{{ openshift.common.service_type + '-master-api' }}" - - debug: - msg: "master service name: {{ master_services }}" - - name: Stop masters - service: - name: "{{ item }}" - state: stopped - with_items: "{{ master_services }}" - -- name: Backup v2 data - hosts: oo_etcd_to_migrate - gather_facts: no - roles: - - role: openshift_facts - post_tasks: - - include_role: - name: etcd - tasks_from: backup - vars: - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - r_etcd_common_backup_tag: pre-migration - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - -- name: Gate on etcd backup - hosts: localhost - connection: local - become: no - tasks: - - set_fact: - etcd_backup_completed: "{{ hostvars - | oo_select_keys(groups.oo_etcd_to_migrate) - | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" - - set_fact: - etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) | list }}" - - fail: - msg: "Migration cannot continue. 
The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" - when: - etcd_backup_failed | length > 0 - -- name: Stop etcd - hosts: oo_etcd_to_migrate - gather_facts: no - pre_tasks: - - include_role: - name: etcd - tasks_from: disable_etcd - vars: - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - -- name: Migrate data on first etcd - hosts: oo_etcd_to_migrate[0] - gather_facts: no - tasks: - - include_role: - name: etcd - tasks_from: migrate - vars: - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - etcd_peer: "{{ openshift.common.ip }}" - etcd_url_scheme: "https" - etcd_peer_url_scheme: "https" - -- name: Clean data stores on remaining etcd hosts - hosts: oo_etcd_to_migrate[1:] - gather_facts: no - tasks: - - include_role: - name: etcd - tasks_from: clean_data - vars: - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - etcd_peer: "{{ openshift.common.ip }}" - etcd_url_scheme: "https" - etcd_peer_url_scheme: "https" - - name: Add etcd hosts - delegate_to: localhost - add_host: - name: "{{ item }}" - groups: oo_new_etcd_to_config - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ groups.oo_etcd_to_migrate[1:] | default([]) }}" - changed_when: no - - name: Set success - set_fact: - r_etcd_migrate_success: true - -- include: ./scaleup.yml - -- name: Gate on etcd migration - hosts: oo_masters_to_config - gather_facts: no - tasks: - - set_fact: - etcd_migration_completed: "{{ hostvars - | oo_select_keys(groups.oo_etcd_to_migrate) - | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}" - - set_fact: - etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) | list }}" - -- name: Add TTLs on the first master - hosts: oo_first_master[0] - tasks: - - include_role: - name: etcd - tasks_from: migrate.add_ttls - vars: - etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}" - etcd_url_scheme: "https" - etcd_peer_url_scheme: "https" - when: etcd_migration_failed | length == 0 - -- name: Configure masters if etcd data migration is successful - hosts: oo_masters_to_config - tasks: - - include_role: - name: etcd - tasks_from: migrate.configure_master - when: etcd_migration_failed | length == 0 - - debug: - msg: "Skipping master re-configuration since migration failed." - when: - - etcd_migration_failed | length > 0 - - name: Start master services - service: - name: "{{ item }}" - state: started - register: service_status - # Sometimes the master-api or master-controllers service fails to start on the first attempt - until: service_status.state is defined and service_status.state == "started" - retries: 5 - delay: 10 - with_items: "{{ master_services[::-1] }}" - - fail: - msg: "Migration failed. 
The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}" - when: - - etcd_migration_failed | length > 0 diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml deleted file mode 100644 index 5eaea5ae8..000000000 --- a/playbooks/common/openshift-etcd/restart.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: Restart etcd - hosts: oo_etcd_to_config - serial: 1 - tasks: - - name: restart etcd - service: - name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" - state: restarted - when: - - not g_etcd_certificates_expired | default(false) | bool - -- name: Restart etcd - hosts: oo_etcd_to_config - tasks: - - name: stop etcd - service: - name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" - state: stopped - when: - - g_etcd_certificates_expired | default(false) | bool - - name: start etcd - service: - name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" - state: started - when: - - g_etcd_certificates_expired | default(false) | bool diff --git a/playbooks/common/openshift-etcd/roles b/playbooks/common/openshift-etcd/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/common/openshift-etcd/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml deleted file mode 100644 index 20061366c..000000000 --- a/playbooks/common/openshift-etcd/scaleup.yml +++ /dev/null @@ -1,83 +0,0 @@ ---- -- name: Gather facts - hosts: oo_etcd_to_config:oo_new_etcd_to_config - roles: - - openshift_etcd_facts - post_tasks: - - set_fact: - etcd_hostname: "{{ etcd_hostname }}" - etcd_ip: "{{ etcd_ip }}" - -- name: Configure etcd - hosts: oo_new_etcd_to_config - serial: 1 - any_errors_fatal: true - vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - pre_tasks: - - name: Add new etcd members to cluster - command: > - /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} - --key-file {{ etcd_peer_key_file }} - --ca-file {{ etcd_peer_ca_file }} - -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_ip }}:{{ etcd_client_port }} - member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }} - delegate_to: "{{ etcd_ca_host }}" - failed_when: - - etcd_add_check.rc == 1 - - ("peerURL exists" not in etcd_add_check.stderr) - register: etcd_add_check - retries: 3 - delay: 10 - until: etcd_add_check.rc == 0 - - include_role: - name: etcd - tasks_from: server_certificates - vars: - etcd_peers: "{{ groups.oo_new_etcd_to_config | default([], true) }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_new_etcd_to_config | default([], true) }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - roles: - - role: os_firewall - when: etcd_add_check.rc == 0 - - role: openshift_etcd - when: etcd_add_check.rc == 0 - etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_initial_cluster_state: "existing" - etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}" - etcd_ca_setup: False - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - - role: nickhammond.logrotate - when: etcd_add_check.rc == 0 - post_tasks: - - name: Verify cluster is stable - command: > - /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} - --key-file {{ etcd_peer_key_file }} - --ca-file {{ etcd_peer_ca_file }} - -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} - cluster-health - register: scaleup_health - retries: 3 - delay: 30 - until: scaleup_health.rc == 0 - -- name: Update master etcd client urls - hosts: oo_masters_to_config - serial: 1 - vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - openshift_master_etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) )) - | oo_collect('openshift.common.hostname') - | default(none, true) }}" - openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" - roles: - - role: openshift_master_facts - post_tasks: - - include_role: - name: openshift_master - tasks_from: update_etcd_client_urls diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/common/openshift-etcd/server_certificates.yml deleted file mode 100644 index 10e06747b..000000000 --- a/playbooks/common/openshift-etcd/server_certificates.yml 
+++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Create etcd server certificates for etcd hosts - hosts: oo_etcd_to_config - any_errors_fatal: true - roles: - - role: openshift_etcd_facts - post_tasks: - - include_role: - name: etcd - tasks_from: server_certificates - vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml deleted file mode 100644 index c2ae5f313..000000000 --- a/playbooks/common/openshift-glusterfs/config.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -- name: GlusterFS Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set GlusterFS install 'In Progress' - set_stats: - data: - installer_phase_glusterfs: "In Progress" - aggregate: false - -- name: Open firewall ports for GlusterFS nodes - hosts: glusterfs - tasks: - - include_role: - name: openshift_storage_glusterfs - tasks_from: firewall.yml - when: - - openshift_storage_glusterfs_is_native | default(True) | bool - - include_role: - name: openshift_storage_glusterfs - tasks_from: kernel_modules.yml - when: - - openshift_storage_glusterfs_is_native | default(True) | bool - -- name: Open firewall ports for GlusterFS registry nodes - hosts: glusterfs_registry - tasks: - - include_role: - name: openshift_storage_glusterfs - tasks_from: firewall.yml - when: - - openshift_storage_glusterfs_registry_is_native | default(True) | bool - - include_role: - name: openshift_storage_glusterfs - tasks_from: kernel_modules.yml - when: - - openshift_storage_glusterfs_registry_is_native | default(True) | bool - -- name: Configure GlusterFS - hosts: oo_first_master - tasks: - - name: setup glusterfs - include_role: - name: openshift_storage_glusterfs - when: groups.oo_glusterfs_to_config | default([]) | count > 0 - -- name: GlusterFS Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set GlusterFS install 'Complete' - set_stats: - data: - installer_phase_glusterfs: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/common/openshift-glusterfs/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/common/openshift-glusterfs/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml deleted file mode 100644 index 80cf7529e..000000000 --- a/playbooks/common/openshift-glusterfs/registry.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -- include: config.yml - -- name: Initialize GlusterFS registry PV and PVC vars - hosts: oo_first_master - tags: hosted - tasks: - - set_fact: - glusterfs_pv: [] - glusterfs_pvc: [] - - - set_fact: - glusterfs_pv: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - storage: - glusterfs: - endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" - path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" - readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" - glusterfs_pvc: - - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" - capacity: "{{ openshift.hosted.registry.storage.volume.size }}" - access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" - when: openshift.hosted.registry.storage.glusterfs.swap - -- name: Create persistent volumes - hosts: oo_first_master - tags: - - hosted - vars: - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}" - roles: - - role: openshift_persistent_volumes - when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 - -- name: Create Hosted Resources - hosts: oo_first_master - tags: - - hosted - pre_tasks: - - set_fact: - openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" - openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" - when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" - roles: - - role: openshift_hosted diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/common/openshift-glusterfs/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml deleted file mode 100644 index 2a703cb61..000000000 --- a/playbooks/common/openshift-loadbalancer/config.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -- name: Load Balancer Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set load balancer install 'In Progress' - set_stats: - data: - installer_phase_loadbalancer: "In Progress" - aggregate: false - -- name: Configure firewall and docker for load balancers - hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config - vars: - openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" - roles: - - role: os_firewall - - role: openshift_docker - when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool - -- name: Configure load balancers - hosts: oo_lb_to_config - vars: - openshift_loadbalancer_frontends: "{{ (openshift_master_api_port | default(8443) - | oo_openshift_loadbalancer_frontends(hostvars | oo_select_keys(groups['oo_masters']), - openshift_use_nuage | default(false), - nuage_mon_rest_server_port | default(none))) - + openshift_loadbalancer_additional_frontends | default([]) }}" - openshift_loadbalancer_backends: "{{ (openshift_master_api_port | default(8443) - | oo_openshift_loadbalancer_backends(hostvars | oo_select_keys(groups['oo_masters']), - openshift_use_nuage | default(false), - nuage_mon_rest_server_port | default(none))) - + openshift_loadbalancer_additional_backends | default([]) }}" - openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" - roles: - - role: openshift_loadbalancer - - role: tuned - -- name: Load Balancer Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set load balancer install 'Complete' - set_stats: - data: - installer_phase_loadbalancer: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-loadbalancer/filter_plugins b/playbooks/common/openshift-loadbalancer/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/common/openshift-loadbalancer/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-loadbalancer/lookup_plugins b/playbooks/common/openshift-loadbalancer/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/common/openshift-loadbalancer/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-loadbalancer/roles b/playbooks/common/openshift-loadbalancer/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/common/openshift-loadbalancer/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml deleted file mode 100644 index facb3a5b9..000000000 --- a/playbooks/common/openshift-management/add_container_provider.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Add Container Provider to Management - hosts: oo_first_master - tasks: - - name: Run the Management Integration Tasks - include_role: - name: openshift_management - tasks_from: add_container_provider diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml deleted file mode 100644 index 908679e81..000000000 --- a/playbooks/common/openshift-management/config.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- name: Management Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Management install 'In Progress' - set_stats: - data: - installer_phase_management: "In Progress" - aggregate: false - -- name: Setup CFME - hosts: oo_first_master - pre_tasks: - - name: Create a temporary place to evaluate the PV templates - command: mktemp -d /tmp/openshift-ansible-XXXXXXX - register: r_openshift_management_mktemp - changed_when: false - - tasks: - - name: Run the CFME Setup Role - include_role: - name: openshift_management - vars: - template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}" - -- name: Management Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Management install 'Complete' - set_stats: - data: - installer_phase_management: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-management/filter_plugins b/playbooks/common/openshift-management/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/common/openshift-management/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-management/library b/playbooks/common/openshift-management/library deleted file mode 120000 index ba40d2f56..000000000 --- a/playbooks/common/openshift-management/library +++ /dev/null @@ -1 +0,0 @@ -../../../library
\ No newline at end of file diff --git a/playbooks/common/openshift-management/roles b/playbooks/common/openshift-management/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/common/openshift-management/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml deleted file mode 100644 index 9f35cc276..000000000 --- a/playbooks/common/openshift-management/uninstall.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Uninstall CFME - hosts: masters[0] - tasks: - - name: Run the CFME Uninstall Role Tasks - include_role: - name: openshift_management - tasks_from: uninstall diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml deleted file mode 100644 index 350557f19..000000000 --- a/playbooks/common/openshift-master/additional_config.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: Master Additional Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Master Additional install 'In Progress' - set_stats: - data: - installer_phase_master_additional: "In Progress" - aggregate: false - -- name: Additional master configuration - hosts: oo_first_master - vars: - cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" - etcd_urls: "{{ openshift.master.etcd_urls }}" - openshift_master_ha: "{{ groups.oo_masters | length > 1 }}" - omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}" - roles: - - role: openshift_master_cluster - when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - - role: openshift_examples - when: openshift_install_examples | default(true, true) | bool - registry_url: "{{ openshift.master.registry_url }}" - - role: openshift_hosted_templates - registry_url: "{{ openshift.master.registry_url }}" - - role: openshift_manageiq - when: openshift_use_manageiq | default(true) | bool - - role: cockpit - when: - - not openshift.common.is_atomic | bool - - deployment_type == 'openshift-enterprise' - - osm_use_cockpit is undefined or osm_use_cockpit | bool - - openshift.common.deployment_subtype != 'registry' - - role: flannel_register - when: openshift_use_flannel | default(false) | bool - -- name: Master Additional Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Master Additional install 'Complete' - set_stats: - data: - installer_phase_master_additional: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-master/certificates.yml b/playbooks/common/openshift-master/certificates.yml deleted file mode 100644 index f6afbc36f..000000000 --- a/playbooks/common/openshift-master/certificates.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -- name: Create OpenShift certificates for master hosts - hosts: oo_masters_to_config - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - roles: - - role: openshift_master_facts - - role: openshift_named_certificates - - role: openshift_ca - - role: openshift_master_certificates - openshift_master_etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_to_config'] | default([])) - | oo_collect('openshift.common.hostname') - | default(none, true) }}" diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml deleted file mode 100644 index b359919ba..000000000 --- a/playbooks/common/openshift-master/config.yml +++ /dev/null @@ -1,242 +0,0 @@ ---- -- name: Master Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Master install 'In Progress' - set_stats: - data: - installer_phase_master: "In Progress" - aggregate: false - -- include: certificates.yml - -- name: 
Disable excluders - hosts: oo_masters_to_config - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - -- name: Gather and set facts for master hosts - hosts: oo_masters_to_config - pre_tasks: - # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336 - # - # When scaling up a cluster upgraded from OCP <= 3.5, ensure that - # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing - # masters, or absent if such is the case. - - name: Detect if this host is a new master in a scale up - set_fact: - g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}" - - - name: Scaleup Detection - debug: - var: g_openshift_master_is_scaleup - - - name: Check for RPM generated config marker file .config_managed - stat: - path: /etc/origin/.config_managed - register: rpmgenerated_config - - - name: Remove RPM generated config files if present - file: - path: "/etc/origin/{{ item }}" - state: absent - when: - - rpmgenerated_config.stat.exists == true - - deployment_type == 'openshift-enterprise' - with_items: - - master - - node - - .config_managed - - - set_fact: - openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" - openshift_master_etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_to_config'] - | default([])) - | oo_collect('openshift.common.hostname') - | default(none, true) }}" - roles: - - openshift_facts - post_tasks: - - openshift_facts: - role: master - local_facts: - api_port: "{{ openshift_master_api_port | default(None) }}" - api_url: "{{ openshift_master_api_url | default(None) }}" - api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" - controllers_port: "{{ openshift_master_controllers_port | default(None) }}" - public_api_url: "{{ openshift_master_public_api_url | default(None) }}" - cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}" - cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}" - console_path: "{{ openshift_master_console_path | default(None) }}" - console_port: "{{ openshift_master_console_port | default(None) }}" - console_url: "{{ openshift_master_console_url | default(None) }}" - console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" - public_console_url: "{{ openshift_master_public_console_url | default(None) }}" - ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" - master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" - -- name: Inspect state of first master config settings - hosts: oo_first_master - roles: - - role: openshift_facts - post_tasks: - - openshift_facts: - role: master - local_facts: - session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}" - session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}" - - name: Check for existing configuration - stat: - path: /etc/origin/master/master-config.yaml - register: master_config_stat - - - name: Set clean install fact - set_fact: - l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" - - - name: Determine if etcd3 storage is in use - command: grep -Pzo "storage-backend:\n.*etcd3" 
/etc/origin/master/master-config.yaml -q - register: etcd3_grep - failed_when: false - changed_when: false - - - name: Set etcd3 fact - set_fact: - l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" - - - name: Check if atomic-openshift-master sysconfig exists yet - stat: - path: /etc/sysconfig/atomic-openshift-master - register: l_aom_exists - - - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present - command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master - register: l_default_registry_defined - when: l_aom_exists.stat.exists | bool - - - name: Check if atomic-openshift-master-api sysconfig exists yet - stat: - path: /etc/sysconfig/atomic-openshift-master-api - register: l_aom_api_exists - - - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present - command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api - register: l_default_registry_defined_api - when: l_aom_api_exists.stat.exists | bool - - - name: Check if atomic-openshift-master-controllers sysconfig exists yet - stat: - path: /etc/sysconfig/atomic-openshift-master-controllers - register: l_aom_controllers_exists - - - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present - command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers - register: l_default_registry_defined_controllers - when: l_aom_controllers_exists.stat.exists | bool - - - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value - set_fact: - l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}" - l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}" - l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}" - -- name: Generate master session secrets - hosts: oo_first_master - vars: - g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}" - g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}" - g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}" - roles: - - role: openshift_facts - tasks: - - openshift_facts: - role: master - local_facts: - session_auth_secrets: "{{ g_session_auth_secrets }}" - session_encryption_secrets: "{{ g_session_encryption_secrets }}" - when: not g_session_secrets_present | bool - -- name: Configure masters - hosts: oo_masters_to_config - any_errors_fatal: true - vars: - openshift_master_ha: "{{ openshift.master.ha }}" - openshift_master_count: "{{ openshift.master.master_count }}" - openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" - openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - openshift_master_etcd_hosts: "{{ hostvars - | oo_select_keys(groups['oo_etcd_to_config'] | default([])) - | oo_collect('openshift.common.hostname') - | default(none, true) }}" - openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) - | oo_collect('openshift.common.ip') | default([]) | join(',') - }}" - roles: - - role: os_firewall - - role: openshift_master_facts - - role: openshift_hosted_facts - - role: openshift_clock - - role: openshift_cloud_provider - - role: openshift_builddefaults - - 
role: openshift_buildoverrides - - role: nickhammond.logrotate - - role: contiv - contiv_role: netmaster - when: openshift_use_contiv | default(False) | bool - - role: openshift_master - openshift_master_hosts: "{{ groups.oo_masters_to_config }}" - r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" - r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" - openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}" - openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" - openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" - openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" - - role: tuned - - role: nuage_ca - when: openshift_use_nuage | default(false) | bool - - role: nuage_common - when: openshift_use_nuage | default(false) | bool - - role: nuage_master - when: openshift_use_nuage | default(false) | bool - - role: calico_master - when: openshift_use_calico | default(false) | bool - tasks: - - include_role: - name: kuryr - tasks_from: master - when: openshift_use_kuryr | default(false) | bool - - post_tasks: - - name: Create group for deployment type - group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} - changed_when: False - -- name: Configure API Aggregation on masters - hosts: oo_masters - serial: 1 - tasks: - - include: tasks/wire_aggregator.yml - -- name: Re-enable excluder if it was previously enabled - hosts: oo_masters_to_config - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - -- name: Master Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Master install 'Complete' - set_stats: - data: - installer_phase_master: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js deleted file mode 100644 index d0a9f11dc..000000000 --- a/playbooks/common/openshift-master/files/openshift-ansible-catalog-console.js +++ /dev/null @@ -1,2 +0,0 @@ -// empty file so that the master-config can still point to a file that exists -// this file will be replaced by the template service broker role if enabled diff --git a/playbooks/common/openshift-master/filter_plugins b/playbooks/common/openshift-master/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/common/openshift-master/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-master/library b/playbooks/common/openshift-master/library deleted file mode 120000 index d0b7393d3..000000000 --- a/playbooks/common/openshift-master/library +++ /dev/null @@ -1 +0,0 @@ -../../../library/
\ No newline at end of file diff --git a/playbooks/common/openshift-master/lookup_plugins b/playbooks/common/openshift-master/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/common/openshift-master/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml deleted file mode 100644 index 4d73b8124..000000000 --- a/playbooks/common/openshift-master/restart.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- include: validate_restart.yml - -- name: Restart masters - hosts: oo_masters_to_config - vars: - openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" - serial: 1 - handlers: - - include: ../../../roles/openshift_master/handlers/main.yml - static: yes - roles: - - openshift_facts - post_tasks: - - include: restart_hosts.yml - when: openshift_rolling_restart_mode | default('services') == 'system' - - - include: restart_services.yml - when: openshift_rolling_restart_mode | default('services') == 'services' diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml deleted file mode 100644 index a5dbe0590..000000000 --- a/playbooks/common/openshift-master/restart_hosts.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- name: Restart master system - # https://github.com/ansible/ansible/issues/10616 - shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart" - async: 1 - poll: 0 - ignore_errors: true - become: yes - -# WARNING: This process is riddled with weird behavior. - -# Workaround for https://github.com/ansible/ansible/issues/21269 -- set_fact: - wait_for_host: "{{ ansible_host }}" - -# Ansible's blog documents this *without* the port, which appears to now -# just wait until the timeout value and then proceed without checking anything. -# port is now required. -# -# However neither ansible_ssh_port or ansible_port are reliably defined, likely -# only if overridden. Assume a default of 22. -- name: Wait for master to restart - local_action: - module: wait_for - host="{{ wait_for_host }}" - state=started - delay=10 - timeout=600 - port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}" - become: no - -# Now that ssh is back up we can wait for API on the remote system, -# avoiding some potential connection issues from local system: -- name: Wait for master API to come back online - wait_for: - host: "{{ openshift.common.hostname }}" - state: started - delay: 10 - port: "{{ openshift.master.api_port }}" - timeout: 600 diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml deleted file mode 100644 index 4f8b758fd..000000000 --- a/playbooks/common/openshift-master/restart_services.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Restart master API - service: - name: "{{ openshift.common.service_type }}-master-api" - state: restarted - when: openshift_master_ha | bool -- name: Wait for master API to come back online - wait_for: - host: "{{ openshift.common.hostname }}" - state: started - delay: 10 - port: "{{ openshift.master.api_port }}" - timeout: 600 - when: openshift_master_ha | bool -- name: Restart master controllers - service: - name: "{{ openshift.common.service_type }}-master-controllers" - state: restarted - # Ignore errrors since it is possible that type != simple for - # pre-3.1.1 installations. - ignore_errors: true - when: openshift_master_ha | bool diff --git a/playbooks/common/openshift-master/roles b/playbooks/common/openshift-master/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/common/openshift-master/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml deleted file mode 100644 index f4dc9df8a..000000000 --- a/playbooks/common/openshift-master/scaleup.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -- name: Update master count - hosts: oo_masters:!oo_masters_to_config - serial: 1 - roles: - - openshift_facts - post_tasks: - - openshift_facts: - role: master - local_facts: - ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" - master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" - - name: Update master count - modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.masterCount' - yaml_value: "{{ openshift.master.master_count }}" - notify: - - restart master api - - restart master controllers - handlers: - - name: restart master api - service: name={{ openshift.common.service_type }}-master-controllers state=restarted - notify: verify api server - - name: restart master controllers - service: name={{ openshift.common.service_type }}-master-controllers state=restarted - - name: verify api server - command: > - curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false - -- include: ../openshift-master/set_network_facts.yml - -- include: ../openshift-etcd/certificates.yml - -- include: ../openshift-master/config.yml - -- include: ../openshift-loadbalancer/config.yml - -- include: ../openshift-node/certificates.yml - -- include: ../openshift-node/config.yml diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml deleted file mode 100644 index 9a6cf26fc..000000000 --- a/playbooks/common/openshift-master/set_network_facts.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Read first master\'s config - hosts: oo_first_master - gather_facts: no - tasks: - - stat: - path: "{{ openshift.common.config_base }}/master/master-config.yaml" - register: g_master_config_stat - - slurp: - src: "{{ openshift.common.config_base }}/master/master-config.yaml" - register: g_master_config_slurp - -- name: Set network facts for masters - hosts: oo_masters_to_config - gather_facts: no - roles: - - role: openshift_facts - post_tasks: - - block: - - set_fact: - osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}" - when: osm_cluster_network_cidr is not defined - - set_fact: - osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}" - when: osm_host_subnet_length is not defined - - set_fact: - openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}" - when: openshift_portal_net is not defined - - openshift_facts: - role: common - local_facts: - portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) 
| default(None) }}" - when: - - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml deleted file mode 100644 index 560eea785..000000000 --- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml +++ /dev/null @@ -1,215 +0,0 @@ ---- -- name: Make temp cert dir - command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX - register: certtemp - changed_when: False - -- name: Check for First Master Aggregator Signer cert - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: first_proxy_ca_crt - changed_when: false - delegate_to: "{{ groups.oo_first_master.0 }}" - -- name: Check for First Master Aggregator Signer key - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: first_proxy_ca_key - changed_when: false - delegate_to: "{{ groups.oo_first_master.0 }}" - -# TODO: this currently has a bug where hostnames are required -- name: Creating First Master Aggregator signer certs - command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert - --cert=/etc/origin/master/front-proxy-ca.crt - --key=/etc/origin/master/front-proxy-ca.key - --serial=/etc/origin/master/ca.serial.txt - delegate_to: "{{ groups.oo_first_master.0 }}" - when: - - not first_proxy_ca_crt.stat.exists - - not first_proxy_ca_key.stat.exists - -- name: Check for Aggregator Signer cert - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: proxy_ca_crt - changed_when: false - -- name: Check for Aggregator Signer key - stat: - path: /etc/origin/master/front-proxy-ca.crt - register: proxy_ca_key - changed_when: false - -- name: Copy Aggregator Signer certs from first master - fetch: - src: "/etc/origin/master/{{ item }}" - dest: "{{ certtemp.stdout }}/{{ item }}" - flat: yes - with_items: - - front-proxy-ca.crt - - front-proxy-ca.key - delegate_to: "{{ groups.oo_first_master.0 }}" - when: - - not proxy_ca_key.stat.exists - - not proxy_ca_crt.stat.exists - -- name: Copy Aggregator Signer certs to host - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/{{ item }}" - with_items: - - front-proxy-ca.crt - - front-proxy-ca.key - when: - - not proxy_ca_key.stat.exists - - not proxy_ca_crt.stat.exists - -# oc_adm_ca_server_cert: -# cert: /etc/origin/master/front-proxy-ca.crt -# key: /etc/origin/master/front-proxy-ca.key - -- name: Check for first master api-client config - stat: - path: /etc/origin/master/aggregator-front-proxy.kubeconfig - register: first_front_proxy_kubeconfig - delegate_to: "{{ groups.oo_first_master.0 }}" - run_once: true - -# create-api-client-config generates a ca.crt file which will -# overwrite the OpenShift CA certificate. Generate the aggregator -# kubeconfig in a temporary directory and then copy files into the -# master config dir to avoid overwriting ca.crt. 
-- block: - - name: Create first master api-client config for Aggregator - command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config - --certificate-authority=/etc/origin/master/front-proxy-ca.crt - --signer-cert=/etc/origin/master/front-proxy-ca.crt - --signer-key=/etc/origin/master/front-proxy-ca.key - --user aggregator-front-proxy - --client-dir={{ certtemp.stdout }} - --signer-serial=/etc/origin/master/ca.serial.txt - delegate_to: "{{ groups.oo_first_master.0 }}" - run_once: true - - name: Copy first master api-client config for Aggregator - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/" - remote_src: true - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - delegate_to: "{{ groups.oo_first_master.0 }}" - run_once: true - when: - - not first_front_proxy_kubeconfig.stat.exists - -- name: Check for api-client config - stat: - path: /etc/origin/master/aggregator-front-proxy.kubeconfig - register: front_proxy_kubeconfig - -- name: Copy api-client config from first master - fetch: - src: "/etc/origin/master/{{ item }}" - dest: "{{ certtemp.stdout }}/{{ item }}" - flat: yes - delegate_to: "{{ groups.oo_first_master.0 }}" - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - when: - - not front_proxy_kubeconfig.stat.exists - -- name: Copy api-client config to host - copy: - src: "{{ certtemp.stdout }}/{{ item }}" - dest: "/etc/origin/master/{{ item }}" - with_items: - - aggregator-front-proxy.crt - - aggregator-front-proxy.key - - aggregator-front-proxy.kubeconfig - when: - - not front_proxy_kubeconfig.stat.exists - -- name: copy tech preview extension file for service console UI - copy: - src: openshift-ansible-catalog-console.js - dest: /etc/origin/master/openshift-ansible-catalog-console.js - -- name: Update master config - yedit: - state: present - src: /etc/origin/master/master-config.yaml - edits: - - key: aggregatorConfig.proxyClientInfo.certFile - value: aggregator-front-proxy.crt - - key: aggregatorConfig.proxyClientInfo.keyFile - value: aggregator-front-proxy.key - - key: authConfig.requestHeader.clientCA - value: front-proxy-ca.crt - - key: authConfig.requestHeader.clientCommonNames - value: [aggregator-front-proxy] - - key: authConfig.requestHeader.usernameHeaders - value: [X-Remote-User] - - key: authConfig.requestHeader.groupHeaders - value: [X-Remote-Group] - - key: authConfig.requestHeader.extraHeaderPrefixes - value: [X-Remote-Extra-] - - key: assetConfig.extensionScripts - value: [/etc/origin/master/openshift-ansible-catalog-console.js] - - key: kubernetesMasterConfig.apiServerArguments.runtime-config - value: [apis/settings.k8s.io/v1alpha1=true] - - key: admissionConfig.pluginConfig.PodPreset.configuration.kind - value: DefaultAdmissionConfig - - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion - value: v1 - - key: admissionConfig.pluginConfig.PodPreset.configuration.disable - value: false - register: yedit_output - -#restart master serially here -- name: restart master api - systemd: name={{ openshift.common.service_type }}-master-api state=restarted - when: - - yedit_output.changed - - openshift.master.cluster_method == 'native' - -- name: restart master controllers - systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted - when: - - yedit_output.changed - - openshift.master.cluster_method == 'native' - -- name: 
Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. - command: > - curl --silent --tlsv1.2 - {% if openshift.common.version_gte_3_2_or_1_2 | bool %} - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {% else %} - --cacert {{ openshift.common.config_base }}/master/ca.crt - {% endif %} - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false - when: - - yedit_output.changed - -- name: Delete temp directory - file: - name: "{{ certtemp.stdout }}" - state: absent - changed_when: False diff --git a/playbooks/common/openshift-master/validate_restart.yml b/playbooks/common/openshift-master/validate_restart.yml deleted file mode 100644 index 5dbb21502..000000000 --- a/playbooks/common/openshift-master/validate_restart.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- name: Validate configuration for rolling restart - hosts: oo_masters_to_config - roles: - - openshift_facts - tasks: - - fail: - msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'" - when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"] - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: common - local_facts: - rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}" - - role: master - local_facts: - cluster_method: "{{ openshift_master_cluster_method | default(None) }}" - -# Creating a temp file on localhost, we then check each system that will -# be rebooted to see if that file exists, if so we know we're running -# ansible on a machine that needs a reboot, and we need to error out. -- name: Create temp file on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - local_action: command mktemp - register: mktemp - changed_when: false - -- name: Check if temp file exists on any masters - hosts: oo_masters_to_config - tasks: - - stat: path="{{ hostvars.localhost.mktemp.stdout }}" - register: exists - changed_when: false - -- name: Cleanup temp file on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent - changed_when: false - -- name: Warn if restarting the system where ansible is running - hosts: oo_masters_to_config - tasks: - - pause: - prompt: > - Warning: Running playbook from a host that will be restarted! - Press CTRL+C and A to abort playbook execution. You may - continue by pressing ENTER but the playbook will stop - executing after this system has been restarted and services - must be verified manually. To only restart services, set - openshift_master_rolling_restart_mode=services in host - inventory and relaunch the playbook. 
- when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system' - - set_fact: - current_host: "{{ exists.stat.exists }}" - when: openshift.common.rolling_restart_mode == 'system' diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml deleted file mode 100644 index ce672daf5..000000000 --- a/playbooks/common/openshift-nfs/config.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: NFS Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set NFS install 'In Progress' - set_stats: - data: - installer_phase_nfs: "In Progress" - aggregate: false - -- name: Configure nfs - hosts: oo_nfs_to_config - roles: - - role: os_firewall - - role: openshift_storage_nfs - -- name: NFS Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set NFS install 'Complete' - set_stats: - data: - installer_phase_nfs: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-nfs/filter_plugins b/playbooks/common/openshift-nfs/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/common/openshift-nfs/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-nfs/lookup_plugins b/playbooks/common/openshift-nfs/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/common/openshift-nfs/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-nfs/roles b/playbooks/common/openshift-nfs/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/common/openshift-nfs/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml deleted file mode 100644 index ac757397b..000000000 --- a/playbooks/common/openshift-node/additional_config.yml +++ /dev/null @@ -1,64 +0,0 @@ ---- -- name: create additional node network plugin groups - hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}" - tasks: - # Creating these node groups will prevent a ton of skipped tasks. - # Create group for flannel nodes - - group_by: - key: oo_nodes_use_{{ (openshift_use_flannel | default(False)) | ternary('flannel','nothing') }} - changed_when: False - # Create group for calico nodes - - group_by: - key: oo_nodes_use_{{ (openshift_use_calico | default(False)) | ternary('calico','nothing') }} - changed_when: False - # Create group for nuage nodes - - group_by: - key: oo_nodes_use_{{ (openshift_use_nuage | default(False)) | ternary('nuage','nothing') }} - changed_when: False - # Create group for contiv nodes - - group_by: - key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }} - changed_when: False - # Create group for kuryr nodes - - group_by: - key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }} - changed_when: False - -- include: etcd_client_config.yml - vars: - openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr" - -- name: Additional node config - hosts: oo_nodes_use_flannel - roles: - - role: flannel - etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}" - embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" - when: openshift_use_flannel | default(false) | bool - -- name: Additional node config - hosts: oo_nodes_use_calico - roles: - - role: calico - when: openshift_use_calico | default(false) | bool - -- name: Additional node config - hosts: oo_nodes_use_nuage - roles: - - role: nuage_node - when: openshift_use_nuage | default(false) | bool - -- name: Additional node config - hosts: oo_nodes_use_contiv - roles: - - role: contiv - contiv_role: netplugin - when: openshift_use_contiv | default(false) | bool - -- name: Configure Kuryr node - hosts: oo_nodes_use_kuryr - tasks: - - include_role: - name: kuryr - tasks_from: node - when: openshift_use_kuryr | default(false) | bool diff --git a/playbooks/common/openshift-node/certificates.yml b/playbooks/common/openshift-node/certificates.yml deleted file mode 100644 index 908885ee6..000000000 --- a/playbooks/common/openshift-node/certificates.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Create OpenShift certificates for node hosts - hosts: oo_nodes_to_config - gather_facts: no - roles: - - role: openshift_node_certificates - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - when: not openshift_node_bootstrap | default(false) | bool diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml deleted file mode 100644 index 38753d0af..000000000 --- a/playbooks/common/openshift-node/clean_image.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- -- name: Configure nodes - hosts: oo_nodes_to_config:!oo_containerized_master_nodes - tasks: - - name: Remove any ansible facts created during AMI creation - file: - path: "/etc/ansible/facts.d/{{ item }}" - state: absent - with_items: - - openshift.fact diff --git a/playbooks/common/openshift-node/config.yml 
b/playbooks/common/openshift-node/config.yml deleted file mode 100644 index 4f8f98aef..000000000 --- a/playbooks/common/openshift-node/config.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Node Install Checkpoint Start - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Node install 'In Progress' - set_stats: - data: - installer_phase_node: "In Progress" - aggregate: false - -- include: certificates.yml - -- include: setup.yml - -- include: containerized_nodes.yml - -- include: configure_nodes.yml - -- include: additional_config.yml - -- include: manage_node.yml - -- include: enable_excluders.yml - -- name: Node Install Checkpoint End - hosts: oo_all_hosts - gather_facts: false - tasks: - - name: Set Node install 'Complete' - set_stats: - data: - installer_phase_node: "Complete" - aggregate: false diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml deleted file mode 100644 index 17259422d..000000000 --- a/playbooks/common/openshift-node/configure_nodes.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: Configure nodes - hosts: oo_nodes_to_config:!oo_containerized_master_nodes - vars: - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - roles: - - role: os_firewall - - role: openshift_node - - role: tuned - - role: nickhammond.logrotate diff --git a/playbooks/common/openshift-node/containerized_nodes.yml b/playbooks/common/openshift-node/containerized_nodes.yml deleted file mode 100644 index 6fac937e3..000000000 --- a/playbooks/common/openshift-node/containerized_nodes.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -- name: Configure containerized nodes - hosts: oo_containerized_master_nodes - serial: 1 - vars: - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}" - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - - roles: - - role: os_firewall - - role: openshift_node - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - - role: nickhammond.logrotate diff --git a/playbooks/common/openshift-node/enable_excluders.yml b/playbooks/common/openshift-node/enable_excluders.yml deleted file mode 100644 index 5288b14f9..000000000 --- a/playbooks/common/openshift-node/enable_excluders.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Re-enable excluder if it was previously enabled - hosts: oo_nodes_to_config - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git 
a/playbooks/common/openshift-node/etcd_client_config.yml b/playbooks/common/openshift-node/etcd_client_config.yml deleted file mode 100644 index c3fa38a81..000000000 --- a/playbooks/common/openshift-node/etcd_client_config.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: etcd_client node config - hosts: "{{ openshift_node_scale_up_group | default('this_group_does_not_exist') }}" - roles: - - role: openshift_facts - - role: openshift_etcd_facts - - role: openshift_etcd_client_certificates - etcd_cert_prefix: flannel.etcd- - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}" - etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" diff --git a/playbooks/common/openshift-node/filter_plugins b/playbooks/common/openshift-node/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/common/openshift-node/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml deleted file mode 100644 index 30651a1df..000000000 --- a/playbooks/common/openshift-node/image_prep.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: normalize groups - include: ../../byo/openshift-cluster/initialize_groups.yml - -- name: evaluate the groups - include: ../openshift-cluster/evaluate_groups.yml - -- name: initialize the facts - include: ../openshift-cluster/initialize_facts.yml - -- name: initialize the repositories - include: ../openshift-cluster/initialize_openshift_repos.yml - -- name: run node config setup - include: setup.yml - -- name: run node config - include: configure_nodes.yml - -- name: Re-enable excluders - include: enable_excluders.yml - -- name: Remove any undesired artifacts from build - include: clean_image.yml diff --git a/playbooks/common/openshift-node/lookup_plugins b/playbooks/common/openshift-node/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/common/openshift-node/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-node/manage_node.yml b/playbooks/common/openshift-node/manage_node.yml deleted file mode 100644 index f48a19a9c..000000000 --- a/playbooks/common/openshift-node/manage_node.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -- name: Additional node config - hosts: "{{ openshift_node_scale_up_group | default('oo_nodes_to_config') }}" - vars: - openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}" - roles: - - role: openshift_manage_node - openshift_master_host: "{{ groups.oo_first_master.0 }}" - tasks: - - name: Create group for deployment type - group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }} - changed_when: False diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml deleted file mode 100644 index b3a7399dc..000000000 --- a/playbooks/common/openshift-node/network_manager.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- include: ../openshift-cluster/evaluate_groups.yml - -- name: Install and configure NetworkManager - hosts: oo_all_hosts - become: yes - tasks: - - name: install NetworkManager - package: - name: 'NetworkManager' - state: present - - - name: configure NetworkManager - lineinfile: - dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" - regexp: '^{{ item }}=' - line: '{{ item }}=yes' - state: present - create: yes - with_items: - - 'USE_PEERDNS' - - 'NM_CONTROLLED' - - - name: enable and start NetworkManager - service: - name: 'NetworkManager' - state: started - enabled: yes diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml deleted file mode 100644 index c3beb59b7..000000000 --- a/playbooks/common/openshift-node/restart.yml +++ /dev/null @@ -1,61 +0,0 @@ ---- -- name: Restart nodes - hosts: oo_nodes_to_config - serial: "{{ openshift_restart_nodes_serial | default(1) }}" - - roles: - - lib_openshift - - tasks: - - name: Restart docker - service: - name: docker - state: restarted - register: l_docker_restart_docker_in_node_result - until: not l_docker_restart_docker_in_node_result | failed - retries: 3 - delay: 30 - - - name: Update docker facts - openshift_facts: - role: docker - - - name: Restart containerized services - service: - name: "{{ item }}" - state: started - with_items: - - etcd_container - - openvswitch - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-node" - failed_when: false - when: openshift.common.is_containerized | bool - - - name: Wait for master API to come back online - wait_for: - host: "{{ openshift.common.hostname }}" - state: started - delay: 10 - port: "{{ openshift.master.api_port }}" - timeout: 600 - when: inventory_hostname in groups.oo_masters_to_config - - - name: restart node - service: - name: "{{ openshift.common.service_type }}-node" - state: restarted - - - name: Wait for node to be ready - oc_obj: - state: list - kind: node - name: "{{ openshift.common.hostname | lower }}" - register: node_output - delegate_to: "{{ groups.oo_first_master.0 }}" - when: inventory_hostname in groups.oo_nodes_to_config - until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True - # Give the node two minutes to come back online. 
- retries: 24 - delay: 5 diff --git a/playbooks/common/openshift-node/roles b/playbooks/common/openshift-node/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/common/openshift-node/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-node/setup.yml b/playbooks/common/openshift-node/setup.yml deleted file mode 100644 index 794c03a67..000000000 --- a/playbooks/common/openshift-node/setup.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: Disable excluders - hosts: oo_nodes_to_config - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: disable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - -- name: Evaluate node groups - hosts: localhost - become: no - connection: local - tasks: - - name: Evaluate oo_containerized_master_nodes - add_host: - name: "{{ item }}" - groups: oo_containerized_master_nodes - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ groups.oo_nodes_to_config | default([]) }}" - when: - - hostvars[item].openshift is defined - - hostvars[item].openshift.common is defined - - hostvars[item].openshift.common.is_containerized | bool - - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) - changed_when: False |