Diffstat (limited to 'playbooks/common')
47 files changed, 1089 insertions, 371 deletions
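Several hunks below replace the old per-play Docker variables with a new "Set oo_options" play that fills each openshift_docker_* variable only when the inventory has not already defined it. A minimal runnable sketch of that guard pattern, using Ansible's built-in env lookup in place of the repo's custom oo_option plugin (the variable and environment names here are illustrative):

---
# Sketch of the "inventory wins, lookup is the fallback" pattern from the
# Set oo_options play. Run with: ansible-playbook sketch.yml
- hosts: localhost
  gather_facts: no
  tasks:
    # Only set the fact when the user did not already define it in
    # inventory or on the command line; the lookup may return "".
    - set_fact:
        openshift_docker_log_driver: "{{ lookup('env', 'DOCKER_LOG_DRIVER') }}"
      when: openshift_docker_log_driver is not defined

    - debug:
        msg: "docker log driver resolved to '{{ openshift_docker_log_driver }}'"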
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml index 1ac78468a..a34322754 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-cluster/additional_config.yml @@ -17,6 +17,7 @@ - role: openshift_master_cluster when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - role: openshift_examples + registry_url: "{{ openshift.master.registry_url }}" when: openshift.common.install_examples | bool - role: openshift_cluster_metrics when: openshift.common.use_cluster_metrics | bool @@ -27,30 +28,5 @@ (osm_use_cockpit | bool or osm_use_cockpit is undefined ) - role: flannel_register when: openshift.common.use_flannel | bool - - role: pods - when: openshift.common.deployment_type == 'online' - - role: os_env_extras - when: openshift.common.deployment_type == 'online' -- name: Create persistent volumes and create hosted services - hosts: oo_first_master - vars: - attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}" - deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}" - persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" - persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" - roles: - - role: openshift_persistent_volumes - when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 - - role: openshift_serviceaccounts - openshift_serviceaccounts_names: - - router - - registry - openshift_serviceaccounts_namespace: default - openshift_serviceaccounts_sccs: - - privileged - - role: openshift_router - when: deploy_infra | bool - - role: openshift_registry - registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim" - when: deploy_infra | bool and attach_registry_volume | bool + diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 23c8f039e..5fec11541 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,16 +1,42 @@ --- - include: evaluate_groups.yml +- include: initialize_facts.yml + - include: validate_hostnames.yml -- include: ../openshift-docker/config.yml +- name: Set oo_options + hosts: oo_all_hosts + tasks: + - set_fact: + openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" + when: openshift_docker_additional_registries is not defined + - set_fact: + openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}" + when: openshift_docker_insecure_registries is not defined + - set_fact: + openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" + when: openshift_docker_blocked_registries is not defined + - set_fact: + openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" + when: openshift_docker_options is not defined + - set_fact: + openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" + when: openshift_docker_log_driver is not defined + - set_fact: + openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" + when: openshift_docker_log_options is not defined - include: ../openshift-etcd/config.yml - include: ../openshift-nfs/config.yml +- include: ../openshift-loadbalancer/config.yml + - include: ../openshift-master/config.yml - include: additional_config.yml - 
include: ../openshift-node/config.yml + +- include: openshift_hosted.yml diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml new file mode 100644 index 000000000..f2bcc872f --- /dev/null +++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml @@ -0,0 +1,66 @@ +--- +- include: evaluate_groups.yml + +- name: Load openshift_facts + hosts: oo_masters_to_config:oo_nodes_to_config + roles: + - openshift_facts + post_tasks: + - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1" + when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool + +- name: Reconfigure masters to listen on our new dns_port + hosts: oo_masters_to_config + handlers: + - include: ../../../roles/openshift_master/handlers/main.yml + vars: + os_firewall_allow: + - service: skydns tcp + port: "{{ openshift.master.dns_port }}/tcp" + - service: skydns udp + port: "{{ openshift.master.dns_port }}/udp" + roles: + - os_firewall + tasks: + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + use_dnsmasq: True + - role: master + local_facts: + dns_port: '8053' + - modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: dnsConfig.bindAddress + yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}" + notify: restart master + - meta: flush_handlers + +- name: Configure nodes for dnsmasq + hosts: oo_nodes_to_config + handlers: + - include: ../../../roles/openshift_node/handlers/main.yml + pre_tasks: + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + use_dnsmasq: True + - role: node + local_facts: + dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" + vars: + openshift_deployment_type: "{{ deployment_type }}" + roles: + - openshift_node_dnsmasq + post_tasks: + - modify_yaml: + dest: "{{ openshift.common.config_base }}/node/node-config.yaml" + yaml_key: dnsIP + yaml_value: "{{ openshift.node.dns_ip }}" + notify: restart node diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 432a92b49..c5273b08f 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -29,12 +29,20 @@ msg: The nfs group must be limited to one host when: (groups[g_nfs_hosts] | default([])) | length > 1 + - name: Evaluate oo_all_hosts + add_host: + name: "{{ item }}" + groups: oo_all_hosts + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: g_all_hosts | default([]) + - name: Evaluate oo_masters add_host: name: "{{ item }}" groups: oo_masters ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}" - name: Evaluate oo_etcd_to_config @@ -42,7 +50,7 @@ name: "{{ item }}" groups: oo_etcd_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_etcd_hosts | default([]) }}" - name: Evaluate oo_masters_to_config @@ -50,7 +58,7 @@ name: "{{ item }}" groups: oo_masters_to_config ansible_ssh_user: "{{ g_ssh_user | 
default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}" - name: Evaluate oo_nodes_to_config @@ -58,7 +66,7 @@ name: "{{ item }}" groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}" # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is @@ -67,7 +75,7 @@ name: "{{ item }}" groups: oo_nodes_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_master_hosts | default([]) }}" when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined @@ -83,7 +91,7 @@ name: "{{ g_master_hosts[0] }}" groups: oo_first_master ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" when: g_master_hosts|length > 0 - name: Evaluate oo_lb_to_config @@ -91,7 +99,7 @@ name: "{{ item }}" groups: oo_lb_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_lb_hosts | default([]) }}" - name: Evaluate oo_nfs_to_config @@ -99,5 +107,5 @@ name: "{{ item }}" groups: oo_nfs_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_sudo: "{{ g_sudo | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_nfs_hosts | default([]) }}" diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml new file mode 100644 index 000000000..37f523246 --- /dev/null +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -0,0 +1,11 @@ +--- +- name: Initialize host facts + hosts: oo_all_hosts + any_errors_fatal: true + roles: + - openshift_facts + tasks: + - openshift_facts: + role: common + local_facts: + hostname: "{{ openshift_hostname | default(None) }}" diff --git a/playbooks/common/openshift-cluster/library b/playbooks/common/openshift-cluster/library new file mode 120000 index 000000000..d0b7393d3 --- /dev/null +++ b/playbooks/common/openshift-cluster/library @@ -0,0 +1 @@ +../../../library/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml new file mode 100644 index 000000000..811b3d685 --- /dev/null +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -0,0 +1,30 @@ +- name: Create persistent volumes and create hosted services + hosts: oo_first_master + vars: + attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}" + deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}" + persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}" + persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}" + roles: + - role: openshift_persistent_volumes + when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0 + - role: openshift_serviceaccounts + openshift_serviceaccounts_names: + - router + - registry + openshift_serviceaccounts_namespace: default + openshift_serviceaccounts_sccs: + - privileged + - role: openshift_registry + registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim" + when: deploy_infra | bool and attach_registry_volume | bool + - role: openshift_metrics + when: openshift.hosted.metrics.deploy | bool + +- name: Create Hosted Resources + hosts: oo_first_master + pre_tasks: + - set_fact: + openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" + roles: + - role: openshift_hosted diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml index 1474bb3ca..e3d16d359 100644 --- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml +++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml @@ -1,8 +1,15 @@ --- +- include: evaluate_groups.yml + - hosts: oo_hosts_to_update vars: openshift_deployment_type: "{{ deployment_type }}" roles: + # Explicitly calling openshift_facts because it appears that when + # rhel_subscribe is skipped that the openshift_facts dependency for + # openshift_repos is also skipped (this is the case at least for Ansible + # 2.0.2) + - openshift_facts - role: rhel_subscribe when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and ansible_distribution == "RedHat" and diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh new file mode 100644 index 000000000..9bbeff660 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# Here we don't really care if this is a master, api, controller or node image. +# We just need to know the version of one of them. 
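+#
+# For example, invoked as "openshift_container_versions.sh origin" the branch
+# below sets image_name=openshift/origin directly; for enterprise service
+# types it greps the node's systemd unit file to pick aep3/node or
+# openshift3/node before querying the image for its version.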
+unit_file=$(ls /etc/systemd/system/${1}*.service | grep -v node-dep | head -n1) + +if [ ${1} == "origin" ]; then + image_name="openshift/origin" +elif grep aep $unit_file 2>&1 > /dev/null; then + image_name="aep3/node" +elif grep openshift3 $unit_file 2>&1 > /dev/null; then + image_name="openshift3/node" +fi + +installed=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v') + +docker pull ${image_name} 2>&1 > /dev/null +available=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v') + +echo "---" +echo "curr_version: ${installed}" +echo "avail_version: ${available}" diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh new file mode 100644 index 000000000..7bf249742 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh @@ -0,0 +1,12 @@ +#!/bin/bash +if [ `which dnf 2> /dev/null` ]; then + installed=$(dnf repoquery --installed --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null) + available=$(dnf repoquery --available --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null) +else + installed=$(repoquery --plugins --pkgnarrow=installed --qf '%{version}-%{release}' "${@}" 2> /dev/null) + available=$(repoquery --plugins --pkgnarrow=available --qf '%{version}-%{release}' "${@}" 2> /dev/null) +fi + +echo "---" +echo "curr_version: ${installed}" +echo "avail_version: ${available}" diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh deleted file mode 100644 index 3a1a8ebb1..000000000 --- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ') - -yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ') - -echo "---" -echo "curr_version: ${yum_installed}" -echo "avail_version: ${yum_available}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml index 63c8ef756..e31e7f8a3 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml @@ -36,16 +36,17 @@ - name: Ensure AOS 3.0.2 or Origin 1.0.6 hosts: oo_first_master tasks: - fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later + - fail: + msg: "This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later" when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') ) - name: Update cluster policy hosts: oo_first_master tasks: - - name: oadm policy reconcile-cluster-roles --confirm + - name: oadm policy reconcile-cluster-roles --additive-only=true --confirm command: > {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-roles --confirm + policy reconcile-cluster-roles --additive-only=true --confirm - name: Upgrade default router hosts: oo_first_master @@ -108,5 +109,6 @@ vars: 
openshift_examples_import_command: "update" openshift_deployment_type: "{{ deployment_type }}" + registry_url: "{{ openshift.master.registry_url }}" roles: - openshift_examples diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index 0fb38f32e..c3c1240d8 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -54,7 +54,7 @@ - script: ../files/pre-upgrade-check -- name: Verify upgrade can proceed +- name: Verify upgrade targets hosts: oo_masters_to_config:oo_nodes_to_config vars: target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}" @@ -66,7 +66,7 @@ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" - name: Determine available versions - script: ../files/versions.sh {{ g_new_service_name }} openshift + script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift register: g_versions_result - set_fact: @@ -212,13 +212,10 @@ - name: Update deployment type hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + vars: + openshift_deployment_type: "{{ deployment_type }}" roles: - openshift_facts - post_tasks: - - openshift_facts: - role: common - local_facts: - deployment_type: "{{ deployment_type }}" - name: Update master facts hosts: oo_masters_to_config @@ -493,7 +490,7 @@ - name: Reconcile Cluster Roles command: > {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-roles --confirm + policy reconcile-cluster-roles --additive-only=true --confirm run_once: true - name: Reconcile Cluster Role Bindings @@ -572,6 +569,7 @@ # Update the existing templates - role: openshift_examples openshift_examples_import_command: replace + registry_url: "{{ openshift.master.registry_url }}" pre_tasks: - name: Collect all routers command: > diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml index 196393b2a..f030eed18 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml @@ -19,6 +19,7 @@ # Update the existing templates - role: openshift_examples openshift_examples_import_command: replace + registry_url: "{{ openshift.master.registry_url }}" pre_tasks: - name: Collect all routers command: > diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml index 12b9c84d3..85d7073f2 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml @@ -29,19 +29,20 @@ valid version for a {{ target_version }} upgrade when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<') -- name: Verify upgrade can proceed +- name: Verify upgrade targets hosts: oo_masters_to_config:oo_nodes_to_config vars: target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}" tasks: - name: Clean package cache command: "{{ ansible_pkg_mgr }} clean all" + when: not openshift.common.is_atomic | bool - set_fact: g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" - name: Determine available versions - script: ../files/versions.sh {{ 
g_new_service_name }} + script: ../files/rpm_versions.sh {{ g_new_service_name }} register: g_versions_result - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml index 54bb251f7..e5cfa58aa 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml @@ -12,7 +12,7 @@ openshift_version: "{{ openshift_pkg_version | default('') }}" tasks: - name: Upgrade master packages - command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}" + command: "{{ ansible_pkg_mgr}} update-to -y {{ openshift.common.service_type }}-master{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}" when: not openshift.common.is_containerized | bool - name: Ensure python-yaml present for config upgrade @@ -63,7 +63,7 @@ - openshift_facts tasks: - name: Upgrade node packages - command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}" + command: "{{ ansible_pkg_mgr }} update-to -y {{ openshift.common.service_type }}-node{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}" when: not openshift.common.is_containerized | bool - name: Restart node service @@ -103,7 +103,7 @@ - name: Reconcile Cluster Roles command: > {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-roles --confirm + policy reconcile-cluster-roles --additive-only=true --confirm run_once: true - name: Reconcile Cluster Role Bindings diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 new file mode 120000 index 000000000..cf20e8959 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 @@ -0,0 +1 @@ +../../../../../roles/openshift_master/templates/atomic-openshift-master.j2
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml new file mode 100644 index 000000000..319758a06 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml @@ -0,0 +1,11 @@ +- include_vars: ../../../../../roles/openshift_node/vars/main.yml + +- name: Update systemd units + include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }} + +- name: Verifying the correct version was configured + shell: grep {{ verify_upgrade_version }} {{ item }} + with_items: + - /etc/sysconfig/openvswitch + - /etc/sysconfig/{{ openshift.common.service_type }}* + when: verify_upgrade_version is defined diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker new file mode 120000 index 000000000..5a3dd12b3 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker @@ -0,0 +1 @@ +../../../../../roles/openshift_master/templates/docker
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster new file mode 120000 index 000000000..3ee319365 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster @@ -0,0 +1 @@ +../../../../../roles/openshift_master/templates/docker-cluster
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml new file mode 100644 index 000000000..c7b18f51b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml @@ -0,0 +1,14 @@ +- name: Check if Docker is installed + command: rpm -q docker + register: pkg_check + failed_when: pkg_check.rc > 1 + changed_when: no + +- name: Upgrade Docker + command: "{{ ansible_pkg_mgr}} update -y docker" + when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<') + register: docker_upgrade + +- name: Restart Docker + command: systemctl restart docker + when: docker_upgrade | changed diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins new file mode 120000 index 000000000..27ddaa18b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library new file mode 120000 index 000000000..53bed9684 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library @@ -0,0 +1 @@ +../library
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins new file mode 120000 index 000000000..cf407f69b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins @@ -0,0 +1 @@ +../../../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster new file mode 120000 index 000000000..f44f8eb4f --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster @@ -0,0 +1 @@ +../../../../../roles/openshift_master/templates/native-cluster
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml new file mode 100644 index 000000000..a911f12be --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml @@ -0,0 +1,24 @@ +- name: Prepare for Node evacuation + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false + delegate_to: "{{ groups.oo_first_master.0 }}" + +- name: Evacuate Node for Kubelet upgrade + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force + delegate_to: "{{ groups.oo_first_master.0 }}" + +- include: rpm_upgrade.yml + vars: + component: "node" + openshift_version: "{{ openshift_pkg_version | default('') }}" + when: not openshift.common.is_containerized | bool + +- include: containerized_upgrade.yml + when: openshift.common.is_containerized | bool + +- name: Set node schedulability + command: > + {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true + delegate_to: "{{ groups.oo_first_master.0 }}" + when: openshift.node.schedulable | bool diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml new file mode 100644 index 000000000..c16965a35 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml @@ -0,0 +1,59 @@ +--- +############################################################################### +# Post upgrade - Upgrade default router, default registry and examples +############################################################################### +- name: Upgrade default router and default registry + hosts: oo_first_master + vars: + openshift_deployment_type: "{{ deployment_type }}" + registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}" + router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}" + oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + roles: + - openshift_manageiq + # Create the new templates shipped in 3.2, existing templates are left + # unmodified. This prevents the subsequent role definition for + # openshift_examples from failing when trying to replace templates that do + # not already exist. We could have potentially done a replace --force to + # create and update in one step. 
+ - openshift_examples + # Update the existing templates + - role: openshift_examples + registry_url: "{{ openshift.master.registry_url }}" + openshift_examples_import_command: replace + pre_tasks: + - name: Collect all routers + command: > + {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json + register: all_routers + failed_when: false + changed_when: false + + - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}" + when: all_routers.rc == 0 + + - set_fact: haproxy_routers=[] + when: all_routers.rc != 0 + + - name: Update router image to current version + when: all_routers.rc == 0 + command: > + {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' + --api-version=v1 + with_items: haproxy_routers + + - name: Check for default registry + command: > + {{ oc_cmd }} get -n default dc/docker-registry + register: _default_registry + failed_when: false + changed_when: false + + - name: Update registry image to current version + when: _default_registry.rc == 0 + command: > + {{ oc_cmd }} patch dc/docker-registry -n default -p + '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' + --api-version=v1 + diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml new file mode 100644 index 000000000..f163cca86 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml @@ -0,0 +1,345 @@ +--- +############################################################################### +# Evaluate host groups and gather facts +############################################################################### +- name: Load openshift_facts and update repos + hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_facts + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_config + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + +- name: Evaluate additional groups for upgrade + hosts: localhost + connection: local + become: no + tasks: + - name: Evaluate etcd_hosts_to_backup + add_host: + name: "{{ item }}" + groups: etcd_hosts_to_backup + with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master + +############################################################################### +# Pre-upgrade checks +############################################################################### +- name: Verify upgrade can proceed + hosts: oo_first_master + vars: + target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}" + 
g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}" + gather_facts: no + tasks: + - fail: + msg: > + This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online + deployment types + when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online'] + + - fail: + msg: > + This upgrade does not support Pacemaker: + https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html + when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker' + + - fail: + msg: > + openshift_pkg_version is {{ openshift_pkg_version }} which is not a + valid version for a {{ target_version }} upgrade + when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<') + + - fail: + msg: > + openshift_image_tag is {{ openshift_image_tag }} which is not a + valid version for a {{ target_version }} upgrade + when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<') + +- name: Verify master processes + hosts: oo_masters_to_config + roles: + - openshift_facts + tasks: + - openshift_facts: + role: master + local_facts: + ha: "{{ groups.oo_masters_to_config | length > 1 }}" + + - name: Ensure Master is running + service: + name: "{{ openshift.common.service_type }}-master" + state: started + enabled: yes + when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool + + - name: Ensure HA Master is running + service: + name: "{{ openshift.common.service_type }}-master-api" + state: started + enabled: yes + when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool + + - name: Ensure HA Master is running + service: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started + enabled: yes + when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool + +- name: Verify node processes + hosts: oo_nodes_to_config + roles: + - openshift_facts + tasks: + - name: Ensure Node is running + service: + name: "{{ openshift.common.service_type }}-node" + state: started + enabled: yes + when: openshift.common.is_containerized | bool + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_config + vars: + target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}" + openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + upgrading: True + handlers: + - include: ../../../../../roles/openshift_master/handlers/main.yml + - include: ../../../../../roles/openshift_node/handlers/main.yml + roles: + # We want the cli role to evaluate so that the containerized oc/oadm wrappers + # are modified to use the correct image tag. However, this can trigger a + # docker restart if new configuration is laid down which would immediately + # pull the latest image and defeat the purpose of these tasks. 
+ - { role: openshift_cli } + pre_tasks: + - name: Clean package cache + command: "{{ ansible_pkg_mgr }} clean all" + when: not openshift.common.is_atomic | bool + + - set_fact: + g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" + when: not openshift.common.is_containerized | bool + + - name: Determine available versions + script: ../files/rpm_versions.sh {{ g_new_service_name }} + register: g_rpm_versions_result + when: not openshift.common.is_containerized | bool + + - set_fact: + g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}" + when: not openshift.common.is_containerized | bool + + - name: Determine available versions + script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }} + register: g_containerized_versions_result + when: openshift.common.is_containerized | bool + + - set_fact: + g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}" + when: openshift.common.is_containerized | bool + + - set_fact: + g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}" + when: openshift_pkg_version is not defined + + - set_fact: + g_new_version: "{{ openshift_pkg_version | replace('-','') }}" + when: openshift_pkg_version is defined + + - set_fact: + g_new_version: "{{ openshift_image_tag | replace('v','') }}" + when: openshift_image_tag is defined + + - fail: + msg: Verifying the correct version was found + when: g_aos_versions.curr_version == "" + + - fail: + msg: Verifying the correct version was found + when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version + + - include_vars: ../../../../../roles/openshift_master/vars/main.yml + when: inventory_hostname in groups.oo_masters_to_config + + - name: Update systemd units + include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }} + when: inventory_hostname in groups.oo_masters_to_config + + - include_vars: ../../../../../roles/openshift_node/vars/main.yml + when: inventory_hostname in groups.oo_nodes_to_config + + - name: Update systemd units + include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }} + when: inventory_hostname in groups.oo_nodes_to_config + + # Note: the version number is hardcoded here in hopes of catching potential + # bugs in how g_aos_versions.curr_version is set + - name: Verifying the correct version is installed for upgrade + shell: grep 3.1.1.6 {{ item }} + with_items: + - /etc/sysconfig/openvswitch + - /etc/sysconfig/{{ openshift.common.service_type }}* + when: verify_upgrade_version is defined + + - name: Verifying the image version is used in the systemd unit + shell: grep IMAGE_VERSION {{ item }} + with_items: + - /etc/systemd/system/openvswitch.service + - /etc/systemd/system/{{ openshift.common.service_type }}*.service + when: openshift.common.is_containerized | bool + + - fail: + msg: This playbook requires Origin 1.1 or later + when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<') + + - fail: + msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later + when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<') + + - fail: + msg: Upgrade packages not found + when: openshift_image_tag is not defined and (g_aos_versions.avail_version | 
default(g_aos_versions.curr_version, true) | version_compare(target_version, '<')) + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config + tasks: + - name: Determine available Docker + script: ../files/rpm_versions.sh docker + register: g_docker_version_result + when: not openshift.common.is_atomic | bool + + - name: Determine available Docker + shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker" + register: g_atomic_docker_version_result + when: openshift.common.is_atomic | bool + + - set_fact: + g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}" + when: not openshift.common.is_atomic | bool + + - set_fact: + g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" + when: openshift.common.is_atomic | bool + + - fail: + msg: This playbook requires access to Docker 1.9 or later + when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<') + + # TODO: add check to upgrade ostree to get latest Docker + + - set_fact: + pre_upgrade_complete: True + + +############################################################################## +# Gate on pre-upgrade checks +############################################################################## +- name: Gate on pre-upgrade checks + hosts: localhost + connection: local + become: no + vars: + pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}" + tasks: + - set_fact: + pre_upgrade_completed: "{{ hostvars + | oo_select_keys(pre_upgrade_hosts) + | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}" + - set_fact: + pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}" + when: pre_upgrade_failed | length > 0 + +############################################################################### +# Backup etcd +############################################################################### +- name: Backup etcd + hosts: etcd_hosts_to_backup + vars: + embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + roles: + - openshift_facts + tasks: + # Ensure we persist the etcd role for this host in openshift_facts + - openshift_facts: + role: etcd + local_facts: {} + when: "'etcd' not in openshift" + + - stat: path=/var/lib/openshift + register: var_lib_openshift + + - stat: path=/var/lib/origin + register: var_lib_origin + + - name: Create origin symlink if necessary + file: src=/var/lib/openshift/ dest=/var/lib/origin state=link + when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False + + # TODO: replace shell module with command and update later checks + # We assume to be using the data dir for all backups. 
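+ # Worked example of the guard below: if du reports 300000 KB used by the
+ # embedded etcd data dir and df reports only 200000 KB free under
+ # openshift.common.data_dir, 300000 > 200000 and the play fails before
+ # etcdctl backup is attempted.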
+ - name: Check available disk space for etcd backup + shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 + register: avail_disk + + # TODO: replace shell module with command and update later checks + - name: Check current embedded etcd disk usage + shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 + register: etcd_disk_usage + when: embedded_etcd | bool + + - name: Abort if insufficient disk space for etcd backup + fail: + msg: > + {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, + {{ avail_disk.stdout }} Kb available. + when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) + + - name: Install etcd (for etcdctl) + action: "{{ ansible_pkg_mgr }} name=etcd state=latest" + when: not openshift.common.is_atomic | bool + + - name: Generate etcd backup + command: > + etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }} + --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }} + + - set_fact: + etcd_backup_complete: True + + - name: Display location of etcd backup + debug: + msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}" + + +############################################################################## +# Gate on etcd backup +############################################################################## +- name: Gate on etcd backup + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.etcd_hosts_to_backup) + | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" + when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles new file mode 120000 index 000000000..6bc1a7aef --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles @@ -0,0 +1 @@ +../../../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml new file mode 100644 index 000000000..5c96ad094 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml @@ -0,0 +1,9 @@ +- name: Upgrade packages + command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}-{{ g_new_version }}" + +- name: Ensure python-yaml present for config upgrade + action: "{{ ansible_pkg_mgr }} name=PyYAML state=present" + when: not openshift.common.is_atomic | bool + +- name: Restart node service + service: name="{{ openshift.common.service_type }}-node" state=restarted diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml new file mode 100644 index 000000000..964257af5 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml @@ -0,0 +1,192 @@ +--- +############################################################################### +# The restart playbook should be run after this playbook completes. +############################################################################### + +- name: Upgrade docker + hosts: oo_masters_to_config:oo_nodes_to_config + roles: + - openshift_facts + tasks: + - include: docker_upgrade.yml + when: not openshift.common.is_atomic | bool + - name: Set post docker install facts + openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: docker + local_facts: + openshift_image_tag: "v{{ g_new_version }}" + openshift_version: "{{ g_new_version }}" + +- name: Upgrade docker + hosts: oo_etcd_to_config + roles: + - openshift_facts + tasks: + # Upgrade docker when host is not atomic and host is not a non-containerized etcd node + - include: docker_upgrade.yml + when: not openshift.common.is_atomic | bool and not ('oo_etcd_to_config' in group_names and not openshift.common.is_containerized) + +# The cli image is used by openshift_docker_facts to determine the currently installed +# version. We need to explicitly pull the latest image to handle cases where +# the locally cached 'latest' tag is older than g_new_version. 
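+# For example, on an enterprise install openshift.common.cli_image is
+# typically something like openshift3/ose (illustrative value), so the task
+# below runs "docker pull openshift3/ose:latest" before version detection.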
+- name: Download cli image + hosts: oo_masters_to_config:oo_nodes_to_config + roles: + - { role: openshift_docker_facts } + vars: + openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + tasks: + - name: Pull Images + command: > + docker pull {{ item }}:latest + with_items: + - "{{ openshift.common.cli_image }}" + when: openshift.common.is_containerized | bool + +############################################################################### +# Upgrade Masters +############################################################################### +- name: Upgrade master + hosts: oo_masters_to_config + handlers: + - include: ../../../../../roles/openshift_master/handlers/main.yml + roles: + - openshift_facts + tasks: + - include: rpm_upgrade.yml component=master + when: not openshift.common.is_containerized | bool + + - include_vars: ../../../../../roles/openshift_master/vars/main.yml + + - name: Update systemd units + include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }} + +# - name: Upgrade master configuration +# openshift_upgrade_config: +# from_version: '3.1' +# to_version: '3.2' +# role: master +# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}" + +- name: Set master update status to complete + hosts: oo_masters_to_config + tasks: + - set_fact: + master_update_complete: True + +############################################################################## +# Gate on master update complete +############################################################################## +- name: Gate on master update + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + master_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'master_update_complete': true}) }}" + - set_fact: + master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}" + when: master_update_failed | length > 0 + +############################################################################### +# Upgrade Nodes +############################################################################### +- name: Upgrade nodes + hosts: oo_nodes_to_config + serial: 1 + roles: + - openshift_facts + handlers: + - include: ../../../../../roles/openshift_node/handlers/main.yml + tasks: + - include: node_upgrade.yml + + - set_fact: + node_update_complete: True + +############################################################################## +# Gate on nodes update +############################################################################## +- name: Gate on nodes update + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + node_update_completed: "{{ hostvars + | oo_select_keys(groups.oo_nodes_to_config) + | oo_collect('inventory_hostname', {'node_update_complete': true}) }}" + - set_fact: + node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}" + - fail: + msg: "Upgrade cannot continue. 
The following nodes did not finish updating: {{ node_update_failed | join(',') }}" + when: node_update_failed | length > 0 + +############################################################################### +# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints +############################################################################### + +- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints + hosts: oo_masters_to_config + roles: + - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" } + vars: + origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}" + ent_reconcile_bindings: true + openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" + upgrading: True + tasks: + - name: Verifying the correct commandline tools are available + shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} + when: openshift.common.is_containerized | bool and verify_upgrade_version is defined + + - name: Reconcile Cluster Roles + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --additive-only=true --confirm + run_once: true + + - name: Reconcile Cluster Role Bindings + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-role-bindings + --exclude-groups=system:authenticated + --exclude-groups=system:authenticated:oauth + --exclude-groups=system:unauthenticated + --exclude-users=system:anonymous + --additive-only=true --confirm + when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + run_once: true + + - name: Reconcile Security Context Constraints + command: > + {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true + run_once: true + + - set_fact: + reconcile_complete: True + +############################################################################## +# Gate on reconcile +############################################################################## +- name: Gate on reconcile + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + reconcile_completed: "{{ hostvars + | oo_select_keys(groups.oo_masters_to_config) + | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" + - set_fact: + reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" + - fail: + msg: "Upgrade cannot continue. 
The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" + when: reconcile_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml index fd82997b9..50e25984f 100644 --- a/playbooks/common/openshift-cluster/validate_hostnames.yml +++ b/playbooks/common/openshift-cluster/validate_hostnames.yml @@ -1,19 +1,9 @@ --- -- include: evaluate_groups.yml - - name: Gather and set facts for node hosts hosts: oo_nodes_to_config roles: - openshift_facts tasks: - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: common - local_facts: - hostname: "{{ openshift_hostname | default(None) }}" - public_hostname: "{{ openshift_public_hostname | default(None) }}" - shell: getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }' register: lookupip diff --git a/playbooks/common/openshift-docker/config.yml b/playbooks/common/openshift-docker/config.yml deleted file mode 100644 index 092d5533c..000000000 --- a/playbooks/common/openshift-docker/config.yml +++ /dev/null @@ -1,9 +0,0 @@ -- name: Configure docker hosts - hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config - vars: - docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" - docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" - docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}" - roles: - - openshift_facts - - openshift_docker diff --git a/playbooks/common/openshift-docker/roles b/playbooks/common/openshift-docker/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/common/openshift-docker/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 93eb157cb..a95de8cf3 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -1,21 +1,14 @@ --- - name: Set etcd facts needed for generating certs hosts: oo_etcd_to_config + any_errors_fatal: true roles: - openshift_facts tasks: - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: common - local_facts: - hostname: "{{ openshift_hostname | default(None) }}" - public_hostname: "{{ openshift_public_hostname | default(None) }}" - deployment_type: "{{ openshift_deployment_type }}" - - role: etcd - local_facts: - etcd_image: "{{ osm_etcd_image | default(None) }}" + role: etcd + local_facts: + etcd_image: "{{ osm_etcd_image | default(None) }}" - name: Check status of etcd certificates stat: path: "{{ item }}" @@ -30,6 +23,8 @@ etcd_cert_subdir: etcd-{{ openshift.common.hostname }} etcd_cert_config_dir: /etc/etcd etcd_cert_prefix: + etcd_hostname: "{{ openshift.common.hostname }}" + etcd_ip: "{{ openshift.common.ip }}" - name: Create temp directory for syncing certs hosts: localhost @@ -51,7 +46,7 @@ | oo_filter_list(filter_attr='etcd_server_certs_missing') }}" sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}" roles: - - etcd_certificates + - openshift_etcd_certificates post_tasks: - name: Create a tarball of the etcd certs command: > @@ -59,7 +54,7 @@ -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} . args: creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" - with_items: etcd_needing_server_certs + with_items: "{{ etcd_needing_server_certs | default([]) }}" - name: Retrieve the etcd cert tarballs fetch: src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz" @@ -67,7 +62,7 @@ flat: yes fail_on_missing: yes validate_checksum: yes - with_items: etcd_needing_server_certs + with_items: "{{ etcd_needing_server_certs | default([]) }}" # Configure a first etcd host to avoid conflicts in choosing a leader # if other members come online too quickly. @@ -77,7 +72,7 @@ sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}" etcd_url_scheme: https etcd_peer_url_scheme: https - etcd_peers_group: oo_etcd_to_config + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" pre_tasks: - name: Ensure certificate directory exists file: @@ -89,8 +84,8 @@ dest: "{{ etcd_cert_config_dir }}" when: etcd_server_certs_missing roles: - - etcd - - role: nickhammond.logrotate + - openshift_etcd + - nickhammond.logrotate # Configure the remaining etcd hosts, skipping the first one we dealt with above. 
- name: Configure remaining etcd hosts @@ -99,7 +94,7 @@ sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}" etcd_url_scheme: https etcd_peer_url_scheme: https - etcd_peers_group: oo_etcd_to_config + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" pre_tasks: - name: Ensure certificate directory exists file: @@ -111,7 +106,7 @@ dest: "{{ etcd_cert_config_dir }}" when: etcd_server_certs_missing roles: - - etcd + - openshift_etcd - role: nickhammond.logrotate - name: Delete temporary directory on localhost diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml new file mode 100644 index 000000000..f4392173a --- /dev/null +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -0,0 +1,5 @@ +--- +- name: Configure load balancers + hosts: oo_lb_to_config + roles: + - role: openshift_loadbalancer diff --git a/playbooks/common/openshift-docker/filter_plugins b/playbooks/common/openshift-loadbalancer/filter_plugins index 99a95e4ca..99a95e4ca 120000 --- a/playbooks/common/openshift-docker/filter_plugins +++ b/playbooks/common/openshift-loadbalancer/filter_plugins diff --git a/playbooks/common/openshift-docker/lookup_plugins b/playbooks/common/openshift-loadbalancer/lookup_plugins index ac79701db..ac79701db 120000 --- a/playbooks/common/openshift-docker/lookup_plugins +++ b/playbooks/common/openshift-loadbalancer/lookup_plugins diff --git a/playbooks/common/openshift-loadbalancer/roles b/playbooks/common/openshift-loadbalancer/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-loadbalancer/roles @@ -0,0 +1 @@ +../../../roles/
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
new file mode 100644
index 000000000..e06a14c89
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/service.yml
@@ -0,0 +1,20 @@
+---
+- name: Populate g_service_nodes host group if needed
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - fail: msg="new_cluster_state is required to be injected in this playbook"
+    when: new_cluster_state is not defined
+
+  - name: Evaluate g_service_lb
+    add_host: name={{ item }} groups=g_service_lb
+    with_items: oo_host_group_exp | default([])
+
+- name: Change state on lb instance(s)
+  hosts: g_service_lb
+  connection: ssh
+  gather_facts: no
+  tasks:
+  - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index acd2f5b11..7a59f3ea3 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,6 +1,9 @@
 ---
 - name: Set master facts and determine if external etcd certs need to be generated
   hosts: oo_masters_to_config
+  vars:
+    t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
+
   pre_tasks:
   - name: Check for RPM generated config marker file .config_managed
     stat:
@@ -28,42 +31,43 @@
                                | default([]))
                                | oo_collect('openshift.common.hostname')
                                | default(none, true) }}"
-
+ 
   - set_fact:
-      openshift_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') | default(openshift.common.debug_level, true) }}"
-    when: openshift_master_debug_level is not defined
+      openshift_master_debug_level: "{{ t_oo_option_master_debug_level }}"
+    when: openshift_master_debug_level is not defined and t_oo_option_master_debug_level != ""
+  - set_fact:
+      openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+    when: openshift_master_default_subdomain is not defined
+  - set_fact:
+      openshift_hosted_metrics_deploy: "{{ lookup('oo_option', 'openshift_hosted_metrics_deploy') | default(false, true) }}"
+    when: openshift_hosted_metrics_deploy is not defined
+  - set_fact:
+      openshift_hosted_metrics_duration: "{{ lookup('oo_option', 'openshift_hosted_metrics_duration') | default(7) }}"
+    when: openshift_hosted_metrics_duration is not defined
+  - set_fact:
+      openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}"
+    when: openshift_hosted_metrics_resolution is not defined
   roles:
   - openshift_facts
   post_tasks:
   - openshift_facts:
-      role: "{{ item.role }}"
-      local_facts: "{{ item.local_facts }}"
-    with_items:
-      - role: common
-        local_facts:
-          hostname: "{{ openshift_hostname | default(None) }}"
-          ip: "{{ openshift_ip | default(None) }}"
-          public_hostname: "{{ openshift_public_hostname | default(None) }}"
-          public_ip: "{{ openshift_public_ip | default(None) }}"
-          deployment_type: "{{ openshift_deployment_type }}"
-      - role: master
-        local_facts:
-          api_port: "{{ openshift_master_api_port | default(None) }}"
-          api_url: "{{ openshift_master_api_url | default(None) }}"
-          api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
-          controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
-          public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
-          cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
-          cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
-          console_path: "{{ openshift_master_console_path | default(None) }}"
-          console_port: "{{ openshift_master_console_port | default(None) }}"
-          console_url: "{{ openshift_master_console_url | default(None) }}"
-          console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
-          public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
-          portal_net: "{{ openshift_master_portal_net | default(None) }}"
-          ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
-          master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
+      role: master
+      local_facts:
+        api_port: "{{ openshift_master_api_port | default(None) }}"
+        api_url: "{{ openshift_master_api_url | default(None) }}"
+        api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+        controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
+        public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+        cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
+        cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
+        console_path: "{{ openshift_master_console_path | default(None) }}"
+        console_port: "{{ openshift_master_console_port | default(None) }}"
+        console_url: "{{ openshift_master_console_url | default(None) }}"
+        console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
+        public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
+        master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
   - openshift_facts:
       role: hosted
       openshift_env:
@@ -83,6 +87,8 @@
       etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
       etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
       etcd_cert_prefix: master.etcd-
+      etcd_hostname: "{{ openshift.common.hostname }}"
+      etcd_ip: "{{ openshift.common.ip }}"
     when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config

 - name: Create temp directory for syncing certs
@@ -106,7 +112,7 @@
                                 | oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
   roles:
-  - etcd_certificates
+  - openshift_etcd_certificates
   post_tasks:
   - name: Create a tarball of the etcd certs
     command: >
@@ -114,7 +120,7 @@
       -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
     args:
      creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
-    with_items: etcd_needing_client_certs
+    with_items: "{{ etcd_needing_client_certs | default([]) }}"
   - name: Retrieve the etcd cert tarballs
     fetch:
       src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
@@ -122,7 +128,7 @@
       flat: yes
       fail_on_missing: yes
       validate_checksum: yes
-    with_items: etcd_needing_client_certs
+    with_items: "{{ etcd_needing_client_certs | default([]) }}"

 - name: Copy the external etcd certs to the masters
   hosts: oo_masters_to_config
@@ -172,7 +178,7 @@
   - name: Check status of master certificates
     stat:
       path: "{{ openshift.common.config_base }}/master/{{ item }}"
-    with_items: openshift_master_certs
+    with_items: "{{ openshift_master_certs }}"
     register: g_master_cert_stat_result
   - set_fact:
       master_certs_missing: "{{ False in (g_master_cert_stat_result.results
@@ -198,6 +204,7 @@
                               | oo_collect('openshift.common.all_hostnames')
                               | oo_flatten | unique }}"
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
   roles:
   - openshift_master_certificates
   post_tasks:
@@ -207,7 +214,7 @@
       state: absent
     when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
     with_nested:
-    - masters_needing_certs
+    - "{{ masters_needing_certs | default([]) }}"
    -
      - master.etcd-client.crt
      - master.etcd-client.key
@@ -217,7 +224,7 @@
       -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
     args:
       creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
-    with_items: masters_needing_certs
+    with_items: "{{ masters_needing_certs | default([]) }}"

   - name: Retrieve the master cert tarball from the master
     fetch:
@@ -226,31 +233,7 @@
       flat: yes
       fail_on_missing: yes
       validate_checksum: yes
-    with_items: masters_needing_certs
-
-- name: Configure load balancers
-  hosts: oo_lb_to_config
-  vars:
-    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
-    haproxy_frontend_port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
-    haproxy_frontends:
-    - name: atomic-openshift-api
-      mode: tcp
-      options:
-      - tcplog
-      binds:
-      - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
-      default_backend: atomic-openshift-api
-    haproxy_backends:
-    - name: atomic-openshift-api
-      mode: tcp
-      option: tcplog
-      balance: source
-      servers: "{{ hostvars | oo_select_keys(groups['oo_masters']) | oo_haproxy_backend_masters }}"
-  roles:
-  - role: openshift_facts
-  - role: haproxy
-    when: hostvars[groups.oo_first_master.0].openshift.master.ha | bool
+    with_items: "{{ masters_needing_certs | default([]) }}"

 - name: Check for cached session secrets
   hosts: oo_first_master
@@ -316,13 +299,14 @@
     file:
       path: "{{ named_certs_dir }}"
       state: directory
+      mode: 0700
     when: named_certs_specified | bool
   - name: Land named certificates
     copy: src="{{ item.certfile }}" dest="{{ named_certs_dir }}"
     with_items: openshift_master_named_certificates
     when: named_certs_specified | bool
   - name: Land named certificate keys
-    copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}"
+    copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}" mode=0600
     with_items: openshift_master_named_certificates
     when: named_certs_specified | bool
@@ -336,6 +320,14 @@
     openshift_master_count: "{{ openshift.master.master_count }}"
     openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
     openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
+    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+    openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool }}"
   pre_tasks:
   - name: Ensure certificate directory exists
     file:
@@ -357,13 +349,6 @@
     group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
     changed_when: False

-# Additional instance config for online deployments
-- name: Additional instance config
-  hosts: oo_masters_deployment_type_online
-  roles:
-  - pods
-  - os_env_extras
-
 - name: Delete temporary directory on localhost
   hosts: localhost
   connection: local
diff --git a/playbooks/common/openshift-master/library b/playbooks/common/openshift-master/library
new file mode 120000
index 000000000..d0b7393d3
--- /dev/null
+++ b/playbooks/common/openshift-master/library
@@ -0,0 +1 @@
+../../../library/
\ No newline at end of file
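The added no_proxy block above also appears twice in the openshift-node changes below; in all three places it only takes effect when a proxy is configured and openshift_generate_no_proxy_hosts (default true) has not been switched off. A sketch of inventory variables that would exercise the new path; only the variable names come from this diff, the values are placeholders:

    # Hypothetical inventory/group_vars for a cluster behind a proxy.
    openshift_http_proxy: http://proxy.example.com:3128
    openshift_https_proxy: http://proxy.example.com:3128
    openshift_generate_no_proxy_hosts: true   # default; shown for clarity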
diff --git a/playbooks/common/openshift-master/library/modify_yaml.py b/playbooks/common/openshift-master/library/modify_yaml.py
deleted file mode 100755
index a4be10ca3..000000000
--- a/playbooks/common/openshift-master/library/modify_yaml.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-''' modify_yaml ansible module '''
-
-import yaml
-
-DOCUMENTATION = '''
----
-module: modify_yaml
-short_description: Modify yaml key value pairs
-author: Andrew Butcher
-requirements: [ ]
-'''
-EXAMPLES = '''
-- modify_yaml:
-    dest: /etc/origin/master/master-config.yaml
-    yaml_key: 'kubernetesMasterConfig.masterCount'
-    yaml_value: 2
-'''
-
-def main():
-    ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting
-        the key to the desired value.
-    '''
-
-    # disabling pylint errors for global-variable-undefined and invalid-name
-    # for 'global module' usage, since it is required to use ansible_facts
-    # pylint: disable=global-variable-undefined, invalid-name,
-    # redefined-outer-name
-    global module
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            dest=dict(required=True),
-            yaml_key=dict(required=True),
-            yaml_value=dict(required=True),
-            backup=dict(required=False, default=True, type='bool'),
-        ),
-        supports_check_mode=True,
-    )
-
-    dest = module.params['dest']
-    yaml_key = module.params['yaml_key']
-    yaml_value = module.safe_eval(module.params['yaml_value'])
-    backup = module.params['backup']
-
-    # Represent null values as an empty string.
-    # pylint: disable=missing-docstring, unused-argument
-    def none_representer(dumper, data):
-        return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'')
-    yaml.add_representer(type(None), none_representer)
-
-    try:
-        changes = []
-
-        yaml_file = open(dest)
-        yaml_data = yaml.safe_load(yaml_file.read())
-        yaml_file.close()
-
-        ptr = yaml_data
-        for key in yaml_key.split('.'):
-            if key not in ptr and key != yaml_key.split('.')[-1]:
-                ptr[key] = {}
-            elif key == yaml_key.split('.')[-1]:
-                if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr):
-                    ptr[key] = yaml_value
-                    changes.append((yaml_key, yaml_value))
-            else:
-                ptr = ptr[key]
-
-        if len(changes) > 0:
-            if backup:
-                module.backup_local(dest)
-            yaml_file = open(dest, 'w')
-            yaml_string = yaml.dump(yaml_data, default_flow_style=False)
-            yaml_string = yaml_string.replace('\'\'', '""')
-            yaml_file.write(yaml_string)
-            yaml_file.close()
-
-        return module.exit_json(changed=(len(changes) > 0), changes=changes)
-
-    # ignore broad-except error to avoid stack trace to ansible user
-    # pylint: disable=broad-except
-    except Exception, e:
-        return module.fail_json(msg=str(e))
-
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-# import module snippets
-from ansible.module_utils.basic import *
-
-if __name__ == '__main__':
-    main()
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 02449e40d..57a63cfee 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -97,7 +97,7 @@
       name: "{{ item }}"
       groups: oo_active_masters
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ groups.oo_masters_to_config | default([]) }}"
     when: (hostvars[item]['is_active'] | default(false)) | bool
   - name: Evaluate oo_current_masters
@@ -105,7 +105,7 @@
       name: "{{ item }}"
       groups: oo_current_masters
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ groups.oo_masters_to_config | default([]) }}"
     when: (hostvars[item]['current_host'] | default(false)) | bool
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 6f8151d30..6e6cb3e01 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -33,9 +33,10 @@
       service: name={{ openshift.common.service_type }}-master-controllers state=restarted
   - name: verify api server
     command: >
-      curl -k --head --silent {{ openshift.master.api_url }}
+      curl --silent --cacert {{ openshift.common.config_base }}/master/ca.crt
+      {{ openshift.master.api_url }}/healthz/ready
     register: api_available_output
-    until: api_available_output.stdout.find("200 OK") != -1
+    until: api_available_output.stdout == 'ok'
     retries: 120
     delay: 1
     changed_when: false
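The verify-api-server rewrite above (mirrored in openshift-node/config.yml below) trades an insecure HEAD probe for a CA-verified GET of /healthz/ready, matching the literal response body 'ok' rather than an HTTP status line. For comparison, a hypothetical uri-module version of the same check; these playbooks deliberately keep curl because uri requires python-httplib2 on the target, and uri would validate against the system trust store rather than the master CA:

    # Hypothetical uri-based equivalent of the curl health check.
    - name: verify api server
      uri:
        url: "{{ openshift.master.api_url }}/healthz/ready"
        return_content: yes
      register: api_health
      until: api_health.content == 'ok'
      retries: 120
      delay: 1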
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 7edea9160..b3491ef8d 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,10 +1,12 @@
 ---
 - name: Gather and set facts for node hosts
   hosts: oo_nodes_to_config
+  vars:
+    t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
   pre_tasks:
   - set_fact:
-      openshift_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') | default(openshift.common.debug_level, true) }}"
-    when: openshift_node_debug_level is not defined
+      openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}"
+    when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != ""
   roles:
   - openshift_facts
   tasks:
@@ -12,20 +14,11 @@
   # configured, we need to make sure to set the node properties beforehand if
   # we do not want the defaults
   - openshift_facts:
-      role: "{{ item.role }}"
-      local_facts: "{{ item.local_facts }}"
-    with_items:
-      - role: common
-        local_facts:
-          hostname: "{{ openshift_hostname | default(None) }}"
-          public_hostname: "{{ openshift_public_hostname | default(None) }}"
-          deployment_type: "{{ openshift_deployment_type }}"
-          use_flannel: "{{ openshift_use_flannel | default(None) }}"
-      - role: node
-        local_facts:
-          labels: "{{ openshift_node_labels | default(None) }}"
-          annotations: "{{ openshift_node_annotations | default(None) }}"
-          schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+      role: node
+      local_facts:
+        labels: "{{ openshift_node_labels | default(None) }}"
+        annotations: "{{ openshift_node_annotations | default(None) }}"
+        schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
   - name: Check status of node certificates
     stat:
       path: "{{ openshift.common.config_base }}/node/{{ item }}"
@@ -43,22 +36,6 @@
       node_subdir: node-{{ openshift.common.hostname }}
       config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
       node_cert_dir: "{{ openshift.common.config_base }}/node"
-  - name: Check status of flannel external etcd certificates
-    stat:
-      path: "{{ openshift.common.config_base }}/node/{{ item }}"
-    with_items:
-    - node.etcd-client.crt
-    - node.etcd-ca.crt
-    register: g_external_etcd_flannel_cert_stat_result
-    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
-  - set_fact:
-      etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results
-                                             | oo_collect(attribute='stat.exists')
-                                             | list | intersect([false])}}"
-      etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
-      etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
-      etcd_cert_prefix: node.etcd-
-    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)

 - name: Create temp directory for syncing certs
   hosts: localhost
@@ -71,65 +48,6 @@
     register: mktemp
     changed_when: False

-- name: Configure flannel etcd certificates
-  hosts: oo_first_etcd
-  vars:
-    etcd_generated_certs_dir: /etc/etcd/generated_certs
-    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
-  pre_tasks:
-  - set_fact:
-      etcd_needing_client_certs: "{{ hostvars
-                                     | oo_select_keys(groups['oo_nodes_to_config'])
-                                     | oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}"
-    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
-  roles:
-  - role: etcd_certificates
-    when: openshift_use_flannel | default(false) | bool
-  post_tasks:
-  - name: Create a tarball of the etcd flannel certs
-    command: >
-      tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
-      -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
-    args:
-      creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
-    with_items: etcd_needing_client_certs
-    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
-  - name: Retrieve the etcd cert tarballs
-    fetch:
-      src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
-      dest: "{{ sync_tmpdir }}/"
-      flat: yes
-      fail_on_missing: yes
-      validate_checksum: yes
-    with_items: etcd_needing_client_certs
-    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
-
-- name: Copy the external etcd flannel certs to the nodes
-  hosts: oo_nodes_to_config
-  vars:
-    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
-  tasks:
-  - name: Ensure certificate directory exists
-    file:
-      path: "{{ openshift.common.config_base }}/node"
-      state: directory
-    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
-  - name: Unarchive the tarball on the master
-    unarchive:
-      src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
-      dest: "{{ etcd_cert_config_dir }}"
-    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
-  - file:
-      path: "{{ etcd_cert_config_dir }}/{{ item }}"
-      owner: root
-      group: root
-      mode: 0600
-    with_items:
-    - node.etcd-client.crt
-    - node.etcd-client.key
-    - node.etcd-ca.crt
-    when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
-
 - name: Create node certificates
   hosts: oo_first_master
   vars:
@@ -148,7 +66,7 @@
       -C {{ item.config_dir }} .
     args:
       creates: "{{ item.config_dir }}.tgz"
-    with_items: nodes_needing_certs
+    with_items: "{{ nodes_needing_certs | default([]) }}"

   - name: Retrieve the node config tarballs from the master
     fetch:
@@ -157,7 +75,7 @@
       flat: yes
       fail_on_missing: yes
       validate_checksum: yes
-    with_items: nodes_needing_certs
+    with_items: "{{ nodes_needing_certs | default([]) }}"

 - name: Deploy node certificates
   hosts: oo_nodes_to_config
@@ -187,7 +105,7 @@
       name: "{{ item }}"
       groups: oo_containerized_master_nodes
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
     when: hostvars[item].openshift.common.is_containerized | bool and
           (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
@@ -197,6 +115,14 @@
   vars:
     openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
     openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+    openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool }}"
   roles:
   - openshift_node

@@ -205,9 +131,96 @@
   vars:
     openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
     openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+    openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                }}"
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool }}"
   roles:
   - openshift_node

+- name: Gather and set facts for flannel certificates
+  hosts: oo_nodes_to_config
+  tasks:
+  - name: Check status of flannel external etcd certificates
+    stat:
+      path: "{{ openshift.common.config_base }}/node/{{ item }}"
+    with_items:
+    - node.etcd-client.crt
+    - node.etcd-ca.crt
+    register: g_external_etcd_flannel_cert_stat_result
+    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
+  - set_fact:
+      etcd_client_flannel_certs_missing: "{{ False in g_external_etcd_flannel_cert_stat_result.results
+                                             | oo_collect(attribute='stat.exists')
+                                             | list }}"
+      etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
+      etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
+      etcd_cert_prefix: node.etcd-
+      etcd_hostname: "{{ openshift.common.hostname }}"
+      etcd_ip: "{{ openshift.common.ip }}"
+    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 and (openshift.common.use_flannel | bool)
+
+- name: Configure flannel etcd certificates
+  hosts: oo_first_etcd
+  vars:
+    etcd_generated_certs_dir: /etc/etcd/generated_certs
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+  pre_tasks:
+  - set_fact:
+      etcd_needing_client_certs: "{{ hostvars
+                                     | oo_select_keys(groups['oo_nodes_to_config'])
+                                     | oo_filter_list('etcd_client_flannel_certs_missing') | default([]) }}"
+  roles:
+  - role: openshift_etcd_certificates
+    when: openshift_use_flannel | default(false) | bool
+  post_tasks:
+  - name: Create a tarball of the etcd flannel certs
+    command: >
+      tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
+      -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+    args:
+      creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+    with_items: etcd_needing_client_certs | default([])
+  - name: Retrieve the etcd cert tarballs
+    fetch:
+      src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+      dest: "{{ sync_tmpdir }}/"
+      flat: yes
+      fail_on_missing: yes
+      validate_checksum: yes
+    with_items: etcd_needing_client_certs | default([])
+
+- name: Copy the external etcd flannel certs to the nodes
+  hosts: oo_nodes_to_config
+  vars:
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+  tasks:
+  - name: Ensure certificate directory exists
+    file:
+      path: "{{ openshift.common.config_base }}/node"
+      state: directory
+    when: etcd_client_flannel_certs_missing | default(false) | bool
+  - name: Unarchive the tarball on the master
+    unarchive:
+      src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+      dest: "{{ etcd_cert_config_dir }}"
+    when: etcd_client_flannel_certs_missing | default(false) | bool
+  - file:
+      path: "{{ etcd_cert_config_dir }}/{{ item }}"
+      owner: root
+      group: root
+      mode: 0600
+    with_items:
+    - node.etcd-client.crt
+    - node.etcd-client.key
+    - node.etcd-ca.crt
+    when: etcd_client_flannel_certs_missing | default(false) | bool
+
+
 - name: Additional node config
   hosts: oo_nodes_to_config
   vars:
@@ -235,14 +248,6 @@
   - file: name={{ mktemp.stdout }} state=absent
     changed_when: False

-# Additional config for online type deployments
-- name: Additional instance config
-  hosts: oo_nodes_deployment_type_online
-  gather_facts: no
-  roles:
-  - os_env_extras
-  - os_env_extras_node
-
 - name: Set schedulability
   hosts: oo_first_master
   vars:
@@ -258,9 +263,10 @@
     # Using curl here since the uri module requires python-httplib2 and
     # wait_for port doesn't provide health information.
     command: >
-      curl -k --head --silent {{ openshift.master.api_url }}
+      curl --silent --cacert {{ openshift.common.config_base }}/master/ca.crt
+      {{ openshift.master.api_url }}/healthz/ready
     register: api_available_output
-    until: api_available_output.stdout.find("200 OK") != -1
+    until: api_available_output.stdout == 'ok'
     retries: 120
     delay: 1
     changed_when: false
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
index d36f7acea..1d79db353 100644
--- a/playbooks/common/openshift-node/scaleup.yml
+++ b/playbooks/common/openshift-node/scaleup.yml
@@ -1,6 +1,11 @@
 ---
 - include: ../openshift-cluster/evaluate_groups.yml

+- name: Gather facts
+  hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
+  roles:
+  - openshift_facts
+
 - name: Configure docker hosts
   hosts: oo_nodes_to_config
   vars: