Diffstat (limited to 'playbooks')
11 files changed, 140 insertions, 53 deletions
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 869e185af..c8f397186 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -12,3 +12,5 @@
 # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
 #
 - import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+
+- import_playbook: ../../../../openshift-master/private/restart.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index f790fd98d..de612da21 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -6,7 +6,9 @@
   hosts: oo_first_master
   roles:
   - role: openshift_web_console
-    when: openshift_web_console_install | default(true) | bool
+    when:
+    - openshift_web_console_install | default(true) | bool
+    - openshift_upgrade_target is version_compare('3.9','>=')
 
 - name: Upgrade default router and default registry
   hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 693ab2d96..5ee8a9d78 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -92,3 +92,25 @@
       state: started
       enabled: yes
     with_items: "{{ master_services }}"
+
+# Until openshift-ansible is determining which host is the CA host we
+# must (unfortunately) ensure that the first host in the etcd group is
+# the etcd CA host.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1469358
+- name: Verify we can proceed on first etcd
+  hosts: oo_first_etcd
+  gather_facts: no
+  tasks:
+  - name: Ensure CA exists on first etcd
+    stat:
+      path: /etc/etcd/generated_certs
+    register: __etcd_ca_stat
+
+  - fail:
+      msg: >
+        In order to correct an etcd certificate signing problem
+        upgrading may require re-generating etcd certificates. Please
+        ensure that the /etc/etcd/generated_certs directory exists on
+        the first host defined in your [etcd] group.
+    when:
+    - not __etcd_ca_stat.stat.exists | bool
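The verify_cluster check above only asserts that /etc/etcd/generated_certs is present on the first host in the [etcd] group; it does not create it. The same check can be run by hand before kicking off an upgrade. A minimal sketch, assuming a conventional inventory location (the path is illustrative):

    ansible -i /etc/ansible/hosts 'etcd[0]' -m stat -a 'path=/etc/etcd/generated_certs'

If stat reports "exists": false, either move the generated_certs directory to the first etcd host or reorder the [etcd] group so the CA host comes first, then re-run the upgrade.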
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 080372c81..a10fd4bee 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,6 +2,30 @@
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
+
+# Prior to 3.6, openshift-ansible created etcd serving certificates
+# without a SubjectAlternativeName entry for the system hostname. The
+# SAN list in Go 1.8 is now (correctly) authoritative and since
+# openshift-ansible configures masters to talk to etcd hostnames
+# rather than IP addresses, we must correct etcd certificates.
+#
+# This play examines the etcd serving certificate SANs on each etcd
+# host and records whether or not the system hostname is missing.
+- name: Examine etcd serving certificate SAN
+  hosts: oo_etcd_to_config
+  tasks:
+  - slurp:
+      src: /etc/etcd/server.crt
+    register: etcd_serving_cert
+  - set_fact:
+      __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
+
+# Redeploy etcd certificates when hostnames were missing from etcd
+# serving certificate SANs.
+- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml
+  when:
+  - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
 - name: Backup and upgrade etcd
   import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
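The slurp/set_fact pair above hands the PEM data to the lib_utils_oo_parse_certificate_san filter so the play can record, per host, whether the system hostname appears in the SAN list; the conditional import then redeploys etcd certificates if any host came up short. The same SAN list can be inspected by hand on an etcd host; a sketch, assuming openssl is available and the certificate lives at the path used above:

    openssl x509 -in /etc/etcd/server.crt -noout -text | grep -A1 'Subject Alternative Name'

If the host's hostname is missing from the printed DNS entries, masters built against Go 1.8 will reject the connection, which is exactly the condition the play detects.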
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 1dcc38def..fe1fdefff 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -25,10 +25,18 @@
       openshift_upgrade_target: '3.8'
       openshift_upgrade_min: '3.7'
       openshift_release: '3.8'
-      _requested_pkg_version: "{{openshift_pkg_version if openshift_pkg_version is defined else omit }}"
-      _requested_image_tag: "{{openshift_image_tag if openshift_image_tag is defined else omit }}"
+      _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+      _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+      l_double_upgrade_cp: True
     when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+
+  - name: set l_force_image_tag_to_version = True
+    set_fact:
+      # Need to set this during 3.8 upgrade to ensure image_tag is set correctly
+      # to match 3.8 version
+      l_force_image_tag_to_version: True
+    when: _requested_image_tag is defined
 
 - import_playbook: ../pre/config.yml
   # These vars a meant to exclude oo_nodes from plays that would otherwise include
   # them by default.
@@ -69,7 +77,20 @@
       openshift_upgrade_min: '3.8'
       openshift_release: '3.9'
       openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
-      openshift_image_tag: "{{ _requested_image_tag | default('v3.9') }}"
+  # Set the user's specified image_tag for 3.9 upgrade if it was provided.
+  - set_fact:
+      openshift_image_tag: "{{ _requested_image_tag }}"
+      l_force_image_tag_to_version: False
+    when: _requested_image_tag is defined
+  # If the user didn't specify an image_tag, we need to force update image_tag
+  # because it will have already been set during 3.8. If we aren't running
+  # a double upgrade, then we can preserve image_tag because it will still
+  # be the user provided value.
+  - set_fact:
+      l_force_image_tag_to_version: True
+    when:
+    - l_double_upgrade_cp is defined and l_double_upgrade_cp
+    - _requested_image_tag is not defined
 
 - import_playbook: ../pre/config.yml
   # These vars a meant to exclude oo_nodes from plays that would otherwise include
@@ -112,3 +133,9 @@
         state: started
 
 - import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+  tasks:
+  - import_role:
+      name: openshift_web_console
+      tasks_from: remove_old_asset_config
diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml
index 361553ee4..c8e30ddbc 100644
--- a/playbooks/deploy_cluster.yml
+++ b/playbooks/deploy_cluster.yml
@@ -6,11 +6,3 @@
 - import_playbook: openshift-node/private/config.yml
 
 - import_playbook: common/private/components.yml
-
-- name: Print deprecated variable warning message if necessary
-  hosts: oo_first_master
-  gather_facts: no
-  tasks:
-  - debug: msg="{{__deprecation_message}}"
-    when:
-    - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml
index 75d0ddf9d..8e9b0024a 100644
--- a/playbooks/gcp/openshift-cluster/build_base_image.yml
+++ b/playbooks/gcp/openshift-cluster/build_base_image.yml
@@ -90,6 +90,8 @@
       repo_gpgcheck: no
       state: present
     when: ansible_os_family == "RedHat"
+  - name: Accept GPG keys for the repos
+    command: yum -q makecache -y --disablerepo='*' --enablerepo='google-cloud,jdetiber-qemu-user-static'
   - name: Install qemu-user-static
     package:
       name: qemu-user-static
@@ -121,7 +123,6 @@
     with_items:
     # required by Ansible
     - PyYAML
-    - docker
     - google-compute-engine
     - google-compute-engine-init
     - google-config
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index e1052fb6c..0a730a88a 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -16,6 +16,7 @@
       - iproute
       - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
       - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+      - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else omit }}"
      - yum-utils
     register: result
     until: result is succeeded
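One caveat on the python-ipaddress line just above: omit is generally honored only when it is the entire value of a module parameter, so inside a list it can surface as a literal omit placeholder string rather than dropping the entry. A defensive alternative, shown as a sketch that is not part of this commit (the variable name and trimmed-down package list are illustrative):

    - name: Install base packages
      package:
        name: "{{ l_base_pkgs | select() | list }}"
      vars:
        l_base_pkgs:
        - iproute
        - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
        # An empty string is falsy, so select() silently drops the entry.
        - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"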
diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
index 59e2b515c..cc812c300 100644
--- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml
+++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml
@@ -142,11 +142,6 @@
     state: absent
   changed_when: False
 
-- name: Setup extension file for service console UI
-  template:
-    src: ../templates/openshift-ansible-catalog-console.js
-    dest: /etc/origin/master/openshift-ansible-catalog-console.js
-
 - name: Update master config
   yedit:
     state: present
@@ -166,8 +161,6 @@
       value: [X-Remote-Group]
     - key: authConfig.requestHeader.extraHeaderPrefixes
       value: [X-Remote-Extra-]
-    - key: assetConfig.extensionScripts
-      value: [/etc/origin/master/openshift-ansible-catalog-console.js]
     - key: kubernetesMasterConfig.apiServerArguments.runtime-config
       value: [apis/settings.k8s.io/v1alpha1=true]
     - key: admissionConfig.pluginConfig.PodPreset.configuration.kind
@@ -178,37 +171,50 @@
       value: false
   register: yedit_output
 
-#restart master serially here
-- name: restart master api
-  systemd: name={{ openshift_service_type }}-master-api state=restarted
-  when:
-  - yedit_output.changed
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
-  command: "systemctl restart {{ openshift_service_type }}-master-controllers"
-  retries: 3
-  delay: 5
-  register: result
-  until: result.rc == 0
-  when:
-  - yedit_output.changed
+# Only add the catalog extension script if not 3.9. From 3.9 on, the console
+# can discover if template service broker is running.
+- when: not openshift.common.version_gte_3_9
+  block:
+  - name: Setup extension file for service console UI
+    template:
+      src: ../templates/openshift-ansible-catalog-console.js
+      dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
+  - name: Update master config
+    yedit:
+      state: present
+      src: /etc/origin/master/master-config.yaml
+      key: assetConfig.extensionScripts
+      value: [/etc/origin/master/openshift-ansible-catalog-console.js]
+    register: yedit_asset_config_output
 
-- name: Verify API Server
-  # Using curl here since the uri module requires python-httplib2 and
-  # wait_for port doesn't provide health information.
-  command: >
-    curl --silent --tlsv1.2
-    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-    {{ openshift.master.api_url }}/healthz/ready
-  args:
-    # Disables the following warning:
-    # Consider using get_url or uri module rather than running curl
-    warn: no
-  register: api_available_output
-  until: api_available_output.stdout == 'ok'
-  retries: 120
-  delay: 1
-  changed_when: false
-  when:
-  - yedit_output.changed
+#restart master serially here
+- when: yedit_output.changed or (yedit_asset_config_output is defined and yedit_asset_config_output.changed)
+  block:
+  - name: restart master api
+    systemd: name={{ openshift_service_type }}-master-api state=restarted
+
+  # We retry the controllers because the API may not be 100% initialized yet.
+  - name: restart master controllers
+    command: "systemctl restart {{ openshift_service_type }}-master-controllers"
+    retries: 3
+    delay: 5
+    register: result
+    until: result.rc == 0
+
+  - name: Verify API Server
+    # Using curl here since the uri module requires python-httplib2 and
+    # wait_for port doesn't provide health information.
+    command: >
+      curl --silent --tlsv1.2
+      --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+      {{ openshift.master.api_url }}/healthz/ready
+    args:
+      # Disables the following warning:
+      # Consider using get_url or uri module rather than running curl
+      warn: no
+    register: api_available_output
+    until: api_available_output.stdout == 'ok'
+    retries: 120
+    delay: 1
+    changed_when: false
diff --git a/playbooks/openshift-prometheus/private/uninstall.yml b/playbooks/openshift-prometheus/private/uninstall.yml
new file mode 100644
index 000000000..2df39c2a8
--- /dev/null
+++ b/playbooks/openshift-prometheus/private/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall Prometheus
+  hosts: masters[0]
+  tasks:
+  - name: Run the Prometheus Uninstall Role Tasks
+    include_role:
+      name: openshift_prometheus
+      tasks_from: uninstall
diff --git a/playbooks/openshift-prometheus/uninstall.yml b/playbooks/openshift-prometheus/uninstall.yml
new file mode 100644
index 000000000..c92ade786
--- /dev/null
+++ b/playbooks/openshift-prometheus/uninstall.yml
@@ -0,0 +1,2 @@
+---
+- import_playbook: private/uninstall.yml
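With the two new playbooks above, Prometheus can be removed in a single invocation from an openshift-ansible checkout; a sketch, assuming an inventory file (the path is illustrative):

    ansible-playbook -i /path/to/inventory playbooks/openshift-prometheus/uninstall.yml

The public playbook is a thin wrapper that imports the private one, matching the entry-point convention used by the other component playbooks in this tree.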