92 files changed, 431 insertions, 229 deletions
diff --git a/.papr.inventory b/.papr.inventory index c678e76aa..80ad81efa 100644 --- a/.papr.inventory +++ b/.papr.inventory @@ -22,6 +22,6 @@ ocp-master ocp-master [nodes] -ocp-master openshift_schedulable=false +ocp-master openshift_schedulable=true ocp-node1 openshift_node_labels="{'region':'infra'}" ocp-node2 openshift_node_labels="{'region':'infra'}" diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index bdfa06c4a..120ce408f 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.9.0-0.35.0 ./ +3.9.0-0.38.0 ./ @@ -74,6 +74,16 @@ Fedora: dnf install -y ansible pyOpenSSL python-cryptography python-lxml ``` +Additional requirements: + +Logging: + +- java-1.8.0-openjdk-headless + +Metrics: + +- httpd-tools + ## Simple all-in-one localhost Installation This assumes that you've installed the base dependencies and you're running on Fedora or RHEL diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile index 22a0d06a0..c9ec8ba41 100644 --- a/images/installer/Dockerfile +++ b/images/installer/Dockerfile @@ -10,7 +10,7 @@ COPY images/installer/origin-extra-root / # install ansible and deps RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \ && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \ - && EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \ + && EPEL_PKGS="ansible python2-boto python2-boto3 python2-crypto google-cloud-sdk-183.0.0 which" \ && yum install -y epel-release \ && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \ && EPEL_TESTING_PKGS="python2-libcloud" \ diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7 index 3b05c1aa6..5da950744 100644 --- a/images/installer/Dockerfile.rhel7 +++ b/images/installer/Dockerfile.rhel7 @@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com> USER root # Playbooks, roles, and their dependencies are installed from packages. -RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \ +RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 python2-crypto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \ && yum repolist > /dev/null \ && yum-config-manager --enable rhel-7-server-ose-3.7-rpms \ && yum-config-manager --enable rhel-7-server-rh-common-rpms \ diff --git a/inventory/hosts.example b/inventory/hosts.example index f9f331880..82c588100 100644 --- a/inventory/hosts.example +++ b/inventory/hosts.example @@ -325,7 +325,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # or to one or all of the masters defined in the inventory if no load # balancer is present. 
#openshift_master_cluster_hostname=openshift-ansible.test.example.com -#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com + +# If an external load balancer is used public hostname should resolve to +# external load balancer address +#openshift_master_cluster_public_hostname=openshift-ansible.public.example.com # Configure controller arguments #osm_controller_args={'resource-quota-sync-period': ['10s']} @@ -1114,10 +1117,9 @@ ose3-etcd[1:3]-ansible.test.example.com ose3-lb-ansible.test.example.com containerized=false # NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes -# However, in order to ensure that your masters are not burdened with running pods you should -# make them unschedulable by adding openshift_schedulable=False any node that's also a master. [nodes] -ose3-master[1:3]-ansible.test.example.com +# masters should be schedulable to run web console pods +ose3-master[1:3]-ansible.test.example.com openshift_schedulable=True ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}" [nfs] diff --git a/inventory/hosts.glusterfs.external.example b/inventory/hosts.glusterfs.external.example index bf2557cf0..e718e3280 100644 --- a/inventory/hosts.glusterfs.external.example +++ b/inventory/hosts.glusterfs.external.example @@ -35,7 +35,8 @@ openshift_storage_glusterfs_heketi_url=172.0.0.1 master [nodes] -master openshift_schedulable=False +# masters should be schedulable to run web console pods +master openshift_schedulable=True node0 openshift_schedulable=True node1 openshift_schedulable=True node2 openshift_schedulable=True diff --git a/inventory/hosts.glusterfs.mixed.example b/inventory/hosts.glusterfs.mixed.example index 8a20a037e..b2fc00c58 100644 --- a/inventory/hosts.glusterfs.mixed.example +++ b/inventory/hosts.glusterfs.mixed.example @@ -38,7 +38,8 @@ openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa master [nodes] -master openshift_schedulable=False +# masters should be schedulable to run web console pods +master openshift_schedulable=True node0 openshift_schedulable=True node1 openshift_schedulable=True node2 openshift_schedulable=True diff --git a/inventory/hosts.glusterfs.native.example b/inventory/hosts.glusterfs.native.example index 59acf1194..e5f2453ff 100644 --- a/inventory/hosts.glusterfs.native.example +++ b/inventory/hosts.glusterfs.native.example @@ -28,7 +28,8 @@ openshift_deployment_type=origin master [nodes] -master openshift_schedulable=False +# masters should be schedulable to run web console pods +master openshift_schedulable=True # A hosted registry, by default, will only be deployed on nodes labeled # "region=infra". node0 openshift_schedulable=True diff --git a/inventory/hosts.glusterfs.registry-only.example b/inventory/hosts.glusterfs.registry-only.example index 6f33e9f6d..dadb2c93e 100644 --- a/inventory/hosts.glusterfs.registry-only.example +++ b/inventory/hosts.glusterfs.registry-only.example @@ -34,7 +34,8 @@ openshift_hosted_registry_storage_kind=glusterfs master [nodes] -master openshift_schedulable=False +# masters should be schedulable to run web console pods +master openshift_schedulable=True # A hosted registry, by default, will only be deployed on nodes labeled # "region=infra". 
node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True diff --git a/inventory/hosts.glusterfs.storage-and-registry.example b/inventory/hosts.glusterfs.storage-and-registry.example index 1f3a4282a..184cb600b 100644 --- a/inventory/hosts.glusterfs.storage-and-registry.example +++ b/inventory/hosts.glusterfs.storage-and-registry.example @@ -35,7 +35,8 @@ openshift_hosted_registry_storage_kind=glusterfs master [nodes] -master openshift_schedulable=False +# masters should be schedulable to run web console pods +master openshift_schedulable=True # It is recommended to not use a single cluster for both general and registry # storage, so two three-node clusters will be required. node0 openshift_schedulable=True diff --git a/openshift-ansible.spec b/openshift-ansible.spec index 23f43dcd5..d14eb56cb 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.9.0 -Release: 0.35.0%{?dist} +Release: 0.38.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -28,6 +28,7 @@ Requires: java-1.8.0-openjdk-headless Requires: httpd-tools Requires: libselinux-python Requires: python-passlib +Requires: python2-crypto %description Openshift and Atomic Enterprise Ansible @@ -200,6 +201,56 @@ Atomic OpenShift Utilities includes %changelog +* Mon Feb 05 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.38.0 +- Moving upgrade sg playbook to 3.9 (kwoodson@redhat.com) +- remove openshift_upgrade_{pre,post}_storage_migration_enabled from + failed_when (nakayamakenjiro@gmail.com) +- Fix version handling in 3.8/3.9 control plane upgrades (rteague@redhat.com) +- add S3 bucket cleanup (jdiaz@redhat.com) +- dynamic inventory bug when group exists but its empty (m.judeikis@gmail.com) +- dynamic inventory bug when group exists but its empty (m.judeikis@gmail.com) +- Parameterize user and disable_root options in cloud config + (nelluri@redhat.com) +- Fix softlinks broken by d3fefc32a727fe3c13159c4e9fe4399f35b487a8 + (Klaas-@users.noreply.github.com) + +* Fri Feb 02 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.37.0 +- Don't use 'omit' for package module (vrutkovs@redhat.com) +- Adding requirements for logging and metrics (ewolinet@redhat.com) +- Disable master controllers before upgrade and re-enable those when restart + mode is system (vrutkovs@redhat.com) +- upgrade: run upgrade_control_plane and upgrade_nodes playbooks during full + upgrade (vrutkovs@redhat.com) + +* Fri Feb 02 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.36.0 +- Add missing tasks file (sdodson@redhat.com) +- Upgrade to migrate to using push to DNS for registries. (kwoodson@redhat.com) +- Adding defaults for the gcp variables to fix an undefined ansible exception. + (kwoodson@redhat.com) +- Fix vsphere sanitization (sdodson@redhat.com) +- Set a default for required vsphere variable (sdodson@redhat.com) +- Add python2-crypto package (ccoleman@redhat.com) +- hosts.example: clarify usage of openshift_master_cluster_public_hostname + (vrutkovs@redhat.com) +- Conditionally create pvcs for metrics depending on whether or not it already + exists (ewolinet@redhat.com) +- Update hosts examples with a note about scheduling on masters + (vrutkovs@redhat.com) +- Fixing file write issue. 
(kwoodson@redhat.com) +- Only perform console configmap ops when >= 3.9 (sdodson@redhat.com) +- Remove playbooks/adhoc/openshift_hosted_logging_efk.yaml (sdodson@redhat.com) +- upgrades: use openshift_version as a regexp when checking + openshift.common.version (vrutkovs@redhat.com) +- Don't update master-config.yaml with logging/metrics urls >= 3.9 + (sdodson@redhat.com) +- Make master schedulable (vrutkovs@redhat.com) +- Re-add openshift_aws_elb_cert_arn. (abutcher@redhat.com) +- Ignore openshift_pkg_version during 3.8 upgrade (rteague@redhat.com) +- bug 1537857. Fix retrieving prometheus metrics (jcantril@redhat.com) +- Remove master_ha bool checks (mgugino@redhat.com) +- Don't restart docker when re-deploying node certificates (sdodson@redhat.com) +- vsphere storage default add (davis.phillips@gmail.com) + * Wed Jan 31 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.35.0 - add glusterblock support for ansible (m.judeikis@gmail.com) - Add a bare minimum localhost hosts file (sdodson@redhat.com) diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index bdc98d1e0..cf811ca84 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -201,9 +201,7 @@ There are more enhancements that are arriving for provisioning. These will incl ## Uninstall / Deprovisioning -At this time, only deprovisioning of the output of the prerequisites step is provided. You can/must manually remove things like ELBs and scale groups before attempting to undo the work by the preprovisiong step. - -To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning. +To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You will have needed to remove any of the other objects (ie ELBs, instances, etc) before attempting. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning. ``` ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml @@ -211,4 +209,10 @@ ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars fi This should result in removal of the security groups and VPC that were created. +Cleaning up the S3 bucket contents can be accomplished with: + +``` +ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml +``` + NOTE: If you want to also remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account** so we don't remove these by default) then you should add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file. 
diff --git a/playbooks/aws/openshift-cluster/uninstall_s3.yml b/playbooks/aws/openshift-cluster/uninstall_s3.yml new file mode 100644 index 000000000..448b47aee --- /dev/null +++ b/playbooks/aws/openshift-cluster/uninstall_s3.yml @@ -0,0 +1,10 @@ +--- +- name: Empty/delete s3 bucket + hosts: localhost + connection: local + tasks: + - name: empty/delete s3 bucket + include_role: + name: openshift_aws + tasks_from: uninstall_s3.yml + when: openshift_aws_create_s3 | default(true) | bool diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml index 23a3fcbb5..23a3fcbb5 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index de612da21..f44ab3580 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -113,6 +113,22 @@ registry_url: "{{ openshift.master.registry_url }}" openshift_hosted_templates_import_command: replace + post_tasks: + # we need to migrate customers to the new pattern of pushing to the registry via dns + # Step 1: verify the certificates have the docker registry service name + - shell: > + echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)' + register: cert_output + + # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs + - name: set a fact to include the registry certs playbook if needed + set_fact: + openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}" + +# Run the redeploy certs based upon the certificates +- when: hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry + import_playbook: ../../../openshift-hosted/redeploy-registry-certificates.yml + # Check for warnings to be printed at the end of the upgrade: - name: Clean up and display warnings hosts: oo_masters_to_config diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml index edc541ef9..44af37b2d 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml @@ -5,8 +5,6 @@ # Pre-upgrade - import_playbook: ../initialize_nodes_to_upgrade.yml -- import_playbook: verify_cluster.yml - - name: Update repos on upgrade hosts hosts: "{{ l_upgrade_repo_hosts }}" roles: @@ -53,6 +51,8 @@ # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml +- import_playbook: verify_cluster.yml + # If we're only upgrading nodes, we need to ensure masters are already upgraded - name: Verify masters are already upgraded hosts: oo_masters_to_config diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml index 5ee8a9d78..463a05688 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml @@ -17,6 +17,7 @@ 
valid version for a {{ openshift_upgrade_target }} upgrade when: - openshift_pkg_version is defined + - openshift_pkg_version != "" - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<') - fail: @@ -25,6 +26,7 @@ valid version for a {{ openshift_upgrade_target }} upgrade when: - openshift_image_tag is defined + - openshift_image_tag != "" - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<') - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index c27118f6f..baec057f9 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -3,29 +3,6 @@ # Upgrade Masters ############################################################################### -# Prior to 3.6, openshift-ansible created etcd serving certificates -# without a SubjectAlternativeName entry for the system hostname. The -# SAN list in Go 1.8 is now (correctly) authoritative and since -# openshift-ansible configures masters to talk to etcd hostnames -# rather than IP addresses, we must correct etcd certificates. -# -# This play examines the etcd serving certificate SANs on each etcd -# host and records whether or not the system hostname is missing. -- name: Examine etcd serving certificate SAN - hosts: oo_etcd_to_config - tasks: - - slurp: - src: /etc/etcd/server.crt - register: etcd_serving_cert - - set_fact: - __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}" - -# Redeploy etcd certificates when hostnames were missing from etcd -# serving certificate SANs. 
-- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml - when: - - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) - - name: Backup and upgrade etcd import_playbook: ../../../openshift-etcd/private/upgrade_main.yml @@ -56,7 +33,6 @@ register: l_pb_upgrade_control_plane_pre_upgrade_storage when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool failed_when: - - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool @@ -94,6 +70,12 @@ - include_tasks: "{{ openshift_master_upgrade_hook }}" when: openshift_master_upgrade_hook is defined + - name: Disable master controller + service: + name: "{{ openshift_service_type }}-master-controllers" + enabled: false + when: openshift.common.rolling_restart_mode == 'system' + - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml when: openshift.common.rolling_restart_mode == 'system' @@ -116,7 +98,6 @@ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - openshift_version is version_compare('3.7','<') failed_when: - - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 - openshift_upgrade_post_storage_migration_fatal | default(false) | bool run_once: true @@ -252,7 +233,6 @@ register: l_pb_upgrade_control_plane_post_upgrade_storage when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool failed_when: - - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 - openshift_upgrade_post_storage_migration_fatal | default(false) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml index bf6e8605e..ec1da6d39 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml @@ -2,54 +2,6 @@ # # Full Control Plane + Nodes Upgrade # -- import_playbook: ../init.yml +- import_playbook: upgrade_control_plane.yml -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tasks: - - set_fact: - openshift_upgrade_target: '3.9' - openshift_upgrade_min: '3.7' - openshift_release: '3.9' - -- import_playbook: ../pre/config.yml - vars: - l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config" - l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" - l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" - l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" - l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" - l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config" - openshift_protect_installed_version: False - -- import_playbook: validator.yml - -- name: Flag pre-upgrade checks complete for hosts without errors - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - set_fact: - pre_upgrade_complete: True - -# Pre-upgrade completed - -- import_playbook: ../upgrade_control_plane.yml - -# All controllers must be stopped at the same time then restarted -- name: Cycle all controller services to force new leader 
election mode - hosts: oo_masters_to_config - gather_facts: no - roles: - - role: openshift_facts - tasks: - - name: Stop {{ openshift_service_type }}-master-controllers - systemd: - name: "{{ openshift_service_type }}-master-controllers" - state: stopped - - name: Start {{ openshift_service_type }}-master-controllers - systemd: - name: "{{ openshift_service_type }}-master-controllers" - state: started - -- import_playbook: ../upgrade_nodes.yml - -- import_playbook: ../post_control_plane.yml +- import_playbook: upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml index c8a42322d..8792295c6 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -26,6 +26,7 @@ openshift_upgrade_min: '3.7' openshift_release: '3.8' _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}" + openshift_pkg_version: '' _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}" l_double_upgrade_cp: True when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<') @@ -61,10 +62,8 @@ # Pre-upgrade completed -- import_playbook: ../upgrade_control_plane.yml - vars: - openshift_release: '3.8' - openshift_pkg_version: '' +- name: Intermediate 3.8 Upgrade + import_playbook: ../upgrade_control_plane.yml when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<') ## 3.8 upgrade complete we should now be able to upgrade to 3.9 @@ -77,7 +76,7 @@ openshift_upgrade_target: '3.9' openshift_upgrade_min: '3.8' openshift_release: '3.9' - openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}" + openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}" # Set the user's specified image_tag for 3.9 upgrade if it was provided. 
- set_fact: openshift_image_tag: "{{ _requested_image_tag }}" @@ -106,6 +105,7 @@ l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" l_upgrade_excluder_hosts: "oo_masters_to_config" openshift_protect_installed_version: False + openshift_version_reinit: True - name: Flag pre-upgrade checks complete for hosts without errors hosts: oo_masters_to_config:oo_etcd_to_config @@ -114,8 +114,6 @@ pre_upgrade_complete: True - import_playbook: ../upgrade_control_plane.yml - vars: - openshift_release: '3.9' # All controllers must be stopped at the same time then restarted - name: Cycle all controller services to force new leader election mode @@ -124,14 +122,16 @@ roles: - role: openshift_facts tasks: - - name: Stop {{ openshift_service_type }}-master-controllers - systemd: + - name: Restart master controllers to force new leader election mode + service: name: "{{ openshift_service_type }}-master-controllers" - state: stopped - - name: Start {{ openshift_service_type }}-master-controllers - systemd: + state: restart + when: openshift.common.rolling_restart_mode == 'service' + - name: Re-enable master controllers to force new leader election mode + service: name: "{{ openshift_service_type }}-master-controllers" - state: started + enabled: true + when: openshift.common.rolling_restart_mode == 'system' - import_playbook: ../post_control_plane.yml diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml index 0a730a88a..81f4dd183 100644 --- a/playbooks/init/base_packages.yml +++ b/playbooks/init/base_packages.yml @@ -16,8 +16,9 @@ - iproute - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}" - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" - - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else omit }}" + - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}" - yum-utils + when: item != '' register: result until: result is succeeded diff --git a/playbooks/openshift-etcd/private/upgrade_main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml index 8997680f9..fea588260 100644 --- a/playbooks/openshift-etcd/private/upgrade_main.yml +++ b/playbooks/openshift-etcd/private/upgrade_main.yml @@ -1,4 +1,37 @@ --- +# Prior to 3.6, openshift-ansible created etcd serving certificates +# without a SubjectAlternativeName entry for the system hostname. The +# SAN list in Go 1.8 is now (correctly) authoritative and since +# openshift-ansible configures masters to talk to etcd hostnames +# rather than IP addresses, we must correct etcd certificates. +# +# This play examines the etcd serving certificate SANs on each etcd +# host and records whether or not the system hostname is missing. +- name: Examine etcd serving certificate SAN + hosts: oo_etcd_to_config + tasks: + - slurp: + src: /etc/etcd/server.crt + register: etcd_serving_cert + - set_fact: + __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}" + +# Redeploy etcd certificates when hostnames were missing from etcd +# serving certificate SANs. 
+- import_playbook: redeploy-certificates.yml + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + +- import_playbook: restart.yml + vars: + g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}" + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + +- import_playbook: ../../openshift-master/private/restart.yml + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + # For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to # upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedius # task for RHEL and CENTOS it's simply not possible in Fedora unless you've diff --git a/playbooks/openshift-hosted/deploy_registry.yml b/playbooks/openshift-hosted/deploy_registry.yml new file mode 100644 index 000000000..2453329dd --- /dev/null +++ b/playbooks/openshift-hosted/deploy_registry.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/openshift_hosted_registry.yml diff --git a/playbooks/openshift-hosted/deploy_router.yml b/playbooks/openshift-hosted/deploy_router.yml new file mode 100644 index 000000000..e832eeeea --- /dev/null +++ b/playbooks/openshift-hosted/deploy_router.yml @@ -0,0 +1,4 @@ +--- +- import_playbook: ../init/main.yml + +- import_playbook: private/openshift_hosted_router.yml diff --git a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml index b817221b8..d88209593 100644 --- a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml @@ -88,8 +88,7 @@ - name: Redeploy docker registry command: > - {{ openshift_client_binary }} deploy dc/docker-registry - --latest + {{ openshift_client_binary }} rollout latest dc/docker-registry --config={{ mktemp.stdout }}/admin.kubeconfig -n default diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml index 0df748f47..952a5f4ee 100644 --- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml @@ -129,8 +129,7 @@ - name: Redeploy router command: > - {{ openshift_client_binary }} deploy dc/router - --latest + {{ openshift_client_binary }} rollout latest dc/router --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml new file mode 100644 index 000000000..cf2c282e3 --- /dev/null +++ b/playbooks/openshift-master/private/tasks/restart_services.yml @@ -0,0 +1,4 @@ +--- +- import_role: + name: openshift_master + tasks_from: restart.yml diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml index 7249ced70..7371bd7ac 100644 --- a/playbooks/openshift-node/private/restart.yml +++ b/playbooks/openshift-node/private/restart.yml @@ -16,6 +16,7 @@ until: not 
(l_docker_restart_docker_in_node_result is failed) retries: 3 delay: 30 + when: openshift_node_restart_docker_required | default(True) - name: Restart containerized services service: diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml index 8b7272485..cdf816fbf 100644 --- a/playbooks/openshift-node/redeploy-certificates.yml +++ b/playbooks/openshift-node/redeploy-certificates.yml @@ -4,3 +4,5 @@ - import_playbook: private/redeploy-certificates.yml - import_playbook: private/restart.yml + vars: + openshift_node_restart_docker_required: False diff --git a/roles/container_runtime/templates/crio-network.j2 b/roles/container_runtime/templates/crio-network.j2 index 763be97d7..ae8a506fe 100644 --- a/roles/container_runtime/templates/crio-network.j2 +++ b/roles/container_runtime/templates/crio-network.j2 @@ -1,9 +1,9 @@ {% if 'http_proxy' in openshift.common %} -HTTP_PROXY={{ openshift.common.http_proxy }} +export HTTP_PROXY={{ openshift.common.http_proxy }} {% endif %} {% if 'https_proxy' in openshift.common %} -HTTPS_PROXY={{ openshift.common.https_proxy }} +export HTTPS_PROXY={{ openshift.common.https_proxy }} {% endif %} {% if 'no_proxy' in openshift.common %} -NO_PROXY={{ openshift.common.no_proxy }} +export NO_PROXY={{ openshift.common.no_proxy }} {% endif %} diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 05b2763d5..bfed58011 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -1138,7 +1138,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py index 324f52689..c78e379d5 100644 --- a/roles/lib_openshift/library/oc_adm_csr.py +++ b/roles/lib_openshift/library/oc_adm_csr.py @@ -1116,7 +1116,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 152f270ab..b1b2cb5b5 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -1124,7 +1124,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 3082f5890..2773201d7 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -1110,7 +1110,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. 
''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 92515889b..25cbed8b7 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -1124,7 +1124,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index fe565987c..e26214316 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -1228,7 +1228,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 44de29592..62fca19e5 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1253,7 +1253,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index 9761b4b4e..0c4bfa01f 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -1102,7 +1102,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 047edffbb..36e6111eb 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -1108,7 +1108,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index 0cea07256..ab4f153c7 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -1152,7 +1152,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. 
''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index 1f52fba40..f334ddaa4 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -1119,7 +1119,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index 72023eaf7..7e9078339 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -1092,7 +1092,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index 94b08d9ce..e71e2eb5c 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -1111,7 +1111,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index ad837fdb5..ac3279ef8 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -1128,7 +1128,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 892546e56..ca53c4c97 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -1131,7 +1131,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index 38df585f0..877c78d93 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -1063,7 +1063,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. 
''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 70632f86d..507170424 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -1120,7 +1120,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index 4eee748d7..347e879ca 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -1117,7 +1117,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 2e73a7645..93c96b817 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -1124,7 +1124,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index e003770d8..3369cf134 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -1168,7 +1168,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index c142f1f43..1b6202a26 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -1106,7 +1106,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 62bda33ad..732299e48 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -1164,7 +1164,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. 
''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index c541e1bbd..a6cf764ff 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -1171,7 +1171,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 646a39224..90d514292 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -1104,7 +1104,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 99a8e8f3d..0d9acac0e 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -1104,7 +1104,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py index 7e7d0fa60..6fb5a94e9 100644 --- a/roles/lib_openshift/library/oc_storageclass.py +++ b/roles/lib_openshift/library/oc_storageclass.py @@ -1122,7 +1122,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index 7bbe38819..feb69348b 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -1164,7 +1164,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 63adbd6ac..0f024c048 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -1076,7 +1076,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. 
''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 3c07f8d4b..6f409f979 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -1153,7 +1153,7 @@ class Utils(object): # pragma: no cover ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index 1fb32164e..9a4ce3509 100644 --- a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -314,7 +314,7 @@ class Utils(object): ''' Actually write the file contents to disk. This helps with mocking. ''' with open(filename, 'w') as sfd: - sfd.write(contents) + sfd.write(str(contents)) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 8a208a9c2..3d966e34a 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -320,3 +320,8 @@ openshift_aws_masters_groups: masters,etcd,nodes # By default, don't delete things like the shared IAM instance # profile and uploaded ssh keys openshift_aws_enable_uninstall_shared_objects: False +# S3 bucket names are global by default and can take minutes/hours for the +# name to become available for re-use (assuming someone doesn't take the +# name in the meantime). Default to just emptying the contents of the S3 +# bucket if we've been asked to create the bucket during provisioning. +openshift_aws_really_delete_s3_bucket: False diff --git a/roles/openshift_aws/tasks/uninstall_s3.yml b/roles/openshift_aws/tasks/uninstall_s3.yml new file mode 100644 index 000000000..0b08cbeed --- /dev/null +++ b/roles/openshift_aws/tasks/uninstall_s3.yml @@ -0,0 +1,26 @@ +--- +- name: empty S3 bucket + block: + - name: get S3 object list + aws_s3: + bucket: "{{ openshift_aws_s3_bucket_name }}" + mode: list + region: "{{ openshift_aws_region }}" + register: s3_out + + - name: delete S3 objects + aws_s3: + bucket: "{{ openshift_aws_s3_bucket_name }}" + mode: delobj + object: "{{ item }}" + with_items: "{{ s3_out.s3_keys }}" + when: openshift_aws_create_s3 | bool + +- name: delete S3 bucket + aws_s3: + bucket: "{{ openshift_aws_s3_bucket_name }}" + mode: delete + region: "{{ openshift_aws_region }}" + when: + - openshift_aws_create_s3 | bool + - openshift_aws_really_delete_s3_bucket | bool diff --git a/roles/openshift_certificate_expiry/examples/playbooks b/roles/openshift_certificate_expiry/examples/playbooks index 586afb0d5..751c3d14e 120000 --- a/roles/openshift_certificate_expiry/examples/playbooks +++ b/roles/openshift_certificate_expiry/examples/playbooks @@ -1 +1 @@ -../../../playbooks/certificate_expiry
\ No newline at end of file +../../../playbooks/openshift-checks/certificate_expiry
\ No newline at end of file diff --git a/roles/openshift_cloud_provider/defaults/main.yml b/roles/openshift_cloud_provider/defaults/main.yml new file mode 100644 index 000000000..37cbf5603 --- /dev/null +++ b/roles/openshift_cloud_provider/defaults/main.yml @@ -0,0 +1,4 @@ +--- +openshift_gcp_project: '' +openshift_gcp_prefix: '' +openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network" diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml index 395bd304c..9e1c31b1d 100644 --- a/roles/openshift_cloud_provider/tasks/gce.yml +++ b/roles/openshift_cloud_provider/tasks/gce.yml @@ -1,4 +1,12 @@ --- +- name: check variables are passed + fail: + msg: "Ensure correct variables are defined for gcp. {{ item }}" + when: item == '' + with_items: + - "{{ openshift_gcp_project }}" + - "{{ openshift_gcp_prefix }}" + # Work around ini_file create option in 2.2 which defaults to no - name: Create cloud config file file: @@ -16,8 +24,8 @@ option: "{{ item.key }}" value: "{{ item.value }}" with_items: - - { key: 'project-id', value: '{{ openshift_gcp_project }}' } - - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' } - - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' } - - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' } - - { key: 'multizone', value: 'false' } + - { key: 'project-id', value: '{{ openshift_gcp_project }}' } + - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' } + - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' } + - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' } + - { key: 'multizone', value: 'false' } diff --git a/roles/openshift_daemonset_config/defaults/main.yml b/roles/openshift_daemonset_config/defaults/main.yml index ebe5671d2..bb9803c2b 100644 --- a/roles/openshift_daemonset_config/defaults/main.yml +++ b/roles/openshift_daemonset_config/defaults/main.yml @@ -1,16 +1,19 @@ --- -openshift_daemonset_config_namespace: openshift-node -openshift_daemonset_config_daemonset_name: ops-node-config -openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}" +openshift_daemonset_config_image: "centos:7" +openshift_daemonset_config_monitoring_image: "openshifttools/oso-centos7-host-monitoring:latest" +openshift_daemonset_config_namespace: openshift-config +openshift_daemonset_config_daemonset_name: node-config +openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}-configmap" +openshift_daemonset_config_monitoring_pos: "false" openshift_daemonset_config_node_selector: config: config -openshift_daemonset_config_sa_name: ops +openshift_daemonset_config_sa_name: configurator openshift_daemonset_config_configmap_files: {} openshift_daemonset_config_configmap_literals: {} openshift_daemonset_config_monitoring: False openshift_daemonset_config_interval: 300 openshift_daemonset_config_script: config.sh -openshift_daemonset_config_secret_name: operations-config-secret +openshift_daemonset_config_secret_name: "{{ openshift_daemonset_config_daemonset_name }}-secret" openshift_daemonset_config_secrets: {} openshift_daemonset_config_runasuser: 0 openshift_daemonset_config_privileged: True diff --git a/roles/openshift_daemonset_config/tasks/main.yml b/roles/openshift_daemonset_config/tasks/main.yml index 450cc9dca..f8f42b771 100644 --- a/roles/openshift_daemonset_config/tasks/main.yml +++ b/roles/openshift_daemonset_config/tasks/main.yml @@ -1,4 +1,9 @@ --- +- name: create the 
namespace + oc_project: + state: present + name: "{{ openshift_daemonset_config_namespace }}" + - name: add a sa oc_serviceaccount: name: "{{ openshift_daemonset_config_sa_name }}" @@ -25,11 +30,6 @@ dest: "{{ item.value }}" with_dict: "{{ openshift_daemonset_config_configmap_files }}" -- name: create the namespace - oc_project: - state: present - name: "{{ openshift_daemonset_config_namespace }}" - - name: lay down secrets oc_secret: state: present @@ -39,6 +39,7 @@ contents: "{{ openshift_daemonset_config_secrets }}" when: - openshift_daemonset_config_secrets != {} + register: secout - name: create the configmap oc_configmap: @@ -47,6 +48,7 @@ namespace: "{{ openshift_daemonset_config_namespace }}" from_literal: "{{ openshift_daemonset_config_configmap_literals }}" from_file: "{{ openshift_daemonset_config_configmap_files }}" + register: cmout - name: deploy daemonset oc_obj: @@ -56,3 +58,4 @@ kind: daemonset files: - /tmp/daemonset.yml + force: "{{ True if cmout.changed or secout.changed else False | bool }}" diff --git a/roles/openshift_daemonset_config/templates/daemonset.yml.j2 b/roles/openshift_daemonset_config/templates/daemonset.yml.j2 index 9792f6d16..02cd5bcfd 100644 --- a/roles/openshift_daemonset_config/templates/daemonset.yml.j2 +++ b/roles/openshift_daemonset_config/templates/daemonset.yml.j2 @@ -33,7 +33,7 @@ spec: hostIPC: true containers: - name: config - image: centos:7 + image: "{{ openshift_daemonset_config_image }}" env: - name: RESYNC_INTERVAL value: "{{ openshift_daemonset_config_interval }}" @@ -50,8 +50,8 @@ spec: sh /opt/config/{{ openshift_daemonset_config_script }} # sleep for ${RESYNC_INTERVAL} minutes, then loop. if we fail Kubelet will restart us again - echo "Success, sleeping for ${RESYNC_INTERVAL}s" - exec sleep ${RESYNC_INTERVAL} + echo "Success, sleeping for ${RESYNC_INTERVAL}s. 
Date: $(date)" + sleep ${RESYNC_INTERVAL} # Return to perform the config done @@ -68,6 +68,8 @@ spec: # Our node configuration - mountPath: /opt/config name: config + - mountPath: /opt/tmp_shared_config + name: tmp-shared-dir {% if openshift_daemonset_config_secrets != {} %} # Our delivered secrets - mountPath: /opt/secrets @@ -79,12 +81,14 @@ spec: memory: {{ openshift_daemonset_config_resources.memory }} {% if openshift_daemonset_config_monitoring %} - name: monitoring - image: openshifttools/oso-centos7-host-monitoring:latest + image: "{{ openshift_daemonset_config_monitoring_image }}" + env: + - name: OO_PAUSE_ON_START + value: "{{ openshift_daemonset_config_monitoring_pos }}" securityContext: # Must be root to read content runAsUser: 0 privileged: true - volumeMounts: - mountPath: /host name: host @@ -118,17 +122,23 @@ spec: - mountPath: /host/var/cache/yum subPath: var/cache/yum name: host - - mountPath: /container_setup/monitoring-config.yml - subPath: monitoring-config.yaml - name: config + readOnly: true + - mountPath: /container_setup + name: tmp-shared-dir - mountPath: /opt/config name: config +{% if openshift_daemonset_config_secrets != {} %} + - mountPath: /opt/secrets + name: secrets +{% endif %} resources: requests: cpu: 10m memory: 10Mi {% endif %} volumes: + - name: tmp-shared-dir + emptyDir: {} - name: config configMap: name: {{ openshift_daemonset_config_configmap_name }} diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml index 7ca122fc9..687d60171 100644 --- a/roles/openshift_default_storage_class/defaults/main.yml +++ b/roles/openshift_default_storage_class/defaults/main.yml @@ -1,4 +1,7 @@ --- +# Must not be blank if you're using vsphere +openshift_cloudprovider_vsphere_datacenter: '' + openshift_storageclass_defaults: aws: provisioner: aws-ebs diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml index b39c44b01..7223a5afe 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml @@ -35,7 +35,7 @@ mount: state: mounted fstype: glusterfs - src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}" + src: "{% if 'glusterfs_registry' in groups and groups['glusterfs_registry'] | length > 0 %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups and groups['glusterfs'] | length > 0 %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}" name: "{{ mktemp.stdout }}" - name: Set 
registry volume permissions diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index ff5ad1045..b731d93a0 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -137,6 +137,16 @@ - "prometheus_out.stderr | length > 0" - "'already exists' not in prometheus_out.stderr" +- set_fact: + _logging_metrics_proxy_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}" + +- template: + src: passwd.j2 + dest: "{{mktemp.stdout}}/passwd.yml" + vars: + logging_user_name: "{{ openshift_logging_elasticsearch_prometheus_sa }}" + logging_user_passwd: "{{ _logging_metrics_proxy_passwd }}" + # View role and binding - name: Generate logging-elasticsearch-view-role template: @@ -255,6 +265,8 @@ path: "{{ generated_certs_dir }}/ca.crt" - name: admin.jks path: "{{ generated_certs_dir }}/system.admin.jks" + - name: passwd.yml + path: "{{mktemp.stdout}}/passwd.yml" # services - name: Set logging-{{ es_component }}-cluster service @@ -391,6 +403,7 @@ es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}" deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}" es_replicas: 1 + basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}" - name: Set ES dc oc_obj: diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 4b189f255..b1d6a4489 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -51,6 +51,7 @@ spec: - -client-id={{openshift_logging_elasticsearch_prometheus_sa}} - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }} + - -basic-auth-password={{ basic_auth_passwd }} - -upstream=https://localhost:9200 - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}' - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' diff --git a/roles/openshift_logging_elasticsearch/templates/passwd.j2 b/roles/openshift_logging_elasticsearch/templates/passwd.j2 new file mode 100644 index 000000000..a22151eef --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/passwd.j2 @@ -0,0 +1,2 @@ +"{{logging_user_name}}": + passwd: "{{logging_user_passwd}}" diff --git a/roles/openshift_manage_node/defaults/main.yml b/roles/openshift_manage_node/defaults/main.yml index 00e04b9f2..b7a89a723 100644 --- a/roles/openshift_manage_node/defaults/main.yml +++ b/roles/openshift_manage_node/defaults/main.yml @@ -1,9 +1,5 @@ --- # openshift_manage_node_is_master is set at the play level. openshift_manage_node_is_master: False - -# Default is to be schedulable except for master nodes. 
-l_openshift_manage_schedulable: "{{ openshift_schedulable | default(not openshift_manage_node_is_master) }}" - openshift_master_node_labels: node-role.kubernetes.io/master: 'true' diff --git a/roles/openshift_manage_node/tasks/config.yml b/roles/openshift_manage_node/tasks/config.yml index 4f00351b5..e5753d185 100644 --- a/roles/openshift_manage_node/tasks/config.yml +++ b/roles/openshift_manage_node/tasks/config.yml @@ -2,7 +2,7 @@ - name: Set node schedulability oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" - schedulable: "{{ 'true' if l_openshift_manage_schedulable | bool else 'false' }}" + schedulable: "{{ 'true' if openshift_schedulable | default(true) | bool else 'false' }}" retries: 10 delay: 5 register: node_schedulable @@ -23,5 +23,5 @@ delegate_to: "{{ openshift_master_host }}" vars: l_node_labels: "{{ openshift_node_labels | default({}) }}" - l_master_labels: "{{ ('oo_masters_to_config' in group_names) | ternary(openshift_master_node_labels, {}) }}" + l_master_labels: "{{ openshift_manage_node_is_master | ternary(openshift_master_node_labels, {}) }}" l_all_labels: "{{ l_node_labels | combine(l_master_labels) }}" diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 41f2ee2a5..680e4a4ff 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -137,17 +137,8 @@ - item.clientCA | default('') != '' with_items: "{{ openshift.master.identity_providers }}" -# This is an ugly hack to verify settings are in a file without modifying them with lineinfile. -# The template file will stomp any other settings made. -- block: - - name: check whether our docker-registry setting exists in the env file - command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master" - failed_when: false - changed_when: false - register: l_already_set - - - set_fact: - openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" +- name: Include push_via_dns.yml + include_tasks: push_via_dns.yml - name: Set fact of all etcd host IPs openshift_facts: diff --git a/roles/openshift_master/tasks/push_via_dns.yml b/roles/openshift_master/tasks/push_via_dns.yml new file mode 100644 index 000000000..c5876130a --- /dev/null +++ b/roles/openshift_master/tasks/push_via_dns.yml @@ -0,0 +1,13 @@ +--- +# This is an ugly hack to verify settings are in a file without modifying them with lineinfile. +# The template file will stomp any other settings made. 
+- when: openshift_push_via_dns is not defined + block: + - name: check whether our docker-registry setting exists in the env file + shell: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master*" + failed_when: false + changed_when: false + register: l_already_set + + - set_fact: + openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 870ab7c57..aeff64983 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -1,6 +1,8 @@ --- # systemd_units.yml is included both in the openshift_master role and in the upgrade # playbooks. +- name: include push_via_dns.yml tasks + include_tasks: push_via_dns.yml - name: Set HA Service Info for containerized installs set_fact: @@ -9,7 +11,8 @@ when: - openshift_is_containerized | bool -- include_tasks: registry_auth.yml +- name: include registry_auth tasks + include_tasks: registry_auth.yml - name: Disable the legacy master service if it exists systemd: diff --git a/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml b/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml new file mode 100644 index 000000000..6aa48f9c3 --- /dev/null +++ b/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml @@ -0,0 +1,46 @@ +--- +- name: Check to see if PVC already exists + oc_obj: + state: list + kind: pvc + name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}" + namespace: "{{ openshift_metrics_project }}" + register: _metrics_pvc + +# _metrics_pvc.results.results | length > 0 returns a false positive +# so we check for the presence of 'stderr' to determine if the obj exists or not +# the RC for existing and not existing is both 0 +- when: + - _metrics_pvc.results.stderr is defined + block: + - name: generate hawkular-cassandra persistent volume claims + template: + src: pvc.j2 + dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml" + vars: + obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}" + labels: + metrics-infra: hawkular-cassandra + access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" + size: "{{ openshift_metrics_cassandra_pvc_size }}" + pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" + storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" + when: + - openshift_metrics_cassandra_storage_type != 'emptydir' + - openshift_metrics_cassandra_storage_type != 'dynamic' + changed_when: false + + - name: generate hawkular-cassandra persistent volume claims (dynamic) + template: + src: pvc.j2 + dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml" + vars: + obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}" + labels: + metrics-infra: hawkular-cassandra + access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" + size: "{{ openshift_metrics_cassandra_pvc_size }}" + pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" + storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" + when: openshift_metrics_cassandra_storage_type == 'dynamic' + changed_when: false diff --git 
a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index 9026cc897..158e596ec 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -25,36 +25,7 @@ - set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics" when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''" -- name: generate hawkular-cassandra persistent volume claims - template: - src: pvc.j2 - dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml" - vars: - obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}" - labels: - metrics-infra: hawkular-cassandra - access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" - size: "{{ openshift_metrics_cassandra_pvc_size }}" - pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" - storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" - with_sequence: count={{ openshift_metrics_cassandra_replicas }} - when: - - openshift_metrics_cassandra_storage_type != 'emptydir' - - openshift_metrics_cassandra_storage_type != 'dynamic' - changed_when: false - -- name: generate hawkular-cassandra persistent volume claims (dynamic) - template: - src: pvc.j2 - dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml" - vars: - obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}" - labels: - metrics-infra: hawkular-cassandra - access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" - size: "{{ openshift_metrics_cassandra_pvc_size }}" - pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" - storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" +- include_tasks: generate_cassandra_pvcs.yaml with_sequence: count={{ openshift_metrics_cassandra_replicas }} - when: openshift_metrics_cassandra_storage_type == 'dynamic' - changed_when: false + loop_control: + loop_var: metrics_pvc_index diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml index 72415f9a6..e31433dbc 100644 --- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml +++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml @@ -15,6 +15,7 @@ name: "{{ item }}" state: started enabled: True + when: not openshift_is_atomic | bool with_items: - multipathd - rpcbind diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 77be1f2b1..2bdb81632 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -93,3 +93,8 @@ openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size openshift_openstack_etcd_volume_size: 2 openshift_openstack_lb_volume_size: 5 openshift_openstack_ephemeral_volumes: false + + +# cloud-config +openshift_openstack_disable_root: true +openshift_openstack_user: openshift diff --git a/roles/openshift_openstack/templates/user_data.j2 b/roles/openshift_openstack/templates/user_data.j2 index eb65f7cec..ccaa5d464 100644 --- a/roles/openshift_openstack/templates/user_data.j2 +++ b/roles/openshift_openstack/templates/user_data.j2 @@ -1,9 +1,9 @@ #cloud-config -disable_root: true +disable_root: {{ openshift_openstack_disable_root }} system_info: default_user: - name: openshift + name: {{ openshift_openstack_user }} sudo: ["ALL=(ALL) NOPASSWD: ALL"] write_files: diff --git 
a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2 index c0abd483b..e86de1eab 100644 --- a/roles/openshift_prometheus/templates/prometheus.j2 +++ b/roles/openshift_prometheus/templates/prometheus.j2 @@ -219,7 +219,7 @@ spec: - name: alertmanager args: - - -config.file=/etc/alertmanager/alertmanager.yml + - --config.file=/etc/alertmanager/alertmanager.yml image: "{{ l_openshift_prometheus_alertmanager_image_prefix }}prometheus-alertmanager:{{ l_openshift_prometheus_alertmanager_image_version }}" imagePullPolicy: IfNotPresent resources: diff --git a/roles/openshift_prometheus/vars/default_images.yml b/roles/openshift_prometheus/vars/default_images.yml index 31f6c1bb1..2c46b5700 100644 --- a/roles/openshift_prometheus/vars/default_images.yml +++ b/roles/openshift_prometheus/vars/default_images.yml @@ -8,5 +8,5 @@ l_openshift_prometheus_alertbuffer_image_prefix: "{{ openshift_prometheus_alertb # image version defaults l_openshift_prometheus_image_version: "{{ openshift_prometheus_image_version | default('v2.0.0') }}" l_openshift_prometheus_proxy_image_version: "{{ openshift_prometheus_proxy_image_version | default('v1.0.0') }}" -l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v0.9.1') }}" +l_openshift_prometheus_alertmanager_image_version: "{{ openshift_prometheus_alertmanager_image_version | default('v0.13.0') }}" l_openshift_prometheus_alertbuffer_image_version: "{{ openshift_prometheus_alertbuffer_image_version | default('v0.0.2') }}" diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 62d460272..08dfd8284 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -61,3 +61,17 @@ when: - template_service_broker_remove | default(false) | bool - template_service_broker_install | default(true) | bool + +- name: Ensure that all requires vsphere configuration variables are set + fail: + msg: > + When the vSphere cloud provider is configured you must define all of these variables: + openshift_cloudprovider_vsphere_username, openshift_cloudprovider_vsphere_password, + openshift_cloudprovider_vsphere_host, openshift_cloudprovider_vsphere_datacenter, + openshift_cloudprovider_vsphere_datastore + when: + - openshift_cloudprovider_kind is defined + - openshift_cloudprovider_kind == 'vsphere' + - ( openshift_cloudprovider_vsphere_username is undefined or openshift_cloudprovider_vsphere_password is undefined or + openshift_cloudprovider_vsphere_host is undefined or openshift_cloudprovider_vsphere_datacenter is undefined or + openshift_cloudprovider_vsphere_datastore is undefined ) diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index a5fdae803..e6e261b52 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -313,7 +313,10 @@ - glusterfs_storageclass or glusterfs_s3_deploy - include_tasks: glusterblock_deploy.yml - when: glusterfs_block_deploy + when: + - glusterfs_block_deploy + #TODO: Remove this when multipathd will be available on atomic + - not openshift_is_atomic | bool - block: - name: Create heketi block secret diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 
befacb04f..10c29fd37 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -48,7 +48,7 @@ glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo | bool }}" glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_registry_heketi_ssh_keyfile }}" glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}" - glusterfs_nodes: "{% if groups.glusterfs_registry is defined %}{% set nodes = groups.glusterfs_registry %}{% elif 'groups.glusterfs' is defined %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}" + glusterfs_nodes: "{% if groups.glusterfs_registry is defined and groups['glusterfs_registry'] | length > 0 %}{% set nodes = groups.glusterfs_registry %}{% elif 'groups.glusterfs' is defined and groups['glusterfs'] | length > 0 %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}" - include_tasks: glusterfs_common.yml when: diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml index e01a56dc1..b0d155c2c 100644 --- a/roles/openshift_version/tasks/first_master.yml +++ b/roles/openshift_version/tasks/first_master.yml @@ -19,7 +19,7 @@ - set_fact: openshift_pkg_version: -{{ openshift_version }} when: - - openshift_pkg_version is not defined + - openshift_pkg_version is not defined or openshift_pkg_version == "" - openshift_upgrade_target is not defined - block: @@ -28,5 +28,5 @@ - set_fact: openshift_image_tag: v{{ openshift_version }} when: > - openshift_image_tag is not defined + openshift_image_tag is not defined or openshift_image_tag == "" or l_force_image_tag_to_version | bool diff --git a/roles/openshift_version/tasks/first_master_containerized_version.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml index 3ed1d2cfe..9eb38cb2b 100644 --- a/roles/openshift_version/tasks/first_master_containerized_version.yml +++ b/roles/openshift_version/tasks/first_master_containerized_version.yml @@ -6,6 +6,7 @@ openshift_version: "{{ openshift_image_tag[1:].split('-')[0] if openshift_image_tag != 'latest' else openshift_image_tag }}" when: - openshift_image_tag is defined + - openshift_image_tag != "" - openshift_version is not defined - not (openshift_version_reinit | default(false)) diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml index 5d92f90c6..85e440513 100644 --- a/roles/openshift_version/tasks/first_master_rpm_version.yml +++ b/roles/openshift_version/tasks/first_master_rpm_version.yml @@ -5,6 +5,7 @@ openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}" when: - openshift_pkg_version is defined + - openshift_pkg_version != "" - openshift_version is not defined - not (openshift_version_reinit | default(false)) |
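
A few of the patterns introduced above, sketched in simplified form. Names, file paths, and values in the sketches are illustrative assumptions, not excerpts from the repository.

The openshift_logging_elasticsearch changes generate a random password, store a base64-encoded copy in a passwd.yml entry delivered with the Elasticsearch secret, and hand the decoded value to the oauth-proxy sidecar via `-basic-auth-password`. A minimal standalone sketch of that generate/encode/template flow, using the core `password` lookup in place of the repo's `lib_utils_oo_random_word` filter and an invented `prometheus-sa` user name:

```yaml
---
# sketch.yml -- hypothetical playbook, not part of openshift-ansible
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Generate a random password and keep a base64-encoded copy
      set_fact:
        _proxy_passwd_b64: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters,digits') | b64encode }}"

    - name: Render a passwd.yml fragment analogous to templates/passwd.j2
      copy:
        dest: /tmp/passwd.yml
        mode: "0600"
        content: |
          "prometheus-sa":
            passwd: "{{ _proxy_passwd_b64 }}"

    - name: Show the flag the proxy container would receive (decoded form)
      debug:
        msg: "-basic-auth-password={{ _proxy_passwd_b64 | b64decode }}"
```

The openshift_metrics change replaces two `with_sequence` template tasks with a single `include_tasks` loop and uses `loop_control.loop_var` so the included file sees the counter as `metrics_pvc_index` rather than `item`. The same looping pattern in isolation, with invented file names:

```yaml
# caller.yml (sketch): run the included task file once per replica
- include_tasks: per_replica.yml
  with_sequence: count=3
  loop_control:
    loop_var: replica_index

# per_replica.yml (sketch): the custom loop_var keeps `item` free for
# any nested loops inside this file
- debug:
    msg: "would create PVC metrics-{{ replica_index }}"
```

The openshift_sanitize_inventory hunk fails early when `openshift_cloudprovider_kind == 'vsphere'` but any of the connection variables is missing. An equivalent guard could also be written with the `assert` module, which reports the failing condition; this is only an alternative sketch, not what the role does:

```yaml
- name: Verify vSphere cloud provider variables
  assert:
    that:
      - openshift_cloudprovider_vsphere_username is defined
      - openshift_cloudprovider_vsphere_password is defined
      - openshift_cloudprovider_vsphere_host is defined
      - openshift_cloudprovider_vsphere_datacenter is defined
      - openshift_cloudprovider_vsphere_datastore is defined
    msg: "openshift_cloudprovider_kind=vsphere requires all vSphere connection variables to be set"
  when: openshift_cloudprovider_kind | default('') == 'vsphere'
```

Finally, the `glusterfs_nodes` expression in glusterfs_registry.yml now checks group length, but the `elif 'groups.glusterfs' is defined` branch still tests a quoted string literal (which is always defined) rather than the group itself. If that is unintended, a shorter expression that prefers a non-empty `glusterfs_registry` group, falls back to `glusterfs`, and yields an empty list (rather than the string '[]') when both are absent might look like:

```yaml
glusterfs_nodes: "{{ groups.glusterfs_registry | default([]) or groups.glusterfs | default([]) }}"
```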