37 files changed, 253 insertions, 146 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index b2155c30f..ef4d512ac 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.7.0-0.127.0 ./ +3.7.0-0.128.0 ./ diff --git a/openshift-ansible.spec b/openshift-ansible.spec index b5673cda1..da7210fd0 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -10,7 +10,7 @@ Name: openshift-ansible Version: 3.7.0 -Release: 0.127.0%{?dist} +Release: 0.128.0%{?dist} Summary: Openshift and Atomic Enterprise Ansible License: ASL 2.0 URL: https://github.com/openshift/openshift-ansible @@ -280,6 +280,60 @@ Atomic OpenShift Utilities includes %changelog +* Tue Sep 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.128.0 +- check if the storage backend is set to etcd3 before upgrading to 3.7 + (jchaloup@redhat.com) +- crio: detect the correct version of the images (gscrivan@redhat.com) +- crio: set the correct image name with OSE (gscrivan@redhat.com) +- resolve #5428: python-dbus not found (ltheisen@mitre.org) +- Updating default behavior for installing metrics and logging. Separating out + uninstall to own variable (ewolinet@redhat.com) +- Add booleans to prevent unwanted install of nuage roles. (mgugino@redhat.com) +- Set master facts prior to adding new etcd client urls to master config. + (abutcher@redhat.com) +- Remove debugging statements and pause module (sdodson@redhat.com) +- Fix registry_auth logic for upgrades (mgugino@redhat.com) +- crio: skip installation on lbs and nfs nodes (gscrivan@redhat.com) +- Remove override default.py callback plugin (rteague@redhat.com) +- consolidate etcd_migrate role (jchaloup@redhat.com) +- Add python3-PyYAML for Fedora installs (mgugino@redhat.com) +- Do a full stop/start when etcd certificates had expired. + (abutcher@redhat.com) +- Move additional/block/insecure registires to /etc/containers/registries.conf + (mgugino@redhat.com) +- Improve CA playbook restart logic and skip restarts when related services had + previously expired certificates. (abutcher@redhat.com) +- health checks: add diagnostics check (lmeyer@redhat.com) +- Remove unused openshift_hosted_logging role (mgugino@redhat.com) +- consolidate etcd_upgrade role (jchaloup@redhat.com) +- disable excluders after all pre-checks (jchaloup@redhat.com) +- Fixed AnsibleUnsafeText by converting to int (edu@redhat.com) +- Ensure that hostname is lowercase (sdodson@redhat.com) +- Fix deprecated subscription-manager command + (bliemli@users.noreply.github.com) +- Returning actual results of yedit query. Empty list was returning empty + dict. (kwoodson@redhat.com) +- Default openshift_pkg_version to full version-release during upgrades + (sdodson@redhat.com) +- Creating structure to warn for use of deprecated variables and set them in a + single location before they are no longer honored (ewolinet@redhat.com) +- Remove default value for oreg_url (mgugino@redhat.com) +- Creating initial tsb role to consume and apply templates provided for tsb + (ewolinet@redhat.com) +- Set network facts using first master's config during scaleup. + (abutcher@redhat.com) +- Use 3.7 RPM repo (ahaile@redhat.com) +- Changes for Nuage atomic ansible install + (rohan.s.parulekar@nuagenetworks.net) +- Add 3.7 scheduler predicates (jsafrane@redhat.com) +- Consolidate etcd certs roles (jchaloup@redhat.com) +- GlusterFS can now be run more than once. 
Ability to add devices to nodes + (ttindell@isenpai.com) +- Ensure valid search on resolv.conf (mateus.caruccio@getupcloud.com) +- move (and rename) get_dns_ip filter into openshift_node_facts + (jdiaz@redhat.com) +- cri-o: Allow full image override (smilner@redhat.com) + * Thu Sep 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.127.0 - Updating to always configure api aggregation with installation (ewolinet@redhat.com) diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml index 39e82498d..a3446ef84 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml @@ -98,7 +98,11 @@ serial: 1 tasks: - include_role: - name: etcd_upgrade + name: etcd + tasks_from: upgrade_image + vars: + r_etcd_common_etcd_runtime: "host" + etcd_peer: "{{ openshift.common.hostname }}" when: - ansible_distribution == 'Fedora' - not openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml index 831ca8f57..e5e895775 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml @@ -5,13 +5,14 @@ - name: Upgrade containerized hosts to {{ etcd_upgrade_version }} hosts: oo_etcd_hosts_to_upgrade serial: 1 - roles: - - role: etcd_upgrade - r_etcd_upgrade_action: upgrade - r_etcd_upgrade_mechanism: image - r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - etcd_peer: "{{ openshift.common.hostname }}" + tasks: + - include_role: + name: etcd + tasks_from: upgrade_image + vars: + r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + etcd_peer: "{{ openshift.common.hostname }}" when: - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<') - openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml index 2e79451e0..a2a26bad4 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml @@ -5,13 +5,14 @@ - name: Upgrade to {{ etcd_upgrade_version }} hosts: oo_etcd_hosts_to_upgrade serial: 1 - roles: - - role: etcd_upgrade - r_etcd_upgrade_action: upgrade - r_etcd_upgrade_mechanism: rpm - r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" - r_etcd_common_etcd_runtime: "host" - etcd_peer: "{{ openshift.common.hostname }}" + tasks: + - include_role: + name: etcd + tasks_from: upgrade_rpm + vars: + r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" + r_etcd_common_etcd_runtime: "host" + etcd_peer: "{{ openshift.common.hostname }}" when: - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<') - ansible_distribution == 'RedHat' diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml new file mode 100644 index 000000000..f75ae3b15 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml @@ -0,0 +1,22 @@ +--- +- name: Verify all 
masters has etcd3 storage backend set + hosts: oo_masters_to_config + gather_facts: no + roles: + - lib_utils + tasks: + - name: Read master storage backend setting + yedit: + state: list + src: /etc/origin/master/master-config.yaml + key: kubernetesMasterConfig.apiServerArguments.storage-backend + register: _storage_backend + + - fail: + msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue" + when: + # assuming the master-config.yml is properly configured, i.e. the value is a list + - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3" + + - debug: + msg: "Storage backend is set to etcd3" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index 3549cf6c3..6cd3bd3e5 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -29,6 +29,10 @@ tags: - pre_upgrade +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + - name: Update repos on control plane hosts hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tags: diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index 81f3ee9e4..274fd8603 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -4,3 +4,17 @@ docker_cli_auth_config_path: '/root/.docker' # oreg_url is defined by user input. oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}" oreg_auth_credentials_replace: False + +openshift_docker_additional_registries: [] +openshift_docker_blocked_registries: [] +openshift_docker_insecure_registries: [] + +# The l2_docker_* variables convert csv strings to lists, if +# necessary. These variables should be used in place of their respective +# openshift_docker_* counterparts to ensure the properly formatted lists are +# utilized. 
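For illustration, a hypothetical inventory value and the list that the expressions below derive from it (the registry hostnames are examples, not defaults):

    # inventory: CSV string form, as commonly supplied in an INI inventory
    openshift_docker_additional_registries=registry.example.com,mirror.example.com
    # resulting l2_docker_additional_registries value after conversion:
    #   ['registry.example.com', 'mirror.example.com']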
+l2_docker_additional_registries: "{% if openshift_docker_additional_registries is string %}{% if openshift_docker_additional_registries == '' %}[]{% elif ',' in openshift_docker_additional_registries %}{{ openshift_docker_additional_registries.split(',') | list }}{% else %}{{ [ openshift_docker_additional_registries ] }}{% endif %}{% else %}{{ openshift_docker_additional_registries }}{% endif %}" +l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}" +l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}" + +containers_registries_conf_path: /etc/containers/registries.conf diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index 16aea5067..0c5621259 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -53,22 +53,22 @@ - stat: path=/etc/sysconfig/docker register: docker_check -- name: Set registry params +- name: Comment old registry params in /etc/sysconfig/docker lineinfile: dest: /etc/sysconfig/docker regexp: '^{{ item.reg_conf_var }}=.*$' - line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" - when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg + line: "#{{ item.reg_conf_var }}=''# Moved to {{ containers_registries_conf_path }}" with_items: - reg_conf_var: ADD_REGISTRY - reg_fact_val: "{{ docker_additional_registries | default(None, true)}}" - reg_flag: --add-registry - reg_conf_var: BLOCK_REGISTRY - reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}" - reg_flag: --block-registry - reg_conf_var: INSECURE_REGISTRY - reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}" - reg_flag: --insecure-registry + notify: + - restart docker + +- name: Place additional/blocked/insecure registies in /etc/containers/registries.conf + template: + dest: "{{ containers_registries_conf_path }}" + src: registries.conf notify: - restart docker diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml index e6fc2db06..5b02b72be 100644 --- a/roles/docker/tasks/systemcontainer_crio.yml +++ b/roles/docker/tasks/systemcontainer_crio.yml @@ -104,7 +104,7 @@ - name: Use RHEL based image when distribution is Red Hat set_fact: - l_crio_image_prepend: "registry.access.redhat.com" + l_crio_image_prepend: "registry.access.redhat.com/openshift3" l_crio_image_name: "cri-o" when: ansible_distribution == "RedHat" diff --git a/roles/docker/templates/registries.conf b/roles/docker/templates/registries.conf new file mode 100644 index 000000000..c55dbd84f --- /dev/null +++ b/roles/docker/templates/registries.conf @@ -0,0 +1,46 @@ +# {{ ansible_managed }} +# This is a system-wide configuration file used to +# keep track of registries for various container backends. 
+# It adheres to YAML format and does not support recursive +# lists of registries. + +# The default location for this configuration file is /etc/containers/registries.conf. + +# The only valid categories are: 'registries', 'insecure_registies', +# and 'block_registries'. + + +#registries: +# - registry.access.redhat.com + +{% if l2_docker_additional_registries %} +registries: +{% for reg in l2_docker_additional_registries %} + - {{ reg }} +{% endfor %} +{% endif %} + +# If you need to access insecure registries, uncomment the section below +# and add the registries fully-qualified name. An insecure registry is one +# that does not have a valid SSL certificate or only does HTTP. +#insecure_registries: +# - + +{% if l2_docker_insecure_registries %} +insecure_registries: +{% for reg in l2_docker_insecure_registries %} + - {{ reg }} +{% endfor %} +{% endif %} + +# If you need to block pull access from a registry, uncomment the section below +# and add the registries fully-qualified name. +#block_registries: +# - + +{% if l2_docker_blocked_registries %} +block_registries: +{% for reg in l2_docker_blocked_registries %} + - {{ reg }} +{% endfor %} +{% endif %} diff --git a/roles/etcd_upgrade/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade/upgrade_image.yml index 136ec1142..cea95a1b3 100644 --- a/roles/etcd_upgrade/tasks/upgrade_image.yml +++ b/roles/etcd/tasks/upgrade/upgrade_image.yml @@ -29,8 +29,15 @@ ## TODO: probably should just move this into the backup playbooks, also this ## will fail on atomic host. We need to revisit how to do etcd backups there as ## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1) +- name: Detecting Atomic Host Operating System + stat: + path: /run/ostree-booted + register: l_ostree_booted + - name: Upgrade etcd for etcdctl when not atomic - package: name=etcd state=latest + package: + name: etcd + state: latest when: not l_ostree_booted.stat.exists | bool - name: Verify cluster is healthy diff --git a/roles/etcd_upgrade/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade/upgrade_rpm.yml index 324b69605..324b69605 100644 --- a/roles/etcd_upgrade/tasks/upgrade_rpm.yml +++ b/roles/etcd/tasks/upgrade/upgrade_rpm.yml diff --git a/roles/etcd/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade_image.yml new file mode 100644 index 000000000..9e69027eb --- /dev/null +++ b/roles/etcd/tasks/upgrade_image.yml @@ -0,0 +1,2 @@ +--- +- include: upgrade/upgrade_image.yml diff --git a/roles/etcd/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade_rpm.yml new file mode 100644 index 000000000..29603d2b6 --- /dev/null +++ b/roles/etcd/tasks/upgrade_rpm.yml @@ -0,0 +1,2 @@ +--- +- include: upgrade/upgrade_rpm.yml diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml index b67411f40..6705e1ac5 100644 --- a/roles/etcd_common/defaults/main.yml +++ b/roles/etcd_common/defaults/main.yml @@ -73,3 +73,6 @@ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_clien etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d" + +# etcd_peer needs to be set by a role caller +etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}" diff --git a/roles/etcd_upgrade/defaults/main.yml b/roles/etcd_upgrade/defaults/main.yml deleted file mode 100644 index 61bbba225..000000000 --- 
a/roles/etcd_upgrade/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -r_etcd_upgrade_action: upgrade -r_etcd_upgrade_mechanism: rpm diff --git a/roles/etcd_upgrade/meta/main.yml b/roles/etcd_upgrade/meta/main.yml deleted file mode 100644 index afdb0267f..000000000 --- a/roles/etcd_upgrade/meta/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -galaxy_info: - author: Jan Chaloupka - description: - company: Red Hat, Inc. - license: Apache License, Version 2.0 - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - categories: - - cloud - - system -dependencies: -- role: etcd_common - r_etcd_common_embedded_etcd: "{{ r_etcd_upgrade_embedded_etcd }}" diff --git a/roles/etcd_upgrade/tasks/main.yml b/roles/etcd_upgrade/tasks/main.yml deleted file mode 100644 index 129c69d6b..000000000 --- a/roles/etcd_upgrade/tasks/main.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# INPUT r_etcd_upgrade_action -- name: Fail if invalid etcd_upgrade_action provided - fail: - msg: "etcd_upgrade role can only be called with 'upgrade'" - when: - - r_etcd_upgrade_action not in ['upgrade'] - -- name: Detecting Atomic Host Operating System - stat: - path: /run/ostree-booted - register: l_ostree_booted - -- include: "{{ r_etcd_upgrade_action }}.yml" diff --git a/roles/etcd_upgrade/tasks/upgrade.yml b/roles/etcd_upgrade/tasks/upgrade.yml deleted file mode 100644 index 420c9638e..000000000 --- a/roles/etcd_upgrade/tasks/upgrade.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# INPUT r_etcd_upgrade_version -# INPUT r_etcd_upgrade_mechanism -- name: Failt if r_etcd_upgrade_mechanism is not set during upgrade - fail: - msg: "r_etcd_upgrade_mechanism can be only set to 'rpm' or 'image'" - when: - - r_etcd_upgrade_mechanism not in ['rpm', 'image'] - -- name: "Upgrade {{ r_etcd_upgrade_mechanism }} based etcd" - include: upgrade_{{ r_etcd_upgrade_mechanism }}.yml diff --git a/roles/etcd_upgrade/vars/main.yml b/roles/etcd_upgrade/vars/main.yml deleted file mode 100644 index 5ed919d42..000000000 --- a/roles/etcd_upgrade/vars/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# EXPECTS etcd_peer -etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}" diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2 index 612d689c2..7be5d6743 100755 --- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 +++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2 @@ -62,16 +62,14 @@ spec: selector: matchLabels: k8s-app: nuage-master-config + updateStrategy: + type: RollingUpdate template: metadata: labels: k8s-app: nuage-master-config spec: hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - operator: Exists nodeSelector: install-monitor: "true" containers: diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2 index 02e9a1563..6a1267d94 100755 --- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 +++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2 @@ -23,7 +23,7 @@ data: # IP address and port number of master API server masterApiServer: {{ api_server_url }} # REST server URL - nuageMonRestServer: {{ nuage_mon_rest_server_url }} + nuageMonRestServer: https://{{ openshift_master_cluster_hostname }}:{{ nuage_mon_rest_server_port }} # Bridge name for the docker bridge 
dockerBridgeName: docker0 # Certificate for connecting to the openshift monitor REST api @@ -32,11 +32,6 @@ data: nuageMonClientKey: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonClient.key # CA certificate for verifying the master's rest server nuageMonServerCA: {{ nuage_node_config_dsets_mount_dir }}/vsp-openshift/nuageMonCA.crt - # Nuage vport mtu size - interfaceMTU: {{ nuage_vport_mtu }} - # Logging level for the plugin - # allowed options are: "dbg", "info", "warn", "err", "emer", "off" - logLevel: 3 # This will generate the required Nuage CNI yaml configuration cni_yaml_config: | @@ -72,10 +67,6 @@ spec: k8s-app: nuage-cni-ds spec: hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - operator: Exists containers: # This container installs Nuage CNI binaries # and CNI network config file on each node. @@ -157,10 +148,6 @@ spec: k8s-app: nuage-vrs-ds spec: hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - operator: Exists containers: # This container installs Nuage VRS running as a # container on each worker node diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml index d8bfca62a..fdf01b7c2 100644 --- a/roles/nuage_node/vars/main.yaml +++ b/roles/nuage_node/vars/main.yaml @@ -24,4 +24,4 @@ cni_bin_dir: "/opt/cni/bin/" nuage_plugin_crt_dir: /usr/share/vsp-openshift openshift_atomic_node_config_file: /etc/sysconfig/{{ openshift.common.service_type }}-node -nuage_atomic_docker_additional_mounts: "DOCKER_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d" +nuage_atomic_docker_additional_mounts: "NUAGE_ADDTL_BIND_MOUNTS=-v /var/usr/share/vsp-openshift:/var/usr/share/vsp-openshift -v /etc/default:/etc/default -v /var/run:/var/run -v /opt/cni/bin:/opt/cni/bin -v /etc/cni/net.d:/etc/cni/net.d" diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index f283261c4..de3d19858 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -62,7 +62,6 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin - `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'. - `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'. - `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'. -- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'. - `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver. - `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false. - `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all']. 
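Taken together, the Fluentd variables documented above would normally be set in the [OSEv3:vars] section of the Ansible inventory; a hypothetical sketch with illustrative values (hostnames and resource limits are examples, not the defaults listed above):

    [OSEv3:vars]
    # deploy the logging stack, including the Fluentd daemonset
    openshift_logging_install_logging=true
    openshift_logging_fluentd_nodeselector={'logging-infra-fluentd': 'true'}
    openshift_logging_fluentd_cpu_limit=200m
    openshift_logging_fluentd_memory_limit=1Gi
    openshift_logging_fluentd_hosts=['node1.example.com','node2.example.com']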
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 6699e2062..db4262fed 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -74,7 +74,6 @@ openshift_logging_kibana_ops_ca: "" openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'} openshift_logging_fluentd_cpu_limit: 100m openshift_logging_fluentd_memory_limit: 512Mi -openshift_logging_fluentd_es_copy: false openshift_logging_fluentd_journal_source: "" openshift_logging_fluentd_journal_read_from_head: "" openshift_logging_fluentd_hosts: ['--all'] diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml index 30d3d854a..82326bdd1 100644 --- a/roles/openshift_logging_fluentd/defaults/main.yml +++ b/roles/openshift_logging_fluentd/defaults/main.yml @@ -50,8 +50,6 @@ openshift_logging_fluentd_aggregating_key_path: none openshift_logging_fluentd_aggregating_passphrase: none ### Deprecating in 3.6 -openshift_logging_fluentd_es_copy: false - # following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly #fluentd_config_contents: #fluentd_throttle_contents: diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 74b4d7db4..37960afd1 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -1,5 +1,8 @@ --- - fail: + msg: The ES_COPY feature is no longer supported. Please remove the variable from your inventory + when: openshift_logging_fluentd_es_copy is defined +- fail: msg: Only one Fluentd nodeselector key pair should be provided when: openshift_logging_fluentd_nodeselector.keys() | count > 1 diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2 index a4afb6618..1c0d1089f 100644 --- a/roles/openshift_logging_fluentd/templates/fluentd.j2 +++ b/roles/openshift_logging_fluentd/templates/fluentd.j2 @@ -94,8 +94,6 @@ spec: value: "{{ openshift_logging_fluentd_ops_client_key }}" - name: "OPS_CA" value: "{{ openshift_logging_fluentd_ops_ca }}" - - name: "ES_COPY" - value: "false" - name: "JOURNAL_SOURCE" value: "{{ openshift_logging_fluentd_journal_source | default('') }}" - name: "JOURNAL_READ_FROM_HEAD" diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index 8734e7443..fa7238849 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service {% if openshift_use_crio|default(false) %}Wants=cri-o.service{% endif %} [Service] -ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" +ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro 
--volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" ExecStop= SyslogIdentifier={{ openshift.common.service_type }}-node-dep diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index 4ab10b95f..310d8b29d 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -34,6 +34,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node \ -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch \ -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni \ -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log \ + {% if openshift_use_nuage | default(false) -%} $NUAGE_ADDTL_BIND_MOUNTS {% endif -%} \ -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \ {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ {{ openshift.node.node_image }}:${IMAGE_VERSION} diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service index 4c47f8c0d..aae35719c 100644 --- a/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.dep.service @@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service [Service] -ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" +ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro --volume=/etc/containers/registries:/etc/containers/registries:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" ExecStop= SyslogIdentifier={{ openshift.common.service_type }}-node-dep diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml index 9ebb0d5ec..7b705c2d4 100644 --- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml @@ -85,8 +85,6 @@ objects: volumeMounts: - name: db mountPath: /var/lib/heketi - - name: topology - mountPath: ${TOPOLOGY_PATH} - name: config mountPath: /etc/heketi readinessProbe: @@ -103,9 +101,6 @@ objects: port: 8080 volumes: - name: db - - name: topology - secret: - secretName: heketi-${CLUSTER_NAME}-topology-secret - name: config secret: secretName: heketi-${CLUSTER_NAME}-config-secret @@ -138,6 +133,3 @@ parameters: displayName: GlusterFS cluster name description: A unique name to identify this heketi service, useful for running multiple heketi instances value: glusterfs -- name: TOPOLOGY_PATH - displayName: 
heketi topology file location - required: True diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index bc0dde17d..3f6dab78b 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -51,8 +51,8 @@ kind: pod state: list selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" - register: heketi_pod - until: "heketi_pod.results.results[0]['items'] | count == 0" + register: deploy_heketi_pod + until: "deploy_heketi_pod.results.results[0]['items'] | count == 0" delay: 10 retries: "{{ (glusterfs_timeout | int / 10) | int }}" when: glusterfs_heketi_wipe @@ -103,7 +103,7 @@ state: list kind: pod selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" - register: heketi_pod + register: deploy_heketi_pod when: glusterfs_heketi_is_native - name: Check if need to deploy deploy-heketi @@ -111,9 +111,9 @@ glusterfs_heketi_deploy_is_missing: False when: - "glusterfs_heketi_is_native" - - "heketi_pod.results.results[0]['items'] | count > 0" + - "deploy_heketi_pod.results.results[0]['items'] | count > 0" # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True - - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" - name: Check for existing heketi pod oc_obj: @@ -147,6 +147,21 @@ when: - glusterfs_heketi_is_native +- name: Get heketi admin secret + oc_secret: + state: list + namespace: "{{ glusterfs_namespace }}" + name: "heketi-{{ glusterfs_name }}-admin-secret" + decode: True + register: glusterfs_heketi_admin_secret + +- name: Set heketi admin key + set_fact: + glusterfs_heketi_admin_key: "{{ glusterfs_heketi_admin_secret.results.decoded.key }}" + when: + - glusterfs_is_native + - glusterfs_heketi_admin_secret.results.results[0] + - name: Generate heketi admin key set_fact: glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}" @@ -190,14 +205,37 @@ - glusterfs_heketi_deploy_is_missing - glusterfs_heketi_is_missing +- name: Wait for deploy-heketi pod + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" + register: deploy_heketi_pod + until: + - "deploy_heketi_pod.results.results[0]['items'] | count > 0" + # Pod's 'Ready' status must be True + - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" + delay: 10 + retries: "{{ (glusterfs_timeout | int / 10) | int }}" + when: + - glusterfs_heketi_is_native + - not glusterfs_heketi_deploy_is_missing + - glusterfs_heketi_is_missing + - name: Set heketi-cli command set_fact: - glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if 
glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}" + glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}" - name: Verify heketi service command: "{{ glusterfs_heketi_client }} cluster list" changed_when: False +- name: Place heketi topology on heketi Pod + shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json" + when: + - glusterfs_heketi_is_native + - name: Load heketi topology command: "{{ glusterfs_heketi_client }} topology load --json={{ mktemp.stdout }}/topology.json 2>&1" register: topology_load diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml index 3ba1eb2d2..73396c9af 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml @@ -6,16 +6,6 @@ with_items: - "deploy-heketi-template.yml" -- name: Create heketi topology secret - oc_secret: - namespace: "{{ glusterfs_namespace }}" - state: present - name: "heketi-{{ glusterfs_name }}-topology-secret" - force: True - files: - - name: topology.json - path: "{{ mktemp.stdout }}/topology.json" - - name: Create deploy-heketi template oc_obj: namespace: "{{ glusterfs_namespace }}" @@ -39,18 +29,7 @@ HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}" HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}" CLUSTER_NAME: "{{ glusterfs_name }}" - TOPOLOGY_PATH: "{{ mktemp.stdout }}" -- name: Wait for deploy-heketi pod - oc_obj: - namespace: "{{ glusterfs_namespace }}" - kind: pod - state: list - selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod" - register: heketi_pod - until: - - "heketi_pod.results.results[0]['items'] | count > 0" - # Pod's 'Ready' status must be True - - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" - delay: 10 - retries: "{{ (glusterfs_timeout | int / 10) | int }}" +- name: Set heketi Deployed fact + set_fact: + glusterfs_heketi_deploy_is_missing: False diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index afc04a537..54a6dd7c3 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -4,7 +4,7 @@ register: setup_storage - name: Copy heketi-storage list - shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ 
heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json" + shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json" # This is used in the subsequent task - name: Copy the admin client config diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml index a2a579e9d..b727eb74d 100644 --- a/roles/openshift_version/tasks/set_version_containerized.yml +++ b/roles/openshift_version/tasks/set_version_containerized.yml @@ -1,6 +1,6 @@ --- - set_fact: - l_use_crio: "{{ openshift_use_crio | default(false) }}" + l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}" - name: Set containerized version to configure if openshift_image_tag specified set_fact: @@ -22,7 +22,9 @@ command: > docker run --rm {{ openshift.common.cli_image }}:latest version register: cli_image_version - when: openshift_version is not defined + when: + - openshift_version is not defined + - not l_use_crio_only # Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a) - set_fact: @@ -31,6 +33,7 @@ - openshift_version is not defined - openshift.common.deployment_type == 'origin' - cli_image_version.stdout_lines[0].split('-') | length > 1 + - not l_use_crio_only - set_fact: openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}" @@ -45,14 +48,14 @@ when: - openshift_version is defined - openshift_version.split('.') | length == 2 - - not l_use_crio + - not l_use_crio_only - set_fact: openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}" when: - openshift_version is defined - openshift_version.split('.') | length == 2 - - not l_use_crio + - not l_use_crio_only # TODO: figure out a way to check for the openshift_version when using CRI-O. # We should do that using the images in the ostree storage so we don't have |